diff --git a/AGENTS.md b/AGENTS.md index 3874821..b0199ea 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -212,11 +212,43 @@ Important: - `batch` is the canonical high-level remote background execution surface - `yolo` is an accepted alias for `batch` - `team` is the phase-1 high-level multi-agent authoring surface +- `sandbox` is the contract-first compute bridge surface for reusable + environments and snapshot-based restore flows +- `pool` is the contract-first compute control-plane surface for warm capacity + and future leasing over sandbox specs - current phase-1 `team` limitations: - `depends_on` is not supported yet - `sequential` preserves ordering only; task outputs are not threaded between agents +- current phase-1 `sandbox` limitation: + - bridge routes are mock-backed and persisted by `void-control` + - the live `VoidBoxRuntimeClient` still reports sandbox lifecycle calls as + unsupported until the `void-box` daemon exposes matching routes +- current phase-1 `pool` limitation: + - pool routes are mock-backed and persisted by `void-control` + - they express desired warm capacity only; they do not yet drive live daemon + prewarm behavior - use `voidctl execution ...` for terminal operator workflows; use the bridge HTTP API or UI when you need direct API-driven inspection or browser workflows +- non-interactive compute commands: + - `voidctl sandbox create [ | --stdin]` + - `voidctl sandbox list` + - `voidctl sandbox get ` + - `voidctl sandbox exec [ | --stdin]` + - `voidctl sandbox stop ` + - `voidctl sandbox delete ` + - `voidctl snapshot create [ | --stdin]` + - `voidctl snapshot list` + - `voidctl snapshot get ` + - `voidctl snapshot replicate [ | --stdin]` + - `voidctl snapshot delete ` + - `voidctl pool create [ | --stdin]` + - `voidctl pool get ` + - `voidctl pool scale [ | --stdin]` +- interactive compute commands: + - `/sandbox create ` + - `/sandbox list` + - `/snapshot replicate ` + - `/pool scale ` - quote URLs that contain `?` when using `curl` from 
`zsh` - template-first bridge endpoints: - `GET /v1/templates` @@ -235,6 +267,23 @@ Important: - `POST /v1/teams/dry-run` - `POST /v1/teams/run` - `GET /v1/team-runs/{id}` +- sandbox bridge endpoints: + - `POST /v1/sandboxes` + - `GET /v1/sandboxes` + - `GET /v1/sandboxes/{id}` + - `POST /v1/sandboxes/{id}/exec` + - `POST /v1/sandboxes/{id}/stop` + - `DELETE /v1/sandboxes/{id}` +- snapshot bridge endpoints: + - `POST /v1/snapshots` + - `GET /v1/snapshots` + - `GET /v1/snapshots/{id}` + - `POST /v1/snapshots/{id}/replicate` + - `DELETE /v1/snapshots/{id}` +- pool bridge endpoints: + - `POST /v1/pools` + - `GET /v1/pools/{id}` + - `POST /v1/pools/{id}/scale` ## Runtime compatibility commands diff --git a/README.md b/README.md index 27ad9c0..da7f3b8 100644 --- a/README.md +++ b/README.md @@ -374,6 +374,133 @@ Interactive console: /team run examples/team/rust_article_team.yaml ``` +### Sandbox + +`sandbox` is the new compute-oriented bridge surface for contract-first work on +reusable environments, snapshots, and prewarm flows. 
+ +Current limitation: +- these routes are currently bridge-managed and mock-backed +- the live `VoidBoxRuntimeClient` still returns unsupported for sandbox + lifecycle calls until the `void-box` daemon exposes matching routes + +HTTP: + +```bash +curl -sS -X POST http://127.0.0.1:43210/v1/sandboxes \ + -H 'Content-Type: text/yaml' \ + --data-binary @examples/compute/sandbox-python.yaml + +curl -sS http://127.0.0.1:43210/v1/sandboxes + +curl -sS -X POST http://127.0.0.1:43210/v1/sandboxes//exec \ + -H 'Content-Type: application/json' \ + -d '{ + "kind": "command", + "command": ["python3", "-V"] + }' + +curl -sS -X POST http://127.0.0.1:43210/v1/sandboxes//stop \ + -H 'Content-Type: application/json' \ + -d '{}' + +curl -sS -X DELETE http://127.0.0.1:43210/v1/sandboxes/ +``` + +SDKs: + +```python +from void_control import VoidControlClient + +async with VoidControlClient(base_url="http://127.0.0.1:43210") as client: + sandbox = await client.sandboxes.create( + { + "api_version": "v1", + "kind": "sandbox", + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + } + ) +``` + +```js +import { VoidControlClient } from "./sdks/node/src/index.js"; + +const client = new VoidControlClient({ baseUrl: "http://127.0.0.1:43210" }); +const sandbox = await client.sandboxes.create({ + api_version: "v1", + kind: "sandbox", + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } +}); +``` + +```go +client := voidcontrol.NewClient("http://127.0.0.1:43210") +sandbox, err := client.Sandboxes.Create(map[string]any{ + "api_version": "v1", + "kind": "sandbox", + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, +}) +``` + +### Snapshot + +`snapshot` is the bridge-managed resource for checkpoint creation metadata and +distribution policy. In phase 1, replication updates the persisted control-plane +record only; it does not copy artifacts across live `void-box` nodes yet. 
+ +HTTP: + +```bash +curl -sS -X POST http://127.0.0.1:43210/v1/snapshots \ + -H 'Content-Type: text/yaml' \ + --data-binary @examples/compute/snapshot-from-sandbox.yaml + +curl -sS http://127.0.0.1:43210/v1/snapshots + +curl -sS -X POST http://127.0.0.1:43210/v1/snapshots//replicate \ + -H 'Content-Type: text/yaml' \ + --data-binary @examples/compute/snapshot-replicate.yaml + +curl -sS -X DELETE http://127.0.0.1:43210/v1/snapshots/ +``` + +### Pool + +`pool` is a `void-control` control-plane abstraction over reusable sandbox +shapes. It is where prewarm targets, warm capacity, and future lease policy +belong. + +Current limitation: +- pool routes are bridge-managed and mock-backed +- they define desired warm capacity, but they do not yet drive a live + `void-box` daemon fleet + +HTTP: + +```bash +curl -sS -X POST http://127.0.0.1:43210/v1/pools \ + -H 'Content-Type: text/yaml' \ + --data-binary @examples/compute/pool-python.yaml + +curl -sS http://127.0.0.1:43210/v1/pools/ + +curl -sS -X POST http://127.0.0.1:43210/v1/pools//scale \ + -H 'Content-Type: text/yaml' \ + --data-binary @examples/compute/pool-scale.yaml +``` + ### 7) Run the supervision example Use the checked-in supervision example to exercise the flat @@ -490,6 +617,48 @@ The interactive `voidctl` console exposes the same path: /template execute warm-agent-basic template-inputs.json ``` +The contract-first compute surface is available from the same CLI. These routes +are bridge-managed today, so they are good for contract work and mocks even +before live `void-box` daemon support lands. 
+ +Create and inspect reusable sandboxes: + +```bash +voidctl sandbox create sandbox.json +cat sandbox.json | voidctl sandbox create --stdin +voidctl sandbox list +voidctl sandbox get +voidctl sandbox stop +voidctl sandbox delete +``` + +Create and replicate snapshots: + +```bash +voidctl snapshot create snapshot.json +cat replicate.json | voidctl snapshot replicate --stdin +voidctl snapshot list +voidctl snapshot get +voidctl snapshot delete +``` + +Manage pool warm-capacity targets in the control plane: + +```bash +voidctl pool create pool.json +voidctl pool get +cat scale.json | voidctl pool scale --stdin +``` + +The interactive console exposes the same compute routes: + +```text +/sandbox create sandbox.json +/sandbox list +/snapshot replicate replicate.json +/pool scale scale.json +``` + Example execution: ```text diff --git a/docs/superpowers/plans/2026-04-21-compute-sandbox-api-phase1-implementation.md b/docs/superpowers/plans/2026-04-21-compute-sandbox-api-phase1-implementation.md new file mode 100644 index 0000000..ff70bb3 --- /dev/null +++ b/docs/superpowers/plans/2026-04-21-compute-sandbox-api-phase1-implementation.md @@ -0,0 +1,382 @@ +# Compute Sandbox API Phase 1 Implementation Plan + +## Date + +2026-04-21 + +## Scope + +This plan covers the first credible compute-API slice after the template, +batch/yolo, and team authoring work. + +It does **not** pretend that `void-control` can ship a full BoxLite/E2B-style +compute API by itself today. The current `void-box` daemon still exposes a +run-centric surface, not first-class sandbox, exec, and snapshot-management +routes. + +So this plan is deliberately split into: + +1. control-plane contract work in `void-control` +2. prerequisite daemon/runtime work in `void-box` +3. 
bridge and SDK wiring only after those runtime primitives exist + +## Current Boundary + +`void-control` currently has: + +- `src/bridge.rs` + - execution/template/batch/team bridge routes +- `src/runtime/mod.rs` + - `ExecutionRuntime` with run-only lifecycle methods +- `src/runtime/void_box.rs` + - `VoidBoxRuntimeClient` over `/v1/runs` + +`void-box` currently has: + +- daemon routes for `/v1/runs` +- internal snapshot store support +- run creation support for snapshot restore +- no public daemon routes yet for: + - sandbox create/get/list/remove + - exec against an existing sandbox + - snapshot create/list/get/delete/replicate + +## Goal + +Define and stage a compute-oriented API around: + +- `SandboxSpec` +- `SnapshotSpec` +- `SandboxPoolSpec` + +without violating the `void-control` / `void-box` boundary. + +## File Map + +### `void-control` + +- `docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md` + - design source of truth +- `docs/superpowers/plans/2026-04-21-compute-sandbox-api-phase1-implementation.md` + - this implementation plan +- future Rust files after prerequisite runtime support exists: + - `src/sandbox/mod.rs` + - `src/sandbox/schema.rs` + - `src/sandbox/compile.rs` + - `src/bridge.rs` + - `src/bin/voidctl.rs` + - `tests/sandbox_api.rs` + - `tests/voidctl_execution_cli.rs` + - `sdks/python/src/void_control/...` + - `sdks/node/src/...` + - `sdks/go/...` + +### `void-box` + +- `src/daemon.rs` +- `src/snapshot_store.rs` +- runtime/backend modules for sandbox lifecycle and exec + +## Phase Split + +## Chunk 1: Freeze The Control-Plane Contract + +Purpose: +- make the compute object model explicit and truthful in docs before code + +Files: +- `docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md` +- `README.md` +- `AGENTS.md` + +Steps: +- [ ] **Step 1: tighten the draft around current runtime reality** + - note that `void-control` cannot yet expose a real sandbox API because the + `void-box` daemon remains run-centric + - define 
canonical nouns: + - `SandboxSpec` + - `SnapshotSpec` + - `SandboxPoolSpec` + +- [ ] **Step 2: define the phase-1 public API shape** + - document the intended control-plane routes: + - `POST /v1/sandboxes` + - `GET /v1/sandboxes` + - `GET /v1/sandboxes/{id}` + - `POST /v1/sandboxes/{id}/exec` + - `POST /v1/sandboxes/{id}/stop` + - `DELETE /v1/sandboxes/{id}` + - document that these stay unimplemented in `void-control` until the runtime + daemon contract exists + +- [ ] **Step 3: define snapshot and pool routes** + - snapshots: + - `POST /v1/snapshots` + - `GET /v1/snapshots` + - `GET /v1/snapshots/{id}` + - `POST /v1/snapshots/{id}/replicate` + - `DELETE /v1/snapshots/{id}` + - pools: + - `POST /v1/pools` + - `GET /v1/pools/{id}` + - `POST /v1/pools/{id}/scale` + +- [ ] **Step 4: document the ComputeSDK compatibility mapping** + - `compute.sandbox.create` -> `/v1/sandboxes` + - `compute.sandbox.runCommand` -> `/v1/sandboxes/{id}/exec` + - `compute.sandbox.runCode` -> `/v1/sandboxes/{id}/exec` + - `compute.sandbox.destroy` -> `DELETE /v1/sandboxes/{id}` + +- [ ] **Step 5: commit** + +```bash +git add docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md README.md AGENTS.md +git commit -m "docs: freeze compute sandbox contract" +``` + +## Chunk 2: Define The `void-box` Prerequisite Work + +Purpose: +- avoid implementing a fake `void-control` API against missing runtime routes + +Files: +- external dependency work in `/home/diego/github/agent-infra/void-box` +- optional follow-up note in `docs/` +- `docs/superpowers/specs/2026-04-27-void-box-compute-daemon-prerequisites.md` + +Steps: +- [ ] **Step 1: write the daemon prerequisite list** + - `void-box` daemon must expose: + - sandbox create/get/list/remove + - exec against an existing sandbox + - snapshot create/get/list/delete + - snapshot replication or a lower-level primitive that `void-control` + can orchestrate + +- [ ] **Step 2: specify the minimum request/response contracts** + - sandbox create returns 
sandbox id, state, node, restored snapshot metadata + - exec returns command/code result plus timing and exit status + - snapshot create returns snapshot id plus source sandbox metadata + - snapshot replicate returns target-node state per node + +- [ ] **Step 3: define what can remain internal** + - raw microVM internals + - node-local snapshot storage layout + - backend-specific restore mechanics + +- [ ] **Step 4: document the cross-repo dependency** + - `void-control` implementation starts only after those daemon routes exist + +## Chunk 3: Add `SandboxSpec` Types In `void-control` + +Purpose: +- once daemon support exists, add typed control-plane models with no bridge + routes yet + +Files: +- `src/sandbox/mod.rs` +- `src/sandbox/schema.rs` +- `src/lib.rs` +- `tests/sandbox_api.rs` + +Steps: +- [ ] **Step 1: write failing parser tests for `SandboxSpec`** + - parse YAML and JSON + - reject missing runtime section + - reject invalid lifecycle values + - reject invalid snapshot distribution modes + +- [ ] **Step 2: implement schema types** + - `SandboxSpec` + - `SnapshotSpec` + - `SandboxPoolSpec` + - use public doc comments per `rustdoc` + +- [ ] **Step 3: export the new module** + - wire `src/lib.rs` + +- [ ] **Step 4: run targeted tests** + +```bash +cargo test --features serde --test sandbox_api -- --nocapture +``` + +- [ ] **Step 5: commit** + +```bash +git add src/sandbox src/lib.rs tests/sandbox_api.rs +git commit -m "sandbox: add compute api schemas" +``` + +## Chunk 4: Add Runtime Adapter Traits + +Purpose: +- avoid shoving sandbox lifecycle into the run-only `ExecutionRuntime` trait + +Files: +- `src/runtime/mod.rs` +- `src/runtime/void_box.rs` +- `src/runtime/mock.rs` +- new tests near runtime modules + +Steps: +- [ ] **Step 1: define a dedicated sandbox runtime trait** + - `create_sandbox` + - `inspect_sandbox` + - `list_sandboxes` + - `exec_sandbox` + - `stop_sandbox` + - `delete_sandbox` + - later snapshot operations through either the same trait or 
a sibling trait + +- [ ] **Step 2: keep `ExecutionRuntime` unchanged** + - do not overload orchestration runtime calls with compute semantics + +- [ ] **Step 3: implement the trait for mock runtime first** + - establish testable bridge behavior without live daemon dependency + +- [ ] **Step 4: implement the trait for `VoidBoxRuntimeClient`** + - only after the daemon contract exists + +- [ ] **Step 5: run focused runtime tests** + +```bash +cargo test --features serde runtime:: -- --nocapture +``` + +- [ ] **Step 6: commit** + +```bash +git add src/runtime +git commit -m "runtime: add sandbox lifecycle adapter" +``` + +## Chunk 5: Add Bridge Routes + +Purpose: +- expose the compute API through `void-control` + +Files: +- `src/bridge.rs` +- `tests/sandbox_api.rs` + +Steps: +- [ ] **Step 1: write failing bridge tests** + - `POST /v1/sandboxes` + - `GET /v1/sandboxes` + - `GET /v1/sandboxes/{id}` + - `POST /v1/sandboxes/{id}/exec` + - `POST /v1/sandboxes/{id}/stop` + - `DELETE /v1/sandboxes/{id}` + +- [ ] **Step 2: implement bridge request parsing and responses** + - mirror current execution/template route style + - return stable JSON resource views + +- [ ] **Step 3: add snapshot routes** + - list/get/create/delete/replicate + +- [ ] **Step 4: add pool routes** + - create, inspect, scale + +- [ ] **Step 5: run bridge tests** + +```bash +cargo test --features serde --test sandbox_api -- --nocapture +``` + +- [ ] **Step 6: commit** + +```bash +git add src/bridge.rs tests/sandbox_api.rs +git commit -m "bridge: add compute sandbox routes" +``` + +## Chunk 6: Add CLI And SDK Surfaces + +Purpose: +- make the new compute API usable from operators and clients + +Files: +- `src/bin/voidctl.rs` +- `tests/voidctl_execution_cli.rs` +- `sdks/python/...` +- `sdks/node/...` +- `sdks/go/...` +- `README.md` +- `AGENTS.md` + +Steps: +- [ ] **Step 1: add `voidctl sandbox ...`** + - `create` + - `list` + - `get` + - `exec` + - `stop` + - `delete` + +- [ ] **Step 2: add `voidctl snapshot 
...`** + - `create` + - `list` + - `get` + - `replicate` + - `delete` + +- [ ] **Step 3: add SDK clients** + - Python async-first client + - Node client + - Go client + +- [ ] **Step 4: add examples** + - sandbox create + exec + - snapshot create + restore + - pool prewarm example once pool routes exist + +- [ ] **Step 5: run CLI and SDK tests** + +```bash +cargo test --features serde --test voidctl_execution_cli -- --nocapture +python3 -m unittest sdks.python.tests.test_client +node --test sdks/node/test/client.test.mjs +cd sdks/go && GOCACHE=/tmp/go-build go test ./... +``` + +- [ ] **Step 6: commit** + +```bash +git add src/bin/voidctl.rs tests/voidctl_execution_cli.rs sdks README.md AGENTS.md +git commit -m "sdk: add compute sandbox clients" +``` + +## Chunk 7: Final Verification + +- [ ] **Step 1: run formatting** + +```bash +cargo fmt --all -- --check +``` + +- [ ] **Step 2: run linting** + +```bash +cargo clippy --all-targets --all-features -- -D warnings +``` + +- [ ] **Step 3: run tests** + +```bash +cargo test +cargo test --features serde +``` + +- [ ] **Step 4: run docs** + +```bash +RUSTDOCFLAGS="-D warnings" cargo doc --no-deps --all-features +``` + +## Recommendation + +Do **not** start Chunk 3 until Chunk 2 is satisfied or intentionally mocked +behind a clearly documented adapter seam. Otherwise `void-control` will grow a +fake compute surface that no real runtime can satisfy. diff --git a/docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md b/docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md new file mode 100644 index 0000000..fc23846 --- /dev/null +++ b/docs/superpowers/specs/2026-04-21-compute-sandbox-api-draft.md @@ -0,0 +1,491 @@ +# Compute Sandbox API Draft + +## Date + +2026-04-21 + +## Status + +Draft + +## Problem + +`void-control` is currently strong as an orchestration and execution-tracking +API, but weak as a direct compute API. 
+ +That leaves a product gap against systems like: + +- E2B +- BoxLite / BoxRun +- ComputeSDK providers + +Today, `void-control` can: + +- create executions from raw specs +- compile and execute checked-in templates +- inspect execution progress, results, and events + +Today, it cannot expose a first-class compute surface such as: + +- create a reusable sandbox +- execute a command in that sandbox +- execute code in that sandbox +- manage snapshots explicitly +- replicate snapshots to multiple nodes +- maintain prewarmed sandbox pools + +## Current Code Reality + +The current `void-control` runtime boundary is still execution-centric. + +- `ExecutionRuntime` only exposes one-shot run lifecycle methods: + `start_run`, `inspect_run`, and `take_structured_output` +- `VoidBoxRuntimeClient` talks to the `void-box` daemon as a run client over + `/v1/runs` +- the bridge currently wraps: + - executions + - templates + - batch/yolo + - teams + +The current `void-box` daemon surface does not yet expose a general sandbox +management API equivalent to E2B or BoxRun. + +What exists today: + +- `/v1/runs` +- `/v1/runs/{id}/...` +- `/v1/sessions/{id}/messages` +- run submission already accepts snapshot restore input via run creation + +What is not exposed today as daemon routes: + +- `POST /v1/sandboxes` +- `POST /v1/sandboxes/{id}/exec` +- `POST /v1/snapshots` +- `GET /v1/snapshots` +- `POST /v1/snapshots/{id}/replicate` + +The concrete daemon contract expected by this branch is captured in: + +- `docs/superpowers/specs/2026-04-27-void-box-compute-daemon-prerequisites.md` + +So this design must be implemented in phases: + +1. align the control-plane object model and API shape in `void-control` +2. add the missing daemon/runtime primitives in `void-box` +3. wire the bridge and SDKs only after the runtime support exists + +## Design Principle + +Use a dedicated compute-oriented `SandboxSpec` as the base primitive. 
+ +This must stay separate from the orchestration-oriented `ExecutionSpec`. + +The distinction is: + +- `SandboxSpec` + - defines the runtime environment and lifecycle +- `ExecutionSpec` + - defines orchestration, evaluation, variation, and reduction +- templates + - compile user-facing inputs into either of the above + +This separation is necessary to support both: + +- direct compute APIs similar to BoxRun / E2B +- higher-level orchestration and swarm APIs + +## Architectural Boundary + +### `void-box` + +Owns: + +- sandbox lifecycle +- runtime creation +- command/code execution +- filesystem operations +- snapshot create/restore/delete primitives +- node-local snapshot storage and runtime restore +- low-level resource and isolation behavior + +### `void-control` + +Owns: + +- `SandboxSpec` +- sandbox metadata and persistence +- pool and lease management +- prewarm policy +- snapshot inventory and replication planning +- node placement policy +- ComputeSDK-style or BoxRun-style management APIs +- orchestration over sandbox-backed runs when needed + +## Primary Objects + +### `SandboxSpec` + +Defines one reusable or ephemeral compute environment. + +Suggested shape: + +```yaml +api_version: v1 +kind: sandbox + +metadata: + name: python-benchmark-box + labels: + workload: benchmark + language: python + +runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 + network: true + env: + FOO: bar + mounts: + - host: /data/fixtures + guest: /workspace/fixtures + mode: ro + ports: + - 3000 + - 8080 + +snapshot: + restore_from: snapshot-transform-v1 + +lifecycle: + auto_remove: false + detach: true + idle_timeout_secs: 900 + prewarm: true + +identity: + reusable: true + pool: benchmark-python +``` + +### `SnapshotSpec` + +Defines snapshot metadata plus replication intent. 
+ +Suggested shape: + +```yaml +api_version: v1 +kind: snapshot + +metadata: + name: snapshot-transform-v1 + labels: + workload: benchmark + +source: + sandbox_id: sbx-123 + +distribution: + mode: cached + targets: + - node-a + - node-b + - node-c +``` + +### `SandboxPoolSpec` + +Defines prewarmed capacity for a common sandbox shape. + +Suggested shape: + +```yaml +api_version: v1 +kind: sandbox_pool + +metadata: + name: benchmark-python-pool + +sandbox_spec: + runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 + snapshot: + restore_from: snapshot-transform-v1 + lifecycle: + auto_remove: false + detach: true + idle_timeout_secs: 900 + prewarm: true + identity: + reusable: true + pool: benchmark-python + +capacity: + warm: 5 + max: 20 +``` + +## Why This Enables Prewarm + +Prewarm is not a separate primitive. It is pool management over `SandboxSpec`. + +Flow: + +1. define a sandbox shape +2. optionally restore from a snapshot +3. keep `N` instances already started +4. lease one for execution +5. return, recycle, or destroy it according to policy + +This is a better fit than forcing prewarm to live inside orchestration specs. + +## Why This Enables ComputeSDK Compatibility + +ComputeSDK-style flows are sandbox-action flows. + +They map naturally to this model: + +- `compute.sandbox.create` + - create one sandbox from `SandboxSpec` +- `compute.sandbox.runCommand` + - execute a command inside an existing sandbox +- `compute.sandbox.runCode` + - execute code inside an existing sandbox +- filesystem actions + - read/write/list/remove inside an existing sandbox +- `compute.sandbox.destroy` + - stop and remove one sandbox + +That compatibility layer should be built on top of the compute API, not on top +of orchestration templates. + +## Snapshot Model + +Snapshots must be first-class. 
+ +Required operations: + +- create snapshot from a sandbox +- inspect snapshot metadata +- list snapshots +- delete snapshots +- restore a sandbox from a snapshot +- replicate a snapshot to multiple nodes + +Important distinction: + +- `restore_from` + - boot a sandbox from an existing snapshot +- `replicate` + - distribute snapshot data to target nodes +- `prewarm` + - keep already-restored sandboxes warm and ready + +These are related but different lifecycle operations. + +## Multi-Node Snapshot Replication + +Snapshot replication should be modeled as control-plane policy plus runtime +execution. + +Recommended responsibilities: + +- `void-control` + - decides target nodes + - tracks replication state + - exposes replication APIs and status +- `void-box` + - performs the actual node-local snapshot import/export and restore + +Suggested states: + +- `Pending` +- `Copying` +- `Ready` +- `Failed` + +Suggested replication modes: + +- `copy` + - eagerly copy to all target nodes +- `cached` + - copy on demand, then retain locally +- `lazy` + - register targets but do not pre-copy + +## Proposed HTTP API + +### Sandbox lifecycle + +- `POST /v1/sandboxes` +- `GET /v1/sandboxes` +- `GET /v1/sandboxes/{id}` +- `POST /v1/sandboxes/{id}/exec` +- `POST /v1/sandboxes/{id}/stop` +- `DELETE /v1/sandboxes/{id}` + +Create request: + +```json +{ + "spec": { + "metadata": { "name": "python-benchmark-box" }, + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + "network": true + }, + "snapshot": { + "restore_from": "snapshot-transform-v1" + }, + "lifecycle": { + "auto_remove": false, + "detach": true, + "idle_timeout_secs": 900, + "prewarm": false + }, + "identity": { + "reusable": true + } + } +} +``` + +Exec request: + +```json +{ + "kind": "command", + "command": ["python3", "-c", "print('hello')"] +} +``` + +Later extension: + +```json +{ + "kind": "code", + "runtime": "python", + "code": "print('hello')" +} +``` + +### Snapshot lifecycle + +- `POST 
/v1/snapshots` +- `GET /v1/snapshots` +- `GET /v1/snapshots/{id}` +- `POST /v1/snapshots/{id}/replicate` +- `DELETE /v1/snapshots/{id}` + +Create request: + +```json +{ + "source_sandbox_id": "sbx-123", + "name": "snapshot-transform-v1" +} +``` + +Replicate request: + +```json +{ + "targets": ["node-a", "node-b", "node-c"], + "mode": "copy" +} +``` + +### Pool lifecycle + +- `POST /v1/pools` +- `GET /v1/pools` +- `GET /v1/pools/{id}` +- `POST /v1/pools/{id}/scale` +- `POST /v1/pools/{id}/lease` +- `POST /v1/pools/{id}/release` + +## Suggested Response Shape + +Sandbox response: + +```json +{ + "sandbox_id": "sbx-123", + "status": "Running", + "node_id": "node-a", + "spec": { "...": "..." } +} +``` + +Snapshot response: + +```json +{ + "snapshot_id": "snapshot-transform-v1", + "status": "Ready", + "source_sandbox_id": "sbx-123", + "replication": { + "mode": "copy", + "targets": [ + { "node_id": "node-a", "status": "Ready" }, + { "node_id": "node-b", "status": "Ready" }, + { "node_id": "node-c", "status": "Copying" } + ] + } +} +``` + +Pool response: + +```json +{ + "pool_id": "pool-benchmark-python", + "warm": 5, + "leased": 2, + "available": 3, + "max": 20 +} +``` + +## Recommended Rollout + +### Phase 1 + +- define `SandboxSpec` +- add sandbox create/get/list/stop/delete +- add sandbox `exec` for commands +- add snapshot create/list/get/delete +- add snapshot restore on sandbox creation + +### Phase 2 + +- add snapshot replication status and control +- add pool create/get/scale +- add prewarm and lease/release + +### Phase 3 + +- add code helpers such as `runCode` +- add filesystem APIs +- add ComputeSDK-style compatibility routes + +## Recommendation + +The next implementation work should start with `SandboxSpec` and the sandbox +lifecycle routes. 
+ +That creates a base compute API that can later support: + +- prewarmed pools +- multi-node snapshot restore +- ComputeSDK compatibility +- BoxRun-style management APIs + +without overloading the current orchestration spec model. diff --git a/docs/superpowers/specs/2026-04-27-void-box-compute-daemon-prerequisites.md b/docs/superpowers/specs/2026-04-27-void-box-compute-daemon-prerequisites.md new file mode 100644 index 0000000..6b5ccfd --- /dev/null +++ b/docs/superpowers/specs/2026-04-27-void-box-compute-daemon-prerequisites.md @@ -0,0 +1,431 @@ +# `void-box` Compute Daemon Prerequisites + +## Date + +2026-04-27 + +## Status + +Draft + +## Purpose + +`feat/compute-sandbox-api` in `void-control` now defines a control-plane +contract for: + +- sandboxes +- snapshots +- pools +- compute-oriented CLI and SDK surfaces + +The control-plane side is intentionally ahead of the live runtime integration. +This document defines the `void-box` daemon changes required before +`VoidBoxRuntimeClient` can stop returning unsupported errors for the compute +surface. + +## Important Boundary + +### Must be implemented in `void-box` + +- sandbox lifecycle primitives +- exec inside an existing sandbox +- snapshot lifecycle primitives +- node-local snapshot restore support +- replication primitive or an equivalent lower-level distribution primitive + +### Must remain in `void-control` + +- pool definitions +- warm-capacity targets +- pool scaling policy +- lease/reuse policy +- global placement decisions +- orchestration over multiple sandboxes + +`pool` is not a `void-box` abstraction. 
+ +## Current `void-control` Assumption + +The compute branch currently assumes the daemon can eventually expose: + +- `POST /v1/sandboxes` +- `GET /v1/sandboxes` +- `GET /v1/sandboxes/{id}` +- `POST /v1/sandboxes/{id}/exec` +- `POST /v1/sandboxes/{id}/stop` +- `DELETE /v1/sandboxes/{id}` +- `POST /v1/snapshots` +- `GET /v1/snapshots` +- `GET /v1/snapshots/{id}` +- `POST /v1/snapshots/{id}/replicate` +- `DELETE /v1/snapshots/{id}` + +The current `VoidBoxRuntimeClient` still returns: + +- `sandbox api is not supported by the current void-box daemon` + +for sandbox lifecycle calls because those routes do not exist live yet. + +## Required Daemon Routes + +## 1. Sandbox Lifecycle + +### `POST /v1/sandboxes` + +Purpose: +- create one sandbox from a `SandboxSpec` + +Request body: + +```json +{ + "api_version": "v1", + "kind": "sandbox", + "metadata": { + "name": "python-benchmark-box" + }, + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + }, + "snapshot": { + "restore_from": "snapshot-transform-v1" + } +} +``` + +Minimum response: + +```json +{ + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-123", + "state": "running", + "restore_from_snapshot": "snapshot-transform-v1" + } +} +``` + +Required semantics: +- daemon assigns a stable `sandbox_id` +- if `snapshot.restore_from` is present, response must report the restored + snapshot id in `restore_from_snapshot` +- initial state should be `running` after successful creation + +### `GET /v1/sandboxes` + +Purpose: +- list sandboxes known to the daemon + +Minimum response: + +```json +{ + "kind": "sandbox_list", + "sandboxes": [ + { + "sandbox_id": "sbx-123", + "state": "running", + "restore_from_snapshot": "snapshot-transform-v1" + } + ] +} +``` + +### `GET /v1/sandboxes/{id}` + +Purpose: +- inspect one sandbox + +Minimum success response: + +```json +{ + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-123", + "state": "running", + "restore_from_snapshot": "snapshot-transform-v1" 
+ } +} +``` + +Minimum failure response: + +```json +{ + "message": "sandbox 'sbx-123' not found" +} +``` + +Recommended status: +- `404` when missing + +### `POST /v1/sandboxes/{id}/exec` + +Purpose: +- execute work inside an existing sandbox + +Minimum request forms: + +Command execution: + +```json +{ + "kind": "command", + "command": ["python3", "-V"] +} +``` + +Code execution: + +```json +{ + "kind": "code", + "runtime": "python", + "code": "print('hello')" +} +``` + +Minimum response: + +```json +{ + "kind": "sandbox_exec", + "result": { + "exit_code": 0, + "stdout": "python3 -V", + "stderr": "" + } +} +``` + +Required semantics: +- `kind=command` must execute the provided argv inside the sandbox +- `kind=code` must execute code using the requested runtime if supported +- missing sandbox should return `404` +- stopped sandbox should return a non-success error with a clear message + +### `POST /v1/sandboxes/{id}/stop` + +Purpose: +- stop a running sandbox without deleting its identity immediately + +Minimum success response: + +```json +{ + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-123", + "state": "stopped", + "restore_from_snapshot": "snapshot-transform-v1" + } +} +``` + +### `DELETE /v1/sandboxes/{id}` + +Purpose: +- delete a sandbox identity and its live runtime instance + +Minimum success response: + +```json +{ + "kind": "sandbox_deleted", + "sandbox_id": "sbx-123" +} +``` + +Recommended status: +- `404` when missing + +## 2. 
Snapshot Lifecycle + +### `POST /v1/snapshots` + +Purpose: +- create a snapshot from an existing sandbox + +Request body: + +```json +{ + "api_version": "v1", + "kind": "snapshot", + "metadata": { + "name": "snapshot-transform-v1" + }, + "source": { + "sandbox_id": "sbx-123" + }, + "distribution": { + "mode": "cached", + "targets": ["node-a"] + } +} +``` + +Minimum response: + +```json +{ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-123", + "source_sandbox_id": "sbx-123", + "distribution": { + "mode": "cached", + "targets": ["node-a"] + } + } +} +``` + +Required semantics: +- snapshot creation must fail clearly if the source sandbox does not exist +- the daemon may ignore `distribution` during creation if replication is a + separate step, but it must return a normalized snapshot record + +### `GET /v1/snapshots` + +Minimum response: + +```json +{ + "kind": "snapshot_list", + "snapshots": [ + { + "snapshot_id": "snap-123", + "source_sandbox_id": "sbx-123", + "distribution": { + "mode": "cached", + "targets": ["node-a"] + } + } + ] +} +``` + +### `GET /v1/snapshots/{id}` + +Minimum response: + +```json +{ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-123", + "source_sandbox_id": "sbx-123", + "distribution": { + "mode": "cached", + "targets": ["node-a"] + } + } +} +``` + +### `POST /v1/snapshots/{id}/replicate` + +Purpose: +- copy or stage a snapshot to multiple nodes + +Request body: + +```json +{ + "mode": "copy", + "targets": ["node-a", "node-b", "node-c"] +} +``` + +Minimum response: + +```json +{ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-123", + "source_sandbox_id": "sbx-123", + "distribution": { + "mode": "copy", + "targets": ["node-a", "node-b", "node-c"] + } + } +} +``` + +Required semantics: +- this route may be backed by a more primitive implementation internally +- if `void-box` prefers a lower-level replication operation, `void-control` + can adapt later, but the daemon still needs some public primitive 
that: + - accepts a snapshot id + - accepts target nodes + - returns resulting distribution state + +### `DELETE /v1/snapshots/{id}` + +Minimum success response: + +```json +{ + "kind": "snapshot_deleted", + "snapshot_id": "snap-123" +} +``` + +## Required Enum/State Compatibility + +For the current `void-control` branch, daemon responses should match these +simple states: + +- sandbox `state`: + - `running` + - `stopped` + +For snapshot distribution, current parser/test assumptions are: + +- `mode`: + - `cached` + - `copy` + +If `void-box` wants richer internal state, that is fine, but the public daemon +surface should preserve these minimum values or `void-control` will need an +adapter layer. + +## What Does Not Need To Be In Scope Yet + +The following are intentionally out of scope for the first daemon slice: + +- filesystem routes +- PTY / terminal routes +- port exposure routes +- global lease management +- pool lifecycle +- scheduler-aware placement +- cross-node pool balancing + +Those can land later after the basic sandbox and snapshot primitives are real. + +## Minimum Integration Goal + +`void-control` can replace the current unsupported `VoidBoxRuntimeClient` +compute paths once the daemon can satisfy this round trip: + +1. `POST /v1/sandboxes` +2. `GET /v1/sandboxes` +3. `GET /v1/sandboxes/{id}` +4. `POST /v1/sandboxes/{id}/exec` +5. `POST /v1/sandboxes/{id}/stop` +6. `DELETE /v1/sandboxes/{id}` +7. `POST /v1/snapshots` +8. `GET /v1/snapshots` +9. `GET /v1/snapshots/{id}` +10. `POST /v1/snapshots/{id}/replicate` +11. `DELETE /v1/snapshots/{id}` + +That is the minimum `void-box` daemon contract needed for the current +`feat/compute-sandbox-api` branch to become live instead of mock-backed. 
diff --git a/examples/README.md b/examples/README.md index 4ac8090..f72e92e 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,6 +3,7 @@ These examples are intentionally split across two layers: - `examples/*.yaml`: `void-control` execution specs +- `examples/compute/*.yaml`: bridge-managed compute resource payloads - `examples/runtime-templates/*.yaml`: runtime workflow templates launched by `void-box` - `examples/runtime-assets/`: helper scripts and data mounted into runtime templates @@ -31,6 +32,12 @@ Boundary: - plain runtime template used by the swarm example - `runtime-templates/transform_supervision_worker.yaml` - runtime template used by the supervision example +- `compute/sandbox-python.yaml` + - checked-in bridge payload for a reusable Python sandbox +- `compute/snapshot-from-sandbox.yaml` + - checked-in bridge payload for snapshot creation metadata +- `compute/pool-python.yaml` + - checked-in bridge payload for a warm-capacity pool definition ## Transform Swarm Examples diff --git a/examples/compute/pool-python.yaml b/examples/compute/pool-python.yaml new file mode 100644 index 0000000..9541c6f --- /dev/null +++ b/examples/compute/pool-python.yaml @@ -0,0 +1,20 @@ +api_version: v1 +kind: sandbox_pool +metadata: + name: benchmark-python-pool +sandbox_spec: + runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 + snapshot: + restore_from: snapshot-transform-v1 + lifecycle: + prewarm: true + idle_timeout_secs: 900 + identity: + reusable: true + pool: benchmark-python +capacity: + warm: 5 + max: 20 diff --git a/examples/compute/pool-scale.yaml b/examples/compute/pool-scale.yaml new file mode 100644 index 0000000..779dd57 --- /dev/null +++ b/examples/compute/pool-scale.yaml @@ -0,0 +1,2 @@ +warm: 8 +max: 24 diff --git a/examples/compute/sandbox-python.yaml b/examples/compute/sandbox-python.yaml new file mode 100644 index 0000000..0f46f4c --- /dev/null +++ b/examples/compute/sandbox-python.yaml @@ -0,0 +1,16 @@ +api_version: v1 +kind: 
sandbox +metadata: + name: python-benchmark-box +runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 +snapshot: + restore_from: snapshot-transform-v1 +lifecycle: + detach: true + idle_timeout_secs: 900 +identity: + reusable: true + pool: benchmark-python diff --git a/examples/compute/snapshot-from-sandbox.yaml b/examples/compute/snapshot-from-sandbox.yaml new file mode 100644 index 0000000..42a4a82 --- /dev/null +++ b/examples/compute/snapshot-from-sandbox.yaml @@ -0,0 +1,11 @@ +api_version: v1 +kind: snapshot +metadata: + name: snapshot-transform-v1 +source: + sandbox_id: sbx-example +distribution: + mode: cached + targets: + - node-a + - node-b diff --git a/examples/compute/snapshot-replicate.yaml b/examples/compute/snapshot-replicate.yaml new file mode 100644 index 0000000..b90ba27 --- /dev/null +++ b/examples/compute/snapshot-replicate.yaml @@ -0,0 +1,4 @@ +mode: copy +targets: + - node-a + - node-c diff --git a/sdks/go/README.md b/sdks/go/README.md index 3da9cf3..0250709 100644 --- a/sdks/go/README.md +++ b/sdks/go/README.md @@ -8,11 +8,15 @@ The first supported surface is: - executions get/wait - batch run/dry-run/get/wait - yolo run/dry-run/get/wait +- sandboxes create/get/list/exec/stop/delete +- snapshots create/get/list/replicate/delete +- pools create/get/scale Examples under `examples/` are bridge examples against `void-control`: - `template_execute` - `batch_run` +- `sandbox_create` `batch` is the canonical remote-background execution API. `yolo` is an alias for the same high-level surface. 
diff --git a/sdks/go/client.go b/sdks/go/client.go index 02b6f1c..2501952 100644 --- a/sdks/go/client.go +++ b/sdks/go/client.go @@ -17,6 +17,9 @@ type Client struct { BatchRuns *BatchRunsClient Yolo *BatchClient YoloRuns *BatchRunsClient + Sandboxes *SandboxesClient + Snapshots *SnapshotsClient + Pools *PoolsClient } func NewClient(baseURL string) *Client { @@ -30,6 +33,9 @@ func NewClient(baseURL string) *Client { client.BatchRuns = &BatchRunsClient{client: client, routeBase: "/v1/batch"} client.Yolo = &BatchClient{client: client, routeBase: "/v1/yolo"} client.YoloRuns = &BatchRunsClient{client: client, routeBase: "/v1/yolo"} + client.Sandboxes = &SandboxesClient{client: client} + client.Snapshots = &SnapshotsClient{client: client} + client.Pools = &PoolsClient{client: client} return client } @@ -54,6 +60,14 @@ func (client *Client) postJSON(path string, payload any, out any) error { return client.do(req, out) } +func (client *Client) deleteJSON(path string, out any) error { + req, err := http.NewRequest(http.MethodDelete, client.BaseURL+path, nil) + if err != nil { + return err + } + return client.do(req, out) +} + func (client *Client) do(req *http.Request, out any) error { response, err := client.HTTPClient.Do(req) if err != nil { diff --git a/sdks/go/client_test.go b/sdks/go/client_test.go index b80646d..230cb19 100644 --- a/sdks/go/client_test.go +++ b/sdks/go/client_test.go @@ -32,6 +32,15 @@ func TestClientExposesTemplateAndExecutionClients(t *testing.T) { if client.YoloRuns == nil { t.Fatalf("YoloRuns client should be initialized") } + if client.Sandboxes == nil { + t.Fatalf("Sandboxes client should be initialized") + } + if client.Snapshots == nil { + t.Fatalf("Snapshots client should be initialized") + } + if client.Pools == nil { + t.Fatalf("Pools client should be initialized") + } } func TestTemplateAndExecutionMethods(t *testing.T) { @@ -354,6 +363,436 @@ func TestBatchAndYoloMethods(t *testing.T) { } } +func TestComputeMethods(t *testing.T) { + 
responses := []map[string]any{ + { + "kind": "sandbox", + "sandbox": map[string]any{ + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": float64(2), + "memory_mb": float64(2048), + }, + }, + { + "kind": "sandbox_list", + "sandboxes": []map[string]any{ + { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": float64(2), + "memory_mb": float64(2048), + }, + }, + }, + { + "kind": "sandbox", + "sandbox": map[string]any{ + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": float64(2), + "memory_mb": float64(2048), + }, + }, + { + "kind": "sandbox_exec", + "result": map[string]any{ + "exit_code": float64(0), + "stdout": "hello\n", + "stderr": "", + }, + }, + { + "kind": "sandbox_deleted", + "sandbox_id": "sbx-1", + }, + { + "kind": "snapshot", + "snapshot": map[string]any{ + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": map[string]any{ + "mode": "cached", + "targets": []string{"node-a", "node-b"}, + }, + }, + }, + { + "kind": "snapshot_list", + "snapshots": []map[string]any{ + { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": map[string]any{ + "mode": "cached", + "targets": []string{"node-a", "node-b"}, + }, + }, + }, + }, + { + "kind": "snapshot", + "snapshot": map[string]any{ + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": map[string]any{ + "mode": "cached", + "targets": []string{"node-a", "node-b"}, + }, + }, + }, + { + "kind": "snapshot_deleted", + "snapshot_id": "snap-1", + }, + { + "kind": "snapshot", + "snapshot": map[string]any{ + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": map[string]any{ + "mode": "copy", + "targets": []string{"node-a", "node-c"}, + }, + }, + }, + { + "kind": "pool", + "pool": map[string]any{ + "pool_id": "pool-1", + "sandbox_spec": map[string]any{ + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": 
float64(2), + "memory_mb": float64(2048), + }, + }, + "capacity": map[string]any{ + "warm": float64(5), + "max": float64(20), + }, + }, + }, + { + "kind": "pool", + "pool": map[string]any{ + "pool_id": "pool-1", + "sandbox_spec": map[string]any{ + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": float64(2), + "memory_mb": float64(2048), + }, + }, + "capacity": map[string]any{ + "warm": float64(5), + "max": float64(20), + }, + }, + }, + { + "kind": "pool", + "pool": map[string]any{ + "pool_id": "pool-1", + "sandbox_spec": map[string]any{ + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": float64(2), + "memory_mb": float64(2048), + }, + }, + "capacity": map[string]any{ + "warm": float64(8), + "max": float64(24), + }, + }, + }, + } + requests := make([]string, 0, len(responses)) + + client := NewClient("http://void-control.test") + client.HTTPClient = &http.Client{ + Transport: roundTripFunc(func(r *http.Request) (*http.Response, error) { + requests = append(requests, r.Method+" "+r.URL.Path) + if len(responses) == 0 { + t.Fatalf("received unexpected request %s %s", r.Method, r.URL.Path) + } + body, err := json.Marshal(responses[0]) + if err != nil { + t.Fatalf("marshal response: %v", err) + } + responses = responses[1:] + return &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(body)), + Request: r, + }, nil + }), + } + + sandbox, err := client.Sandboxes.Create(map[string]any{ + "api_version": "v1", + "kind": "sandbox", + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + }) + if err != nil { + t.Fatalf("Sandboxes.Create: %v", err) + } + sandboxes, err := client.Sandboxes.List() + if err != nil { + t.Fatalf("Sandboxes.List: %v", err) + } + fetchedSandbox, err := client.Sandboxes.Get("sbx-1") + if err != nil { + t.Fatalf("Sandboxes.Get: %v", err) + } + execResult, err := 
client.Sandboxes.Exec("sbx-1", map[string]any{ + "kind": "command", + "command": []string{"python3", "-c", "print('hello')"}, + }) + if err != nil { + t.Fatalf("Sandboxes.Exec: %v", err) + } + deletedSandbox, err := client.Sandboxes.Delete("sbx-1") + if err != nil { + t.Fatalf("Sandboxes.Delete: %v", err) + } + snapshot, err := client.Snapshots.Create(map[string]any{ + "api_version": "v1", + "kind": "snapshot", + "source": map[string]any{ + "sandbox_id": "sbx-1", + }, + "distribution": map[string]any{ + "mode": "cached", + "targets": []string{"node-a", "node-b"}, + }, + }) + if err != nil { + t.Fatalf("Snapshots.Create: %v", err) + } + snapshots, err := client.Snapshots.List() + if err != nil { + t.Fatalf("Snapshots.List: %v", err) + } + fetchedSnapshot, err := client.Snapshots.Get("snap-1") + if err != nil { + t.Fatalf("Snapshots.Get: %v", err) + } + deletedSnapshot, err := client.Snapshots.Delete("snap-1") + if err != nil { + t.Fatalf("Snapshots.Delete: %v", err) + } + replicated, err := client.Snapshots.Replicate("snap-1", map[string]any{ + "mode": "copy", + "targets": []string{"node-a", "node-c"}, + }) + if err != nil { + t.Fatalf("Snapshots.Replicate: %v", err) + } + pool, err := client.Pools.Create(map[string]any{ + "api_version": "v1", + "kind": "sandbox_pool", + "sandbox_spec": map[string]any{ + "runtime": map[string]any{ + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + }, + "capacity": map[string]any{ + "warm": 5, + "max": 20, + }, + }) + if err != nil { + t.Fatalf("Pools.Create: %v", err) + } + fetchedPool, err := client.Pools.Get("pool-1") + if err != nil { + t.Fatalf("Pools.Get: %v", err) + } + scaled, err := client.Pools.Scale("pool-1", map[string]any{ + "warm": 8, + "max": 24, + }) + if err != nil { + t.Fatalf("Pools.Scale: %v", err) + } + + if sandbox.SandboxID != "sbx-1" { + t.Fatalf("sandbox.SandboxID = %q", sandbox.SandboxID) + } + if sandboxes[0].State != "running" { + t.Fatalf("sandboxes[0].State = %q", sandboxes[0].State) 
+ } + if fetchedSandbox.Image != "python:3.12-slim" { + t.Fatalf("fetchedSandbox.Image = %q", fetchedSandbox.Image) + } + if execResult.ExitCode != 0 { + t.Fatalf("execResult.ExitCode = %d", execResult.ExitCode) + } + if deletedSandbox.Kind != "sandbox_deleted" { + t.Fatalf("deletedSandbox.Kind = %q", deletedSandbox.Kind) + } + if deletedSandbox.SandboxID != "sbx-1" { + t.Fatalf("deletedSandbox.SandboxID = %q", deletedSandbox.SandboxID) + } + if snapshot.SnapshotID != "snap-1" { + t.Fatalf("snapshot.SnapshotID = %q", snapshot.SnapshotID) + } + if snapshots[0].SnapshotID != "snap-1" { + t.Fatalf("snapshots[0].SnapshotID = %q", snapshots[0].SnapshotID) + } + if fetchedSnapshot.SourceSandboxID != "sbx-1" { + t.Fatalf("fetchedSnapshot.SourceSandboxID = %q", fetchedSnapshot.SourceSandboxID) + } + if deletedSnapshot.Kind != "snapshot_deleted" { + t.Fatalf("deletedSnapshot.Kind = %q", deletedSnapshot.Kind) + } + if deletedSnapshot.SnapshotID != "snap-1" { + t.Fatalf("deletedSnapshot.SnapshotID = %q", deletedSnapshot.SnapshotID) + } + if replicated.Distribution["mode"] != "copy" { + t.Fatalf("replicated.Distribution = %#v", replicated.Distribution) + } + if pool.PoolID != "pool-1" { + t.Fatalf("pool.PoolID = %q", pool.PoolID) + } + if fetchedPool.Capacity["warm"] != float64(5) { + t.Fatalf("fetchedPool.Capacity = %#v", fetchedPool.Capacity) + } + if scaled.Capacity["warm"] != float64(8) { + t.Fatalf("scaled.Capacity = %#v", scaled.Capacity) + } + if len(requests) != 13 { + t.Fatalf("len(requests) = %d", len(requests)) + } +} + +func TestComputeMethodsPreserveBridgeErrors(t *testing.T) { + responses := []FakeResponse{ + { + StatusCode: http.StatusNotFound, + Body: map[string]any{ + "message": "sandbox 'sbx-missing' not found", + "code": "SANDBOX_NOT_FOUND", + "retryable": false, + }, + }, + { + StatusCode: http.StatusNotFound, + Body: map[string]any{ + "message": "snapshot 'snap-missing' not found", + "code": "SNAPSHOT_NOT_FOUND", + "retryable": false, + }, + }, + { + 
StatusCode: http.StatusServiceUnavailable, + Body: map[string]any{ + "message": "pool controller unavailable", + "code": "POOL_UNAVAILABLE", + "retryable": true, + }, + }, + } + + client := NewClient("http://void-control.test") + client.HTTPClient = &http.Client{ + Transport: fakeResponseTransport(t, responses), + } + + _, err := client.Sandboxes.Get("sbx-missing") + if err == nil { + t.Fatalf("Sandboxes.Get should fail") + } + sandboxErr, ok := err.(*BridgeError) + if !ok { + t.Fatalf("Sandboxes.Get error type = %T", err) + } + if sandboxErr.Message != "sandbox 'sbx-missing' not found" { + t.Fatalf("sandboxErr.Message = %q", sandboxErr.Message) + } + if sandboxErr.Code != "SANDBOX_NOT_FOUND" { + t.Fatalf("sandboxErr.Code = %q", sandboxErr.Code) + } + if sandboxErr.Retryable { + t.Fatalf("sandboxErr.Retryable = true") + } + + _, err = client.Snapshots.Delete("snap-missing") + if err == nil { + t.Fatalf("Snapshots.Delete should fail") + } + snapshotErr, ok := err.(*BridgeError) + if !ok { + t.Fatalf("Snapshots.Delete error type = %T", err) + } + if snapshotErr.Message != "snapshot 'snap-missing' not found" { + t.Fatalf("snapshotErr.Message = %q", snapshotErr.Message) + } + if snapshotErr.Code != "SNAPSHOT_NOT_FOUND" { + t.Fatalf("snapshotErr.Code = %q", snapshotErr.Code) + } + if snapshotErr.Retryable { + t.Fatalf("snapshotErr.Retryable = true") + } + + _, err = client.Pools.Scale("pool-1", map[string]any{"warm": 8, "max": 24}) + if err == nil { + t.Fatalf("Pools.Scale should fail") + } + poolErr, ok := err.(*BridgeError) + if !ok { + t.Fatalf("Pools.Scale error type = %T", err) + } + if poolErr.Message != "pool controller unavailable" { + t.Fatalf("poolErr.Message = %q", poolErr.Message) + } + if poolErr.Code != "POOL_UNAVAILABLE" { + t.Fatalf("poolErr.Code = %q", poolErr.Code) + } + if !poolErr.Retryable { + t.Fatalf("poolErr.Retryable = false") + } +} + +type FakeResponse struct { + StatusCode int + Body map[string]any +} + +func fakeResponseTransport(t 
*testing.T, responses []FakeResponse) roundTripFunc { + t.Helper() + + return func(r *http.Request) (*http.Response, error) { + if len(responses) == 0 { + t.Fatalf("received unexpected request %s %s", r.Method, r.URL.Path) + } + response := responses[0] + responses = responses[1:] + body, err := json.Marshal(response.Body) + if err != nil { + t.Fatalf("marshal response: %v", err) + } + return &http.Response{ + StatusCode: response.StatusCode, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(bytes.NewReader(body)), + Request: r, + }, nil + } +} + type roundTripFunc func(*http.Request) (*http.Response, error) func (fn roundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { diff --git a/sdks/go/examples/sandbox_create/main.go b/sdks/go/examples/sandbox_create/main.go new file mode 100644 index 0000000..c3519cc --- /dev/null +++ b/sdks/go/examples/sandbox_create/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + + voidcontrol "github.com/the-void-ia/void-control/sdks/go" +) + +func main() { + baseURL := getenvDefault("VOID_CONTROL_BASE_URL", "http://127.0.0.1:43210") + image := getenvDefault("VOID_CONTROL_SANDBOX_IMAGE", "python:3.12-slim") + cpus := getenvIntDefault("VOID_CONTROL_SANDBOX_CPUS", 2) + memoryMB := getenvIntDefault("VOID_CONTROL_SANDBOX_MEMORY_MB", 2048) + + client := voidcontrol.NewClient(baseURL) + sandbox, err := client.Sandboxes.Create(map[string]any{ + "api_version": "v1", + "kind": "sandbox", + "runtime": map[string]any{ + "image": image, + "cpus": cpus, + "memory_mb": memoryMB, + }, + }) + if err != nil { + panic(err) + } + + output, err := json.MarshalIndent(map[string]any{ + "sandbox_id": sandbox.SandboxID, + "state": sandbox.State, + "image": sandbox.Image, + "cpus": sandbox.CPUs, + "memory_mb": sandbox.MemoryMB, + }, "", " ") + if err != nil { + panic(err) + } + fmt.Println(string(output)) +} + +func getenvDefault(key string, fallback string) 
string { + value := os.Getenv(key) + if value == "" { + return fallback + } + return value +} + +func getenvIntDefault(key string, fallback int) int { + value := os.Getenv(key) + if value == "" { + return fallback + } + parsed, err := strconv.Atoi(value) + if err != nil { + return fallback + } + return parsed +} diff --git a/sdks/go/models.go b/sdks/go/models.go index 80c670c..1abd298 100644 --- a/sdks/go/models.go +++ b/sdks/go/models.go @@ -92,3 +92,39 @@ type BatchRunDetail struct { Result ExecutionResult `json:"result"` Candidates []any `json:"candidates"` } + +type SandboxRecord struct { + SandboxID string `json:"sandbox_id"` + State string `json:"state"` + Image string `json:"image"` + CPUs int `json:"cpus"` + MemoryMB int `json:"memory_mb"` +} + +type SandboxExecResult struct { + ExitCode int `json:"exit_code"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` +} + +type SnapshotRecord struct { + SnapshotID string `json:"snapshot_id"` + SourceSandboxID string `json:"source_sandbox_id"` + Distribution map[string]any `json:"distribution"` +} + +type PoolRecord struct { + PoolID string `json:"pool_id"` + SandboxSpec map[string]any `json:"sandbox_spec"` + Capacity map[string]any `json:"capacity"` +} + +type SandboxDeleteResult struct { + Kind string `json:"kind"` + SandboxID string `json:"sandbox_id"` +} + +type SnapshotDeleteResult struct { + Kind string `json:"kind"` + SnapshotID string `json:"snapshot_id"` +} diff --git a/sdks/go/pools.go b/sdks/go/pools.go new file mode 100644 index 0000000..f565ec2 --- /dev/null +++ b/sdks/go/pools.go @@ -0,0 +1,35 @@ +package voidcontrol + +type PoolsClient struct { + client *Client +} + +func (client *PoolsClient) Create(spec map[string]any) (*PoolRecord, error) { + var response struct { + Pool PoolRecord `json:"pool"` + } + if err := client.client.postJSON("/v1/pools", spec, &response); err != nil { + return nil, err + } + return &response.Pool, nil +} + +func (client *PoolsClient) Get(poolID string) 
(*PoolRecord, error) { + var response struct { + Pool PoolRecord `json:"pool"` + } + if err := client.client.getJSON("/v1/pools/"+poolID, &response); err != nil { + return nil, err + } + return &response.Pool, nil +} + +func (client *PoolsClient) Scale(poolID string, request map[string]any) (*PoolRecord, error) { + var response struct { + Pool PoolRecord `json:"pool"` + } + if err := client.client.postJSON("/v1/pools/"+poolID+"/scale", request, &response); err != nil { + return nil, err + } + return &response.Pool, nil +} diff --git a/sdks/go/sandboxes.go b/sdks/go/sandboxes.go new file mode 100644 index 0000000..62b33d3 --- /dev/null +++ b/sdks/go/sandboxes.go @@ -0,0 +1,67 @@ +package voidcontrol + +type sandboxListResponse struct { + Sandboxes []SandboxRecord `json:"sandboxes"` +} + +type sandboxExecResponse struct { + Result SandboxExecResult `json:"result"` +} + +type SandboxesClient struct { + client *Client +} + +func (client *SandboxesClient) Create(spec map[string]any) (*SandboxRecord, error) { + var response struct { + Sandbox SandboxRecord `json:"sandbox"` + } + if err := client.client.postJSON("/v1/sandboxes", spec, &response); err != nil { + return nil, err + } + return &response.Sandbox, nil +} + +func (client *SandboxesClient) Get(sandboxID string) (*SandboxRecord, error) { + var response struct { + Sandbox SandboxRecord `json:"sandbox"` + } + if err := client.client.getJSON("/v1/sandboxes/"+sandboxID, &response); err != nil { + return nil, err + } + return &response.Sandbox, nil +} + +func (client *SandboxesClient) List() ([]SandboxRecord, error) { + var response sandboxListResponse + if err := client.client.getJSON("/v1/sandboxes", &response); err != nil { + return nil, err + } + return response.Sandboxes, nil +} + +func (client *SandboxesClient) Exec(sandboxID string, request map[string]any) (*SandboxExecResult, error) { + var response sandboxExecResponse + if err := client.client.postJSON("/v1/sandboxes/"+sandboxID+"/exec", request, &response); 
err != nil { + return nil, err + } + return &response.Result, nil +} + +func (client *SandboxesClient) Stop(sandboxID string) (*SandboxRecord, error) { + var response struct { + Sandbox SandboxRecord `json:"sandbox"` + } + if err := client.client.postJSON("/v1/sandboxes/"+sandboxID+"/stop", map[string]any{}, &response); err != nil { + return nil, err + } + return &response.Sandbox, nil +} + +func (client *SandboxesClient) Delete(sandboxID string) (*SandboxDeleteResult, error) { + var response SandboxDeleteResult + if err := client.client.deleteJSON("/v1/sandboxes/"+sandboxID, &response); err != nil { + return nil, err + } + return &response, nil +} diff --git a/sdks/go/snapshots.go b/sdks/go/snapshots.go new file mode 100644 index 0000000..31abaf0 --- /dev/null +++ b/sdks/go/snapshots.go @@ -0,0 +1,55 @@ +package voidcontrol + +type snapshotListResponse struct { + Snapshots []SnapshotRecord `json:"snapshots"` +} + +type SnapshotsClient struct { + client *Client +} + +func (client *SnapshotsClient) Create(spec map[string]any) (*SnapshotRecord, error) { + var response struct { + Snapshot SnapshotRecord `json:"snapshot"` + } + if err := client.client.postJSON("/v1/snapshots", spec, &response); err != nil { + return nil, err + } + return &response.Snapshot, nil +} + +func (client *SnapshotsClient) Get(snapshotID string) (*SnapshotRecord, error) { + var response struct { + Snapshot SnapshotRecord `json:"snapshot"` + } + if err := client.client.getJSON("/v1/snapshots/"+snapshotID, &response); err != nil { + return nil, err + } + return &response.Snapshot, nil +} + +func (client *SnapshotsClient) List() ([]SnapshotRecord, error) { + var response snapshotListResponse + if err := client.client.getJSON("/v1/snapshots", &response); err != nil { + return nil, err + } + return response.Snapshots, nil +} + +func (client *SnapshotsClient) Replicate(snapshotID string, request map[string]any) (*SnapshotRecord, error) { + var response struct { + Snapshot SnapshotRecord 
`json:"snapshot"` + } + if err := client.client.postJSON("/v1/snapshots/"+snapshotID+"/replicate", request, &response); err != nil { + return nil, err + } + return &response.Snapshot, nil +} + +func (client *SnapshotsClient) Delete(snapshotID string) (*SnapshotDeleteResult, error) { + var response SnapshotDeleteResult + if err := client.client.deleteJSON("/v1/snapshots/"+snapshotID, &response); err != nil { + return nil, err + } + return &response, nil +} diff --git a/sdks/node/README.md b/sdks/node/README.md index eb6a9e8..a6b7f3b 100644 --- a/sdks/node/README.md +++ b/sdks/node/README.md @@ -18,11 +18,15 @@ The first supported surface is: - `client.batchRuns` - `client.yolo` - `client.yoloRuns` +- `client.sandboxes` +- `client.snapshots` +- `client.pools` Examples under `examples/` are bridge examples against `void-control`: - `templateExecute.mjs` - `batchRun.mjs` +- `sandboxCreate.mjs` `batch` is the canonical remote-background execution API. `yolo` is an alias for the same high-level surface. diff --git a/sdks/node/examples/sandboxCreate.mjs b/sdks/node/examples/sandboxCreate.mjs new file mode 100644 index 0000000..9a5e414 --- /dev/null +++ b/sdks/node/examples/sandboxCreate.mjs @@ -0,0 +1,30 @@ +import { VoidControlClient } from "../src/index.js"; + +const baseUrl = process.env.VOID_CONTROL_BASE_URL ?? "http://127.0.0.1:43210"; + +const spec = { + api_version: "v1", + kind: "sandbox", + runtime: { + image: process.env.VOID_CONTROL_SANDBOX_IMAGE ?? "python:3.12-slim", + cpus: Number(process.env.VOID_CONTROL_SANDBOX_CPUS ?? "2"), + memory_mb: Number(process.env.VOID_CONTROL_SANDBOX_MEMORY_MB ?? 
"2048") + } +}; + +const client = new VoidControlClient({ baseUrl }); +const sandbox = await client.sandboxes.create(spec); + +console.log( + JSON.stringify( + { + sandboxId: sandbox.sandboxId, + state: sandbox.state, + image: sandbox.image, + cpus: sandbox.cpus, + memoryMb: sandbox.memoryMb + }, + null, + 2 + ) +); diff --git a/sdks/node/src/client.js b/sdks/node/src/client.js index 8db76e7..d8a6d06 100644 --- a/sdks/node/src/client.js +++ b/sdks/node/src/client.js @@ -1,5 +1,8 @@ import { BatchClient, BatchRunsClient } from "./batch.js"; import { ExecutionsClient } from "./executions.js"; +import { PoolsClient } from "./pools.js"; +import { SandboxesClient } from "./sandboxes.js"; +import { SnapshotsClient } from "./snapshots.js"; import { TemplatesClient } from "./templates.js"; import { BridgeError } from "./models.js"; @@ -13,6 +16,9 @@ export class VoidControlClient { this.batchRuns = new BatchRunsClient(this, { routeBase: "/v1/batch" }); this.yolo = new BatchClient(this, { routeBase: "/v1/yolo" }); this.yoloRuns = new BatchRunsClient(this, { routeBase: "/v1/yolo" }); + this.sandboxes = new SandboxesClient(this); + this.snapshots = new SnapshotsClient(this); + this.pools = new PoolsClient(this); } async getJson(path) { @@ -33,6 +39,13 @@ export class VoidControlClient { return this.#decodeResponse(response); } + async deleteJson(path) { + const response = await this._fetch(`${this.baseUrl}${path}`, { + method: "DELETE" + }); + return this.#decodeResponse(response); + } + async #decodeResponse(response) { const payload = await response.json(); if (!response.ok) { diff --git a/sdks/node/src/models.js b/sdks/node/src/models.js index b5326f1..d30698d 100644 --- a/sdks/node/src/models.js +++ b/sdks/node/src/models.js @@ -74,3 +74,51 @@ export function toExecutionDetail(payload) { candidates: payload.candidates ?? 
[] }; } + +export function toSandboxRecord(payload) { + return { + sandboxId: String(payload.sandbox.sandbox_id), + state: String(payload.sandbox.state), + image: String(payload.sandbox.image ?? ""), + cpus: Number(payload.sandbox.cpus ?? 0), + memoryMb: Number(payload.sandbox.memory_mb ?? 0) + }; +} + +export function toSandboxExecResult(payload) { + return { + exitCode: Number(payload.result?.exit_code ?? 0), + stdout: String(payload.result?.stdout ?? ""), + stderr: String(payload.result?.stderr ?? "") + }; +} + +export function toSnapshotRecord(payload) { + return { + snapshotId: String(payload.snapshot.snapshot_id), + sourceSandboxId: String(payload.snapshot.source_sandbox_id ?? ""), + distribution: payload.snapshot.distribution ?? {} + }; +} + +export function toPoolRecord(payload) { + return { + poolId: String(payload.pool.pool_id), + sandboxSpec: payload.pool.sandbox_spec ?? {}, + capacity: payload.pool.capacity ?? {} + }; +} + +export function toSandboxDeleteResult(payload) { + return { + kind: String(payload.kind ?? ""), + sandboxId: String(payload.sandbox_id ?? "") + }; +} + +export function toSnapshotDeleteResult(payload) { + return { + kind: String(payload.kind ?? ""), + snapshotId: String(payload.snapshot_id ?? 
"") + }; +} diff --git a/sdks/node/src/pools.js b/sdks/node/src/pools.js new file mode 100644 index 0000000..195f559 --- /dev/null +++ b/sdks/node/src/pools.js @@ -0,0 +1,22 @@ +import { toPoolRecord } from "./models.js"; + +export class PoolsClient { + constructor(client) { + this._client = client; + } + + async create(spec) { + const payload = await this._client.postJson("/v1/pools", spec); + return toPoolRecord(payload); + } + + async get(poolId) { + const payload = await this._client.getJson(`/v1/pools/${poolId}`); + return toPoolRecord(payload); + } + + async scale(poolId, request) { + const payload = await this._client.postJson(`/v1/pools/${poolId}/scale`, request); + return toPoolRecord(payload); + } +} diff --git a/sdks/node/src/sandboxes.js b/sdks/node/src/sandboxes.js new file mode 100644 index 0000000..f36eee6 --- /dev/null +++ b/sdks/node/src/sandboxes.js @@ -0,0 +1,41 @@ +import { + toSandboxDeleteResult, + toSandboxExecResult, + toSandboxRecord +} from "./models.js"; + +export class SandboxesClient { + constructor(client) { + this._client = client; + } + + async create(spec) { + const payload = await this._client.postJson("/v1/sandboxes", spec); + return toSandboxRecord(payload); + } + + async get(sandboxId) { + const payload = await this._client.getJson(`/v1/sandboxes/${sandboxId}`); + return toSandboxRecord(payload); + } + + async list() { + const payload = await this._client.getJson("/v1/sandboxes"); + return (payload.sandboxes ?? 
[]).map((item) => toSandboxRecord({ sandbox: item })); + } + + async exec(sandboxId, request) { + const payload = await this._client.postJson(`/v1/sandboxes/${sandboxId}/exec`, request); + return toSandboxExecResult(payload); + } + + async stop(sandboxId) { + const payload = await this._client.postJson(`/v1/sandboxes/${sandboxId}/stop`, {}); + return toSandboxRecord(payload); + } + + async delete(sandboxId) { + const payload = await this._client.deleteJson(`/v1/sandboxes/${sandboxId}`); + return toSandboxDeleteResult(payload); + } +} diff --git a/sdks/node/src/snapshots.js b/sdks/node/src/snapshots.js new file mode 100644 index 0000000..95e70bc --- /dev/null +++ b/sdks/node/src/snapshots.js @@ -0,0 +1,35 @@ +import { toSnapshotDeleteResult, toSnapshotRecord } from "./models.js"; + +export class SnapshotsClient { + constructor(client) { + this._client = client; + } + + async create(spec) { + const payload = await this._client.postJson("/v1/snapshots", spec); + return toSnapshotRecord(payload); + } + + async get(snapshotId) { + const payload = await this._client.getJson(`/v1/snapshots/${snapshotId}`); + return toSnapshotRecord(payload); + } + + async list() { + const payload = await this._client.getJson("/v1/snapshots"); + return (payload.snapshots ?? 
[]).map((item) => toSnapshotRecord({ snapshot: item })); + } + + async replicate(snapshotId, request) { + const payload = await this._client.postJson( + `/v1/snapshots/${snapshotId}/replicate`, + request + ); + return toSnapshotRecord(payload); + } + + async delete(snapshotId) { + const payload = await this._client.deleteJson(`/v1/snapshots/${snapshotId}`); + return toSnapshotDeleteResult(payload); + } +} diff --git a/sdks/node/test/client.test.mjs b/sdks/node/test/client.test.mjs index 1eb9db3..9f5e819 100644 --- a/sdks/node/test/client.test.mjs +++ b/sdks/node/test/client.test.mjs @@ -2,6 +2,7 @@ import test from "node:test"; import assert from "node:assert/strict"; import { VoidControlClient } from "../src/index.js"; +import { BridgeError } from "../src/models.js"; test("client exposes template and execution subclients", () => { const client = new VoidControlClient({ baseUrl: "http://127.0.0.1:43210" }); @@ -13,6 +14,9 @@ test("client exposes template and execution subclients", () => { assert.ok(client.batchRuns); assert.ok(client.yolo); assert.ok(client.yoloRuns); + assert.ok(client.sandboxes); + assert.ok(client.snapshots); + assert.ok(client.pools); }); test("template and execution methods use the bridge API", async () => { @@ -305,3 +309,321 @@ test("batch and yolo methods use the bridge API", async () => { assert.equal(requests[3].path, "/v1/yolo/run"); assert.equal(requests[4].path, "/v1/yolo-runs/exec-yolo-1"); }); + +test("compute methods use the bridge API", async () => { + const responses = [ + { + kind: "sandbox", + sandbox: { + sandbox_id: "sbx-1", + state: "running", + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + { + kind: "sandbox_list", + sandboxes: [ + { + sandbox_id: "sbx-1", + state: "running", + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + ] + }, + { + kind: "sandbox", + sandbox: { + sandbox_id: "sbx-1", + state: "running", + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + { + kind: 
"sandbox_exec", + result: { + exit_code: 0, + stdout: "hello\n", + stderr: "" + } + }, + { + kind: "sandbox_deleted", + sandbox_id: "sbx-1" + }, + { + kind: "snapshot", + snapshot: { + snapshot_id: "snap-1", + source_sandbox_id: "sbx-1", + distribution: { + mode: "cached", + targets: ["node-a", "node-b"] + } + } + }, + { + kind: "snapshot_list", + snapshots: [ + { + snapshot_id: "snap-1", + source_sandbox_id: "sbx-1", + distribution: { + mode: "cached", + targets: ["node-a", "node-b"] + } + } + ] + }, + { + kind: "snapshot", + snapshot: { + snapshot_id: "snap-1", + source_sandbox_id: "sbx-1", + distribution: { + mode: "cached", + targets: ["node-a", "node-b"] + } + } + }, + { + kind: "snapshot_deleted", + snapshot_id: "snap-1" + }, + { + kind: "snapshot", + snapshot: { + snapshot_id: "snap-1", + source_sandbox_id: "sbx-1", + distribution: { + mode: "copy", + targets: ["node-a", "node-c"] + } + } + }, + { + kind: "pool", + pool: { + pool_id: "pool-1", + sandbox_spec: { + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + capacity: { + warm: 5, + max: 20 + } + } + }, + { + kind: "pool", + pool: { + pool_id: "pool-1", + sandbox_spec: { + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + capacity: { + warm: 5, + max: 20 + } + } + }, + { + kind: "pool", + pool: { + pool_id: "pool-1", + sandbox_spec: { + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + capacity: { + warm: 8, + max: 24 + } + } + } + ]; + const requests = []; + const fetchImpl = async (url, init = {}) => { + const body = init.body ?? null; + requests.push({ + method: init.method ?? 
"GET", + path: new URL(url).pathname, + body + }); + const payload = responses.shift(); + return new Response(JSON.stringify(payload), { + status: 200, + headers: { "content-type": "application/json" } + }); + }; + + const client = new VoidControlClient({ + baseUrl: "http://127.0.0.1:43210", + fetchImpl + }); + + const sandbox = await client.sandboxes.create({ + api_version: "v1", + kind: "sandbox", + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }); + const sandboxes = await client.sandboxes.list(); + const fetchedSandbox = await client.sandboxes.get("sbx-1"); + const execResult = await client.sandboxes.exec("sbx-1", { + kind: "command", + command: ["python3", "-c", "print('hello')"] + }); + const deletedSandbox = await client.sandboxes.delete("sbx-1"); + const snapshot = await client.snapshots.create({ + api_version: "v1", + kind: "snapshot", + source: { sandbox_id: "sbx-1" }, + distribution: { + mode: "cached", + targets: ["node-a", "node-b"] + } + }); + const snapshots = await client.snapshots.list(); + const fetchedSnapshot = await client.snapshots.get("snap-1"); + const deletedSnapshot = await client.snapshots.delete("snap-1"); + const replicated = await client.snapshots.replicate("snap-1", { + mode: "copy", + targets: ["node-a", "node-c"] + }); + const pool = await client.pools.create({ + api_version: "v1", + kind: "sandbox_pool", + sandbox_spec: { + runtime: { + image: "python:3.12-slim", + cpus: 2, + memory_mb: 2048 + } + }, + capacity: { + warm: 5, + max: 20 + } + }); + const fetchedPool = await client.pools.get("pool-1"); + const scaled = await client.pools.scale("pool-1", { + warm: 8, + max: 24 + }); + + assert.equal(sandbox.sandboxId, "sbx-1"); + assert.equal(sandboxes[0].state, "running"); + assert.equal(fetchedSandbox.image, "python:3.12-slim"); + assert.equal(execResult.exitCode, 0); + assert.equal(deletedSandbox.kind, "sandbox_deleted"); + assert.equal(deletedSandbox.sandboxId, "sbx-1"); + 
assert.equal(snapshot.snapshotId, "snap-1"); + assert.equal(snapshots[0].snapshotId, "snap-1"); + assert.equal(fetchedSnapshot.sourceSandboxId, "sbx-1"); + assert.equal(deletedSnapshot.kind, "snapshot_deleted"); + assert.equal(deletedSnapshot.snapshotId, "snap-1"); + assert.equal(replicated.distribution.mode, "copy"); + assert.equal(pool.poolId, "pool-1"); + assert.equal(fetchedPool.capacity.warm, 5); + assert.equal(scaled.capacity.warm, 8); + + assert.equal(requests[0].path, "/v1/sandboxes"); + assert.equal(requests[1].path, "/v1/sandboxes"); + assert.equal(requests[2].path, "/v1/sandboxes/sbx-1"); + assert.equal(requests[3].path, "/v1/sandboxes/sbx-1/exec"); + assert.equal(requests[4].path, "/v1/sandboxes/sbx-1"); + assert.equal(requests[5].path, "/v1/snapshots"); + assert.equal(requests[6].path, "/v1/snapshots"); + assert.equal(requests[7].path, "/v1/snapshots/snap-1"); + assert.equal(requests[8].path, "/v1/snapshots/snap-1"); + assert.equal(requests[9].path, "/v1/snapshots/snap-1/replicate"); + assert.equal(requests[10].path, "/v1/pools"); + assert.equal(requests[11].path, "/v1/pools/pool-1"); + assert.equal(requests[12].path, "/v1/pools/pool-1/scale"); +}); + +test("compute methods preserve bridge errors", async () => { + const responses = [ + { + status: 404, + body: { + message: "sandbox 'sbx-missing' not found", + code: "SANDBOX_NOT_FOUND", + retryable: false + } + }, + { + status: 404, + body: { + message: "snapshot 'snap-missing' not found", + code: "SNAPSHOT_NOT_FOUND", + retryable: false + } + }, + { + status: 503, + body: { + message: "pool controller unavailable", + code: "POOL_UNAVAILABLE", + retryable: true + } + } + ]; + + const fetchImpl = async () => { + const response = responses.shift(); + return new Response(JSON.stringify(response.body), { + status: response.status, + headers: { "content-type": "application/json" } + }); + }; + + const client = new VoidControlClient({ + baseUrl: "http://127.0.0.1:43210", + fetchImpl + }); + + await 
assert.rejects(client.sandboxes.get("sbx-missing"), (error) => { + assert.ok(error instanceof BridgeError); + assert.equal(error.message, "sandbox 'sbx-missing' not found"); + assert.equal(error.code, "SANDBOX_NOT_FOUND"); + assert.equal(error.retryable, false); + return true; + }); + + await assert.rejects(client.snapshots.delete("snap-missing"), (error) => { + assert.ok(error instanceof BridgeError); + assert.equal(error.message, "snapshot 'snap-missing' not found"); + assert.equal(error.code, "SNAPSHOT_NOT_FOUND"); + assert.equal(error.retryable, false); + return true; + }); + + await assert.rejects(client.pools.scale("pool-1", { warm: 8, max: 24 }), (error) => { + assert.ok(error instanceof BridgeError); + assert.equal(error.message, "pool controller unavailable"); + assert.equal(error.code, "POOL_UNAVAILABLE"); + assert.equal(error.retryable, true); + return true; + }); +}); diff --git a/sdks/python/README.md b/sdks/python/README.md index 8206b66..e6cf64d 100644 --- a/sdks/python/README.md +++ b/sdks/python/README.md @@ -18,11 +18,15 @@ The first supported surface is: - `client.batch_runs` - `client.yolo` - `client.yolo_runs` +- `client.sandboxes` +- `client.snapshots` +- `client.pools` Examples under `examples/` are bridge examples against `void-control`: - `template_execute.py` - `batch_run.py` +- `sandbox_create.py` `batch` is the canonical remote-background execution API. `yolo` is an alias for the same high-level surface. 
diff --git a/sdks/python/examples/sandbox_create.py b/sdks/python/examples/sandbox_create.py new file mode 100644 index 0000000..64f0de4 --- /dev/null +++ b/sdks/python/examples/sandbox_create.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +import asyncio +import json +import os + +from void_control import VoidControlClient + + +async def main() -> None: + base_url = os.environ.get("VOID_CONTROL_BASE_URL", "http://127.0.0.1:43210") + + spec = { + "api_version": "v1", + "kind": "sandbox", + "runtime": { + "image": os.environ.get("VOID_CONTROL_SANDBOX_IMAGE", "python:3.12-slim"), + "cpus": int(os.environ.get("VOID_CONTROL_SANDBOX_CPUS", "2")), + "memory_mb": int(os.environ.get("VOID_CONTROL_SANDBOX_MEMORY_MB", "2048")), + }, + } + + async with VoidControlClient(base_url=base_url) as client: + sandbox = await client.sandboxes.create(spec) + + print( + json.dumps( + { + "sandbox_id": sandbox.sandbox_id, + "state": sandbox.state, + "image": sandbox.image, + "cpus": sandbox.cpus, + "memory_mb": sandbox.memory_mb, + }, + indent=2, + ) + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdks/python/src/void_control/client.py b/sdks/python/src/void_control/client.py index 07dbfcc..2ecb3c1 100644 --- a/sdks/python/src/void_control/client.py +++ b/sdks/python/src/void_control/client.py @@ -6,6 +6,9 @@ from .batch import BatchClient, BatchRunsClient from .executions import ExecutionsClient +from .pools import PoolsClient +from .sandboxes import SandboxesClient +from .snapshots import SnapshotsClient from .templates import TemplatesClient from .models import BridgeError @@ -30,6 +33,9 @@ def __init__( self.batch_runs = BatchRunsClient(self, route_base="/v1/batch") self.yolo = BatchClient(self, route_base="/v1/yolo") self.yolo_runs = BatchRunsClient(self, route_base="/v1/yolo") + self.sandboxes = SandboxesClient(self) + self.snapshots = SnapshotsClient(self) + self.pools = PoolsClient(self) async def aclose(self) -> None: await self._http.aclose() 
@@ -48,6 +54,10 @@ async def post_json(self, path: str, payload: dict[str, Any]) -> dict[str, Any]: response = await self._http.post(path, json=payload) return await self._decode_response(response) + async def delete_json(self, path: str) -> dict[str, Any]: + response = await self._http.delete(path) + return await self._decode_response(response) + async def _decode_response(self, response: httpx.Response) -> dict[str, Any]: data = response.json() if response.status_code >= 400: diff --git a/sdks/python/src/void_control/models.py b/sdks/python/src/void_control/models.py index 5e1db25..f29050f 100644 --- a/sdks/python/src/void_control/models.py +++ b/sdks/python/src/void_control/models.py @@ -206,3 +206,123 @@ def from_json(cls, payload: dict[str, Any]) -> "BatchRunDetail": result=ExecutionResult.from_json(dict(payload.get("result", {}))), candidates=list(payload.get("candidates", [])), ) + + +@dataclass(slots=True) +class SandboxRecord: + sandbox_id: str + state: str + image: str + cpus: int + memory_mb: int + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SandboxRecord": + sandbox = payload["sandbox"] + return cls( + sandbox_id=str(sandbox["sandbox_id"]), + state=str(sandbox["state"]), + image=str(sandbox.get("image", "")), + cpus=int(sandbox.get("cpus", 0)), + memory_mb=int(sandbox.get("memory_mb", 0)), + ) + + +@dataclass(slots=True) +class SandboxExecResult: + exit_code: int + stdout: str + stderr: str + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SandboxExecResult": + result = payload["result"] + return cls( + exit_code=int(result.get("exit_code", 0)), + stdout=str(result.get("stdout", "")), + stderr=str(result.get("stderr", "")), + ) + + +@dataclass(slots=True) +class SandboxDeleteResult: + kind: str + sandbox_id: str + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SandboxDeleteResult": + return cls( + kind=str(payload.get("kind", "")), + sandbox_id=str(payload.get("sandbox_id", "")), + ) + + 
+@dataclass(slots=True) +class SnapshotRecord: + snapshot_id: str + source_sandbox_id: str + distribution: dict[str, Any] + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SnapshotRecord": + snapshot = payload["snapshot"] + return cls( + snapshot_id=str(snapshot["snapshot_id"]), + source_sandbox_id=str(snapshot.get("source_sandbox_id", "")), + distribution=dict(snapshot.get("distribution", {})), + ) + + +@dataclass(slots=True) +class SnapshotDeleteResult: + kind: str + snapshot_id: str + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SnapshotDeleteResult": + return cls( + kind=str(payload.get("kind", "")), + snapshot_id=str(payload.get("snapshot_id", "")), + ) + + +@dataclass(slots=True) +class PoolRecord: + pool_id: str + sandbox_spec: dict[str, Any] + capacity: dict[str, Any] + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "PoolRecord": + pool = payload["pool"] + return cls( + pool_id=str(pool["pool_id"]), + sandbox_spec=dict(pool.get("sandbox_spec", {})), + capacity=dict(pool.get("capacity", {})), + ) + + +@dataclass(slots=True) +class SandboxDeleteResult: + kind: str + sandbox_id: str + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SandboxDeleteResult": + return cls( + kind=str(payload.get("kind", "")), + sandbox_id=str(payload.get("sandbox_id", "")), + ) + + +@dataclass(slots=True) +class SnapshotDeleteResult: + kind: str + snapshot_id: str + + @classmethod + def from_json(cls, payload: dict[str, Any]) -> "SnapshotDeleteResult": + return cls( + kind=str(payload.get("kind", "")), + snapshot_id=str(payload.get("snapshot_id", "")), + ) diff --git a/sdks/python/src/void_control/pools.py b/sdks/python/src/void_control/pools.py new file mode 100644 index 0000000..933f12d --- /dev/null +++ b/sdks/python/src/void_control/pools.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from typing import Any + +from .models import PoolRecord + + +class PoolsClient: + def __init__(self, client: 
object) -> None: + self._client = client + + async def create(self, spec: dict[str, Any]) -> PoolRecord: + payload = await self._client.post_json("/v1/pools", spec) + return PoolRecord.from_json(payload) + + async def get(self, pool_id: str) -> PoolRecord: + payload = await self._client.get_json(f"/v1/pools/{pool_id}") + return PoolRecord.from_json(payload) + + async def scale(self, pool_id: str, request: dict[str, Any]) -> PoolRecord: + payload = await self._client.post_json(f"/v1/pools/{pool_id}/scale", request) + return PoolRecord.from_json(payload) diff --git a/sdks/python/src/void_control/sandboxes.py b/sdks/python/src/void_control/sandboxes.py new file mode 100644 index 0000000..7d218ba --- /dev/null +++ b/sdks/python/src/void_control/sandboxes.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from typing import Any + +from .models import SandboxDeleteResult, SandboxExecResult, SandboxRecord + + +class SandboxesClient: + def __init__(self, client: object) -> None: + self._client = client + + async def create(self, spec: dict[str, Any]) -> SandboxRecord: + payload = await self._client.post_json("/v1/sandboxes", spec) + return SandboxRecord.from_json(payload) + + async def get(self, sandbox_id: str) -> SandboxRecord: + payload = await self._client.get_json(f"/v1/sandboxes/{sandbox_id}") + return SandboxRecord.from_json(payload) + + async def list(self) -> list[SandboxRecord]: + payload = await self._client.get_json("/v1/sandboxes") + sandboxes = payload.get("sandboxes", []) + return [SandboxRecord.from_json({"sandbox": item}) for item in sandboxes] + + async def exec( + self, + sandbox_id: str, + request: dict[str, Any], + ) -> SandboxExecResult: + payload = await self._client.post_json(f"/v1/sandboxes/{sandbox_id}/exec", request) + return SandboxExecResult.from_json(payload) + + async def stop(self, sandbox_id: str) -> SandboxRecord: + payload = await self._client.post_json(f"/v1/sandboxes/{sandbox_id}/stop", {}) + return 
SandboxRecord.from_json(payload) + + async def delete(self, sandbox_id: str) -> SandboxDeleteResult: + payload = await self._client.delete_json(f"/v1/sandboxes/{sandbox_id}") + return SandboxDeleteResult.from_json(payload) diff --git a/sdks/python/src/void_control/snapshots.py b/sdks/python/src/void_control/snapshots.py new file mode 100644 index 0000000..856a9a8 --- /dev/null +++ b/sdks/python/src/void_control/snapshots.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from typing import Any + +from .models import SnapshotDeleteResult, SnapshotRecord + + +class SnapshotsClient: + def __init__(self, client: object) -> None: + self._client = client + + async def create(self, spec: dict[str, Any]) -> SnapshotRecord: + payload = await self._client.post_json("/v1/snapshots", spec) + return SnapshotRecord.from_json(payload) + + async def get(self, snapshot_id: str) -> SnapshotRecord: + payload = await self._client.get_json(f"/v1/snapshots/{snapshot_id}") + return SnapshotRecord.from_json(payload) + + async def list(self) -> list[SnapshotRecord]: + payload = await self._client.get_json("/v1/snapshots") + snapshots = payload.get("snapshots", []) + return [SnapshotRecord.from_json({"snapshot": item}) for item in snapshots] + + async def replicate( + self, + snapshot_id: str, + request: dict[str, Any], + ) -> SnapshotRecord: + payload = await self._client.post_json( + f"/v1/snapshots/{snapshot_id}/replicate", + request, + ) + return SnapshotRecord.from_json(payload) + + async def delete(self, snapshot_id: str) -> SnapshotDeleteResult: + payload = await self._client.delete_json(f"/v1/snapshots/{snapshot_id}") + return SnapshotDeleteResult.from_json(payload) diff --git a/sdks/python/tests/test_client.py b/sdks/python/tests/test_client.py index cd09872..ce43ddc 100644 --- a/sdks/python/tests/test_client.py +++ b/sdks/python/tests/test_client.py @@ -26,6 +26,9 @@ def test_client_exposes_template_and_execution_subclients(self) -> None: 
self.assertIsNotNone(client.batch_runs) self.assertIsNotNone(client.yolo) self.assertIsNotNone(client.yolo_runs) + self.assertIsNotNone(client.sandboxes) + self.assertIsNotNone(client.snapshots) + self.assertIsNotNone(client.pools) class ClientMethodsTest(unittest.IsolatedAsyncioTestCase): @@ -304,6 +307,317 @@ def handler(request: httpx.Request) -> httpx.Response: self.assertEqual(requests[3][:2], ("POST", "/v1/yolo/run")) self.assertEqual(requests[4][:2], ("GET", "/v1/yolo-runs/exec-yolo-1")) + async def test_compute_methods(self) -> None: + from void_control import VoidControlClient + + responses = [ + { + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + }, + { + "kind": "sandbox_list", + "sandboxes": [ + { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + } + ], + }, + { + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + }, + { + "kind": "sandbox_exec", + "result": { + "exit_code": 0, + "stdout": "hello\n", + "stderr": "", + }, + }, + { + "kind": "sandbox_deleted", + "sandbox_id": "sbx-1", + }, + { + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "cached", + "targets": ["node-a", "node-b"], + }, + }, + }, + { + "kind": "snapshot_list", + "snapshots": [ + { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "cached", + "targets": ["node-a", "node-b"], + }, + } + ], + }, + { + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "cached", + "targets": ["node-a", "node-b"], + }, + }, + }, + { + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + 
"mode": "copy", + "targets": ["node-a", "node-c"], + }, + }, + }, + { + "kind": "snapshot_deleted", + "snapshot_id": "snap-1", + }, + { + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + } + }, + "capacity": { + "warm": 5, + "max": 20, + }, + }, + }, + { + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + } + }, + "capacity": { + "warm": 5, + "max": 20, + }, + }, + }, + { + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + } + }, + "capacity": { + "warm": 8, + "max": 24, + }, + }, + }, + ] + requests: list[tuple[str, str, str | None]] = [] + + def handler(request: httpx.Request) -> httpx.Response: + body = request.content.decode() if request.content else None + requests.append((request.method, request.url.path, body)) + payload = responses.pop(0) + return httpx.Response(200, json=payload) + + client = VoidControlClient( + base_url="http://127.0.0.1:43210", + transport=httpx.MockTransport(handler), + ) + + sandbox = await client.sandboxes.create( + { + "api_version": "v1", + "kind": "sandbox", + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + }, + } + ) + sandboxes = await client.sandboxes.list() + fetched_sandbox = await client.sandboxes.get("sbx-1") + exec_result = await client.sandboxes.exec( + "sbx-1", + { + "kind": "command", + "command": ["python3", "-c", "print('hello')"], + }, + ) + deleted_sandbox = await client.sandboxes.delete("sbx-1") + snapshot = await client.snapshots.create( + { + "api_version": "v1", + "kind": "snapshot", + "source": {"sandbox_id": "sbx-1"}, + "distribution": { + "mode": "cached", + "targets": ["node-a", "node-b"], + }, + } + ) + snapshots = await client.snapshots.list() + fetched_snapshot = await 
client.snapshots.get("snap-1") + replicated = await client.snapshots.replicate( + "snap-1", + { + "mode": "copy", + "targets": ["node-a", "node-c"], + }, + ) + deleted_snapshot = await client.snapshots.delete("snap-1") + pool = await client.pools.create( + { + "api_version": "v1", + "kind": "sandbox_pool", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048, + } + }, + "capacity": {"warm": 5, "max": 20}, + } + ) + fetched_pool = await client.pools.get("pool-1") + scaled = await client.pools.scale("pool-1", {"warm": 8, "max": 24}) + await client.aclose() + + self.assertEqual(sandbox.sandbox_id, "sbx-1") + self.assertEqual(sandboxes[0].state, "running") + self.assertEqual(fetched_sandbox.image, "python:3.12-slim") + self.assertEqual(exec_result.exit_code, 0) + self.assertEqual(deleted_sandbox.kind, "sandbox_deleted") + self.assertEqual(snapshot.snapshot_id, "snap-1") + self.assertEqual(snapshots[0].snapshot_id, "snap-1") + self.assertEqual(fetched_snapshot.source_sandbox_id, "sbx-1") + self.assertEqual(replicated.distribution["mode"], "copy") + self.assertEqual(deleted_snapshot.kind, "snapshot_deleted") + self.assertEqual(pool.pool_id, "pool-1") + self.assertEqual(fetched_pool.capacity["warm"], 5) + self.assertEqual(scaled.capacity["warm"], 8) + + self.assertEqual(requests[0][:2], ("POST", "/v1/sandboxes")) + self.assertEqual(requests[1][:2], ("GET", "/v1/sandboxes")) + self.assertEqual(requests[2][:2], ("GET", "/v1/sandboxes/sbx-1")) + self.assertEqual(requests[3][:2], ("POST", "/v1/sandboxes/sbx-1/exec")) + self.assertEqual(requests[4][:2], ("DELETE", "/v1/sandboxes/sbx-1")) + self.assertEqual(requests[5][:2], ("POST", "/v1/snapshots")) + self.assertEqual(requests[6][:2], ("GET", "/v1/snapshots")) + self.assertEqual(requests[7][:2], ("GET", "/v1/snapshots/snap-1")) + self.assertEqual(requests[8][:2], ("POST", "/v1/snapshots/snap-1/replicate")) + self.assertEqual(requests[9][:2], ("DELETE", "/v1/snapshots/snap-1")) + 
self.assertEqual(requests[10][:2], ("POST", "/v1/pools")) + self.assertEqual(requests[11][:2], ("GET", "/v1/pools/pool-1")) + self.assertEqual(requests[12][:2], ("POST", "/v1/pools/pool-1/scale")) + + async def test_compute_methods_raise_bridge_error(self) -> None: + from void_control import VoidControlClient + from void_control.models import BridgeError + + responses = [ + ( + 404, + { + "message": "sandbox 'sbx-missing' not found", + "code": "SANDBOX_NOT_FOUND", + "retryable": False, + }, + ), + ( + 404, + { + "message": "snapshot 'snap-missing' not found", + "code": "SNAPSHOT_NOT_FOUND", + "retryable": False, + }, + ), + ( + 503, + { + "message": "pool controller unavailable", + "code": "POOL_UNAVAILABLE", + "retryable": True, + }, + ), + ] + + def handler(request: httpx.Request) -> httpx.Response: + status, payload = responses.pop(0) + return httpx.Response(status, json=payload) + + client = VoidControlClient( + base_url="http://127.0.0.1:43210", + transport=httpx.MockTransport(handler), + ) + + with self.assertRaises(BridgeError) as sandbox_err: + await client.sandboxes.get("sbx-missing") + self.assertEqual(str(sandbox_err.exception), "sandbox 'sbx-missing' not found") + self.assertEqual(sandbox_err.exception.code, "SANDBOX_NOT_FOUND") + self.assertFalse(sandbox_err.exception.retryable) + + with self.assertRaises(BridgeError) as snapshot_err: + await client.snapshots.delete("snap-missing") + self.assertEqual(str(snapshot_err.exception), "snapshot 'snap-missing' not found") + self.assertEqual(snapshot_err.exception.code, "SNAPSHOT_NOT_FOUND") + self.assertFalse(snapshot_err.exception.retryable) + + with self.assertRaises(BridgeError) as pool_err: + await client.pools.scale("pool-1", {"warm": 8, "max": 24}) + self.assertEqual(str(pool_err.exception), "pool controller unavailable") + self.assertEqual(pool_err.exception.code, "POOL_UNAVAILABLE") + self.assertTrue(pool_err.exception.retryable) + + await client.aclose() + if __name__ == "__main__": unittest.main() 
diff --git a/src/bin/voidctl.rs b/src/bin/voidctl.rs index 9e7855e..053a7ed 100644 --- a/src/bin/voidctl.rs +++ b/src/bin/voidctl.rs @@ -82,6 +82,68 @@ enum TeamCommand { Run { spec: Option, stdin: bool }, } +#[cfg(feature = "serde")] +#[derive(Debug, Clone, PartialEq, Eq)] +enum SandboxCommand { + Create { + spec: Option, + stdin: bool, + }, + Get { + sandbox_id: String, + }, + List, + Exec { + sandbox_id: String, + request: Option, + stdin: bool, + }, + Stop { + sandbox_id: String, + }, + Delete { + sandbox_id: String, + }, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Clone, PartialEq, Eq)] +enum SnapshotCommand { + Create { + spec: Option, + stdin: bool, + }, + Get { + snapshot_id: String, + }, + List, + Replicate { + snapshot_id: String, + request: Option, + stdin: bool, + }, + Delete { + snapshot_id: String, + }, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Clone, PartialEq, Eq)] +enum PoolCommand { + Create { + spec: Option, + stdin: bool, + }, + Get { + pool_id: String, + }, + Scale { + pool_id: String, + request: Option, + stdin: bool, + }, +} + #[cfg(feature = "serde")] #[derive(Debug, Clone, PartialEq, Eq)] enum CliCommand { @@ -92,6 +154,9 @@ enum CliCommand { Template(TemplateCommand), Batch(BatchCommand), Team(TeamCommand), + Sandbox(SandboxCommand), + Snapshot(SnapshotCommand), + Pool(PoolCommand), } #[cfg(feature = "serde")] @@ -132,6 +197,21 @@ fn team_subcommand_candidates() -> &'static [&'static str] { &["dry-run", "run"] } +#[cfg(feature = "serde")] +fn sandbox_subcommand_candidates() -> &'static [&'static str] { + &["create", "get", "list", "exec", "stop", "delete"] +} + +#[cfg(feature = "serde")] +fn snapshot_subcommand_candidates() -> &'static [&'static str] { + &["create", "get", "list", "replicate", "delete"] +} + +#[cfg(feature = "serde")] +fn pool_subcommand_candidates() -> &'static [&'static str] { + &["create", "get", "scale"] +} + #[cfg(feature = "serde")] fn parse_cli_args(args: I) -> Result where @@ -354,12 +434,216 @@ where 
)), } } + "sandbox" => { + let action = iter.next().ok_or_else(|| { + "usage: voidctl sandbox [args]".to_string() + })?; + match action { + "create" => parse_spec_or_stdin( + &mut iter, + "usage: voidctl sandbox create [ | --stdin]", + ) + .map(|(spec, stdin)| CliCommand::Sandbox(SandboxCommand::Create { spec, stdin })), + "get" => { + let sandbox_id = iter + .next() + .ok_or_else(|| "usage: voidctl sandbox get ".to_string())? + .to_string(); + expect_no_more_args(&mut iter, "usage: voidctl sandbox get ")?; + Ok(CliCommand::Sandbox(SandboxCommand::Get { sandbox_id })) + } + "list" => { + expect_no_more_args(&mut iter, "usage: voidctl sandbox list")?; + Ok(CliCommand::Sandbox(SandboxCommand::List)) + } + "exec" => { + let sandbox_id = iter + .next() + .ok_or_else(|| { + "usage: voidctl sandbox exec [ | --stdin]" + .to_string() + })? + .to_string(); + parse_spec_or_stdin( + &mut iter, + "usage: voidctl sandbox exec [ | --stdin]", + ) + .map(|(request, stdin)| { + CliCommand::Sandbox(SandboxCommand::Exec { + sandbox_id, + request, + stdin, + }) + }) + } + "stop" => { + let sandbox_id = iter + .next() + .ok_or_else(|| "usage: voidctl sandbox stop ".to_string())? + .to_string(); + expect_no_more_args(&mut iter, "usage: voidctl sandbox stop ")?; + Ok(CliCommand::Sandbox(SandboxCommand::Stop { sandbox_id })) + } + "delete" => { + let sandbox_id = iter + .next() + .ok_or_else(|| "usage: voidctl sandbox delete ".to_string())? + .to_string(); + expect_no_more_args(&mut iter, "usage: voidctl sandbox delete ")?; + Ok(CliCommand::Sandbox(SandboxCommand::Delete { sandbox_id })) + } + other => Err(format!( + "unknown sandbox subcommand '{other}'. 
supported: {}", + sandbox_subcommand_candidates().join(", ") + )), + } + } + "snapshot" => { + let action = iter.next().ok_or_else(|| { + "usage: voidctl snapshot [args]".to_string() + })?; + match action { + "create" => parse_spec_or_stdin( + &mut iter, + "usage: voidctl snapshot create [ | --stdin]", + ) + .map(|(spec, stdin)| CliCommand::Snapshot(SnapshotCommand::Create { spec, stdin })), + "get" => { + let snapshot_id = iter + .next() + .ok_or_else(|| "usage: voidctl snapshot get ".to_string())? + .to_string(); + expect_no_more_args(&mut iter, "usage: voidctl snapshot get ")?; + Ok(CliCommand::Snapshot(SnapshotCommand::Get { snapshot_id })) + } + "list" => { + expect_no_more_args(&mut iter, "usage: voidctl snapshot list")?; + Ok(CliCommand::Snapshot(SnapshotCommand::List)) + } + "replicate" => { + let snapshot_id = iter + .next() + .ok_or_else(|| { + "usage: voidctl snapshot replicate [ | --stdin]" + .to_string() + })? + .to_string(); + parse_spec_or_stdin( + &mut iter, + "usage: voidctl snapshot replicate [ | --stdin]", + ) + .map(|(request, stdin)| { + CliCommand::Snapshot(SnapshotCommand::Replicate { + snapshot_id, + request, + stdin, + }) + }) + } + "delete" => { + let snapshot_id = iter + .next() + .ok_or_else(|| { + "usage: voidctl snapshot delete ".to_string() + })? + .to_string(); + expect_no_more_args( + &mut iter, + "usage: voidctl snapshot delete ", + )?; + Ok(CliCommand::Snapshot(SnapshotCommand::Delete { snapshot_id })) + } + other => Err(format!( + "unknown snapshot subcommand '{other}'. 
supported: {}", + snapshot_subcommand_candidates().join(", ") + )), + } + } + "pool" => { + let action = iter.next().ok_or_else(|| { + "usage: voidctl pool [args]".to_string() + })?; + match action { + "create" => parse_spec_or_stdin( + &mut iter, + "usage: voidctl pool create [ | --stdin]", + ) + .map(|(spec, stdin)| CliCommand::Pool(PoolCommand::Create { spec, stdin })), + "get" => { + let pool_id = iter + .next() + .ok_or_else(|| "usage: voidctl pool get ".to_string())? + .to_string(); + expect_no_more_args(&mut iter, "usage: voidctl pool get ")?; + Ok(CliCommand::Pool(PoolCommand::Get { pool_id })) + } + "scale" => { + let pool_id = iter + .next() + .ok_or_else(|| { + "usage: voidctl pool scale [ | --stdin]" + .to_string() + })? + .to_string(); + parse_spec_or_stdin( + &mut iter, + "usage: voidctl pool scale [ | --stdin]", + ) + .map(|(request, stdin)| { + CliCommand::Pool(PoolCommand::Scale { + pool_id, + request, + stdin, + }) + }) + } + other => Err(format!( + "unknown pool subcommand '{other}'. supported: {}", + pool_subcommand_candidates().join(", ") + )), + } + } other => Err(format!( - "unknown command '{other}'. supported: serve, help, execution, template, batch, yolo, team" + "unknown command '{other}'. 
/// Parses the shared `[<path> | --stdin]` argument tail used by the compute
/// subcommands (`sandbox create`, `sandbox exec`, `snapshot replicate`,
/// `pool scale`, ...).
///
/// Returns `(Some(path), false)` when a positional path was supplied,
/// `(None, true)` when `--stdin` was supplied, and `Err(usage)` or an
/// "unexpected extra argument" error when both, neither, or trailing
/// arguments are present.
///
/// NOTE(review): the original text lost its generic parameters to
/// angle-bracket stripping; `Option<String>` and `Iterator<Item = &'a str>`
/// are restored here to match the call sites (`iter` yields `&str` argv
/// tokens and the parsed spec is owned).
fn parse_spec_or_stdin<'a, I>(iter: &mut I, usage: &str) -> Result<(Option<String>, bool), String>
where
    I: Iterator<Item = &'a str>,
{
    let mut spec = None;
    let mut stdin = false;
    for token in iter.by_ref() {
        match token {
            "--stdin" => {
                // `--stdin` may appear at most once and is mutually
                // exclusive with a positional spec path.
                if stdin || spec.is_some() {
                    return Err(usage.to_string());
                }
                stdin = true;
            }
            other => {
                // Anything after `--stdin`, or a second positional token,
                // is an error.
                if stdin {
                    return Err(format!("unexpected extra argument '{other}'"));
                }
                if spec.is_none() {
                    spec = Some(other.to_string());
                } else {
                    return Err(format!("unexpected extra argument '{other}'"));
                }
            }
        }
    }

    // Exactly one input source is required.
    if !stdin && spec.is_none() {
        return Err(usage.to_string());
    }

    Ok((spec, stdin))
}
/// Loads a bridge request body either from the file at `path` or from
/// stdin when `stdin` is true.
///
/// `label` names the input kind in error messages ("spec", "template
/// input", "sandbox exec request", ...); `load_file` performs the actual
/// file read so callers (`load_execution_spec_input`, `load_json_input`)
/// can plug in their format-specific loaders.
///
/// NOTE(review): the original text lost its generic parameters to
/// angle-bracket stripping; `<F>`, `Result<String, String>`, and the
/// `FnOnce(&str) -> Result<String, String>` bound are restored here to
/// match how the function is called and what it returns.
fn load_bridge_body_input<F>(
    path: Option<&str>,
    stdin: bool,
    label: &str,
    load_file: F,
) -> Result<String, String>
where
    F: FnOnce(&str) -> Result<String, String>,
{
    use std::io::Read;

    if stdin {
        let mut body = String::new();
        std::io::stdin()
            .read_to_string(&mut body)
            .map_err(|e| format!("read stdin failed: {e}"))?;
        // An empty stdin body almost always means a mis-wired pipe; reject
        // it early rather than sending an empty request to the bridge.
        if body.trim().is_empty() {
            return Err(format!("stdin {label} is empty"));
        }
        return Ok(body);
    }

    let Some(path) = path else {
        return Err(format!("{label} path is required unless --stdin is used"));
    };
    load_file(path)
}
#[cfg(feature = "serde")]
/// Prints a one-line `key=value` summary of a snapshot detail payload
/// (`{"snapshot": {..., "distribution": {"mode": ..., "targets": [...]}}}`),
/// substituting "-" for any missing or non-string field and joining
/// distribution targets with commas.
fn print_snapshot_summary(detail: &serde_json::Value) {
    let null = serde_json::Value::Null;
    let snapshot = detail.get("snapshot").unwrap_or(&null);
    let distribution = snapshot.get("distribution").unwrap_or(&null);

    // Keep only the string entries of distribution.targets (non-strings
    // are silently skipped, matching the lenient display contract).
    let targets: Vec<String> = distribution
        .get("targets")
        .and_then(serde_json::Value::as_array)
        .map(|items| {
            items
                .iter()
                .filter_map(|item| item.as_str().map(str::to_string))
                .collect()
        })
        .unwrap_or_default();

    // Shared "-" fallback for optional string fields.
    let text_field = |value: &serde_json::Value, key: &str| -> String {
        value
            .get(key)
            .and_then(serde_json::Value::as_str)
            .unwrap_or("-")
            .to_string()
    };

    let rendered_targets = if targets.is_empty() {
        "-".to_string()
    } else {
        targets.join(",")
    };

    println!(
        "snapshot_id={} source_sandbox_id={} mode={} targets={}",
        text_field(snapshot, "snapshot_id"),
        text_field(snapshot, "source_sandbox_id"),
        text_field(distribution, "mode"),
        rendered_targets,
    );
}
select_runtime_run( detail: &serde_json::Value, @@ -970,6 +1371,9 @@ fn run() -> Result<(), String> { CliCommand::Template(_) => {} CliCommand::Batch(_) => {} CliCommand::Team(_) => {} + CliCommand::Sandbox(_) => {} + CliCommand::Snapshot(_) => {} + CliCommand::Pool(_) => {} CliCommand::Interactive => {} } @@ -1017,6 +1421,9 @@ fn run() -> Result<(), String> { "/batch", "/yolo", "/team", + "/sandbox", + "/snapshot", + "/pool", "/help", "/exit", ]; @@ -1057,6 +1464,9 @@ fn run() -> Result<(), String> { "/template" => options.extend(["list", "get", "dry-run", "execute"]), "/batch" | "/yolo" => options.extend(["dry-run", "run"]), "/team" => options.extend(["dry-run", "run"]), + "/sandbox" => options.extend(["create", "get", "list", "exec", "stop", "delete"]), + "/snapshot" => options.extend(["create", "get", "list", "replicate", "delete"]), + "/pool" => options.extend(["create", "get", "scale"]), "/events" => options.push("--from"), "/logs" => options.push("--follow"), "/cancel" => options.push("--reason"), @@ -1163,6 +1573,47 @@ fn run() -> Result<(), String> { TeamRun { spec: String, }, + SandboxCreate { + spec: String, + }, + SandboxGet { + sandbox_id: String, + }, + SandboxList, + SandboxExec { + sandbox_id: String, + request: String, + }, + SandboxStop { + sandbox_id: String, + }, + SandboxDelete { + sandbox_id: String, + }, + SnapshotCreate { + spec: String, + }, + SnapshotGet { + snapshot_id: String, + }, + SnapshotList, + SnapshotReplicate { + snapshot_id: String, + request: String, + }, + SnapshotDelete { + snapshot_id: String, + }, + PoolCreate { + spec: String, + }, + PoolGet { + pool_id: String, + }, + PoolScale { + pool_id: String, + request: String, + }, Help, Exit, Empty, @@ -1557,11 +2008,135 @@ fn run() -> Result<(), String> { other => Err(format!("unknown /team action '{other}'")), } } - "/help" => Ok(Command::Help), - "/exit" | "/quit" => Ok(Command::Exit), - other => Err(format!("unknown command '{other}'")), - } - } + "/sandbox" => { + let action 
= tokens.next().ok_or_else(|| { + "usage: /sandbox [args]".to_string() + })?; + match action { + "create" => Ok(Command::SandboxCreate { + spec: tokens + .next() + .ok_or_else(|| "usage: /sandbox create ".to_string())? + .to_string(), + }), + "get" => Ok(Command::SandboxGet { + sandbox_id: tokens + .next() + .ok_or_else(|| "usage: /sandbox get ".to_string())? + .to_string(), + }), + "list" => Ok(Command::SandboxList), + "exec" => Ok(Command::SandboxExec { + sandbox_id: tokens + .next() + .ok_or_else(|| { + "usage: /sandbox exec ".to_string() + })? + .to_string(), + request: tokens + .next() + .ok_or_else(|| { + "usage: /sandbox exec ".to_string() + })? + .to_string(), + }), + "stop" => Ok(Command::SandboxStop { + sandbox_id: tokens + .next() + .ok_or_else(|| "usage: /sandbox stop ".to_string())? + .to_string(), + }), + "delete" => Ok(Command::SandboxDelete { + sandbox_id: tokens + .next() + .ok_or_else(|| "usage: /sandbox delete ".to_string())? + .to_string(), + }), + other => Err(format!("unknown /sandbox action '{other}'")), + } + } + "/snapshot" => { + let action = tokens.next().ok_or_else(|| { + "usage: /snapshot [args]".to_string() + })?; + match action { + "create" => Ok(Command::SnapshotCreate { + spec: tokens + .next() + .ok_or_else(|| "usage: /snapshot create ".to_string())? + .to_string(), + }), + "get" => Ok(Command::SnapshotGet { + snapshot_id: tokens + .next() + .ok_or_else(|| "usage: /snapshot get ".to_string())? + .to_string(), + }), + "list" => Ok(Command::SnapshotList), + "replicate" => Ok(Command::SnapshotReplicate { + snapshot_id: tokens + .next() + .ok_or_else(|| { + "usage: /snapshot replicate " + .to_string() + })? + .to_string(), + request: tokens + .next() + .ok_or_else(|| { + "usage: /snapshot replicate " + .to_string() + })? + .to_string(), + }), + "delete" => Ok(Command::SnapshotDelete { + snapshot_id: tokens + .next() + .ok_or_else(|| "usage: /snapshot delete ".to_string())? 
+ .to_string(), + }), + other => Err(format!("unknown /snapshot action '{other}'")), + } + } + "/pool" => { + let action = tokens + .next() + .ok_or_else(|| "usage: /pool [args]".to_string())?; + match action { + "create" => Ok(Command::PoolCreate { + spec: tokens + .next() + .ok_or_else(|| "usage: /pool create ".to_string())? + .to_string(), + }), + "get" => Ok(Command::PoolGet { + pool_id: tokens + .next() + .ok_or_else(|| "usage: /pool get ".to_string())? + .to_string(), + }), + "scale" => Ok(Command::PoolScale { + pool_id: tokens + .next() + .ok_or_else(|| { + "usage: /pool scale ".to_string() + })? + .to_string(), + request: tokens + .next() + .ok_or_else(|| { + "usage: /pool scale ".to_string() + })? + .to_string(), + }), + other => Err(format!("unknown /pool action '{other}'")), + } + } + "/help" => Ok(Command::Help), + "/exit" | "/quit" => Ok(Command::Exit), + other => Err(format!("unknown command '{other}'")), + } + } fn help_text() -> &'static str { "Commands: @@ -1591,6 +2166,20 @@ fn run() -> Result<(), String> { /yolo run /team dry-run /team run + /sandbox create + /sandbox get + /sandbox list + /sandbox exec + /sandbox stop + /sandbox delete + /snapshot create + /snapshot get + /snapshot list + /snapshot replicate + /snapshot delete + /pool create + /pool get + /pool scale /help /exit @@ -2386,146 +2975,462 @@ Policy presets: fast | balanced | safe" } } - let client = VoidBoxRuntimeClient::new(base_url.clone(), 250); - let session_file = session_path(); - let mut session = load_session(&session_file); - let mut rl = Editor::::new() - .map_err(|e| format!("readline init failed: {e}"))?; - rl.set_helper(Some(VoidCtlHelper)); - for cmd in &session.recent_commands { - let _ = rl.add_history_entry(cmd.as_str()); - } - - println!("{}", load_logo()); - println!("voidctl connected to {base_url}"); - println!("{}", help_text()); - - loop { - let line = match rl.readline("voidctl> ") { - Ok(line) => line, - Err(ReadlineError::Interrupted) => { - println!("^C"); 
- continue; - } - Err(ReadlineError::Eof) => { - println!(); - break; - } - Err(e) => return Err(format!("stdin read failed: {e}")), - }; - let trimmed = line.trim().to_string(); - if !trimmed.is_empty() { - let _ = rl.add_history_entry(trimmed.as_str()); - session.recent_commands.push(trimmed.clone()); - if session.recent_commands.len() > 200 { - let keep_from = session.recent_commands.len().saturating_sub(200); - session.recent_commands = session.recent_commands[keep_from..].to_vec(); - } - } - - let parsed = match parse_command(&trimmed) { - Ok(cmd) => cmd, - Err(e) => { - println!("{e}"); - continue; - } - }; - - match parsed { - Command::Empty => continue, - Command::Help => println!("{}", help_text()), - Command::Exit => { - save_session(&session_file, &session)?; - println!("bye"); - break; - } - Command::Run { - spec, - run_id, - policy, - } => { - let run_id = run_id.unwrap_or_else(generate_run_id); - let policy = match parse_policy(policy) { - Ok(p) => p, - Err(e) => { - println!("{e}"); - continue; + if let CliCommand::Sandbox(command) = parsed_cli { + match command { + SandboxCommand::Create { spec, stdin } => { + let spec = load_bridge_body_input( + spec.as_deref(), + stdin, + "sandbox spec", + load_execution_spec_file, + )?; + match bridge_request(&bridge_base_url, "POST", "/v1/sandboxes", Some(&spec)) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_sandbox_summary(&response.json); } - }; - match client.start(StartRequest { - run_id: run_id.clone(), - workflow_spec: spec, - launch_context: None, - policy, - }) { - Ok(started) => { - session.last_selected_run = Some(run_id.clone()); - println!( - "started run_id={} handle={} attempt_id={} state={:?}", - run_id, started.handle, started.attempt_id, started.state - ); + Err(err) => return Err(err), + } + return Ok(()); + } + SandboxCommand::Get { sandbox_id } => { + let path = format!("/v1/sandboxes/{sandbox_id}"); + match 
bridge_request(&bridge_base_url, "GET", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_sandbox_summary(&response.json); } - Err(err) => print_contract_error(&err), + Err(err) => return Err(err), } + return Ok(()); } - Command::Status { run_id } => { - let handle = run_id_to_handle(&run_id); - match client.inspect(&handle) { - Ok(inspect) => { - session.last_selected_run = Some(run_id); - print_status_line(&inspect); - println!( - "started_at={} updated_at={}", - inspect.started_at, inspect.updated_at - ); - if let Some(reason) = inspect.terminal_reason { - println!("terminal_reason={reason}"); + SandboxCommand::List => { + match bridge_request(&bridge_base_url, "GET", "/v1/sandboxes", None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); } - if let Some(code) = inspect.exit_code { - println!("exit_code={code}"); + let sandboxes = response + .json + .get("sandboxes") + .and_then(|value| value.as_array()) + .cloned() + .unwrap_or_default(); + if sandboxes.is_empty() { + println!("no sandboxes"); + } else { + for sandbox in sandboxes { + print_sandbox_summary(&serde_json::json!({ "sandbox": sandbox })); + } } } - Err(err) => print_contract_error(&err), + Err(err) => return Err(err), } + return Ok(()); } - Command::Events { - run_id, - from_event_id, + SandboxCommand::Exec { + sandbox_id, + request, + stdin, } => { - let handle = run_id_to_handle(&run_id); - match client.subscribe_events(SubscribeEventsRequest { - handle, - from_event_id, - }) { - Ok(events) => { - for event in &events { - print_event(event); - } - if let Some(last) = events.last() { - session - .last_seen_event_id_by_run - .insert(run_id.clone(), last.event_id.clone()); + let body = load_bridge_body_input( + request.as_deref(), + stdin, + "sandbox exec request", + load_execution_spec_file, + )?; + let path = format!("/v1/sandboxes/{sandbox_id}/exec"); + match 
bridge_request(&bridge_base_url, "POST", &path, Some(&body)) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); } - session.last_selected_run = Some(run_id); + println!( + "kind={} exit_code={} stdout={} stderr={}", + response + .json + .get("kind") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("result") + .and_then(|value| value.get("exit_code")) + .and_then(|value| value.as_i64()) + .map(|value| value.to_string()) + .unwrap_or_else(|| "-".to_string()), + response + .json + .get("result") + .and_then(|value| value.get("stdout")) + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("result") + .and_then(|value| value.get("stderr")) + .and_then(|value| value.as_str()) + .unwrap_or("-"), + ); } - Err(err) => print_contract_error(&err), + Err(err) => return Err(err), } + return Ok(()); } - Command::Logs { run_id, follow } => { - let handle = run_id_to_handle(&run_id); - let from = session.last_seen_event_id_by_run.get(&run_id).cloned(); - match client.subscribe_events(SubscribeEventsRequest { - handle, - from_event_id: from, - }) { - Ok(events) => { - for event in &events { - if !event.payload.is_empty() { - print_event(event); - } + SandboxCommand::Stop { sandbox_id } => { + let path = format!("/v1/sandboxes/{sandbox_id}/stop"); + match bridge_request(&bridge_base_url, "POST", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); } - if let Some(last) = events.last() { - session + print_sandbox_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + SandboxCommand::Delete { sandbox_id } => { + let path = format!("/v1/sandboxes/{sandbox_id}"); + match bridge_request(&bridge_base_url, "DELETE", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + println!( + "kind={} sandbox_id={}", + response + .json + 
.get("kind") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("sandbox_id") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + ); + } + Err(err) => return Err(err), + } + return Ok(()); + } + } + } + + if let CliCommand::Snapshot(command) = parsed_cli { + match command { + SnapshotCommand::Create { spec, stdin } => { + let spec = load_bridge_body_input( + spec.as_deref(), + stdin, + "snapshot spec", + load_execution_spec_file, + )?; + match bridge_request(&bridge_base_url, "POST", "/v1/snapshots", Some(&spec)) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_snapshot_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + SnapshotCommand::Get { snapshot_id } => { + let path = format!("/v1/snapshots/{snapshot_id}"); + match bridge_request(&bridge_base_url, "GET", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_snapshot_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + SnapshotCommand::List => { + match bridge_request(&bridge_base_url, "GET", "/v1/snapshots", None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + let snapshots = response + .json + .get("snapshots") + .and_then(|value| value.as_array()) + .cloned() + .unwrap_or_default(); + if snapshots.is_empty() { + println!("no snapshots"); + } else { + for snapshot in snapshots { + print_snapshot_summary( + &serde_json::json!({ "snapshot": snapshot }), + ); + } + } + } + Err(err) => return Err(err), + } + return Ok(()); + } + SnapshotCommand::Replicate { + snapshot_id, + request, + stdin, + } => { + let body = load_bridge_body_input( + request.as_deref(), + stdin, + "snapshot replicate request", + load_execution_spec_file, + )?; + let path = format!("/v1/snapshots/{snapshot_id}/replicate"); + match 
bridge_request(&bridge_base_url, "POST", &path, Some(&body)) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_snapshot_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + SnapshotCommand::Delete { snapshot_id } => { + let path = format!("/v1/snapshots/{snapshot_id}"); + match bridge_request(&bridge_base_url, "DELETE", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + println!( + "kind={} snapshot_id={}", + response + .json + .get("kind") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("snapshot_id") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + ); + } + Err(err) => return Err(err), + } + return Ok(()); + } + } + } + + if let CliCommand::Pool(command) = parsed_cli { + match command { + PoolCommand::Create { spec, stdin } => { + let spec = load_bridge_body_input( + spec.as_deref(), + stdin, + "pool spec", + load_execution_spec_file, + )?; + match bridge_request(&bridge_base_url, "POST", "/v1/pools", Some(&spec)) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_pool_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + PoolCommand::Get { pool_id } => { + let path = format!("/v1/pools/{pool_id}"); + match bridge_request(&bridge_base_url, "GET", &path, None) { + Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_pool_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + PoolCommand::Scale { + pool_id, + request, + stdin, + } => { + let body = load_bridge_body_input( + request.as_deref(), + stdin, + "pool scale request", + load_execution_spec_file, + )?; + let path = format!("/v1/pools/{pool_id}/scale"); + match bridge_request(&bridge_base_url, "POST", &path, Some(&body)) { + 
Ok(response) => { + if response.status >= 400 { + return Err(bridge_error_message(&response)); + } + print_pool_summary(&response.json); + } + Err(err) => return Err(err), + } + return Ok(()); + } + } + } + + let client = VoidBoxRuntimeClient::new(base_url.clone(), 250); + let session_file = session_path(); + let mut session = load_session(&session_file); + let mut rl = Editor::::new() + .map_err(|e| format!("readline init failed: {e}"))?; + rl.set_helper(Some(VoidCtlHelper)); + for cmd in &session.recent_commands { + let _ = rl.add_history_entry(cmd.as_str()); + } + + println!("{}", load_logo()); + println!("voidctl connected to {base_url}"); + println!("{}", help_text()); + + loop { + let line = match rl.readline("voidctl> ") { + Ok(line) => line, + Err(ReadlineError::Interrupted) => { + println!("^C"); + continue; + } + Err(ReadlineError::Eof) => { + println!(); + break; + } + Err(e) => return Err(format!("stdin read failed: {e}")), + }; + let trimmed = line.trim().to_string(); + if !trimmed.is_empty() { + let _ = rl.add_history_entry(trimmed.as_str()); + session.recent_commands.push(trimmed.clone()); + if session.recent_commands.len() > 200 { + let keep_from = session.recent_commands.len().saturating_sub(200); + session.recent_commands = session.recent_commands[keep_from..].to_vec(); + } + } + + let parsed = match parse_command(&trimmed) { + Ok(cmd) => cmd, + Err(e) => { + println!("{e}"); + continue; + } + }; + + match parsed { + Command::Empty => continue, + Command::Help => println!("{}", help_text()), + Command::Exit => { + save_session(&session_file, &session)?; + println!("bye"); + break; + } + Command::Run { + spec, + run_id, + policy, + } => { + let run_id = run_id.unwrap_or_else(generate_run_id); + let policy = match parse_policy(policy) { + Ok(p) => p, + Err(e) => { + println!("{e}"); + continue; + } + }; + match client.start(StartRequest { + run_id: run_id.clone(), + workflow_spec: spec, + launch_context: None, + policy, + }) { + Ok(started) => { + 
session.last_selected_run = Some(run_id.clone()); + println!( + "started run_id={} handle={} attempt_id={} state={:?}", + run_id, started.handle, started.attempt_id, started.state + ); + } + Err(err) => print_contract_error(&err), + } + } + Command::Status { run_id } => { + let handle = run_id_to_handle(&run_id); + match client.inspect(&handle) { + Ok(inspect) => { + session.last_selected_run = Some(run_id); + print_status_line(&inspect); + println!( + "started_at={} updated_at={}", + inspect.started_at, inspect.updated_at + ); + if let Some(reason) = inspect.terminal_reason { + println!("terminal_reason={reason}"); + } + if let Some(code) = inspect.exit_code { + println!("exit_code={code}"); + } + } + Err(err) => print_contract_error(&err), + } + } + Command::Events { + run_id, + from_event_id, + } => { + let handle = run_id_to_handle(&run_id); + match client.subscribe_events(SubscribeEventsRequest { + handle, + from_event_id, + }) { + Ok(events) => { + for event in &events { + print_event(event); + } + if let Some(last) = events.last() { + session + .last_seen_event_id_by_run + .insert(run_id.clone(), last.event_id.clone()); + } + session.last_selected_run = Some(run_id); + } + Err(err) => print_contract_error(&err), + } + } + Command::Logs { run_id, follow } => { + let handle = run_id_to_handle(&run_id); + let from = session.last_seen_event_id_by_run.get(&run_id).cloned(); + match client.subscribe_events(SubscribeEventsRequest { + handle, + from_event_id: from, + }) { + Ok(events) => { + for event in &events { + if !event.payload.is_empty() { + print_event(event); + } + } + if let Some(last) = events.last() { + session .last_seen_event_id_by_run .insert(run_id.clone(), last.event_id.clone()); } @@ -2798,11 +3703,239 @@ Policy presets: fast | balanced | safe" Err(err) => println!("error: {err}"), } } - Command::TemplateGet { template_id } => { + Command::TemplateGet { template_id } => { + match bridge_request( + &bridge_base_url, + "GET", + 
&format!("/v1/templates/{template_id}"), + None, + ) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + let template = response + .json + .get("template") + .cloned() + .unwrap_or(serde_json::Value::Null); + let workflow_template = response + .json + .get("defaults") + .and_then(|value| value.get("workflow_template")) + .and_then(|value| value.as_str()) + .unwrap_or("-"); + println!( + "template_id={} execution_kind={} name={} workflow_template={}", + template + .get("id") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + template + .get("execution_kind") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + template + .get("name") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + workflow_template + ); + } + Err(err) => println!("error: {err}"), + } + } + Command::TemplateDryRun { + template_id, + inputs, + } => { + match load_json_input_file(&inputs).and_then(|body| { + bridge_request( + &bridge_base_url, + "POST", + &format!("/v1/templates/{template_id}/dry-run"), + Some(&body), + ) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_template_compilation_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::TemplateExecute { + template_id, + inputs, + } => { + match load_json_input_file(&inputs).and_then(|body| { + bridge_request( + &bridge_base_url, + "POST", + &format!("/v1/templates/{template_id}/execute"), + Some(&body), + ) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + println!( + "execution_id={} template_id={} execution_kind={} status={} goal={}", + response + .json + .get("execution_id") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("template") + .and_then(|value| value.get("id")) + .and_then(|value| value.as_str()) + 
.unwrap_or("-"), + response + .json + .get("template") + .and_then(|value| value.get("execution_kind")) + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("status") + .and_then(|value| value.as_str()) + .unwrap_or("unknown"), + response + .json + .get("goal") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + ); + } + Err(err) => println!("error: {err}"), + } + } + Command::BatchDryRun { spec, alias } => { + let path = if alias == "yolo" { + "/v1/yolo/dry-run" + } else { + "/v1/batch/dry-run" + }; + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request(&bridge_base_url, "POST", path, Some(&spec_text)) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_batch_compilation_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::BatchRun { spec, alias } => { + let path = if alias == "yolo" { + "/v1/yolo/run" + } else { + "/v1/batch/run" + }; + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request(&bridge_base_url, "POST", path, Some(&spec_text)) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_batch_run_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::TeamDryRun { spec } => { + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request( + &bridge_base_url, + "POST", + "/v1/teams/dry-run", + Some(&spec_text), + ) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_batch_compilation_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::TeamRun { spec } => { + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request(&bridge_base_url, "POST", "/v1/teams/run", Some(&spec_text)) + }) { + 
Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + println!( + "kind={} execution_id={} compiled_primitive={} status={} goal={}", + response + .json + .get("kind") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("execution_id") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("compiled_primitive") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + response + .json + .get("status") + .and_then(|value| value.as_str()) + .unwrap_or("unknown"), + response + .json + .get("goal") + .and_then(|value| value.as_str()) + .unwrap_or("-"), + ); + } + Err(err) => println!("error: {err}"), + } + } + Command::SandboxCreate { spec } => { + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request(&bridge_base_url, "POST", "/v1/sandboxes", Some(&spec_text)) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_sandbox_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::SandboxGet { sandbox_id } => { match bridge_request( &bridge_base_url, "GET", - &format!("/v1/templates/{template_id}"), + &format!("/v1/sandboxes/{sandbox_id}"), None, ) { Ok(response) => { @@ -2810,68 +3943,44 @@ Policy presets: fast | balanced | safe" println!("error: {}", bridge_error_message(&response)); continue; } - let template = response - .json - .get("template") - .cloned() - .unwrap_or(serde_json::Value::Null); - let workflow_template = response - .json - .get("defaults") - .and_then(|value| value.get("workflow_template")) - .and_then(|value| value.as_str()) - .unwrap_or("-"); - println!( - "template_id={} execution_kind={} name={} workflow_template={}", - template - .get("id") - .and_then(|value| value.as_str()) - .unwrap_or("-"), - template - .get("execution_kind") - .and_then(|value| value.as_str()) - .unwrap_or("-"), - 
template - .get("name") - .and_then(|value| value.as_str()) - .unwrap_or("-"), - workflow_template - ); + print_sandbox_summary(&response.json); } Err(err) => println!("error: {err}"), } } - Command::TemplateDryRun { - template_id, - inputs, - } => { - match load_json_input_file(&inputs).and_then(|body| { - bridge_request( - &bridge_base_url, - "POST", - &format!("/v1/templates/{template_id}/dry-run"), - Some(&body), - ) - }) { + Command::SandboxList => { + match bridge_request(&bridge_base_url, "GET", "/v1/sandboxes", None) { Ok(response) => { if response.status >= 400 { println!("error: {}", bridge_error_message(&response)); continue; } - print_template_compilation_summary(&response.json); + let sandboxes = response + .json + .get("sandboxes") + .and_then(|value| value.as_array()) + .cloned() + .unwrap_or_default(); + if sandboxes.is_empty() { + println!("no sandboxes"); + } else { + for sandbox in sandboxes { + print_sandbox_summary(&serde_json::json!({ "sandbox": sandbox })); + } + } } Err(err) => println!("error: {err}"), } } - Command::TemplateExecute { - template_id, - inputs, + Command::SandboxExec { + sandbox_id, + request, } => { - match load_json_input_file(&inputs).and_then(|body| { + match load_execution_spec_file(&request).and_then(|body| { bridge_request( &bridge_base_url, "POST", - &format!("/v1/templates/{template_id}/execute"), + &format!("/v1/sandboxes/{sandbox_id}/exec"), Some(&body), ) }) { @@ -2881,32 +3990,75 @@ Policy presets: fast | balanced | safe" continue; } println!( - "execution_id={} template_id={} execution_kind={} status={} goal={}", + "kind={} exit_code={} stdout={} stderr={}", response .json - .get("execution_id") + .get("kind") .and_then(|value| value.as_str()) .unwrap_or("-"), response .json - .get("template") - .and_then(|value| value.get("id")) + .get("result") + .and_then(|value| value.get("exit_code")) + .and_then(|value| value.as_i64()) + .map(|value| value.to_string()) + .unwrap_or_else(|| "-".to_string()), + response + 
.json + .get("result") + .and_then(|value| value.get("stdout")) .and_then(|value| value.as_str()) .unwrap_or("-"), response .json - .get("template") - .and_then(|value| value.get("execution_kind")) + .get("result") + .and_then(|value| value.get("stderr")) .and_then(|value| value.as_str()) .unwrap_or("-"), + ); + } + Err(err) => println!("error: {err}"), + } + } + Command::SandboxStop { sandbox_id } => { + match bridge_request( + &bridge_base_url, + "POST", + &format!("/v1/sandboxes/{sandbox_id}/stop"), + None, + ) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_sandbox_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::SandboxDelete { sandbox_id } => { + match bridge_request( + &bridge_base_url, + "DELETE", + &format!("/v1/sandboxes/{sandbox_id}"), + None, + ) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + println!( + "kind={} sandbox_id={}", response .json - .get("status") + .get("kind") .and_then(|value| value.as_str()) - .unwrap_or("unknown"), + .unwrap_or("-"), response .json - .get("goal") + .get("sandbox_id") .and_then(|value| value.as_str()) .unwrap_or("-"), ); @@ -2914,51 +4066,73 @@ Policy presets: fast | balanced | safe" Err(err) => println!("error: {err}"), } } - Command::BatchDryRun { spec, alias } => { - let path = if alias == "yolo" { - "/v1/yolo/dry-run" - } else { - "/v1/batch/dry-run" - }; + Command::SnapshotCreate { spec } => { match load_execution_spec_file(&spec).and_then(|spec_text| { - bridge_request(&bridge_base_url, "POST", path, Some(&spec_text)) + bridge_request(&bridge_base_url, "POST", "/v1/snapshots", Some(&spec_text)) }) { Ok(response) => { if response.status >= 400 { println!("error: {}", bridge_error_message(&response)); continue; } - print_batch_compilation_summary(&response.json); + print_snapshot_summary(&response.json); } 
Err(err) => println!("error: {err}"), } } - Command::BatchRun { spec, alias } => { - let path = if alias == "yolo" { - "/v1/yolo/run" - } else { - "/v1/batch/run" - }; - match load_execution_spec_file(&spec).and_then(|spec_text| { - bridge_request(&bridge_base_url, "POST", path, Some(&spec_text)) - }) { + Command::SnapshotGet { snapshot_id } => { + match bridge_request( + &bridge_base_url, + "GET", + &format!("/v1/snapshots/{snapshot_id}"), + None, + ) { Ok(response) => { if response.status >= 400 { println!("error: {}", bridge_error_message(&response)); continue; } - print_batch_run_summary(&response.json); + print_snapshot_summary(&response.json); } Err(err) => println!("error: {err}"), } } - Command::TeamDryRun { spec } => { - match load_execution_spec_file(&spec).and_then(|spec_text| { + Command::SnapshotList => { + match bridge_request(&bridge_base_url, "GET", "/v1/snapshots", None) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + let snapshots = response + .json + .get("snapshots") + .and_then(|value| value.as_array()) + .cloned() + .unwrap_or_default(); + if snapshots.is_empty() { + println!("no snapshots"); + } else { + for snapshot in snapshots { + print_snapshot_summary( + &serde_json::json!({ "snapshot": snapshot }), + ); + } + } + } + Err(err) => println!("error: {err}"), + } + } + Command::SnapshotReplicate { + snapshot_id, + request, + } => { + match load_execution_spec_file(&request).and_then(|body| { bridge_request( &bridge_base_url, "POST", - "/v1/teams/dry-run", - Some(&spec_text), + &format!("/v1/snapshots/{snapshot_id}/replicate"), + Some(&body), ) }) { Ok(response) => { @@ -2966,22 +4140,25 @@ Policy presets: fast | balanced | safe" println!("error: {}", bridge_error_message(&response)); continue; } - print_batch_compilation_summary(&response.json); + print_snapshot_summary(&response.json); } Err(err) => println!("error: {err}"), } } - Command::TeamRun { spec } => { 
- match load_execution_spec_file(&spec).and_then(|spec_text| { - bridge_request(&bridge_base_url, "POST", "/v1/teams/run", Some(&spec_text)) - }) { + Command::SnapshotDelete { snapshot_id } => { + match bridge_request( + &bridge_base_url, + "DELETE", + &format!("/v1/snapshots/{snapshot_id}"), + None, + ) { Ok(response) => { if response.status >= 400 { println!("error: {}", bridge_error_message(&response)); continue; } println!( - "kind={} execution_id={} compiled_primitive={} status={} goal={}", + "kind={} snapshot_id={}", response .json .get("kind") @@ -2989,22 +4166,7 @@ Policy presets: fast | balanced | safe" .unwrap_or("-"), response .json - .get("execution_id") - .and_then(|value| value.as_str()) - .unwrap_or("-"), - response - .json - .get("compiled_primitive") - .and_then(|value| value.as_str()) - .unwrap_or("-"), - response - .json - .get("status") - .and_then(|value| value.as_str()) - .unwrap_or("unknown"), - response - .json - .get("goal") + .get("snapshot_id") .and_then(|value| value.as_str()) .unwrap_or("-"), ); @@ -3012,6 +4174,56 @@ Policy presets: fast | balanced | safe" Err(err) => println!("error: {err}"), } } + Command::PoolCreate { spec } => { + match load_execution_spec_file(&spec).and_then(|spec_text| { + bridge_request(&bridge_base_url, "POST", "/v1/pools", Some(&spec_text)) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_pool_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::PoolGet { pool_id } => { + match bridge_request( + &bridge_base_url, + "GET", + &format!("/v1/pools/{pool_id}"), + None, + ) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_pool_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } + Command::PoolScale { pool_id, request } => { + match load_execution_spec_file(&request).and_then(|body| { 
+ bridge_request( + &bridge_base_url, + "POST", + &format!("/v1/pools/{pool_id}/scale"), + Some(&body), + ) + }) { + Ok(response) => { + if response.status >= 400 { + println!("error: {}", bridge_error_message(&response)); + continue; + } + print_pool_summary(&response.json); + } + Err(err) => println!("error: {err}"), + } + } } if let Err(e) = save_session(&session_file, &session) { @@ -3202,6 +4414,45 @@ mod tests { ); } + #[test] + fn parses_sandbox_create_from_stdin() { + let command = parse_cli_args(["sandbox", "create", "--stdin"]).unwrap(); + assert_eq!( + command, + CliCommand::Sandbox(SandboxCommand::Create { + spec: None, + stdin: true, + }) + ); + } + + #[test] + fn parses_snapshot_replicate_with_request_path() { + let command = + parse_cli_args(["snapshot", "replicate", "snap-1", "replicate.json"]).unwrap(); + assert_eq!( + command, + CliCommand::Snapshot(SnapshotCommand::Replicate { + snapshot_id: "snap-1".to_string(), + request: Some("replicate.json".to_string()), + stdin: false, + }) + ); + } + + #[test] + fn parses_pool_scale_from_stdin() { + let command = parse_cli_args(["pool", "scale", "pool-1", "--stdin"]).unwrap(); + assert_eq!( + command, + CliCommand::Pool(PoolCommand::Scale { + pool_id: "pool-1".to_string(), + request: None, + stdin: true, + }) + ); + } + #[test] fn rejects_extra_execution_watch_args() { let err = parse_cli_args(["execution", "watch", "exec-1", "extra"]).unwrap_err(); @@ -3262,6 +4513,35 @@ mod tests { assert!(completions.contains(&"run")); } + #[test] + fn completes_sandbox_subcommands() { + let completions = sandbox_subcommand_candidates(); + assert!(completions.contains(&"create")); + assert!(completions.contains(&"get")); + assert!(completions.contains(&"list")); + assert!(completions.contains(&"exec")); + assert!(completions.contains(&"stop")); + assert!(completions.contains(&"delete")); + } + + #[test] + fn completes_snapshot_subcommands() { + let completions = snapshot_subcommand_candidates(); + 
assert!(completions.contains(&"create")); + assert!(completions.contains(&"get")); + assert!(completions.contains(&"list")); + assert!(completions.contains(&"replicate")); + assert!(completions.contains(&"delete")); + } + + #[test] + fn completes_pool_subcommands() { + let completions = pool_subcommand_candidates(); + assert!(completions.contains(&"create")); + assert!(completions.contains(&"get")); + assert!(completions.contains(&"scale")); + } + #[test] fn top_level_help_mentions_execution_commands() { let help = top_level_help_text(); @@ -3286,6 +4566,12 @@ mod tests { assert!(help.contains("voidctl yolo run --stdin")); assert!(help.contains("voidctl team dry-run ")); assert!(help.contains("voidctl team run --stdin")); + assert!(help.contains("voidctl sandbox create [ | --stdin]")); + assert!(help.contains("voidctl sandbox list")); + assert!( + help.contains("voidctl snapshot replicate [ | --stdin]") + ); + assert!(help.contains("voidctl pool scale [ | --stdin]")); } #[test] diff --git a/src/bridge.rs b/src/bridge.rs index cf5d931..0e190a6 100644 --- a/src/bridge.rs +++ b/src/bridge.rs @@ -27,7 +27,11 @@ use crate::orchestration::{ WorkflowTemplateRef, }; #[cfg(feature = "serde")] -use crate::runtime::{MockRuntime, VoidBoxRuntimeClient}; +use crate::runtime::{ + MockRuntime, SandboxExecResult, SandboxRecord, SandboxState, VoidBoxRuntimeClient, +}; +#[cfg(feature = "serde")] +use crate::sandbox; #[cfg(feature = "serde")] use crate::team; #[cfg(feature = "serde")] @@ -218,6 +222,66 @@ struct TemplateRequestBody { inputs: Value, } +#[cfg(feature = "serde")] +#[derive(Debug, Deserialize)] +struct SandboxExecRequestBody { + kind: String, + command: Option>, + runtime: Option, + code: Option, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Serialize, Deserialize)] +struct StoredSandboxRecord { + sandbox: SandboxRecord, + spec: sandbox::SandboxSpec, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Deserialize)] +struct SnapshotReplicateRequestBody { + mode: String, + 
targets: Vec, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Serialize, Deserialize)] +struct SnapshotRecordView { + snapshot_id: String, + source_sandbox_id: String, + distribution: Option, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Serialize, Deserialize)] +struct StoredSnapshotRecord { + snapshot: SnapshotRecordView, + spec: sandbox::SnapshotSpec, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Deserialize)] +struct PoolScaleRequestBody { + warm: u32, + max: u32, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Serialize, Deserialize)] +struct PoolRecordView { + pool_id: String, + sandbox_spec: sandbox::SandboxPoolSandboxSpec, + capacity: sandbox::PoolCapacity, +} + +#[cfg(feature = "serde")] +#[derive(Debug, Serialize, Deserialize)] +struct StoredPoolRecord { + pool: PoolRecordView, + spec: sandbox::SandboxPoolSpec, +} + #[cfg(feature = "serde")] #[derive(Debug, Serialize)] struct ApiError { @@ -407,6 +471,26 @@ struct JsonHttpResponse { body: Vec, } +#[cfg(feature = "serde")] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum BridgeMethod { + Get, + Post, + Patch, + Delete, +} + +#[cfg(feature = "serde")] +fn parse_bridge_method(method: &str) -> Option { + match method { + "GET" => Some(BridgeMethod::Get), + "POST" => Some(BridgeMethod::Post), + "PATCH" => Some(BridgeMethod::Patch), + "DELETE" => Some(BridgeMethod::Delete), + _ => None, + } +} + #[cfg(feature = "serde")] fn handle_bridge_request( method: &str, @@ -415,47 +499,120 @@ fn handle_bridge_request( config: &BridgeConfig, client: Option<&VoidBoxRuntimeClient>, ) -> JsonHttpResponse { - if method == "GET" && path == "/v1/health" { + let raw_method = method; + let method = parse_bridge_method(method); + + if method == Some(BridgeMethod::Get) && path == "/v1/health" { return json_response(200, &json!({"status":"ok","service":"voidctl-bridge"})); } - if method == "POST" && path == "/v1/executions/dry-run" { + if method == Some(BridgeMethod::Post) && path == "/v1/executions/dry-run" { return 
handle_execution_dry_run(body); } - if method == "POST" && (path == "/v1/batch/dry-run" || path == "/v1/yolo/dry-run") { + if method == Some(BridgeMethod::Post) && path == "/v1/sandboxes" { + return handle_sandbox_create(body, config); + } + + if method == Some(BridgeMethod::Get) && path == "/v1/sandboxes" { + return handle_sandbox_list(config); + } + + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/sandboxes/") + && path.ends_with("/exec") + { + return handle_sandbox_exec(path, body, config); + } + + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/sandboxes/") + && path.ends_with("/stop") + { + return handle_sandbox_stop(path, config); + } + + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/sandboxes/") { + return handle_sandbox_get(path, config); + } + + if method == Some(BridgeMethod::Delete) && path.starts_with("/v1/sandboxes/") { + return handle_sandbox_delete(path, config); + } + + if method == Some(BridgeMethod::Post) && path == "/v1/snapshots" { + return handle_snapshot_create(body, config); + } + + if method == Some(BridgeMethod::Get) && path == "/v1/snapshots" { + return handle_snapshot_list(config); + } + + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/snapshots/") + && path.ends_with("/replicate") + { + return handle_snapshot_replicate(path, body, config); + } + + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/snapshots/") { + return handle_snapshot_get(path, config); + } + + if method == Some(BridgeMethod::Delete) && path.starts_with("/v1/snapshots/") { + return handle_snapshot_delete(path, config); + } + + if method == Some(BridgeMethod::Post) && path == "/v1/pools" { + return handle_pool_create(body, config); + } + + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/pools/") + && path.ends_with("/scale") + { + return handle_pool_scale(path, body, config); + } + + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/pools/") { + return 
handle_pool_get(path, config); + } + + if method == Some(BridgeMethod::Post) + && (path == "/v1/batch/dry-run" || path == "/v1/yolo/dry-run") + { return handle_batch_dry_run(body); } - if method == "POST" && path == "/v1/teams/dry-run" { + if method == Some(BridgeMethod::Post) && path == "/v1/teams/dry-run" { return handle_team_dry_run(body); } - if method == "POST" && path == "/v1/teams/run" { + if method == Some(BridgeMethod::Post) && path == "/v1/teams/run" { return handle_team_run(body, config); } - if method == "GET" && path.starts_with("/v1/team-runs/") { + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/team-runs/") { return handle_team_get(path, config); } - if method == "POST" && (path == "/v1/batch/run" || path == "/v1/yolo/run") { + if method == Some(BridgeMethod::Post) && (path == "/v1/batch/run" || path == "/v1/yolo/run") { return handle_batch_run(body, config); } - if method == "GET" && path.starts_with("/v1/batch-runs/") { + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/batch-runs/") { return handle_batch_get(path, config); } - if method == "GET" && path.starts_with("/v1/yolo-runs/") { + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/yolo-runs/") { return handle_batch_get(path, config); } - if method == "GET" && path == "/v1/templates" { + if method == Some(BridgeMethod::Get) && path == "/v1/templates" { return handle_template_list(); } - if method == "GET" + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/templates/") && !path.ends_with("/dry-run") && !path.ends_with("/execute") @@ -463,47 +620,68 @@ fn handle_bridge_request( return handle_template_get(path); } - if method == "POST" && path.starts_with("/v1/templates/") && path.ends_with("/dry-run") { + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/templates/") + && path.ends_with("/dry-run") + { return handle_template_dry_run(path, body); } - if method == "POST" && path.starts_with("/v1/templates/") && 
path.ends_with("/execute") { + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/templates/") + && path.ends_with("/execute") + { return handle_template_execute(path, body, config); } - if method == "POST" && path == "/v1/executions" { + if method == Some(BridgeMethod::Post) && path == "/v1/executions" { return handle_execution_create(body, config, client.is_some()); } - if method == "GET" && path == "/v1/executions" { + if method == Some(BridgeMethod::Get) && path == "/v1/executions" { return handle_execution_list(config); } - if method == "GET" && path.starts_with("/v1/executions/") && path.ends_with("/events") { + if method == Some(BridgeMethod::Get) + && path.starts_with("/v1/executions/") + && path.ends_with("/events") + { return handle_execution_events(path, config); } - if method == "GET" && path.starts_with("/v1/executions/") { + if method == Some(BridgeMethod::Get) && path.starts_with("/v1/executions/") { return handle_execution_get(path, config); } - if method == "PATCH" && path.starts_with("/v1/executions/") && path.ends_with("/policy") { + if method == Some(BridgeMethod::Patch) + && path.starts_with("/v1/executions/") + && path.ends_with("/policy") + { return handle_execution_policy_patch(path, body, config); } - if method == "POST" && path.starts_with("/v1/executions/") && path.ends_with("/pause") { + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/executions/") + && path.ends_with("/pause") + { return handle_execution_action(path, config, ExecutionAction::Pause); } - if method == "POST" && path.starts_with("/v1/executions/") && path.ends_with("/resume") { + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/executions/") + && path.ends_with("/resume") + { return handle_execution_action(path, config, ExecutionAction::Resume); } - if method == "POST" && path.starts_with("/v1/executions/") && path.ends_with("/cancel") { + if method == Some(BridgeMethod::Post) + && path.starts_with("/v1/executions/") + && 
path.ends_with("/cancel") + { return handle_execution_action(path, config, ExecutionAction::Cancel); } - if method == "POST" && path == "/v1/launch" { + if method == Some(BridgeMethod::Post) && path == "/v1/launch" { return handle_launch(body, config, client); } @@ -511,7 +689,7 @@ fn handle_bridge_request( 404, &ApiError { code: "NOT_FOUND", - message: format!("no route for {} {}", method, path), + message: format!("no route for {} {}", raw_method, path), retryable: false, }, ) @@ -753,42 +931,714 @@ fn handle_batch_run(body: &str, config: &BridgeConfig) -> JsonHttpResponse { } #[cfg(feature = "serde")] -fn handle_batch_get(path: &str, config: &BridgeConfig) -> JsonHttpResponse { - let execution_path = if let Some(execution_id) = path.strip_prefix("/v1/batch-runs/") { - format!("/v1/executions/{execution_id}") - } else if let Some(execution_id) = path.strip_prefix("/v1/yolo-runs/") { - format!("/v1/executions/{execution_id}") - } else { - String::new() - }; - if execution_path.is_empty() { +fn handle_batch_get(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let execution_path = if let Some(execution_id) = path.strip_prefix("/v1/batch-runs/") { + format!("/v1/executions/{execution_id}") + } else if let Some(execution_id) = path.strip_prefix("/v1/yolo-runs/") { + format!("/v1/executions/{execution_id}") + } else { + String::new() + }; + if execution_path.is_empty() { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + } + + let response = handle_execution_get(&execution_path, config); + let Ok(mut value) = serde_json::from_slice::(&response.body) else { + return response; + }; + if response.status == 200 { + let Some(object) = value.as_object_mut() else { + return response; + }; + let execution_id = object + .get("execution") + .and_then(|execution| execution.get("execution_id")) + .cloned() + .unwrap_or(Value::Null); + object.insert("kind".to_string(), 
Value::String("batch".to_string())); + object.insert("run_id".to_string(), execution_id); + } + json_response(response.status, &value) +} + +#[cfg(feature = "serde")] +fn handle_sandbox_create(body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let spec = match parse_submitted_sandbox_spec(body) { + Ok(spec) => spec, + Err(response) => return response, + }; + let sandbox_id = format!("sbx-{}", now_ms()); + let record = StoredSandboxRecord { + sandbox: SandboxRecord { + sandbox_id: sandbox_id.clone(), + state: SandboxState::Running, + restore_from_snapshot: spec + .snapshot + .as_ref() + .and_then(|snapshot| snapshot.restore_from.clone()), + }, + spec, + }; + if let Err(err) = save_sandbox_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); + } + json_response( + 200, + &json!({ + "kind": "sandbox", + "sandbox": record.sandbox + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_sandbox_list(config: &BridgeConfig) -> JsonHttpResponse { + match list_sandbox_records(config) { + Ok(records) => { + let mut sandboxes = Vec::new(); + for record in records { + sandboxes.push(record.sandbox); + } + json_response( + 200, + &json!({ "kind": "sandbox_list", "sandboxes": sandboxes }), + ) + } + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_sandbox_get(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(sandbox_id) = path.strip_prefix("/v1/sandboxes/") else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + }; + if sandbox_id.contains('/') { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + } + match load_sandbox_record(config, sandbox_id) 
{ + Ok(record) => json_response( + 200, + &json!({ + "kind": "sandbox", + "sandbox": record.sandbox + }), + ), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("sandbox '{sandbox_id}' not found"), + retryable: false, + }, + ), + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_sandbox_exec(path: &str, body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(sandbox_id) = path + .strip_prefix("/v1/sandboxes/") + .and_then(|rest| rest.strip_suffix("/exec")) + else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for POST {path}"), + retryable: false, + }, + ); + }; + let record = match load_sandbox_record(config, sandbox_id) { + Ok(record) => record, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("sandbox '{sandbox_id}' not found"), + retryable: false, + }, + ) + } + Err(err) => { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ) + } + }; + if record.sandbox.state != SandboxState::Running { + return json_response( + 400, + &ApiError { + code: "INVALID_STATE", + message: format!("sandbox '{sandbox_id}' is not running"), + retryable: false, + }, + ); + } + let request = match parse_sandbox_exec_request(body) { + Ok(request) => request, + Err(response) => return response, + }; + let result = match request.kind.as_str() { + "command" => SandboxExecResult { + exit_code: 0, + stdout: request.command.unwrap_or_default().join(" "), + stderr: String::new(), + }, + "code" => SandboxExecResult { + exit_code: 0, + stdout: format!( + "{}: {}", + request.runtime.unwrap_or_else(|| "unknown".to_string()), + request.code.unwrap_or_default() 
+ ), + stderr: String::new(), + }, + _ => unreachable!("validated sandbox exec kind"), + }; + json_response( + 200, + &json!({ + "kind": "sandbox_exec", + "sandbox_id": sandbox_id, + "result": result + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_sandbox_stop(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(sandbox_id) = path + .strip_prefix("/v1/sandboxes/") + .and_then(|rest| rest.strip_suffix("/stop")) + else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for POST {path}"), + retryable: false, + }, + ); + }; + let mut record = match load_sandbox_record(config, sandbox_id) { + Ok(record) => record, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("sandbox '{sandbox_id}' not found"), + retryable: false, + }, + ) + } + Err(err) => { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ) + } + }; + record.sandbox.state = SandboxState::Stopped; + if let Err(err) = save_sandbox_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); + } + json_response( + 200, + &json!({ + "kind": "sandbox", + "sandbox": record.sandbox + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_sandbox_delete(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(sandbox_id) = path.strip_prefix("/v1/sandboxes/") else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for DELETE {path}"), + retryable: false, + }, + ); + }; + if sandbox_id.contains('/') { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for DELETE {path}"), + retryable: false, + }, + ); + } + let sandbox_path = sandbox_file_path(config, sandbox_id); + match fs::remove_file(&sandbox_path) 
{ + Ok(()) => json_response( + 200, + &json!({ + "kind": "sandbox_deleted", + "sandbox_id": sandbox_id + }), + ), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("sandbox '{sandbox_id}' not found"), + retryable: false, + }, + ), + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_snapshot_create(body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let spec = match parse_submitted_snapshot_spec(body) { + Ok(spec) => spec, + Err(response) => return response, + }; + let snapshot_id = format!("snap-{}", now_ms()); + let record = StoredSnapshotRecord { + snapshot: SnapshotRecordView { + snapshot_id: snapshot_id.clone(), + source_sandbox_id: spec.source.sandbox_id.clone(), + distribution: spec.distribution.clone(), + }, + spec, + }; + if let Err(err) = save_snapshot_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); + } + json_response( + 200, + &json!({ + "kind": "snapshot", + "snapshot": record.snapshot + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_snapshot_list(config: &BridgeConfig) -> JsonHttpResponse { + match list_snapshot_records(config) { + Ok(records) => { + let mut snapshots = Vec::new(); + for record in records { + snapshots.push(record.snapshot); + } + json_response( + 200, + &json!({ + "kind": "snapshot_list", + "snapshots": snapshots + }), + ) + } + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_snapshot_get(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(snapshot_id) = path.strip_prefix("/v1/snapshots/") else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + 
message: format!("no route for GET {path}"), + retryable: false, + }, + ); + }; + if snapshot_id.contains('/') { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + } + match load_snapshot_record(config, snapshot_id) { + Ok(record) => json_response( + 200, + &json!({ + "kind": "snapshot", + "snapshot": record.snapshot + }), + ), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("snapshot '{snapshot_id}' not found"), + retryable: false, + }, + ), + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_snapshot_replicate(path: &str, body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(snapshot_id) = path + .strip_prefix("/v1/snapshots/") + .and_then(|rest| rest.strip_suffix("/replicate")) + else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for POST {path}"), + retryable: false, + }, + ); + }; + let mut record = match load_snapshot_record(config, snapshot_id) { + Ok(record) => record, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("snapshot '{snapshot_id}' not found"), + retryable: false, + }, + ) + } + Err(err) => { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ) + } + }; + let request = match parse_snapshot_replicate_request(body) { + Ok(request) => request, + Err(response) => return response, + }; + record.snapshot.distribution = Some(sandbox::SnapshotDistribution { + mode: request.mode.clone(), + targets: request.targets.clone(), + }); + record.spec.distribution = record.snapshot.distribution.clone(); + if let Err(err) = 
save_snapshot_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); + } + json_response( + 200, + &json!({ + "kind": "snapshot", + "snapshot": record.snapshot + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_snapshot_delete(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(snapshot_id) = path.strip_prefix("/v1/snapshots/") else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for DELETE {path}"), + retryable: false, + }, + ); + }; + if snapshot_id.contains('/') { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for DELETE {path}"), + retryable: false, + }, + ); + } + let snapshot_path = snapshot_file_path(config, snapshot_id); + match fs::remove_file(&snapshot_path) { + Ok(()) => json_response( + 200, + &json!({ + "kind": "snapshot_deleted", + "snapshot_id": snapshot_id + }), + ), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("snapshot '{snapshot_id}' not found"), + retryable: false, + }, + ), + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_pool_create(body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let spec = match parse_submitted_pool_spec(body) { + Ok(spec) => spec, + Err(response) => return response, + }; + let pool_id = format!("pool-{}", now_ms()); + let record = StoredPoolRecord { + pool: PoolRecordView { + pool_id: pool_id.clone(), + sandbox_spec: spec.sandbox_spec.clone(), + capacity: spec.capacity.clone(), + }, + spec, + }; + if let Err(err) = save_pool_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); + } + 
json_response( + 200, + &json!({ + "kind": "pool", + "pool": record.pool + }), + ) +} + +#[cfg(feature = "serde")] +fn handle_pool_get(path: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(pool_id) = path.strip_prefix("/v1/pools/") else { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + }; + if pool_id.contains('/') { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("no route for GET {path}"), + retryable: false, + }, + ); + } + match load_pool_record(config, pool_id) { + Ok(record) => json_response( + 200, + &json!({ + "kind": "pool", + "pool": record.pool + }), + ), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("pool '{pool_id}' not found"), + retryable: false, + }, + ), + Err(err) => json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ), + } +} + +#[cfg(feature = "serde")] +fn handle_pool_scale(path: &str, body: &str, config: &BridgeConfig) -> JsonHttpResponse { + let Some(pool_id) = path + .strip_prefix("/v1/pools/") + .and_then(|rest| rest.strip_suffix("/scale")) + else { return json_response( 404, &ApiError { code: "NOT_FOUND", - message: format!("no route for GET {path}"), + message: format!("no route for POST {path}"), retryable: false, }, ); - } - - let response = handle_execution_get(&execution_path, config); - let Ok(mut value) = serde_json::from_slice::(&response.body) else { - return response; }; - if response.status == 200 { - let Some(object) = value.as_object_mut() else { - return response; - }; - let execution_id = object - .get("execution") - .and_then(|execution| execution.get("execution_id")) - .cloned() - .unwrap_or(Value::Null); - object.insert("kind".to_string(), Value::String("batch".to_string())); - object.insert("run_id".to_string(), 
execution_id); + let request = match parse_pool_scale_request(body) { + Ok(request) => request, + Err(response) => return response, + }; + let mut record = match load_pool_record(config, pool_id) { + Ok(record) => record, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => { + return json_response( + 404, + &ApiError { + code: "NOT_FOUND", + message: format!("pool '{pool_id}' not found"), + retryable: false, + }, + ) + } + Err(err) => { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ) + } + }; + record.pool.capacity = sandbox::PoolCapacity { + warm: request.warm, + max: request.max, + }; + record.spec.capacity = record.pool.capacity.clone(); + if let Err(err) = save_pool_record(config, &record) { + return json_response( + 500, + &ApiError { + code: "INTERNAL_ERROR", + message: err.to_string(), + retryable: true, + }, + ); } - json_response(response.status, &value) + json_response( + 200, + &json!({ + "kind": "pool", + "pool": record.pool + }), + ) } #[cfg(feature = "serde")] @@ -831,6 +1681,347 @@ fn parse_submitted_team_spec(body: &str) -> Result Result { + let trimmed = body.trim_start(); + let parsed = if trimmed.starts_with('{') || trimmed.starts_with('[') { + sandbox::parse_sandbox_json(body) + } else { + sandbox::parse_sandbox_yaml(body) + }; + parsed.map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_SANDBOX", + message: err.to_string(), + retryable: false, + }, + ) + }) +} + +#[cfg(feature = "serde")] +fn parse_submitted_snapshot_spec(body: &str) -> Result { + let trimmed = body.trim_start(); + let parsed = if trimmed.starts_with('{') || trimmed.starts_with('[') { + sandbox::parse_snapshot_json(body) + } else { + sandbox::parse_snapshot_yaml(body) + }; + parsed.map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_SNAPSHOT", + message: err.to_string(), + retryable: false, + }, + ) + }) +} + +#[cfg(feature = "serde")] +fn 
parse_submitted_pool_spec(body: &str) -> Result { + let trimmed = body.trim_start(); + let parsed = if trimmed.starts_with('{') || trimmed.starts_with('[') { + sandbox::parse_pool_json(body) + } else { + sandbox::parse_pool_yaml(body) + }; + parsed.map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_POOL", + message: err.to_string(), + retryable: false, + }, + ) + }) +} + +#[cfg(feature = "serde")] +fn parse_sandbox_exec_request(body: &str) -> Result { + let request: SandboxExecRequestBody = serde_json::from_str(body).or_else(|json_err| { + serde_yaml::from_str(body).map_err(|yaml_err| { + format!("invalid sandbox exec body: JSON parse error: {json_err}; YAML parse error: {yaml_err}") + }) + }) + .map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_SANDBOX_EXEC", + message: err, + retryable: false, + }, + ) + })?; + + match request.kind.as_str() { + "command" => {} + "code" => {} + _ => { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_SANDBOX_EXEC", + message: "kind must be 'command' or 'code'".to_string(), + retryable: false, + }, + )) + } + } + if request.kind == "command" && request.command.as_ref().is_none_or(Vec::is_empty) { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_SANDBOX_EXEC", + message: "command exec requires a non-empty command".to_string(), + retryable: false, + }, + )); + } + if request.kind == "code" && request.code.as_deref().unwrap_or("").trim().is_empty() { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_SANDBOX_EXEC", + message: "code exec requires non-empty code".to_string(), + retryable: false, + }, + )); + } + Ok(request) +} + +#[cfg(feature = "serde")] +fn parse_snapshot_replicate_request( + body: &str, +) -> Result { + let request: SnapshotReplicateRequestBody = serde_json::from_str(body).or_else(|json_err| { + serde_yaml::from_str(body).map_err(|yaml_err| { + format!( + "invalid snapshot replicate body: JSON parse error: {json_err}; YAML parse 
error: {yaml_err}" + ) + }) + }) + .map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_SNAPSHOT_REPLICATION", + message: err, + retryable: false, + }, + ) + })?; + + match request.mode.as_str() { + "cached" | "copy" => {} + _ => { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_SNAPSHOT_REPLICATION", + message: "mode must be one of cached, copy".to_string(), + retryable: false, + }, + )) + } + } + if request.targets.is_empty() { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_SNAPSHOT_REPLICATION", + message: "targets must not be empty".to_string(), + retryable: false, + }, + )); + } + Ok(request) +} + +#[cfg(feature = "serde")] +fn parse_pool_scale_request(body: &str) -> Result { + let request: PoolScaleRequestBody = serde_json::from_str(body).or_else(|json_err| { + serde_yaml::from_str(body).map_err(|yaml_err| { + format!( + "invalid pool scale body: JSON parse error: {json_err}; YAML parse error: {yaml_err}" + ) + }) + }) + .map_err(|err| { + json_response( + 400, + &ApiError { + code: "INVALID_POOL_SCALE", + message: err, + retryable: false, + }, + ) + })?; + + if request.max == 0 { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_POOL_SCALE", + message: "max must be positive".to_string(), + retryable: false, + }, + )); + } + if request.warm > request.max { + return Err(json_response( + 400, + &ApiError { + code: "INVALID_POOL_SCALE", + message: "warm must not exceed max".to_string(), + retryable: false, + }, + )); + } + + Ok(request) +} + +#[cfg(feature = "serde")] +fn sandbox_dir(config: &BridgeConfig) -> PathBuf { + config.execution_dir.join("sandboxes") +} + +#[cfg(feature = "serde")] +fn sandbox_file_path(config: &BridgeConfig, sandbox_id: &str) -> PathBuf { + sandbox_dir(config).join(format!("{sandbox_id}.json")) +} + +#[cfg(feature = "serde")] +fn save_sandbox_record(config: &BridgeConfig, record: &StoredSandboxRecord) -> std::io::Result<()> { + let dir = sandbox_dir(config); + 
fs::create_dir_all(&dir)?; + let bytes = + serde_json::to_vec_pretty(record).map_err(|err| std::io::Error::other(err.to_string()))?; + fs::write(sandbox_file_path(config, &record.sandbox.sandbox_id), bytes) +} + +#[cfg(feature = "serde")] +fn load_sandbox_record( + config: &BridgeConfig, + sandbox_id: &str, +) -> std::io::Result { + let path = sandbox_file_path(config, sandbox_id); + let bytes = fs::read(path)?; + serde_json::from_slice(&bytes).map_err(|err| std::io::Error::other(err.to_string())) +} + +#[cfg(feature = "serde")] +fn list_sandbox_records(config: &BridgeConfig) -> std::io::Result> { + let dir = sandbox_dir(config); + let mut records = Vec::new(); + if !dir.exists() { + return Ok(records); + } + for entry in fs::read_dir(dir)? { + let entry = entry?; + let path = entry.path(); + if path.extension().and_then(|value| value.to_str()) != Some("json") { + continue; + } + let bytes = fs::read(path)?; + let record: StoredSandboxRecord = + serde_json::from_slice(&bytes).map_err(|err| std::io::Error::other(err.to_string()))?; + records.push(record); + } + records.sort_by(|left, right| left.sandbox.sandbox_id.cmp(&right.sandbox.sandbox_id)); + Ok(records) +} + +#[cfg(feature = "serde")] +fn snapshot_dir(config: &BridgeConfig) -> PathBuf { + config.execution_dir.join("snapshots") +} + +#[cfg(feature = "serde")] +fn snapshot_file_path(config: &BridgeConfig, snapshot_id: &str) -> PathBuf { + snapshot_dir(config).join(format!("{snapshot_id}.json")) +} + +#[cfg(feature = "serde")] +fn save_snapshot_record( + config: &BridgeConfig, + record: &StoredSnapshotRecord, +) -> std::io::Result<()> { + let dir = snapshot_dir(config); + fs::create_dir_all(&dir)?; + let bytes = + serde_json::to_vec_pretty(record).map_err(|err| std::io::Error::other(err.to_string()))?; + fs::write( + snapshot_file_path(config, &record.snapshot.snapshot_id), + bytes, + ) +} + +#[cfg(feature = "serde")] +fn load_snapshot_record( + config: &BridgeConfig, + snapshot_id: &str, +) -> std::io::Result { 
+ let path = snapshot_file_path(config, snapshot_id); + let bytes = fs::read(path)?; + serde_json::from_slice(&bytes).map_err(|err| std::io::Error::other(err.to_string())) +} + +#[cfg(feature = "serde")] +fn list_snapshot_records(config: &BridgeConfig) -> std::io::Result> { + let dir = snapshot_dir(config); + let mut records = Vec::new(); + if !dir.exists() { + return Ok(records); + } + for entry in fs::read_dir(dir)? { + let entry = entry?; + let path = entry.path(); + if path.extension().and_then(|value| value.to_str()) != Some("json") { + continue; + } + let bytes = fs::read(path)?; + let record: StoredSnapshotRecord = + serde_json::from_slice(&bytes).map_err(|err| std::io::Error::other(err.to_string()))?; + records.push(record); + } + records.sort_by(|left, right| left.snapshot.snapshot_id.cmp(&right.snapshot.snapshot_id)); + Ok(records) +} + +#[cfg(feature = "serde")] +fn pool_dir(config: &BridgeConfig) -> PathBuf { + config.execution_dir.join("pools") +} + +#[cfg(feature = "serde")] +fn pool_file_path(config: &BridgeConfig, pool_id: &str) -> PathBuf { + pool_dir(config).join(format!("{pool_id}.json")) +} + +#[cfg(feature = "serde")] +fn save_pool_record(config: &BridgeConfig, record: &StoredPoolRecord) -> std::io::Result<()> { + let dir = pool_dir(config); + fs::create_dir_all(&dir)?; + let bytes = + serde_json::to_vec_pretty(record).map_err(|err| std::io::Error::other(err.to_string()))?; + fs::write(pool_file_path(config, &record.pool.pool_id), bytes) +} + +#[cfg(feature = "serde")] +fn load_pool_record(config: &BridgeConfig, pool_id: &str) -> std::io::Result { + let path = pool_file_path(config, pool_id); + let bytes = fs::read(path)?; + serde_json::from_slice(&bytes).map_err(|err| std::io::Error::other(err.to_string())) +} + #[cfg(feature = "serde")] fn handle_template_list() -> JsonHttpResponse { match templates::list_templates() { @@ -1679,6 +2870,20 @@ fn process_pending_executions_once( Ok(()) } +#[cfg(all(test, feature = "serde"))] +mod tests { + use 
super::{parse_bridge_method, BridgeMethod}; + + #[test] + fn parse_bridge_method_normalizes_supported_methods() { + assert_eq!(parse_bridge_method("GET"), Some(BridgeMethod::Get)); + assert_eq!(parse_bridge_method("POST"), Some(BridgeMethod::Post)); + assert_eq!(parse_bridge_method("PATCH"), Some(BridgeMethod::Patch)); + assert_eq!(parse_bridge_method("DELETE"), Some(BridgeMethod::Delete)); + assert_eq!(parse_bridge_method("TRACE"), None); + } +} + #[cfg(feature = "serde")] pub fn process_pending_executions_once_for_test( global: GlobalConfig, diff --git a/src/lib.rs b/src/lib.rs index 0f93c21..4e812ce 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,6 +6,8 @@ pub mod contract; pub mod orchestration; pub mod runtime; #[cfg(feature = "serde")] +pub mod sandbox; +#[cfg(feature = "serde")] pub mod team; #[cfg(feature = "serde")] pub mod templates; diff --git a/src/runtime/mock.rs b/src/runtime/mock.rs index 5a6d55f..6dce494 100644 --- a/src/runtime/mock.rs +++ b/src/runtime/mock.rs @@ -6,6 +6,10 @@ use crate::contract::{ StartRequest, StartResult, StopRequest, StopResult, SubscribeEventsRequest, }; use crate::orchestration::{CandidateOutput, StructuredOutputResult}; +use crate::runtime::{ + SandboxCreateRequest, SandboxExecKind, SandboxExecRequest, SandboxExecResult, SandboxRecord, + SandboxRuntime, SandboxState, +}; #[derive(Debug, Clone)] struct RunRecord { @@ -68,6 +72,7 @@ impl RunRecord { pub struct MockRuntime { runs: Vec, seeded: BTreeMap, + sandboxes: BTreeMap, } impl MockRuntime { @@ -250,6 +255,105 @@ impl MockRuntime { } } +impl SandboxRuntime for MockRuntime { + fn create_sandbox( + &mut self, + request: SandboxCreateRequest, + ) -> Result { + let record = SandboxRecord { + sandbox_id: request.sandbox_id, + state: SandboxState::Running, + #[cfg(feature = "serde")] + restore_from_snapshot: request + .spec + .snapshot + .and_then(|snapshot| snapshot.restore_from), + #[cfg(not(feature = "serde"))] + restore_from_snapshot: None, + }; + self.sandboxes + 
.insert(record.sandbox_id.clone(), record.clone()); + Ok(record) + } + + fn inspect_sandbox(&self, sandbox_id: &str) -> Result { + let Some(record) = self.sandboxes.get(sandbox_id) else { + return Err(ContractError::new( + ContractErrorCode::NotFound, + format!("sandbox '{sandbox_id}' not found"), + false, + )); + }; + Ok(record.clone()) + } + + fn list_sandboxes(&self) -> Result, ContractError> { + let mut sandboxes = Vec::new(); + for sandbox in self.sandboxes.values() { + sandboxes.push(sandbox.clone()); + } + Ok(sandboxes) + } + + fn exec_sandbox( + &mut self, + request: SandboxExecRequest, + ) -> Result { + let Some(record) = self.sandboxes.get(&request.sandbox_id) else { + return Err(ContractError::new( + ContractErrorCode::NotFound, + format!("sandbox '{}' not found", request.sandbox_id), + false, + )); + }; + if record.state != SandboxState::Running { + return Err(ContractError::new( + ContractErrorCode::InternalError, + format!("sandbox '{}' is not running", request.sandbox_id), + false, + )); + } + + let stdout = match request.kind { + SandboxExecKind::Command => request.command.unwrap_or_default().join(" "), + SandboxExecKind::Code => { + let runtime = request.runtime.unwrap_or_else(|| "unknown".to_string()); + let code = request.code.unwrap_or_default(); + format!("{runtime}: {code}") + } + }; + + Ok(SandboxExecResult { + exit_code: 0, + stdout, + stderr: String::new(), + }) + } + + fn stop_sandbox(&mut self, sandbox_id: &str) -> Result { + let Some(record) = self.sandboxes.get_mut(sandbox_id) else { + return Err(ContractError::new( + ContractErrorCode::NotFound, + format!("sandbox '{sandbox_id}' not found"), + false, + )); + }; + record.state = SandboxState::Stopped; + Ok(record.clone()) + } + + fn delete_sandbox(&mut self, sandbox_id: &str) -> Result<(), ContractError> { + let Some(_) = self.sandboxes.remove(sandbox_id) else { + return Err(ContractError::new( + ContractErrorCode::NotFound, + format!("sandbox '{sandbox_id}' not found"), + false, + )); 
+ }; + Ok(()) + } +} + fn now_rfc3339_like() -> String { let secs = SystemTime::now() .duration_since(UNIX_EPOCH) diff --git a/src/runtime/mod.rs b/src/runtime/mod.rs index 3bda2e5..6817f4d 100644 --- a/src/runtime/mod.rs +++ b/src/runtime/mod.rs @@ -6,8 +6,12 @@ mod mock; #[cfg(feature = "serde")] mod void_box; -use crate::contract::{ContractError, RuntimeInspection, StartRequest, StartResult}; +use crate::contract::{ + ContractError, ContractErrorCode, RuntimeInspection, StartRequest, StartResult, +}; use crate::orchestration::{ExecutionRuntime, StructuredOutputResult}; +#[cfg(feature = "serde")] +use crate::sandbox::SandboxSpec; #[cfg(feature = "serde")] pub use delivery::{DeliveryCapability, MessageDeliveryAdapter, VoidBoxRunRef}; @@ -30,6 +34,76 @@ use std::path::Path; #[cfg(feature = "serde")] use std::time::{SystemTime, UNIX_EPOCH}; +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "snake_case"))] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SandboxState { + Running, + Stopped, +} + +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SandboxCreateRequest { + pub sandbox_id: String, + #[cfg(feature = "serde")] + pub spec: SandboxSpec, +} + +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SandboxRecord { + pub sandbox_id: String, + pub state: SandboxState, + pub restore_from_snapshot: Option, +} + +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(rename_all = "snake_case"))] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SandboxExecKind { + Command, + Code, +} + +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SandboxExecRequest { + pub sandbox_id: String, + pub 
kind: SandboxExecKind, + pub command: Option>, + pub runtime: Option, + pub code: Option, +} + +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SandboxExecResult { + pub exit_code: i32, + pub stdout: String, + pub stderr: String, +} + +pub trait SandboxRuntime { + fn create_sandbox( + &mut self, + request: SandboxCreateRequest, + ) -> Result; + + fn inspect_sandbox(&self, sandbox_id: &str) -> Result; + + fn list_sandboxes(&self) -> Result, ContractError>; + + fn exec_sandbox( + &mut self, + request: SandboxExecRequest, + ) -> Result; + + fn stop_sandbox(&mut self, sandbox_id: &str) -> Result; + + fn delete_sandbox(&mut self, sandbox_id: &str) -> Result<(), ContractError>; +} + #[cfg(feature = "serde")] pub trait ProviderLaunchAdapter { fn prepare_launch_request( @@ -249,6 +323,63 @@ impl ExecutionRuntime for VoidBoxRuntimeClient { } } +#[cfg(feature = "serde")] +impl SandboxRuntime for VoidBoxRuntimeClient { + fn create_sandbox( + &mut self, + _request: SandboxCreateRequest, + ) -> Result { + Err(ContractError::new( + ContractErrorCode::InternalError, + "sandbox api is not supported by the current void-box daemon", + false, + )) + } + + fn inspect_sandbox(&self, _sandbox_id: &str) -> Result { + Err(ContractError::new( + ContractErrorCode::InternalError, + "sandbox api is not supported by the current void-box daemon", + false, + )) + } + + fn list_sandboxes(&self) -> Result, ContractError> { + Err(ContractError::new( + ContractErrorCode::InternalError, + "sandbox api is not supported by the current void-box daemon", + false, + )) + } + + fn exec_sandbox( + &mut self, + _request: SandboxExecRequest, + ) -> Result { + Err(ContractError::new( + ContractErrorCode::InternalError, + "sandbox api is not supported by the current void-box daemon", + false, + )) + } + + fn stop_sandbox(&mut self, _sandbox_id: &str) -> Result { + Err(ContractError::new( + ContractErrorCode::InternalError, + 
"sandbox api is not supported by the current void-box daemon", + false, + )) + } + + fn delete_sandbox(&mut self, _sandbox_id: &str) -> Result<(), ContractError> { + Err(ContractError::new( + ContractErrorCode::InternalError, + "sandbox api is not supported by the current void-box daemon", + false, + )) + } +} + #[cfg(all(test, feature = "serde"))] mod tests { use super::{write_patched_workflow_spec, LaunchInjectionAdapter, ProviderLaunchAdapter}; diff --git a/src/sandbox/mod.rs b/src/sandbox/mod.rs new file mode 100644 index 0000000..0943e45 --- /dev/null +++ b/src/sandbox/mod.rs @@ -0,0 +1,11 @@ +//! Compute sandbox schema helpers. + +mod schema; + +pub use schema::{ + parse_pool_json, parse_pool_yaml, parse_sandbox_json, parse_sandbox_yaml, parse_snapshot_json, + parse_snapshot_yaml, PoolCapacity, SandboxIdentity, SandboxLifecycle, SandboxMetadata, + SandboxModuleMarker, SandboxMount, SandboxPoolSandboxSpec, SandboxPoolSpec, SandboxRuntime, + SandboxSnapshot, SandboxSpec, SandboxValidationError, SnapshotDistribution, SnapshotMetadata, + SnapshotSource, SnapshotSpec, +}; diff --git a/src/sandbox/schema.rs b/src/sandbox/schema.rs new file mode 100644 index 0000000..84d2aef --- /dev/null +++ b/src/sandbox/schema.rs @@ -0,0 +1,449 @@ +use std::collections::BTreeMap; +use std::error::Error; +use std::fmt::{Display, Formatter}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +/// Describes optional sandbox metadata. +pub struct SandboxMetadata { + #[cfg_attr(feature = "serde", serde(default))] + pub name: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub labels: BTreeMap, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes one host mount inside a sandbox. 
+pub struct SandboxMount { + pub host: String, + pub guest: String, + pub mode: String, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes the runtime configuration for one sandbox. +pub struct SandboxRuntime { + pub image: String, + pub cpus: u32, + pub memory_mb: u32, + #[cfg_attr(feature = "serde", serde(default))] + pub network: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub env: BTreeMap, + #[cfg_attr(feature = "serde", serde(default))] + pub mounts: Vec, + #[cfg_attr(feature = "serde", serde(default))] + pub ports: Vec, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +/// Describes snapshot restore inputs for a sandbox. +pub struct SandboxSnapshot { + #[cfg_attr(feature = "serde", serde(default))] + pub restore_from: Option, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +/// Describes lifecycle preferences for a sandbox. +pub struct SandboxLifecycle { + #[cfg_attr(feature = "serde", serde(default))] + pub auto_remove: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub detach: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub idle_timeout_secs: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub prewarm: Option, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +/// Describes identity and reuse preferences for a sandbox. +pub struct SandboxIdentity { + #[cfg_attr(feature = "serde", serde(default))] + pub reusable: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub pool: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub labels: BTreeMap, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes one high-level sandbox submission. 
+pub struct SandboxSpec { + pub api_version: String, + pub kind: String, + #[cfg_attr(feature = "serde", serde(default))] + pub metadata: Option, + pub runtime: SandboxRuntime, + #[cfg_attr(feature = "serde", serde(default))] + pub snapshot: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub lifecycle: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub identity: Option, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +/// Describes optional snapshot metadata. +pub struct SnapshotMetadata { + #[cfg_attr(feature = "serde", serde(default))] + pub name: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub labels: BTreeMap, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes the source sandbox for a snapshot. +pub struct SnapshotSource { + pub sandbox_id: String, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes snapshot distribution preferences. +pub struct SnapshotDistribution { + pub mode: String, + #[cfg_attr(feature = "serde", serde(default))] + pub targets: Vec, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes one snapshot resource. +pub struct SnapshotSpec { + pub api_version: String, + pub kind: String, + #[cfg_attr(feature = "serde", serde(default))] + pub metadata: Option, + pub source: SnapshotSource, + #[cfg_attr(feature = "serde", serde(default))] + pub distribution: Option, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes the reusable sandbox shape for a pool. 
+pub struct SandboxPoolSandboxSpec { + pub runtime: SandboxRuntime, + #[cfg_attr(feature = "serde", serde(default))] + pub snapshot: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub lifecycle: Option, + #[cfg_attr(feature = "serde", serde(default))] + pub identity: Option, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes desired warm and maximum pool capacity. +pub struct PoolCapacity { + pub warm: u32, + pub max: u32, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] +/// Describes one sandbox pool resource. +pub struct SandboxPoolSpec { + pub api_version: String, + pub kind: String, + #[cfg_attr(feature = "serde", serde(default))] + pub metadata: Option, + pub sandbox_spec: SandboxPoolSandboxSpec, + pub capacity: PoolCapacity, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +/// Reports validation failures for sandbox resources. +pub struct SandboxValidationError(String); + +impl SandboxValidationError { + /// Creates a new validation error. + pub fn new(message: impl Into) -> Self { + Self(message.into()) + } +} + +impl Display for SandboxValidationError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.0) + } +} + +impl Error for SandboxValidationError {} + +impl SandboxSpec { + /// Validates the parsed sandbox spec. + /// + /// # Errors + /// + /// Returns [`SandboxValidationError`] if the spec is invalid. 
+ pub fn validate(&self) -> Result<(), SandboxValidationError> { + validate_api_version(&self.api_version)?; + if self.kind != "sandbox" { + return Err(SandboxValidationError::new(format!( + "kind must be 'sandbox', got '{}'", + self.kind + ))); + } + validate_runtime(&self.runtime)?; + validate_snapshot(self.snapshot.as_ref())?; + validate_lifecycle(self.lifecycle.as_ref())?; + validate_identity(self.identity.as_ref())?; + Ok(()) + } +} + +impl SnapshotSpec { + /// Validates the parsed snapshot spec. + /// + /// # Errors + /// + /// Returns [`SandboxValidationError`] if the spec is invalid. + pub fn validate(&self) -> Result<(), SandboxValidationError> { + validate_api_version(&self.api_version)?; + if self.kind != "snapshot" { + return Err(SandboxValidationError::new(format!( + "kind must be 'snapshot', got '{}'", + self.kind + ))); + } + if self.source.sandbox_id.trim().is_empty() { + return Err(SandboxValidationError::new("source.sandbox_id is required")); + } + if let Some(distribution) = &self.distribution { + match distribution.mode.as_str() { + "cached" | "copy" => {} + _ => { + return Err(SandboxValidationError::new( + "distribution.mode must be one of cached, copy", + )) + } + } + if distribution.targets.is_empty() { + return Err(SandboxValidationError::new( + "distribution.targets must not be empty", + )); + } + for target in &distribution.targets { + if target.trim().is_empty() { + return Err(SandboxValidationError::new( + "distribution.targets entries must not be empty", + )); + } + } + } + Ok(()) + } +} + +impl SandboxPoolSpec { + /// Validates the parsed sandbox pool spec. + /// + /// # Errors + /// + /// Returns [`SandboxValidationError`] if the spec is invalid. 
+ pub fn validate(&self) -> Result<(), SandboxValidationError> { + validate_api_version(&self.api_version)?; + if self.kind != "sandbox_pool" { + return Err(SandboxValidationError::new(format!( + "kind must be 'sandbox_pool', got '{}'", + self.kind + ))); + } + validate_runtime(&self.sandbox_spec.runtime)?; + validate_snapshot(self.sandbox_spec.snapshot.as_ref())?; + validate_lifecycle(self.sandbox_spec.lifecycle.as_ref())?; + validate_identity(self.sandbox_spec.identity.as_ref())?; + if self.capacity.max == 0 { + return Err(SandboxValidationError::new("capacity.max must be positive")); + } + if self.capacity.warm > self.capacity.max { + return Err(SandboxValidationError::new( + "capacity.warm must not exceed capacity.max", + )); + } + Ok(()) + } +} + +fn validate_api_version(api_version: &str) -> Result<(), SandboxValidationError> { + if api_version.trim().is_empty() { + return Err(SandboxValidationError::new("api_version is required")); + } + Ok(()) +} + +fn validate_runtime(runtime: &SandboxRuntime) -> Result<(), SandboxValidationError> { + if runtime.image.trim().is_empty() { + return Err(SandboxValidationError::new("runtime.image is required")); + } + if runtime.cpus == 0 { + return Err(SandboxValidationError::new("runtime.cpus must be positive")); + } + if runtime.memory_mb == 0 { + return Err(SandboxValidationError::new( + "runtime.memory_mb must be positive", + )); + } + for mount in &runtime.mounts { + if mount.host.trim().is_empty() { + return Err(SandboxValidationError::new( + "runtime.mounts[].host is required", + )); + } + if mount.guest.trim().is_empty() { + return Err(SandboxValidationError::new( + "runtime.mounts[].guest is required", + )); + } + match mount.mode.as_str() { + "ro" | "rw" => {} + _ => { + return Err(SandboxValidationError::new( + "runtime.mounts[].mode must be one of ro, rw", + )) + } + } + } + for port in &runtime.ports { + if *port == 0 { + return Err(SandboxValidationError::new( + "runtime.ports entries must be positive", + )); + 
} + } + Ok(()) +} + +fn validate_snapshot(snapshot: Option<&SandboxSnapshot>) -> Result<(), SandboxValidationError> { + let Some(snapshot) = snapshot else { + return Ok(()); + }; + if let Some(restore_from) = &snapshot.restore_from { + if restore_from.trim().is_empty() { + return Err(SandboxValidationError::new( + "snapshot.restore_from must not be empty", + )); + } + } + Ok(()) +} + +fn validate_lifecycle(lifecycle: Option<&SandboxLifecycle>) -> Result<(), SandboxValidationError> { + let Some(lifecycle) = lifecycle else { + return Ok(()); + }; + if lifecycle.idle_timeout_secs == Some(0) { + return Err(SandboxValidationError::new( + "lifecycle.idle_timeout_secs must be positive", + )); + } + Ok(()) +} + +fn validate_identity(identity: Option<&SandboxIdentity>) -> Result<(), SandboxValidationError> { + let Some(identity) = identity else { + return Ok(()); + }; + if let Some(pool) = &identity.pool { + if pool.trim().is_empty() { + return Err(SandboxValidationError::new( + "identity.pool must not be empty", + )); + } + } + Ok(()) +} + +/// Parses a YAML sandbox spec. +/// +/// # Errors +/// +/// Returns [`SandboxValidationError`] if the YAML is invalid or the parsed spec +/// fails validation. +pub fn parse_sandbox_yaml(yaml: &str) -> Result { + let spec: SandboxSpec = serde_yaml::from_str(yaml) + .map_err(|err| SandboxValidationError::new(format!("invalid sandbox yaml: {err}")))?; + spec.validate()?; + Ok(spec) +} + +/// Parses a JSON sandbox spec. +/// +/// # Errors +/// +/// Returns [`SandboxValidationError`] if the JSON is invalid or the parsed spec +/// fails validation. +pub fn parse_sandbox_json(json: &str) -> Result { + let spec: SandboxSpec = serde_json::from_str(json) + .map_err(|err| SandboxValidationError::new(format!("invalid sandbox json: {err}")))?; + spec.validate()?; + Ok(spec) +} + +/// Parses a YAML snapshot spec. +/// +/// # Errors +/// +/// Returns [`SandboxValidationError`] if the YAML is invalid or the parsed spec +/// fails validation. 
+pub fn parse_snapshot_yaml(yaml: &str) -> Result<SnapshotSpec, SandboxValidationError> {
+    let spec: SnapshotSpec = serde_yaml::from_str(yaml)
+        .map_err(|err| SandboxValidationError::new(format!("invalid snapshot yaml: {err}")))?;
+    spec.validate()?;
+    Ok(spec)
+}
+
+/// Parses a JSON snapshot spec.
+///
+/// # Errors
+///
+/// Returns [`SandboxValidationError`] if the JSON is invalid or the parsed spec
+/// fails validation.
+pub fn parse_snapshot_json(json: &str) -> Result<SnapshotSpec, SandboxValidationError> {
+    let spec: SnapshotSpec = serde_json::from_str(json)
+        .map_err(|err| SandboxValidationError::new(format!("invalid snapshot json: {err}")))?;
+    spec.validate()?;
+    Ok(spec)
+}
+
+/// Parses a YAML sandbox pool spec.
+///
+/// # Errors
+///
+/// Returns [`SandboxValidationError`] if the YAML is invalid or the parsed spec
+/// fails validation.
+pub fn parse_pool_yaml(yaml: &str) -> Result<SandboxPoolSpec, SandboxValidationError> {
+    let spec: SandboxPoolSpec = serde_yaml::from_str(yaml)
+        .map_err(|err| SandboxValidationError::new(format!("invalid sandbox pool yaml: {err}")))?;
+    spec.validate()?;
+    Ok(spec)
+}
+
+/// Parses a JSON sandbox pool spec.
+///
+/// # Errors
+///
+/// Returns [`SandboxValidationError`] if the JSON is invalid or the parsed spec
+/// fails validation.
+pub fn parse_pool_json(json: &str) -> Result<SandboxPoolSpec, SandboxValidationError> {
+    let spec: SandboxPoolSpec = serde_json::from_str(json)
+        .map_err(|err| SandboxValidationError::new(format!("invalid sandbox pool json: {err}")))?;
+    spec.validate()?;
+    Ok(spec)
+}
+
+/// Marks the public sandbox module for compile-time tests.
+pub struct SandboxModuleMarker;
diff --git a/tests/sandbox_api.rs b/tests/sandbox_api.rs
new file mode 100644
index 0000000..f8833ba
--- /dev/null
+++ b/tests/sandbox_api.rs
@@ -0,0 +1,620 @@
+#![cfg(feature = "serde")]
+
+use void_control::runtime::{
+    MockRuntime, SandboxCreateRequest, SandboxExecKind, SandboxExecRequest, SandboxRuntime,
+    SandboxState,
+};
+use void_control::sandbox;
+
+fn temp_root(label: &str) -> std::path::PathBuf {
+    let nanos = std::time::SystemTime::now()
+        .duration_since(std::time::UNIX_EPOCH)
+        .expect("clock")
+        .as_nanos();
+    std::env::temp_dir().join(format!("void-control-sandbox-{label}-{nanos}"))
+}
+
+#[test]
+fn sandbox_api_module_is_exposed() {
+    let _ = std::any::type_name::<sandbox::SandboxModuleMarker>();
+}
+
+#[test]
+fn sandbox_schema_parses_sandbox_shape() {
+    let yaml = r#"
+api_version: v1
+kind: sandbox
+
+metadata:
+  name: python-benchmark-box
+  labels:
+    workload: benchmark
+    language: python
+
+runtime:
+  image: python:3.12-slim
+  cpus: 2
+  memory_mb: 2048
+  network: true
+  env:
+    FOO: bar
+  mounts:
+    - host: /data/fixtures
+      guest: /workspace/fixtures
+      mode: ro
+  ports:
+    - 3000
+
+snapshot:
+  restore_from: snapshot-transform-v1
+
+lifecycle:
+  auto_remove: false
+  detach: true
+  idle_timeout_secs: 900
+  prewarm: true
+
+identity:
+  reusable: true
+  pool: benchmark-python
+"#;
+
+    let sandbox = sandbox::parse_sandbox_yaml(yaml).expect("parse sandbox");
+
+    assert_eq!(sandbox.api_version, "v1");
+    assert_eq!(sandbox.kind, "sandbox");
+    assert_eq!(
+        sandbox.metadata.as_ref().and_then(|m| m.name.as_deref()),
+        Some("python-benchmark-box")
+    );
+    assert_eq!(sandbox.runtime.image, "python:3.12-slim");
+    assert_eq!(sandbox.runtime.cpus, 2);
+    assert_eq!(sandbox.runtime.memory_mb, 2048);
+    assert_eq!(sandbox.runtime.ports, vec![3000]);
+    assert_eq!(
+        sandbox
+            .snapshot
+            .as_ref()
+            .and_then(|s| s.restore_from.as_deref()),
+        Some("snapshot-transform-v1")
+    );
+    assert_eq!(
+        sandbox.identity.as_ref().and_then(|i| i.pool.as_deref()),
Some("benchmark-python") + ); +} + +#[test] +fn sandbox_schema_rejects_missing_runtime() { + let yaml = r#" +api_version: v1 +kind: sandbox +"#; + + let err = sandbox::parse_sandbox_yaml(yaml).expect_err("sandbox should fail"); + assert!( + err.to_string().contains("missing field `runtime`") + || err.to_string().contains("runtime is required"), + "unexpected error: {err}" + ); +} + +#[test] +fn sandbox_schema_rejects_invalid_lifecycle_values() { + let yaml = r#" +api_version: v1 +kind: sandbox + +runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 + +lifecycle: + idle_timeout_secs: 0 +"#; + + let err = sandbox::parse_sandbox_yaml(yaml).expect_err("sandbox should fail"); + assert!( + err.to_string() + .contains("lifecycle.idle_timeout_secs must be positive"), + "unexpected error: {err}" + ); +} + +#[test] +fn checked_in_compute_examples_parse() { + let sandbox_spec = std::fs::read_to_string("examples/compute/sandbox-python.yaml") + .expect("read sandbox example"); + let snapshot_spec = std::fs::read_to_string("examples/compute/snapshot-from-sandbox.yaml") + .expect("read snapshot example"); + let pool_spec = + std::fs::read_to_string("examples/compute/pool-python.yaml").expect("read pool example"); + + let sandbox = sandbox::parse_sandbox_yaml(&sandbox_spec).expect("parse sandbox example"); + let snapshot = sandbox::parse_snapshot_yaml(&snapshot_spec).expect("parse snapshot example"); + let pool = sandbox::parse_pool_yaml(&pool_spec).expect("parse pool example"); + + assert_eq!(sandbox.kind, "sandbox"); + assert_eq!(sandbox.runtime.image, "python:3.12-slim"); + assert_eq!(snapshot.kind, "snapshot"); + assert_eq!(snapshot.source.sandbox_id, "sbx-example"); + assert_eq!(pool.kind, "sandbox_pool"); + assert_eq!(pool.capacity.warm, 5); +} + +#[test] +fn snapshot_schema_rejects_invalid_distribution_mode() { + let json = r#" +{ + "api_version": "v1", + "kind": "snapshot", + "metadata": { + "name": "snapshot-transform-v1" + }, + "source": { + "sandbox_id": 
"sbx-123" + }, + "distribution": { + "mode": "broadcast", + "targets": ["node-a", "node-b"] + } +} +"#; + + let err = sandbox::parse_snapshot_json(json).expect_err("snapshot should fail"); + assert!( + err.to_string() + .contains("distribution.mode must be one of cached, copy"), + "unexpected error: {err}" + ); +} + +#[test] +fn pool_schema_parses_pool_shape() { + let json = r#" +{ + "api_version": "v1", + "kind": "sandbox_pool", + "metadata": { + "name": "benchmark-python-pool" + }, + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + }, + "snapshot": { + "restore_from": "snapshot-transform-v1" + }, + "lifecycle": { + "prewarm": true, + "idle_timeout_secs": 900 + }, + "identity": { + "reusable": true, + "pool": "benchmark-python" + } + }, + "capacity": { + "warm": 5, + "max": 20 + } +} +"#; + + let pool = sandbox::parse_pool_json(json).expect("parse pool"); + + assert_eq!(pool.kind, "sandbox_pool"); + assert_eq!(pool.capacity.warm, 5); + assert_eq!(pool.capacity.max, 20); + assert_eq!(pool.sandbox_spec.runtime.image, "python:3.12-slim"); + assert_eq!( + pool.sandbox_spec + .snapshot + .as_ref() + .and_then(|s| s.restore_from.as_deref()), + Some("snapshot-transform-v1") + ); +} + +#[test] +fn mock_runtime_manages_sandbox_lifecycle() { + let yaml = r#" +api_version: v1 +kind: sandbox + +runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 + +snapshot: + restore_from: snapshot-transform-v1 +"#; + let sandbox_spec = sandbox::parse_sandbox_yaml(yaml).expect("parse sandbox"); + let mut runtime = MockRuntime::new(); + + let created = runtime + .create_sandbox(SandboxCreateRequest { + sandbox_id: "sbx-lifecycle".to_string(), + spec: sandbox_spec, + }) + .expect("create sandbox"); + + assert_eq!(created.sandbox_id, "sbx-lifecycle"); + assert_eq!(created.state, SandboxState::Running); + assert_eq!( + created.restore_from_snapshot.as_deref(), + Some("snapshot-transform-v1") + ); + + let listed = 
runtime.list_sandboxes().expect("list sandboxes"); + assert_eq!(listed.len(), 1); + assert_eq!(listed[0].sandbox_id, "sbx-lifecycle"); + + let inspected = runtime + .inspect_sandbox("sbx-lifecycle") + .expect("inspect sandbox"); + assert_eq!(inspected.state, SandboxState::Running); + + let stopped = runtime.stop_sandbox("sbx-lifecycle").expect("stop sandbox"); + assert_eq!(stopped.state, SandboxState::Stopped); + + runtime + .delete_sandbox("sbx-lifecycle") + .expect("delete sandbox"); + let err = runtime + .inspect_sandbox("sbx-lifecycle") + .expect_err("sandbox should be deleted"); + assert_eq!( + err.code, + void_control::contract::ContractErrorCode::NotFound + ); +} + +#[test] +fn mock_runtime_executes_sandbox_requests() { + let yaml = r#" +api_version: v1 +kind: sandbox + +runtime: + image: python:3.12-slim + cpus: 2 + memory_mb: 2048 +"#; + let sandbox_spec = sandbox::parse_sandbox_yaml(yaml).expect("parse sandbox"); + let mut runtime = MockRuntime::new(); + runtime + .create_sandbox(SandboxCreateRequest { + sandbox_id: "sbx-exec".to_string(), + spec: sandbox_spec, + }) + .expect("create sandbox"); + + let result = runtime + .exec_sandbox(SandboxExecRequest { + sandbox_id: "sbx-exec".to_string(), + kind: SandboxExecKind::Command, + command: Some(vec!["python3".to_string(), "-V".to_string()]), + runtime: None, + code: None, + }) + .expect("exec sandbox"); + + assert_eq!(result.exit_code, 0); + assert!( + result.stdout.contains("python3 -V"), + "unexpected stdout: {result:?}" + ); +} + +#[test] +fn sandbox_bridge_create_list_get_stop_exec_and_delete_round_trip() { + let root = temp_root("bridge"); + let spec_dir = root.join("specs"); + let execution_dir = root.join("executions"); + let body = serde_json::json!({ + "api_version": "v1", + "kind": "sandbox", + "metadata": { + "name": "python-benchmark-box" + }, + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + }, + "snapshot": { + "restore_from": "snapshot-transform-v1" + } + }) + 
.to_string(); + + let created = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + "/v1/sandboxes", + Some(&body), + &spec_dir, + &execution_dir, + ) + .expect("create response"); + assert_eq!(created.status, 200); + assert_eq!(created.json["kind"], "sandbox"); + let sandbox_id = created.json["sandbox"]["sandbox_id"] + .as_str() + .expect("sandbox id") + .to_string(); + assert_eq!( + created.json["sandbox"]["restore_from_snapshot"], + "snapshot-transform-v1" + ); + + let listed = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + "/v1/sandboxes", + None, + &spec_dir, + &execution_dir, + ) + .expect("list response"); + assert_eq!(listed.status, 200); + assert_eq!(listed.json["kind"], "sandbox_list"); + assert_eq!( + listed.json["sandboxes"].as_array().map(|items| items.len()), + Some(1) + ); + + let fetched = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + &format!("/v1/sandboxes/{sandbox_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("get response"); + assert_eq!(fetched.status, 200); + assert_eq!(fetched.json["kind"], "sandbox"); + assert_eq!(fetched.json["sandbox"]["sandbox_id"], sandbox_id); + assert_eq!(fetched.json["sandbox"]["state"], "running"); + + let exec_body = serde_json::json!({ + "kind": "command", + "command": ["python3", "-V"] + }) + .to_string(); + let exec = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + &format!("/v1/sandboxes/{sandbox_id}/exec"), + Some(&exec_body), + &spec_dir, + &execution_dir, + ) + .expect("exec response"); + assert_eq!(exec.status, 200); + assert_eq!(exec.json["kind"], "sandbox_exec"); + assert_eq!(exec.json["result"]["exit_code"], 0); + assert_eq!(exec.json["result"]["stdout"], "python3 -V"); + + let stopped = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + &format!("/v1/sandboxes/{sandbox_id}/stop"), + Some("{}"), + &spec_dir, + &execution_dir, + ) + .expect("stop response"); + 
assert_eq!(stopped.status, 200); + assert_eq!(stopped.json["sandbox"]["state"], "stopped"); + + let deleted = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "DELETE", + &format!("/v1/sandboxes/{sandbox_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("delete response"); + assert_eq!(deleted.status, 200); + assert_eq!(deleted.json["kind"], "sandbox_deleted"); + assert_eq!(deleted.json["sandbox_id"], sandbox_id); + + let missing = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + &format!("/v1/sandboxes/{sandbox_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("missing response"); + assert_eq!(missing.status, 404); +} + +#[test] +fn snapshot_bridge_create_list_get_replicate_and_delete_round_trip() { + let root = temp_root("snapshot-bridge"); + let spec_dir = root.join("specs"); + let execution_dir = root.join("executions"); + let body = serde_json::json!({ + "api_version": "v1", + "kind": "snapshot", + "metadata": { + "name": "snapshot-transform-v1" + }, + "source": { + "sandbox_id": "sbx-123" + }, + "distribution": { + "mode": "cached", + "targets": ["node-a"] + } + }) + .to_string(); + + let created = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + "/v1/snapshots", + Some(&body), + &spec_dir, + &execution_dir, + ) + .expect("create response"); + assert_eq!(created.status, 200); + assert_eq!(created.json["kind"], "snapshot"); + let snapshot_id = created.json["snapshot"]["snapshot_id"] + .as_str() + .expect("snapshot id") + .to_string(); + assert_eq!(created.json["snapshot"]["source_sandbox_id"], "sbx-123"); + + let listed = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + "/v1/snapshots", + None, + &spec_dir, + &execution_dir, + ) + .expect("list response"); + assert_eq!(listed.status, 200); + assert_eq!(listed.json["kind"], "snapshot_list"); + assert_eq!( + listed.json["snapshots"].as_array().map(|items| items.len()), + Some(1) + ); + + let 
fetched = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + &format!("/v1/snapshots/{snapshot_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("get response"); + assert_eq!(fetched.status, 200); + assert_eq!(fetched.json["snapshot"]["snapshot_id"], snapshot_id); + + let replicate_body = serde_json::json!({ + "mode": "copy", + "targets": ["node-a", "node-b", "node-c"] + }) + .to_string(); + let replicated = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + &format!("/v1/snapshots/{snapshot_id}/replicate"), + Some(&replicate_body), + &spec_dir, + &execution_dir, + ) + .expect("replicate response"); + assert_eq!(replicated.status, 200); + assert_eq!(replicated.json["kind"], "snapshot"); + assert_eq!(replicated.json["snapshot"]["distribution"]["mode"], "copy"); + assert_eq!( + replicated.json["snapshot"]["distribution"]["targets"] + .as_array() + .map(|items| items.len()), + Some(3) + ); + + let deleted = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "DELETE", + &format!("/v1/snapshots/{snapshot_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("delete response"); + assert_eq!(deleted.status, 200); + assert_eq!(deleted.json["kind"], "snapshot_deleted"); + assert_eq!(deleted.json["snapshot_id"], snapshot_id); +} + +#[test] +fn pool_bridge_create_get_and_scale_round_trip() { + let root = temp_root("pool-bridge"); + let spec_dir = root.join("specs"); + let execution_dir = root.join("executions"); + let body = serde_json::json!({ + "api_version": "v1", + "kind": "sandbox_pool", + "metadata": { + "name": "benchmark-python-pool" + }, + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + }, + "snapshot": { + "restore_from": "snapshot-transform-v1" + }, + "lifecycle": { + "prewarm": true, + "idle_timeout_secs": 900 + }, + "identity": { + "reusable": true, + "pool": "benchmark-python" + } + }, + "capacity": { + "warm": 5, + "max": 20 + } + 
}) + .to_string(); + + let created = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + "/v1/pools", + Some(&body), + &spec_dir, + &execution_dir, + ) + .expect("create response"); + assert_eq!(created.status, 200); + assert_eq!(created.json["kind"], "pool"); + let pool_id = created.json["pool"]["pool_id"] + .as_str() + .expect("pool id") + .to_string(); + assert_eq!(created.json["pool"]["capacity"]["warm"], 5); + assert_eq!(created.json["pool"]["capacity"]["max"], 20); + + let fetched = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "GET", + &format!("/v1/pools/{pool_id}"), + None, + &spec_dir, + &execution_dir, + ) + .expect("get response"); + assert_eq!(fetched.status, 200); + assert_eq!(fetched.json["kind"], "pool"); + assert_eq!(fetched.json["pool"]["pool_id"], pool_id); + assert_eq!( + fetched.json["pool"]["sandbox_spec"]["snapshot"]["restore_from"], + "snapshot-transform-v1" + ); + + let scale_body = serde_json::json!({ + "warm": 8, + "max": 24 + }) + .to_string(); + let scaled = void_control::bridge::handle_bridge_request_with_dirs_for_test( + "POST", + &format!("/v1/pools/{pool_id}/scale"), + Some(&scale_body), + &spec_dir, + &execution_dir, + ) + .expect("scale response"); + assert_eq!(scaled.status, 200); + assert_eq!(scaled.json["kind"], "pool"); + assert_eq!(scaled.json["pool"]["capacity"]["warm"], 8); + assert_eq!(scaled.json["pool"]["capacity"]["max"], 24); +} diff --git a/tests/voidctl_execution_cli.rs b/tests/voidctl_execution_cli.rs index 61ea390..30678a1 100644 --- a/tests/voidctl_execution_cli.rs +++ b/tests/voidctl_execution_cli.rs @@ -1323,3 +1323,634 @@ fn yolo_run_alias_posts_to_yolo_route() { assert_eq!(requests.len(), 1); assert_eq!(requests[0].path, "/v1/yolo/run"); } + +#[test] +fn sandbox_create_from_stdin_posts_spec_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "sandbox", + "sandbox": { + 
"sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + } + }), + }]); + + let mut child = voidctl_command(&base_url) + .args(["sandbox", "create", "--stdin"]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let spec = r#"{"api_version":"v1","kind":"sandbox","runtime":{"image":"python:3.12-slim","cpus":2,"memory_mb":2048}}"#; + child + .stdin + .take() + .expect("stdin") + .write_all(spec.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("sandbox_id=sbx-1")); + assert!(stdout.contains("state=running")); + assert!(stdout.contains("image=python:3.12-slim")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "POST"); + assert_eq!(requests[0].path, "/v1/sandboxes"); + assert_eq!(requests[0].body, spec); +} + +#[test] +fn sandbox_list_prints_available_sandboxes() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "sandbox_list", + "sandboxes": [ + { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + }, + { + "sandbox_id": "sbx-2", + "state": "stopped", + "image": "node:22-slim", + "cpus": 1, + "memory_mb": 1024 + } + ] + }), + }]); + + let output = voidctl_command(&base_url) + .args(["sandbox", "list"]) + .output() + .expect("sandbox list output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("sandbox_id=sbx-1")); + assert!(stdout.contains("sandbox_id=sbx-2")); + 
assert!(stdout.contains("state=running")); + assert!(stdout.contains("state=stopped")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "GET"); + assert_eq!(requests[0].path, "/v1/sandboxes"); +} + +#[test] +fn sandbox_get_bridge_failure_returns_non_zero_and_prints_error() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "sandbox 'sbx-missing' not found" + }), + }]); + + let output = voidctl_command(&base_url) + .args(["sandbox", "get", "sbx-missing"]) + .output() + .expect("sandbox get output"); + server.join().expect("join fake bridge"); + + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).expect("stderr"); + assert!(stderr.contains("fatal: sandbox 'sbx-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "GET"); + assert_eq!(requests[0].path, "/v1/sandboxes/sbx-missing"); +} + +#[test] +fn snapshot_create_from_stdin_posts_spec_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "cached", + "targets": ["node-a", "node-b"] + } + } + }), + }]); + + let mut child = voidctl_command(&base_url) + .args(["snapshot", "create", "--stdin"]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let spec = r#"{"api_version":"v1","kind":"snapshot","source":{"sandbox_id":"sbx-1"},"distribution":{"mode":"cached","targets":["node-a","node-b"]}}"#; + child + .stdin + .take() + .expect("stdin") + .write_all(spec.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join 
fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("snapshot_id=snap-1")); + assert!(stdout.contains("source_sandbox_id=sbx-1")); + assert!(stdout.contains("mode=cached")); + assert!(stdout.contains("targets=node-a,node-b")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "POST"); + assert_eq!(requests[0].path, "/v1/snapshots"); + assert_eq!(requests[0].body, spec); +} + +#[test] +fn snapshot_replicate_from_stdin_posts_request_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "copy", + "targets": ["node-a", "node-c"] + } + } + }), + }]); + + let mut child = voidctl_command(&base_url) + .args(["snapshot", "replicate", "snap-1", "--stdin"]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let body = r#"{"mode":"copy","targets":["node-a","node-c"]}"#; + child + .stdin + .take() + .expect("stdin") + .write_all(body.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("snapshot_id=snap-1")); + assert!(stdout.contains("mode=copy")); + assert!(stdout.contains("targets=node-a,node-c")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "POST"); + assert_eq!(requests[0].path, "/v1/snapshots/snap-1/replicate"); + assert_eq!(requests[0].body, body); +} + +#[test] +fn pool_create_from_stdin_posts_spec_and_prints_summary() { + let (base_url, 
requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + } + }, + "capacity": { + "warm": 5, + "max": 20 + } + } + }), + }]); + + let mut child = voidctl_command(&base_url) + .args(["pool", "create", "--stdin"]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let spec = r#"{"api_version":"v1","kind":"sandbox_pool","sandbox_spec":{"runtime":{"image":"python:3.12-slim","cpus":2,"memory_mb":2048}},"capacity":{"warm":5,"max":20}}"#; + child + .stdin + .take() + .expect("stdin") + .write_all(spec.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("pool_id=pool-1")); + assert!(stdout.contains("warm=5")); + assert!(stdout.contains("max=20")); + assert!(stdout.contains("image=python:3.12-slim")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "POST"); + assert_eq!(requests[0].path, "/v1/pools"); + assert_eq!(requests[0].body, spec); +} + +#[test] +fn pool_scale_from_stdin_posts_request_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim" + } + }, + "capacity": { + "warm": 8, + "max": 24 + } + } + }), + }]); + + let mut child = voidctl_command(&base_url) + .args(["pool", "scale", "pool-1", "--stdin"]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let body = 
r#"{"warm":8,"max":24}"#; + child + .stdin + .take() + .expect("stdin") + .write_all(body.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("pool_id=pool-1")); + assert!(stdout.contains("warm=8")); + assert!(stdout.contains("max=24")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "POST"); + assert_eq!(requests[0].path, "/v1/pools/pool-1/scale"); + assert_eq!(requests[0].body, body); +} + +#[test] +fn snapshot_delete_bridge_failure_returns_non_zero_and_prints_error() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "snapshot 'snap-missing' not found" + }), + }]); + + let output = voidctl_command(&base_url) + .args(["snapshot", "delete", "snap-missing"]) + .output() + .expect("snapshot delete output"); + server.join().expect("join fake bridge"); + + assert!(!output.status.success()); + let stderr = String::from_utf8(output.stderr).expect("stderr"); + assert!(stderr.contains("fatal: snapshot 'snap-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "DELETE"); + assert_eq!(requests[0].path, "/v1/snapshots/snap-missing"); +} + +#[test] +fn pool_get_bridge_failure_returns_non_zero_and_prints_error() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "pool 'pool-missing' not found" + }), + }]); + + let output = voidctl_command(&base_url) + .args(["pool", "get", "pool-missing"]) + .output() + .expect("pool get output"); + server.join().expect("join fake bridge"); + + assert!(!output.status.success()); + let stderr = 
String::from_utf8(output.stderr).expect("stderr"); + assert!(stderr.contains("fatal: pool 'pool-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "GET"); + assert_eq!(requests[0].path, "/v1/pools/pool-missing"); +} + +#[test] +fn interactive_sandbox_create_posts_spec_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "sandbox", + "sandbox": { + "sandbox_id": "sbx-1", + "state": "running", + "image": "python:3.12-slim", + "cpus": 2, + "memory_mb": 2048 + } + }), + }]); + + let inputs_path = temp_inputs_path("sandbox.json"); + fs::write( + &inputs_path, + r#"{"api_version":"v1","kind":"sandbox","runtime":{"image":"python:3.12-slim","cpus":2,"memory_mb":2048}}"#, + ) + .expect("write inputs"); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let command = format!("/sandbox create {}\n/exit\n", inputs_path.display()); + child + .stdin + .take() + .expect("stdin") + .write_all(command.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("sandbox_id=sbx-1")); + assert!(stdout.contains("state=running")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].path, "/v1/sandboxes"); +} + +#[test] +fn interactive_sandbox_get_prints_error_for_bridge_failure() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "sandbox 'sbx-missing' not found" + }), + }]); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + 
.stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + child + .stdin + .take() + .expect("stdin") + .write_all(b"/sandbox get sbx-missing\n/exit\n") + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("error: sandbox 'sbx-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "GET"); + assert_eq!(requests[0].path, "/v1/sandboxes/sbx-missing"); +} + +#[test] +fn interactive_snapshot_replicate_posts_request_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "snapshot", + "snapshot": { + "snapshot_id": "snap-1", + "source_sandbox_id": "sbx-1", + "distribution": { + "mode": "copy", + "targets": ["node-a", "node-c"] + } + } + }), + }]); + + let inputs_path = temp_inputs_path("replicate.json"); + fs::write( + &inputs_path, + r#"{"mode":"copy","targets":["node-a","node-c"]}"#, + ) + .expect("write inputs"); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let command = format!( + "/snapshot replicate snap-1 {}\n/exit\n", + inputs_path.display() + ); + child + .stdin + .take() + .expect("stdin") + .write_all(command.as_bytes()) + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("snapshot_id=snap-1")); + assert!(stdout.contains("mode=copy")); + + let requests = requests.lock().expect("lock requests"); + 
assert_eq!(requests.len(), 1); + assert_eq!(requests[0].path, "/v1/snapshots/snap-1/replicate"); +} + +#[test] +fn interactive_snapshot_delete_prints_error_for_bridge_failure() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "snapshot 'snap-missing' not found" + }), + }]); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + child + .stdin + .take() + .expect("stdin") + .write_all(b"/snapshot delete snap-missing\n/exit\n") + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("error: snapshot 'snap-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "DELETE"); + assert_eq!(requests[0].path, "/v1/snapshots/snap-missing"); +} + +#[test] +fn interactive_pool_scale_posts_request_and_prints_summary() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 200, + body: json!({ + "kind": "pool", + "pool": { + "pool_id": "pool-1", + "sandbox_spec": { + "runtime": { + "image": "python:3.12-slim" + } + }, + "capacity": { + "warm": 8, + "max": 24 + } + } + }), + }]); + + let inputs_path = temp_inputs_path("scale.json"); + fs::write(&inputs_path, r#"{"warm":8,"max":24}"#).expect("write inputs"); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + let command = format!("/pool scale pool-1 {}\n/exit\n", inputs_path.display()); + child + .stdin + .take() + .expect("stdin") + .write_all(command.as_bytes()) + .expect("write stdin"); + + let output = 
child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("pool_id=pool-1")); + assert!(stdout.contains("warm=8")); + assert!(stdout.contains("max=24")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].path, "/v1/pools/pool-1/scale"); +} + +#[test] +fn interactive_pool_get_prints_error_for_bridge_failure() { + let (base_url, requests, server) = spawn_fake_bridge(vec![FakeResponse { + status: 404, + body: json!({ + "message": "pool 'pool-missing' not found" + }), + }]); + + let mut child = voidctl_command(&base_url) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("spawn voidctl"); + child + .stdin + .take() + .expect("stdin") + .write_all(b"/pool get pool-missing\n/exit\n") + .expect("write stdin"); + + let output = child.wait_with_output().expect("wait output"); + server.join().expect("join fake bridge"); + + assert!(output.status.success()); + let stdout = String::from_utf8(output.stdout).expect("stdout"); + assert!(stdout.contains("error: pool 'pool-missing' not found")); + + let requests = requests.lock().expect("lock requests"); + assert_eq!(requests.len(), 1); + assert_eq!(requests[0].method, "GET"); + assert_eq!(requests[0].path, "/v1/pools/pool-missing"); +}