diff --git a/README.md b/README.md index 5fbc9cb..8a9e007 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,14 @@ # Containarium -Run hundreds of isolated Linux development environments on a single VM. -Built with LXC, SSH jump hosts, and cloud-native automation. +Run hundreds of isolated development environments on a single VM. +Built with Incus (LXC/QEMU), SSH jump hosts, and cloud-native automation. ๐Ÿšซ No Kubernetes ๐Ÿšซ No VM-per-user -โœ… Just fast, cheap, isolated Linux environments +โœ… Fast, cheap, isolated Linux environments (Ubuntu, Rocky/RHEL 9) +โœ… Windows Server VMs with RDP access +โœ… GPU passthrough for ML/AI workloads +โœ… Multi-backend: GCP spot VMs + bare-metal GPU nodes ### Container Management ![Container Dashboard](docs/screenshots/dashboard-container.png) @@ -110,43 +113,44 @@ In real deployments, this reduces infrastructure costs by up to 90%. ## What It Does -Containarium is a container-based development environment platform that: +Containarium is a multi-backend development environment platform that: -- Hosts many isolated Linux environments on a single cloud VM +- Hosts many isolated environments on cloud VMs and bare-metal GPU nodes - Gives each user SSH access to their own container -- Uses LXC system containers (not Docker app containers) -- Keeps containers persistent, even across VM restarts -- Is managed via a single Go CLI + gRPC +- Supports multiple OS types: **Ubuntu 24.04**, **Rocky Linux 9** (dev), **RHEL 9** (production) +- Runs **Windows Server VMs** with RDP access via QEMU/KVM +- Provides **GPU passthrough** (NVIDIA) for ML/AI workloads +- Keeps containers persistent, even across VM restarts and spot preemptions +- Managed via CLI, REST API, gRPC, Web UI, and MCP (Claude Desktop) Each container behaves like a lightweight VM: -- Full Linux OS -- User accounts -- SSH access -- Can run Docker, build tools, ML workloads, etc. 
+- Full Linux OS (or Windows Server VM) +- User accounts with SSH access +- Docker/Podman support with nested containers +- GPU passthrough for CUDA workloads +- Pre-configured software stacks (Node.js, Python, Go, Rust, GPU/CUDA, Android, Docker, etc.) ## Architecture (High Level) ``` Developer Laptop | - | ssh (ProxyJump) + | ssh / https v -+-------------------+ -| SSH Jump Host | (no shell access) -+-------------------+ - | - v -+----------------------------------+ -| Cloud VM (Host) | -| | -| +---------+ +---------+ | -| | LXC #1 | | LXC #2 | ... | -| | user A | | user B | | -| +---------+ +---------+ | -| | -| ZFS-backed persistent storage | -+----------------------------------+ ++---------------------------+ +| Sentinel (e2-micro) | sshpiper + reverse proxy ++---------------------------+ + | | | + v v v + +---------+ +---------+ +---------+ + | GCP Spot| | GPU Node| | GPU Node| + | VM | | (tunnel)| | (tunnel)| + +---------+ +---------+ +---------+ + | LXC x19 | | LXC x5 | | LXC x4 | + | Caddy | | RTX 4090| | RTX 3090| + | ZFS | | ZFS | | ZFS | + +---------+ +---------+ +---------+ ``` ## Key Features @@ -154,34 +158,51 @@ Developer Laptop ๐Ÿš€ Fast Provisioning - Create a full Linux environment in seconds -- No VM boot, no OS installation per user +- Pre-configured stacks: Node.js, Python, Go, Rust, Data Science, DevOps, Docker, GPU/CUDA, Android, Full Stack +- Multi-OS: Ubuntu 24.04, Rocky Linux 9, RHEL 9 ๐Ÿ” Strong Isolation - Unprivileged LXC containers - Separate users, filesystems, and processes -- SSH jump host prevents direct host access +- SSH jump host with sshpiper (username-based routing) +- ClamAV + Trivy security scanning across all backends ๐Ÿ’พ Persistent Storage - Containers survive: - VM restarts - Spot/preemptible instance termination -- Backed by ZFS persistent disks +- Backed by ZFS persistent disks with compression + +๐Ÿ–ฅ๏ธ Multi-Backend Architecture + +- **GCP Spot VMs**: Cost-effective cloud backends +- **Bare-metal GPU nodes**: RTX 
3090, RTX 4090, etc. connected via tunnel +- **Windows Server VMs**: QEMU/KVM with RDP access +- Containers from all backends appear in a single unified dashboard +- See [docs/WINDOWS-VM-SETUP.md](docs/WINDOWS-VM-SETUP.md) for Windows VMs ๐Ÿ›ก๏ธ Sentinel HA (Spot Instance Recovery) -- One tiny always-on VM (e2-micro, free tier) monitors multiple spot VMs +- One tiny always-on VM (e2-micro, free tier) monitors all backends - Detects preemption in ~10s, serves maintenance page instantly -- Restarts spot VMs automatically โ€” ~85s total recovery (vs ~9min with MIG) -- Syncs TLS certificates for valid HTTPS during maintenance -- Scales horizontally: add more spot VMs behind the same sentinel +- Restarts spot VMs automatically โ€” ~85s total recovery +- Routes SSH via sshpiper, HTTP via reverse proxy - See [docs/SENTINEL-DESIGN.md](docs/SENTINEL-DESIGN.md) for the full design +๐Ÿ“Š Monitoring & Observability + +- Backend heartbeat dashboard (Grafana status-history panel) +- Per-container metrics: CPU, memory, disk, network +- VictoriaMetrics + Grafana auto-provisioned +- Custom alert rules with webhook notifications + โš™๏ธ Simple Management -- Single Go binary -- gRPC-based control plane +- Single Go binary for all components +- Web UI with real-time updates (SSE) +- REST API + gRPC + MCP (Claude Desktop integration) - Terraform for infrastructure provisioning ๐Ÿ’ฐ Cost Efficient @@ -207,22 +228,25 @@ This is not: - A Kubernetes cluster - An application container platform -- A web IDE It is intentionally simple. 
## Use Cases -๐Ÿ‘ฉโ€๐Ÿ’ป Shared developer environments +๐Ÿ‘ฉโ€๐Ÿ’ป Shared developer environments (Linux + Windows) ๐Ÿง‘โ€๐ŸŽ“ Education, bootcamps, workshops -๐Ÿงช AI / ML experimentation sandboxes +๐Ÿงช AI / ML experimentation with GPU passthrough + +๐Ÿ“ฑ Android app development (headless CI or Android Studio via VNC) ๐Ÿง‘โ€๐Ÿ’ผ Intern or contractor onboarding ๐Ÿข Cost-sensitive enterprises with SSH workflows +๐Ÿ”’ Security-scanned environments (ClamAV + Trivy) + ## How It's Different | Tool | What It Optimizes For | @@ -235,9 +259,9 @@ It is intentionally simple. ## Status -- Actively used internally -- Early-stage open source -- APIs and CLI may evolve +- Actively used in production (GCP + bare-metal GPU nodes) +- v0.15.0 โ€” multi-OS, multi-backend, security scanning +- APIs stable (protobuf-defined with gRPC-gateway) - Contributions and feedback welcome ## Getting Started @@ -246,7 +270,7 @@ It is intentionally simple. ### System Requirements -**Host System:** +**Host System (runs on Ubuntu, containers can be any supported OS):** - Ubuntu 24.04 LTS (Noble) or later - **Incus 6.19 or later** (required for Docker build support) - Ubuntu 24.04 default repos ship Incus 6.0.0 which has AppArmor bug ([CVE-2025-52881](https://ubuntu.com/security/CVE-2025-52881)) @@ -294,9 +318,20 @@ See [`terraform/gce/README.md`](terraform/gce/README.md) for configuration optio **After Installation:** 1. Start the daemon: `sudo systemctl start containarium` -2. Create containers: `sudo containarium create alice --ssh-key ~/.ssh/id_rsa.pub` +2. Create containers: + ```bash + # Ubuntu (default) + sudo containarium create alice --ssh-key ~/.ssh/id_ed25519.pub + + # Rocky Linux 9 (dev/test) + sudo containarium create bob --ssh-key ~/.ssh/id_ed25519.pub --os-type rocky9 + + # With GPU and software stack + sudo containarium create ml-dev --ssh-key ~/.ssh/id_ed25519.pub --gpu 0 --stack gpu + ``` 3. Connect via SSH: `ssh alice@container-ip` -4. 
Use REST API: `http://localhost:8080/swagger-ui/` +4. Web UI: `http://your-server:8080/webui/` +5. REST API: `http://your-server:8080/swagger-ui/` ๐Ÿ‘‰ See `docs/` for detailed setup instructions. @@ -396,8 +431,10 @@ curl -X POST \ "memory": "8GB", "disk": "100GB" }, - "image": "ubuntu:24.04", - "enable_docker": true + "osType": "OS_TYPE_UBUNTU_2404", + "enablePodman": true, + "stack": "nodejs", + "async": true }' \ http://localhost:8080/v1/containers @@ -463,18 +500,58 @@ Features: ### Available REST Endpoints +**Containers:** | Method | Endpoint | Description | |--------|----------|-------------| | `POST` | `/v1/containers` | Create a new container | -| `GET` | `/v1/containers` | List all containers | +| `GET` | `/v1/containers` | List all containers (all backends) | | `GET` | `/v1/containers/{username}` | Get container details | | `DELETE` | `/v1/containers/{username}` | Delete a container | | `POST` | `/v1/containers/{username}/start` | Start a container | | `POST` | `/v1/containers/{username}/stop` | Stop a container | -| `POST` | `/v1/containers/{username}/ssh-keys` | Add SSH key | -| `DELETE` | `/v1/containers/{username}/ssh-keys/{key}` | Remove SSH key | -| `GET` | `/v1/metrics` | Get container metrics | -| `GET` | `/v1/system/info` | Get system information | +| `PUT` | `/v1/containers/{username}/resize` | Resize CPU/memory/disk | +| `POST` | `/v1/containers/{username}/install-stack` | Install software stack | +| `POST` | `/v1/containers/{username}/cleanup-disk` | Free disk space | + +**Collaborators:** +| Method | Endpoint | Description | +|--------|----------|-------------| +| `POST` | `/v1/containers/{username}/collaborators` | Add collaborator | +| `DELETE` | `/v1/containers/{username}/collaborators/{collaborator}` | Remove collaborator | +| `GET` | `/v1/containers/{username}/collaborators` | List collaborators | + +**System & Monitoring:** +| Method | Endpoint | Description | +|--------|----------|-------------| +| `GET` | `/v1/system/info` | System 
info (all backends) | +| `GET` | `/v1/system/monitoring` | Grafana/VictoriaMetrics URLs | +| `GET` | `/v1/metrics` | Container metrics | +| `GET` | `/v1/backends` | List backends with health status | + +**Security:** +| Method | Endpoint | Description | +|--------|----------|-------------| +| `GET` | `/v1/security/clamav-summary` | ClamAV scan summary | +| `GET` | `/v1/security/clamav-reports` | Scan reports | +| `POST` | `/v1/security/clamav-scan` | Trigger security scan | + +**Alerts:** +| Method | Endpoint | Description | +|--------|----------|-------------| +| `POST` | `/v1/alerts` | Create alert rule | +| `GET` | `/v1/alerts` | List alert rules | +| `PUT` | `/v1/system/alerting` | Update webhook config | + +## Documentation + +| Guide | Description | +|-------|-------------| +| [SENTINEL-DESIGN.md](docs/SENTINEL-DESIGN.md) | Sentinel HA architecture | +| [WINDOWS-VM-SETUP.md](docs/WINDOWS-VM-SETUP.md) | Windows Server VM with RDP access | +| [ANDROID-DEV-SETUP.md](docs/ANDROID-DEV-SETUP.md) | Android development environment (headless + GUI) | +| [CROSS-PEER-FILE-TRANSFER.md](docs/CROSS-PEER-FILE-TRANSFER.md) | Transfer large files between peer containers | +| [ALERTING-SETUP.md](docs/ALERTING-SETUP.md) | Alert rules, webhooks (Zulip/Slack), troubleshooting | +| [MCP-INTEGRATION.md](docs/MCP-INTEGRATION.md) | Claude Desktop MCP integration | ## Philosophy @@ -721,7 +798,7 @@ Flow: 4. Host sshd: Authenticate sentinel upstream key 5. containarium-shell: sudo incus exec alice-container -- su -l alice 6. 
User: Interactive shell in container - (If auth fails 3x โ†’ sshpiper bans client IP for 1h) + (If auth fails 20x โ†’ sshpiper bans client IP for 1h) ``` **Method 2: ProxyJump with container IP** diff --git a/api/swagger/containarium.swagger.json b/api/swagger/containarium.swagger.json index c786054..1b71939 100644 --- a/api/swagger/containarium.swagger.json +++ b/api/swagger/containarium.swagger.json @@ -2121,6 +2121,48 @@ ] } }, + "/v1/pentest/findings/{findingId}/remediate": { + "post": { + "summary": "Remediate a pentest finding", + "description": "Upgrades the OS package that contains the vulnerable binary to the latest version. Only works for Trivy container findings where the binary belongs to an installed package.", + "operationId": "PentestService_RemediatePentestFinding", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/RemediatePentestFindingResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/Status" + } + } + }, + "parameters": [ + { + "name": "findingId", + "description": "Finding ID to remediate", + "in": "path", + "required": true, + "type": "string", + "format": "int64" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/RemediatePentestFindingBody" + } + } + ], + "tags": [ + "Pentest" + ] + } + }, "/v1/pentest/findings/{findingId}/suppress": { "post": { "summary": "Suppress a pentest finding", @@ -3093,6 +3135,16 @@ }, "title": "ACLRule represents a single firewall rule" }, + "AccessType": { + "type": "string", + "enum": [ + "ACCESS_TYPE_SSH", + "ACCESS_TYPE_RDP" + ], + "default": "ACCESS_TYPE_SSH", + "description": "- ACCESS_TYPE_SSH: SSH access (default for Linux containers)\n - ACCESS_TYPE_RDP: RDP access (for Windows VMs, via Guacamole browser-based client)", + "title": "AccessType indicates how to connect to an instance" + }, "AddCollaboratorBody": { "type": 
"object", "properties": { @@ -3854,6 +3906,14 @@ "osType": { "$ref": "#/definitions/OSType", "title": "Operating system type of the container" + }, + "accessType": { + "$ref": "#/definitions/AccessType", + "title": "How to connect to this instance (SSH for Linux, RDP for Windows)" + }, + "rdpAddress": { + "type": "string", + "title": "RDP connection address (e.g., \"10.100.0.50:3389\") โ€” populated for Windows VMs" } }, "title": "Container represents a complete container instance" @@ -4050,7 +4110,11 @@ }, "sshCommand": { "type": "string", - "title": "SSH connection string for the user" + "title": "SSH connection string for the user (Linux containers)" + }, + "rdpAddress": { + "type": "string", + "title": "RDP connection address (Windows VMs)" } }, "title": "CreateContainerResponse is the response from creating a container" @@ -5236,10 +5300,11 @@ "OS_TYPE_UNSPECIFIED", "OS_TYPE_UBUNTU_2404", "OS_TYPE_ROCKY_9", - "OS_TYPE_RHEL_9" + "OS_TYPE_RHEL_9", + "OS_TYPE_WINDOWS_2022" ], "default": "OS_TYPE_UNSPECIFIED", - "description": "- OS_TYPE_UNSPECIFIED: Unspecified OS type (defaults to Ubuntu 24.04)\n - OS_TYPE_UBUNTU_2404: Ubuntu 24.04 LTS\n - OS_TYPE_ROCKY_9: Rocky Linux 9 (RHEL 9 rebuild, for dev/test)\n - OS_TYPE_RHEL_9: Red Hat Enterprise Linux 9 (for production, requires subscription)", + "description": "- OS_TYPE_UNSPECIFIED: Unspecified OS type (defaults to Ubuntu 24.04)\n - OS_TYPE_UBUNTU_2404: Ubuntu 24.04 LTS\n - OS_TYPE_ROCKY_9: Rocky Linux 9 (RHEL 9 rebuild, for dev/test)\n - OS_TYPE_RHEL_9: Red Hat Enterprise Linux 9 (for production, requires subscription)\n - OS_TYPE_WINDOWS_2022: Windows Server 2022 (runs as QEMU/KVM VM, RDP access)", "title": "OSType represents the operating system type for a container" }, "PassthroughRoute": { @@ -5579,6 +5644,35 @@ } } }, + "RemediatePentestFindingBody": { + "type": "object", + "description": "RemediatePentestFindingRequest requests remediation of a Trivy finding by\nupgrading the OS package that contains the 
vulnerable binary." + }, + "RemediatePentestFindingResponse": { + "type": "object", + "properties": { + "success": { + "type": "boolean" + }, + "message": { + "type": "string", + "title": "Human-readable result (e.g., \"docker-ce upgraded from 27.0.1 to 27.0.3\")" + }, + "packageName": { + "type": "string", + "title": "Package that was upgraded" + }, + "oldVersion": { + "type": "string", + "title": "Version before upgrade" + }, + "newVersion": { + "type": "string", + "title": "Version after upgrade" + } + }, + "description": "RemediatePentestFindingResponse returns the result of the remediation attempt." + }, "RemoveCollaboratorResponse": { "type": "object", "properties": { diff --git a/docs/ALERTING-SETUP.md b/docs/ALERTING-SETUP.md new file mode 100644 index 0000000..954e194 --- /dev/null +++ b/docs/ALERTING-SETUP.md @@ -0,0 +1,215 @@ +# Alerting Setup Guide + +Containarium uses Prometheus-style alerting with vmalert + Alertmanager, relayed through the daemon to webhook destinations (Slack, Zulip, etc.). + +## Architecture + +``` +VictoriaMetrics โ”€โ”€โ†’ vmalert โ”€โ”€โ†’ Alertmanager โ”€โ”€โ†’ daemon /internal/alert-relay โ”€โ”€โ†’ Webhook (Slack/Zulip/...) + (metrics) (rules) (grouping/ (HMAC-signed, (final destination) + throttling) records deliveries to DB) +``` + +**Why the daemon relay?** It records every webhook attempt to PostgreSQL (`webhook_deliveries` table), masks credentials in API responses, and lets us swap webhook destinations without restarting Alertmanager. + +## Built-in Alert Rules + +Located in vmalert config (auto-generated by daemon at startup). 
Current groups: + +### `container_alerts` +| Alert | Severity | Trigger | +|-------|----------|---------| +| `ContainerHighMemory` | warning | Container uses >3.5GB memory for 5+ minutes | +| `ContainerStopped` | info | Any user container stopped for 10+ minutes | +| `ContainerHighCPU` | warning | Container CPU >90% for 10+ minutes | + +### `pentest_alerts` +| Alert | Severity | Trigger | +|-------|----------|---------| +| `PentestCriticalFindings` | critical | Any open critical-severity Trivy/pentest findings | +| `PentestHighFindings` | warning | More than 3 open high-severity findings | + +### `system_alerts` +| Alert | Severity | Trigger | +|-------|----------|---------| +| `BackendUnhealthy` | critical | A backend (peer) is unhealthy for 5+ minutes | +| `DiskSpaceLow` | warning | Disk usage >85% on any container | +| `DiskSpaceCritical` | critical | Disk usage >95% on any container | + +## Configuring Webhook Destination + +### Zulip Setup + +1. **Create webhook bot** in Zulip: + - Settings โ†’ Bots โ†’ Add new bot + - Bot type: **Incoming webhook** + - Name: `containarium-alerts` + - Note the API key + +2. **Create or pick a stream** (e.g., `Op-Room`) + +3. **Webhook URL format**: + ``` + https://YOUR-ORG.zulipchat.com/api/v1/external/alertmanager?api_key=BOT_API_KEY&stream=STREAM_NAME&topic=TOPIC_NAME + ``` + +4. **Configure in Containarium**: + ```bash + TOKEN=$(containarium token generate --username admin --roles admin --expiry 1h --secret-file /etc/containarium/jwt.secret --raw) + + curl -X PUT \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"webhookUrl": "https://YOUR-ORG.zulipchat.com/api/v1/external/alertmanager?api_key=KEY&stream=Op-Room&topic=alerts"}' \ + https://containarium.kafeido.app/v1/system/alerting + ``` + +5. 
**Send test alert**: + ```bash + curl -X POST -H "Authorization: Bearer $TOKEN" \ + https://containarium.kafeido.app/v1/system/alerting/test + ``` + + You should see a test message in your Zulip stream. + +### Slack Setup + +1. Create incoming webhook at https://api.slack.com/apps โ†’ Incoming Webhooks +2. Pick a channel and copy the URL (`https://hooks.slack.com/services/T.../B.../xxx`) +3. Configure same as Zulip but with the Slack URL + +## External URL (Alert Source Links) + +Each alert message includes a "Source" link. By default vmalert uses its internal Docker hostname (unreachable from outside). Override with `-external.url`: + +The daemon configures vmalert with: +``` +-external.url=https://containarium.kafeido.app +``` + +Alert links become: +``` +https://containarium.kafeido.app/vmalert/alert?group_id=...&alert_id=... +``` + +## Throttling & Repeat Intervals + +Alertmanager config (auto-generated): + +```yaml +route: + receiver: 'webhook' + group_by: ['alertname', 'severity'] + group_wait: 30s # Wait 30s for more alerts in same group + group_interval: 5m # Send updates every 5min while group is active + repeat_interval: 4h # Re-send unchanged alerts every 4h (warning/info) + routes: + - match: + severity: critical + repeat_interval: 1h # Critical alerts re-send every hour +``` + +**Why don't I see alerts immediately?** +- New alerts wait `group_wait: 30s` before first send +- Active alerts only re-send every `repeat_interval` (1h critical, 4h others) +- To force re-send: `incus exec containarium-core-victoriametrics -- systemctl restart alertmanager` + +## Viewing Alert State + +| What | Where | +|------|-------| +| Currently firing alerts | Web UI โ†’ Monitoring tab | +| Webhook delivery history | `GET /v1/system/alerting/deliveries` (last 100 attempts) | +| vmalert UI | `https://containarium.kafeido.app/vmalert/` | +| Alertmanager UI | `https://containarium.kafeido.app/alertmanager/` | + +## Adding Custom Alert Rules + +Custom rules can be added via 
the API: +```bash +curl -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \ + -d '{ + "name": "MyCustomAlert", + "expr": "container_memory_usage_bytes{container_name=\"my-app\"} > 8e9", + "duration": "5m", + "severity": "warning", + "summary": "My app using high memory", + "description": "Memory usage > 8GB for 5+ minutes" + }' \ + https://containarium.kafeido.app/v1/alerts +``` + +List rules: +```bash +curl -H "Authorization: Bearer $TOKEN" https://containarium.kafeido.app/v1/alerts +``` + +## Troubleshooting + +### Alerts firing in UI but not arriving in webhook + +**Symptom**: Web UI Monitoring tab shows firing alerts, but webhook destination receives nothing. + +**Likely cause**: vmalert can't reach Alertmanager. The notifier URL must include the `/alertmanager` path prefix. + +Check: +```bash +incus exec containarium-core-victoriametrics -- grep notifier /etc/systemd/system/vmalert.service +``` + +Should show: +``` +-notifier.url=http://localhost:9093/alertmanager +``` + +If it shows `http://localhost:9093` only, alerts go to a 404. Fix: +```bash +incus exec containarium-core-victoriametrics -- sed -i 's|http://localhost:9093 |http://localhost:9093/alertmanager |' /etc/systemd/system/vmalert.service +incus exec containarium-core-victoriametrics -- systemctl daemon-reload +incus exec containarium-core-victoriametrics -- systemctl restart vmalert +``` + +### Webhook configured but test fails + +Check the daemon logs: +```bash +journalctl -u containarium --since "5 min ago" | grep -i "webhook\|relay" +``` + +Common issues: +- Webhook URL syntax errors (missing `?` or `&` separators) +- Zulip stream doesn't exist or bot has no access +- Slack webhook revoked/disabled + +### Alert links show internal hostname + +vmalert needs `-external.url`. Check: +```bash +incus exec containarium-core-victoriametrics -- grep external /etc/systemd/system/vmalert.service +``` + +If missing, the daemon should set it on next restart. 
Manual fix: +```bash +incus exec containarium-core-victoriametrics -- sed -i '/-notifier.url/a\ -external.url=https://YOUR-DOMAIN \\' /etc/systemd/system/vmalert.service +incus exec containarium-core-victoriametrics -- systemctl daemon-reload +incus exec containarium-core-victoriametrics -- systemctl restart vmalert +``` + +### Old alerts won't go away + +Alertmanager remembers fired alerts. To clear all state: +```bash +incus exec containarium-core-victoriametrics -- systemctl restart alertmanager +``` + +## API Reference + +| Method | Endpoint | Description | +|--------|----------|-------------| +| `GET` | `/v1/system/alerting` | Get current config (webhookUrl masked) | +| `PUT` | `/v1/system/alerting` | Update webhook URL and secret | +| `POST` | `/v1/system/alerting/test` | Send a test alert | +| `GET` | `/v1/system/alerting/deliveries` | Last 100 delivery attempts | +| `GET` | `/v1/alerts` | List all alert rules | +| `POST` | `/v1/alerts` | Create custom alert rule | +| `DELETE` | `/v1/alerts/{name}` | Delete custom alert rule | diff --git a/docs/ANDROID-DEV-SETUP.md b/docs/ANDROID-DEV-SETUP.md new file mode 100644 index 0000000..c4211a0 --- /dev/null +++ b/docs/ANDROID-DEV-SETUP.md @@ -0,0 +1,248 @@ +# Android Development Environment on Containarium + +Run a full Android development environment inside an Incus container with hardware-accelerated emulator via KVM. 
+ +## Two Modes + +| Mode | Stack | Use Case | Access | +|------|-------|----------|--------| +| **Headless** | `android` | CI/CD, automated testing, command-line builds | SSH + ADB | +| **GUI** | `android-studio` | App UI development, manual testing, full IDE | SSH + VNC | + +## Prerequisites + +- Container must have `security.nesting=true` (Containarium sets this by default when Podman is enabled) +- Host must support KVM (`grep -c vmx /proc/cpuinfo` > 0) +- Recommended: 8+ CPU cores, 16GB+ RAM, 50GB+ disk + +## Quick Start + +### Headless (CI/CD & Command-Line) + +```bash +containarium create android-dev \ + --ssh-key ~/.ssh/id_ed25519.pub \ + --cpu 8 --memory 16GB --disk 100GB \ + --stack android \ + --backend-id tunnel-fts-13700k-gpu +``` + +After creation, SSH in and use: +```bash +ssh android-dev + +# Build a project +cd /path/to/project +./gradlew assembleDebug + +# Start emulator (headless) +emulator -avd default -no-window -no-audio -gpu swiftshader_indirect & + +# Wait for boot +adb wait-for-device +adb shell getprop sys.boot_completed # Returns "1" when ready + +# Run tests +./gradlew connectedAndroidTest + +# Stop emulator +adb emu kill +``` + +### GUI (Android Studio) + +```bash +containarium create android-studio-dev \ + --ssh-key ~/.ssh/id_ed25519.pub \ + --cpu 8 --memory 16GB --disk 100GB \ + --stack android-studio \ + --backend-id tunnel-fts-13700k-gpu +``` + +After creation: +```bash +ssh android-studio-dev + +# Start VNC server (first time sets up desktop) +vncserver :1 -geometry 1920x1080 -depth 24 + +# VNC is now listening on port 5901 +``` + +Access the desktop: + +**Option A โ€” VNC client** (recommended for low latency): +```bash +# From your laptop, create SSH tunnel: +ssh -L 5901:localhost:5901 android-studio-dev + +# Connect VNC client to localhost:5901 +# Password: containarium +``` + +**Option B โ€” Browser via noVNC** (no client install): +```bash +# Inside the container: +sudo apt install novnc websockify +websockify --web 
/usr/share/novnc 6080 localhost:5901 & + +# From your laptop, tunnel port 6080: +ssh -L 6080:localhost:6080 android-studio-dev + +# Open browser: http://localhost:6080/vnc.html +``` + +Then launch Android Studio: +```bash +# In the VNC desktop terminal: +studio.sh & +``` + +## What's Installed + +### `android` stack (headless) +- OpenJDK 17 (headless) +- Android SDK command-line tools +- Android SDK platform tools (adb, fastboot) +- Android build tools 35.0.0 +- Android platform API 35 +- Android Emulator with x86_64 system image +- Pre-created AVD: `default` (Pixel 6, Android 15) +- QEMU/KVM for emulator acceleration + +### `android-studio` stack (GUI) +- Everything in `android` stack, plus: +- OpenJDK 17 (full, with GUI support) +- Android Studio (latest stable) +- XFCE4 desktop environment +- TigerVNC server +- Noto fonts + +## Emulator Usage + +### Start Emulator + +```bash +# Headless (no GUI needed) +emulator -avd default -no-window -no-audio -gpu swiftshader_indirect & + +# With VNC (visible in VNC desktop) +emulator -avd default -gpu swiftshader_indirect & +``` + +### GPU Acceleration + +For better emulator performance on GPU-equipped peers: +```bash +# If the container has GPU passthrough: +emulator -avd default -gpu host & + +# Software rendering (always works, slower): +emulator -avd default -gpu swiftshader_indirect & +``` + +### Create Additional AVDs + +```bash +# List available system images +sdkmanager --list | grep system-images + +# Install a different API level +sdkmanager "system-images;android-34;google_apis;x86_64" + +# Create AVD +avdmanager create avd -n android34 \ + -k "system-images;android-34;google_apis;x86_64" \ + -d pixel_6 + +# List AVDs +avdmanager list avd +``` + +### ADB Commands + +```bash +# List connected devices/emulators +adb devices + +# Install APK +adb install app-debug.apk + +# Logcat +adb logcat + +# Shell into emulator +adb shell + +# Screenshot +adb exec-out screencap -p > screenshot.png + +# Record screen +adb shell 
screenrecord /sdcard/demo.mp4 +adb pull /sdcard/demo.mp4 +``` + +## CI/CD Example + +Example script for automated builds and tests: +```bash +#!/bin/bash +set -euo pipefail + +# Start emulator +emulator -avd default -no-window -no-audio -gpu swiftshader_indirect & +EMU_PID=$! + +# Wait for boot (up to 5 minutes) +adb wait-for-device +timeout 300 bash -c 'while [ "$(adb shell getprop sys.boot_completed 2>/dev/null)" != "1" ]; do sleep 5; done' + +# Disable animations for faster tests +adb shell settings put global window_animation_scale 0 +adb shell settings put global transition_animation_scale 0 +adb shell settings put global animator_duration_scale 0 + +# Run tests +./gradlew connectedAndroidTest + +# Cleanup +kill $EMU_PID 2>/dev/null +``` + +## Troubleshooting + +### Emulator fails with "KVM is required" +The container needs nested virtualization: +```bash +# Check from inside container +kvm-ok + +# If not supported, the container needs security.nesting=true +# Containarium sets this by default with --podman flag +``` + +### Emulator extremely slow +- Use `-gpu swiftshader_indirect` (software GPU) +- Ensure KVM is working: `emulator -accel-check` +- Increase container CPU/memory + +### "ANDROID_HOME not set" +```bash +source ~/.bashrc +# Or set manually: +export ANDROID_HOME=/opt/android-sdk +export PATH=$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/platform-tools:$ANDROID_HOME/emulator:$PATH +``` + +### VNC connection refused +```bash +# Check if VNC is running +vncserver -list + +# Start if not running +vncserver :1 -geometry 1920x1080 -depth 24 + +# Kill and restart +vncserver -kill :1 +vncserver :1 -geometry 1920x1080 -depth 24 +``` diff --git a/docs/CROSS-PEER-FILE-TRANSFER.md b/docs/CROSS-PEER-FILE-TRANSFER.md new file mode 100644 index 0000000..80216a6 --- /dev/null +++ b/docs/CROSS-PEER-FILE-TRANSFER.md @@ -0,0 +1,177 @@ +# Cross-Peer File Transfer Guide + +Transfer files between containers on different peer nodes over LAN. 
Avoids the sentinel tunnel (which is slow) by using direct host-to-host communication. + +## Architecture + +``` +fts-5900x (LAN: 10.0.3.14) fts-13700k (LAN: 10.0.3.19) + โ””โ”€โ”€ incusbr0 (10.100.0.0/24) โ””โ”€โ”€ incusbr0 (10.100.0.0/24) + โ””โ”€โ”€ container-A โ””โ”€โ”€ container-B + (isolated bridge) (isolated bridge) +``` + +Containers on different peers can't reach each other directly โ€” they're on separate bridge networks. File transfers must go through the host filesystem layer. + +## Prerequisites + +### One-Time Setup: Root SSH Between Peers + +Direct host-to-host rsync requires root SSH access (container storage paths are root-owned). + +**On the source peer** (e.g., fts-5900x): +```bash +# Generate root SSH key (if not already done) +sudo ssh-keygen -t ed25519 -N "" -f /root/.ssh/id_ed25519 + +# Print the public key +sudo cat /root/.ssh/id_ed25519.pub +``` + +**On the destination peer** (e.g., fts-13700k): +```bash +# Add the source peer's root key +sudo mkdir -p /root/.ssh +sudo bash -c 'echo "PASTE_THE_PUBLIC_KEY_HERE" >> /root/.ssh/authorized_keys' +sudo chmod 700 /root/.ssh +sudo chmod 600 /root/.ssh/authorized_keys +``` + +**Verify** (on source peer): +```bash +sudo ssh -o StrictHostKeyChecking=no 10.0.3.19 echo "SSH OK" +``` + +## Method 1: rsync (Recommended for Large Transfers) + +Best for large directories (100GB+). Supports resume on interruption. 
+
+### Find Container Storage Paths
+
+Container rootfs is at:
+```
+/var/lib/incus/storage-pools/default/containers/<container-name>/rootfs/
+```
+
+Example:
+```bash
+# Source path (on fts-5900x)
+SRC=/var/lib/incus/storage-pools/default/containers/apibox-dev-4090-container/rootfs/home/apibox-dev-4090/data/
+
+# Destination path (on fts-13700k)
+DST=/var/lib/incus/storage-pools/default/containers/apibox-dev-3090-container/rootfs/home/apibox-dev-3090/data/
+```
+
+### Run Transfer
+
+On the **source peer** as root:
+```bash
+# Create destination directory
+sudo ssh 10.0.3.19 "mkdir -p '$DST'"
+
+# rsync with progress and compression
+sudo rsync -avP --compress "$SRC" "10.0.3.19:$DST"
+
+# Fix ownership for LXC uid mapping (container uid 1000, the first user, maps to host uid 1001000)
+sudo ssh 10.0.3.19 "chown -R 1001000:1001000 '$DST'"
+```
+
+### Speed Reference
+
+| Network | Speed | 100GB | 500GB |
+|---------|-------|-------|-------|
+| WiFi LAN (802.11ac) | ~300-500 Mbps | ~30-50 min | ~2.5-4 hours |
+| Gigabit Ethernet | ~900 Mbps | ~15 min | ~1.5 hours |
+| 2.5GbE | ~2.3 Gbps | ~6 min | ~30 min |
+| 10GbE | ~5 Gbps | ~3 min | ~15 min |
+
+## Method 2: tar + ssh (No Temp Space)
+
+Streams data directly without temp files. Good when disk space is tight.
+
+On the **source peer** as root:
+```bash
+sudo tar cf - -C "$SRC" . | ssh 10.0.3.19 "mkdir -p '$DST' && tar xf - -C '$DST'"
+
+# Fix ownership
+sudo ssh 10.0.3.19 "chown -R 1001000:1001000 '$DST'"
+```
+
+Add `pv` for progress monitoring (install with `apt install pv`):
+```bash
+sudo tar cf - -C "$SRC" . | pv -s $(sudo du -sb "$SRC" | cut -f1) | ssh 10.0.3.19 "tar xf - -C '$DST'"
+```
+
+## Method 3: incus file (Simple, Small Files)
+
+For small files or directories. Uses Incus API, no root SSH needed.
+
+**Pull from source container → push to destination container:**
+
+On the **source peer**:
+```bash
+# Pull files out of container to host
+sudo incus file pull -r <container-name>/path/to/files /tmp/transfer/
+```
+
+Transfer to destination peer:
+```bash
+scp -r /tmp/transfer/ <dest-peer>:/tmp/transfer/
+```
+
+On the **destination peer**:
+```bash
+# Push files into container
+sudo incus file push -r /tmp/transfer/ <container-name>/path/to/files/
+```
+
+**Drawback**: Requires temp space on both hosts equal to the transfer size.
+
+## UID/GID Mapping
+
+Incus unprivileged containers map UIDs:
+- Host UID `1000000` = Container UID `0` (root)
+- Host UID `1001000` = Container UID `1000` (first user)
+
+After copying files via host-level tools (rsync, tar), fix ownership:
+```bash
+# For files owned by the container's first user (uid 1000 inside container):
+sudo chown -R 1001000:1001000 /path/on/host/
+
+# To match a specific user (e.g., uid 1001 inside container):
+sudo chown -R 1001001:1001001 /path/on/host/
+```
+
+Verify inside the container:
+```bash
+incus exec <container-name> -- ls -la /path/to/files/
+```
+
+## Troubleshooting
+
+### Permission denied on storage path
+- Run as root: `sudo rsync ...`
+- ZFS storage: check `zfs list | grep <container-name>` for the correct mount
+
+### Transfer interrupted
+- rsync automatically resumes: just re-run the same command
+- tar does not resume: start over or switch to rsync
+
+### Slow transfer over WiFi
+- Use wired ethernet between peers for 10x+ speed improvement
+- Add `--compress` to rsync (helps for compressible data, hurts for pre-compressed files like model weights)
+- For model weights (already compressed): skip `--compress` flag
+
+### Container can't see transferred files
+- Check ownership: `ls -la` should show the container user, not root
+- Run `chown -R 1001000:1001000` on the host path (host uid 1001000 = container uid 1000)
+- Restart the container if files still don't appear (rare)
+
+### Storage path differs on ZFS
+If using ZFS with non-default pool names:
+```bash
+# Find the actual mount point
+mount | grep <container-name>
+# Example output:
+# incus-local/containers/<container-name> on /var/lib/incus/storage-pools/default/containers/<container-name> type zfs
+```
diff --git a/docs/WINDOWS-VM-SETUP.md b/docs/WINDOWS-VM-SETUP.md
new file mode 100644
index 0000000..6b6ca5e
--- /dev/null
+++ b/docs/WINDOWS-VM-SETUP.md
@@ -0,0 +1,229 @@
+# Windows Server VM Setup on Incus
+
+This guide covers running Windows Server 2022 as a VM inside Incus on a Containarium peer node. Incus uses QEMU/KVM for VMs, providing full hardware virtualization with GPU passthrough support.
+
+> **Note**: Windows cannot run as an LXC container (LXC shares the host Linux kernel). It must run as a full VM using `incus launch --vm`.
+
+## Prerequisites
+
+- Incus installed and initialized on the peer node
+- KVM support enabled (`kvm-ok` should return "KVM acceleration can be used")
+- At least 8GB free RAM and 50GB free disk
+- (Optional) GPU for passthrough
+
+## Step 1: Download ISOs
+
+### Windows Server 2022 Evaluation ISO (~5.5GB)
+
+Download from Microsoft Evaluation Center (180-day free evaluation, requires browser):
+
+**https://www.microsoft.com/en-us/evalcenter/evaluate-windows-server-2022**
+
+1. Fill in the registration form
+2. Select **ISO** format, **64-bit edition**, **English**
+3. Download and upload to the peer:
+
+```bash
+scp ~/Downloads/SERVER_EVAL_x64FRE_en-us.iso <peer-host>:/tmp/windows-server-2022.iso
+```
+
+### Virtio Drivers ISO (~600MB)
+
+Required for disk and network I/O performance.
Download directly on the peer: + +```bash +wget -O /tmp/virtio-win.iso \ + "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso" +``` + +## Step 2: Create the VM + +```bash +# Create an empty VM (no image โ€” we boot from ISO) +sudo incus init win2022 --empty --vm \ + -c limits.cpu=4 \ + -c limits.memory=8GiB \ + -c security.secureboot=false \ + -d root,size=50GiB + +# Attach Windows install ISO (boot priority ensures it boots first) +sudo incus config device add win2022 install disk \ + source=/tmp/windows-server-2022.iso boot.priority=10 + +# Attach virtio drivers ISO (accessible as second CD drive during install) +sudo incus config device add win2022 virtio disk \ + source=/tmp/virtio-win.iso +``` + +## Step 3: Install Windows + +```bash +# Start the VM +sudo incus start win2022 + +# Connect to the VGA console (graphical installer) +sudo incus console win2022 --type=vga +``` + +During installation: + +1. Select language/keyboard, click **Install Now** +2. Choose **Windows Server 2022 Standard (Desktop Experience)** +3. Accept license terms +4. Select **Custom: Install Windows only** +5. At the disk selection screen, the disk won't be visible yet โ€” you need to load the virtio SCSI driver: + - Click **Load driver** โ†’ **Browse** + - Navigate to the virtio CD โ†’ `vioscsi` โ†’ `2k22` โ†’ `amd64` + - Select the driver and click **Next** + - The 50GB disk will now appear โ€” select it and continue +6. Windows will install and reboot (takes ~15-20 minutes) +7. Set the Administrator password when prompted + +> **Tip**: To exit the VGA console, press `Ctrl+a q`. + +## Step 4: Install Virtio Drivers (Post-Install) + +After Windows boots, connect to the console again: + +```bash +sudo incus console win2022 --type=vga +``` + +Inside Windows: + +1. Open **File Explorer** โ†’ navigate to the virtio CD drive (usually `D:` or `E:`) +2. 
Run `virtio-win-gt-x64.exe` โ€” this installs all virtio drivers: + - Network (virtio-net) + - Balloon (memory management) + - Serial port + - QEMU guest agent +3. Reboot when prompted + +## Step 5: Enable Remote Access + +### Enable RDP (Remote Desktop) + +Open **PowerShell as Administrator** inside Windows: + +```powershell +# Enable RDP +Set-ItemProperty -Path 'HKLM:\System\CurrentControlSet\Control\Terminal Server' ` + -Name "fDenyTSConnections" -Value 0 + +# Allow RDP through firewall +Enable-NetFirewallRule -DisplayGroup "Remote Desktop" + +# (Optional) Allow RDP from any network +Set-NetFirewallRule -Name "RemoteDesktop-UserMode-In-TCP" -Profile Any +``` + +### (Optional) Install OpenSSH Server + +```powershell +# Install OpenSSH server +Add-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0 + +# Start and enable SSH +Start-Service sshd +Set-Service -Name sshd -StartupType Automatic + +# Allow SSH through firewall +New-NetFirewallRule -Name "OpenSSH" -DisplayName "OpenSSH Server" ` + -Enabled True -Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 +``` + +## Step 6: Remove Install ISO + +After installation is complete, remove the ISO to free resources: + +```bash +sudo incus config device remove win2022 install +``` + +## Step 7: Access the VM + +### Find the VM's IP address + +```bash +sudo incus list win2022 -f csv -c 4 +# Example output: 10.100.0.50 (eth0) +``` + +### RDP Access + +From your local machine (assuming SSH tunnel or VPN to the peer network): + +```bash +# macOS +open rdp://10.100.0.50 + +# Linux +xfreerdp /v:10.100.0.50 /u:Administrator + +# Windows +mstsc /v:10.100.0.50 +``` + +### SSH Access (if OpenSSH installed) + +```bash +ssh Administrator@10.100.0.50 +``` + +## GPU Passthrough (Optional) + +To pass a GPU to the Windows VM for CUDA/DirectX workloads: + +```bash +# Find GPU PCI address +lspci | grep -i nvidia +# Example: 01:00.0 3D controller: NVIDIA Corporation ... 
+ +# Add GPU to VM (VM must be stopped) +sudo incus stop win2022 +sudo incus config device add win2022 gpu gpu \ + pci=01:00.0 +sudo incus start win2022 +``` + +Inside Windows, install the NVIDIA driver from https://www.nvidia.com/drivers/. + +> **Note**: GPU passthrough requires the GPU to not be in use by the host. If the host is using the GPU, you'll need to unbind it first or use a dedicated GPU for the VM. + +## Save as Reusable Image + +After setup is complete, publish the VM as a local image to avoid repeating the installation: + +```bash +# Stop the VM first +sudo incus stop win2022 + +# Publish as local image +sudo incus publish win2022 --alias windows-server-2022 + +# Now you can create new VMs from this image: +sudo incus launch windows-server-2022 win2022-dev --vm \ + -c limits.cpu=4 -c limits.memory=8GiB +``` + +## Troubleshooting + +### VM won't boot from ISO +- Ensure `security.secureboot=false` is set +- Check boot priority: `sudo incus config device show win2022` + +### No disk visible during install +- You forgot to load the virtio SCSI driver โ€” see Step 3.5 + +### No network after install +- Install virtio-net driver from the virtio CD +- Or run `virtio-win-gt-x64.exe` which installs all drivers + +### VGA console shows black screen +- Wait 30 seconds โ€” Windows boot can be slow +- Try `sudo incus console win2022 --type=vga` again + +### RDP connection refused +- Verify RDP is enabled: `Get-ItemProperty -Path 'HKLM:\System\CurrentControlSet\Control\Terminal Server' -Name "fDenyTSConnections"` +- Check Windows Firewall: `Get-NetFirewallRule -DisplayGroup "Remote Desktop"` +- Verify VM IP: `sudo incus list win2022 -f csv -c 4` diff --git a/internal/cmd/tunnel.go b/internal/cmd/tunnel.go index 088d4fd..b3241c5 100644 --- a/internal/cmd/tunnel.go +++ b/internal/cmd/tunnel.go @@ -44,7 +44,7 @@ func init() { tunnelCmd.Flags().StringVar(&tunnelSentinelAddr, "sentinel-addr", "", "Sentinel address (host:port) to connect to (required)") 
tunnelCmd.Flags().StringVar(&tunnelToken, "token", "", "Pre-shared authentication token (or CONTAINARIUM_TUNNEL_TOKEN env)") tunnelCmd.Flags().StringVar(&tunnelSpotID, "spot-id", "", "Unique identifier for this spot instance (required)") - tunnelCmd.Flags().StringVar(&tunnelPorts, "ports", "22,80,443,8080", "Comma-separated local ports to expose through the tunnel") + tunnelCmd.Flags().StringVar(&tunnelPorts, "ports", "22,80,443,3389,8080", "Comma-separated local ports to expose through the tunnel") } func runTunnel(cmd *cobra.Command, args []string) error { diff --git a/internal/container/manager.go b/internal/container/manager.go index 0e70484..717cac8 100644 --- a/internal/container/manager.go +++ b/internal/container/manager.go @@ -1,6 +1,8 @@ package container import ( + "crypto/rand" + "encoding/hex" "fmt" "log" "os" @@ -12,6 +14,7 @@ import ( "github.com/footprintai/containarium/internal/ostype" "github.com/footprintai/containarium/internal/stacks" pb "github.com/footprintai/containarium/pkg/pb/containarium/v1" + incusapi "github.com/lxc/incus/v6/shared/api" ) // Manager handles container lifecycle operations @@ -37,6 +40,7 @@ type CreateOptions struct { Stack string // Software stack to install (e.g., "nodejs", "python") OSType pb.OSType // Operating system type for the container OnProvisioning func() // Called when container is running but still provisioning (installing packages/stack) + RDPPassword string // Generated RDP password for Windows VMs (output, set by Create) } // New creates a new container manager @@ -68,6 +72,8 @@ func (m *Manager) Create(opts CreateOptions) (*incus.ContainerInfo, error) { image = ostype.ImageForOSType(opts.OSType) } + isWindows := ostype.IsWindows(opts.OSType) + config := incus.ContainerConfig{ Name: containerName, Image: image, @@ -78,12 +84,29 @@ func (m *Manager) Create(opts CreateOptions) (*incus.ContainerInfo, error) { AutoStart: opts.AutoStart, } + // Windows VMs: set instance type and enforce minimum resources + if 
isWindows { + config.InstanceType = incusapi.InstanceTypeVM + config.EnableNesting = false + config.EnablePodmanPrivileged = false + if config.CPU == "" { + config.CPU = "4" + } + if config.Memory == "" { + config.Memory = "8GB" + } + } + // Configure root disk device if disk size is specified - if opts.Disk != "" { + diskSize := opts.Disk + if diskSize == "" && isWindows { + diskSize = "50GB" + } + if diskSize != "" { config.Disk = &incus.DiskDevice{ Path: "/", Pool: "default", - Size: opts.Disk, + Size: diskSize, } } @@ -131,6 +154,13 @@ func (m *Manager) Create(opts CreateOptions) (*incus.ContainerInfo, error) { return nil, fmt.Errorf("failed to set labels: %w", err) } + // Windows VM: separate provisioning flow + if isWindows { + return m.provisionWindowsVM(containerName, &opts) + } + + // --- Linux container provisioning (steps 3-7) --- + // Step 3: Create jump server account (proxy-only, no shell access) if opts.Verbose { fmt.Println(" [3/7] Creating jump server account (proxy-only)...") @@ -236,6 +266,87 @@ func (m *Manager) Create(opts CreateOptions) (*incus.ContainerInfo, error) { return info, nil } +// generatePassword generates a random password of the given byte length (hex-encoded). +func generatePassword(byteLen int) (string, error) { + b := make([]byte, byteLen) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("failed to generate random password: %w", err) + } + return hex.EncodeToString(b), nil +} + +// provisionWindowsVM handles the Windows-specific provisioning after the VM +// has been created, started, and labelled. 
+func (m *Manager) provisionWindowsVM(vmName string, opts *CreateOptions) (*incus.ContainerInfo, error) { + if opts.Verbose { + fmt.Println(" [3/4] Waiting for Windows VM network (this may take 1-2 minutes)...") + } + + // Windows VMs take much longer to boot than Linux containers + ipAddr, err := m.incus.WaitForNetwork(vmName, 120*time.Second) + if err != nil { + _ = m.cleanup(vmName) + return nil, fmt.Errorf("failed to get VM IP: %w", err) + } + + if opts.Verbose { + fmt.Printf(" VM IP: %s\n", ipAddr) + } + + // Signal provisioning state + if opts.OnProvisioning != nil { + opts.OnProvisioning() + } + + // Generate and set Administrator password + if opts.Verbose { + fmt.Println(" [4/4] Setting Administrator password and verifying RDP...") + } + + password, err := generatePassword(16) + if err != nil { + _ = m.cleanup(vmName) + return nil, fmt.Errorf("failed to generate password: %w", err) + } + + // Set Administrator password via PowerShell + psCmd := fmt.Sprintf( + "Set-LocalUser -Name Administrator -Password (ConvertTo-SecureString '%s' -AsPlainText -Force)", + password, + ) + if err := m.incus.Exec(vmName, []string{"powershell", "-Command", psCmd}); err != nil { + _ = m.cleanup(vmName) + return nil, fmt.Errorf("failed to set Administrator password: %w", err) + } + + // Verify RDP is listening on port 3389 + rdpCheck := "Test-NetConnection -ComputerName localhost -Port 3389 -InformationLevel Quiet" + if err := m.incus.Exec(vmName, []string{"powershell", "-Command", rdpCheck}); err != nil { + if opts.Verbose { + fmt.Println(" Warning: RDP port 3389 check failed โ€” RDP may not be enabled in the golden image") + } + } + + // Store RDP password as a label on the VM for the server to retrieve + opts.RDPPassword = password + if err := m.incus.SetLabels(vmName, map[string]string{ + "rdp-password": password, + }); err != nil { + log.Printf("Warning: failed to store RDP password label: %v", err) + } + + info, err := m.incus.GetContainer(vmName) + if err != nil { + 
return nil, fmt.Errorf("failed to get VM info: %w", err) + } + + if opts.Verbose { + fmt.Printf(" Windows VM ready: RDP at %s:3389 (user: Administrator)\n", ipAddr) + } + + return info, nil +} + // installPackages installs required packages in the container func (m *Manager) installPackages(containerName string, enablePodman bool, stackID string, username string, family ostype.OSFamily) error { pkgMgr := ospkg.ForFamily(family) diff --git a/internal/container/windows_test.go b/internal/container/windows_test.go new file mode 100644 index 0000000..e9e6ada --- /dev/null +++ b/internal/container/windows_test.go @@ -0,0 +1,70 @@ +package container + +import ( + "testing" +) + +func TestGeneratePassword(t *testing.T) { + // Test basic generation + pw1, err := generatePassword(16) + if err != nil { + t.Fatalf("generatePassword(16) error = %v", err) + } + // 16 bytes = 32 hex chars + if len(pw1) != 32 { + t.Errorf("generatePassword(16) length = %d, want 32", len(pw1)) + } + + // Test uniqueness โ€” two calls should produce different passwords + pw2, err := generatePassword(16) + if err != nil { + t.Fatalf("generatePassword(16) error = %v", err) + } + if pw1 == pw2 { + t.Error("generatePassword() produced identical passwords on two calls") + } + + // Test different sizes + sizes := []struct { + byteLen int + wantChars int + }{ + {8, 16}, + {16, 32}, + {32, 64}, + } + for _, tt := range sizes { + pw, err := generatePassword(tt.byteLen) + if err != nil { + t.Fatalf("generatePassword(%d) error = %v", tt.byteLen, err) + } + if len(pw) != tt.wantChars { + t.Errorf("generatePassword(%d) length = %d, want %d", tt.byteLen, len(pw), tt.wantChars) + } + } + + // Test that output is valid hex + pw, _ := generatePassword(16) + for _, c := range pw { + if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) { + t.Errorf("generatePassword() contains non-hex char: %c", c) + } + } +} + +func TestCreateOptionsRDPPasswordField(t *testing.T) { + // Verify the RDPPassword field exists and is 
settable + opts := CreateOptions{ + Username: "testuser", + RDPPassword: "initial", + } + if opts.RDPPassword != "initial" { + t.Errorf("RDPPassword = %q, want %q", opts.RDPPassword, "initial") + } + + // Simulate what provisionWindowsVM does + opts.RDPPassword = "generated-password" + if opts.RDPPassword != "generated-password" { + t.Errorf("RDPPassword after update = %q, want %q", opts.RDPPassword, "generated-password") + } +} diff --git a/internal/gateway/gateway.go b/internal/gateway/gateway.go index a015ad5..cc3dd4b 100644 --- a/internal/gateway/gateway.go +++ b/internal/gateway/gateway.go @@ -49,6 +49,9 @@ type GatewayServer struct { eventHandler *EventHandler coreServicesHandler *CoreServicesHandler + // Guacamole reverse proxy (browser-based RDP for Windows VMs) + guacamoleBackendURL string + // Backends handler (for multi-backend support, set externally) backendsHandler http.HandlerFunc @@ -108,6 +111,11 @@ func (gs *GatewayServer) SetAlertmanagerBackendURL(backendURL string) { gs.alertmanagerBackendURL = backendURL } +// SetGuacamoleBackendURL sets the internal Guacamole URL for the reverse proxy +func (gs *GatewayServer) SetGuacamoleBackendURL(backendURL string) { + gs.guacamoleBackendURL = backendURL +} + // SetSecurityStore sets the security store for the CSV export endpoint func (gs *GatewayServer) SetSecurityStore(store *security.Store) { gs.securityStore = store @@ -547,6 +555,23 @@ func (gs *GatewayServer) Start(ctx context.Context) error { } } + // Guacamole reverse proxy (browser-based RDP for Windows VMs, no auth โ€” Guacamole handles its own) + if gs.guacamoleBackendURL != "" { + guacTarget, err := url.Parse(gs.guacamoleBackendURL) + if err != nil { + log.Printf("Warning: Invalid Guacamole backend URL %q: %v", gs.guacamoleBackendURL, err) + } else { + guacProxy := httputil.NewSingleHostReverseProxy(guacTarget) + httpMux.HandleFunc("/guacamole/", func(w http.ResponseWriter, r *http.Request) { + guacProxy.ServeHTTP(w, r) + }) + 
httpMux.HandleFunc("/guacamole", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/guacamole/", http.StatusMovedPermanently) + }) + log.Printf("Guacamole reverse proxy enabled at /guacamole/ -> %s", gs.guacamoleBackendURL) + } + } + // Security CSV export endpoint (with auth via token query param or Authorization header) if gs.securityStore != nil { registerSecurityExport(httpMux, gs.securityStore, gs.authMiddleware) diff --git a/internal/guacamole/client.go b/internal/guacamole/client.go new file mode 100644 index 0000000..e39b22e --- /dev/null +++ b/internal/guacamole/client.go @@ -0,0 +1,171 @@ +package guacamole + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// Client wraps the Apache Guacamole REST API. +type Client struct { + baseURL string + httpClient *http.Client +} + +// New creates a new Guacamole REST API client. +func New(baseURL string) *Client { + return &Client{ + baseURL: baseURL, + httpClient: &http.Client{ + Timeout: 10 * time.Second, + }, + } +} + +// authResponse is the response from POST /api/tokens. +type authResponse struct { + AuthToken string `json:"authToken"` + Username string `json:"username"` + DataSource string `json:"dataSource"` +} + +// Authenticate obtains an auth token from Guacamole. 
+func (c *Client) Authenticate(username, password string) (string, error) { + data := url.Values{ + "username": {username}, + "password": {password}, + } + + resp, err := c.httpClient.PostForm(c.baseURL+"/api/tokens", data) + if err != nil { + return "", fmt.Errorf("guacamole auth request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("guacamole auth failed (status %d): %s", resp.StatusCode, body) + } + + var authResp authResponse + if err := json.NewDecoder(resp.Body).Decode(&authResp); err != nil { + return "", fmt.Errorf("failed to decode auth response: %w", err) + } + + return authResp.AuthToken, nil +} + +// ConnectionConfig defines an RDP connection in Guacamole. +type ConnectionConfig struct { + Name string // Display name for the connection + Hostname string // RDP target hostname/IP + Port string // RDP port (typically "3389") + Username string // RDP username (e.g., "Administrator") + Password string // RDP password +} + +// connectionRequest is the Guacamole API request body for creating a connection. +type connectionRequest struct { + ParentIdentifier string `json:"parentIdentifier"` + Name string `json:"name"` + Protocol string `json:"protocol"` + Parameters map[string]string `json:"parameters"` + Attributes map[string]string `json:"attributes"` +} + +// connectionResponse is the Guacamole API response from creating a connection. +type connectionResponse struct { + Identifier string `json:"identifier"` + Name string `json:"name"` + ParentIdentifier string `json:"parentIdentifier"` + Protocol string `json:"protocol"` +} + +// CreateConnection registers a new RDP connection in Guacamole. +// Returns the connection identifier. 
+func (c *Client) CreateConnection(authToken string, config ConnectionConfig) (string, error) { + reqBody := connectionRequest{ + ParentIdentifier: "ROOT", + Name: config.Name, + Protocol: "rdp", + Parameters: map[string]string{ + "hostname": config.Hostname, + "port": config.Port, + "username": config.Username, + "password": config.Password, + "security": "nla", + "ignore-cert": "true", + "resize-method": "display-update", + "enable-wallpaper": "true", + }, + Attributes: map[string]string{ + "max-connections": "2", + "max-connections-per-user": "2", + }, + } + + body, err := json.Marshal(reqBody) + if err != nil { + return "", fmt.Errorf("failed to marshal connection request: %w", err) + } + + reqURL := fmt.Sprintf("%s/api/session/data/postgresql/connections?token=%s", c.baseURL, authToken) + req, err := http.NewRequest(http.MethodPost, reqURL, bytes.NewReader(body)) + if err != nil { + return "", fmt.Errorf("failed to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("guacamole create connection failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + respBody, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("guacamole create connection failed (status %d): %s", resp.StatusCode, respBody) + } + + var connResp connectionResponse + if err := json.NewDecoder(resp.Body).Decode(&connResp); err != nil { + return "", fmt.Errorf("failed to decode connection response: %w", err) + } + + return connResp.Identifier, nil +} + +// DeleteConnection removes a connection from Guacamole. 
+func (c *Client) DeleteConnection(authToken, connectionID string) error { + reqURL := fmt.Sprintf("%s/api/session/data/postgresql/connections/%s?token=%s", c.baseURL, connectionID, authToken) + req, err := http.NewRequest(http.MethodDelete, reqURL, nil) + if err != nil { + return fmt.Errorf("failed to create delete request: %w", err) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("guacamole delete connection failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("guacamole delete connection failed (status %d): %s", resp.StatusCode, body) + } + + return nil +} + +// GetConnectionURL returns the Guacamole client URL for a given connection ID. +// The returned path is relative to the Guacamole base URL. +func GetConnectionURL(connectionID string) string { + // Guacamole client URL format: /#/client/{base64-encoded-id} + // The connection identifier for PostgreSQL datasource is: {id}\0c\0postgresql + // Base64-encoded for URL embedding + return fmt.Sprintf("/#/client/%s", connectionID) +} diff --git a/internal/guacamole/client_test.go b/internal/guacamole/client_test.go new file mode 100644 index 0000000..0ff7116 --- /dev/null +++ b/internal/guacamole/client_test.go @@ -0,0 +1,150 @@ +package guacamole + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" +) + +func TestAuthenticate(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/tokens" { + t.Errorf("unexpected path: %s", r.URL.Path) + http.Error(w, "not found", http.StatusNotFound) + return + } + if r.Method != http.MethodPost { + t.Errorf("unexpected method: %s", r.Method) + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + username := r.FormValue("username") + password := r.FormValue("password") + if username 
!= "guacadmin" || password != "guacadmin" { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + + json.NewEncoder(w).Encode(authResponse{ + AuthToken: "test-token-123", + Username: "guacadmin", + DataSource: "postgresql", + }) + })) + defer server.Close() + + client := New(server.URL) + + // Valid credentials + token, err := client.Authenticate("guacadmin", "guacadmin") + if err != nil { + t.Fatalf("Authenticate() error = %v", err) + } + if token != "test-token-123" { + t.Errorf("Authenticate() token = %q, want %q", token, "test-token-123") + } + + // Invalid credentials + _, err = client.Authenticate("wrong", "wrong") + if err == nil { + t.Error("Authenticate() with bad credentials should fail") + } +} + +func TestCreateConnection(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + // Verify auth token + token := r.URL.Query().Get("token") + if token != "test-token" { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + + // Decode and verify request body + var req connectionRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + if req.Protocol != "rdp" { + t.Errorf("expected protocol rdp, got %q", req.Protocol) + } + if req.Parameters["hostname"] != "10.100.0.50" { + t.Errorf("expected hostname 10.100.0.50, got %q", req.Parameters["hostname"]) + } + if req.Parameters["port"] != "3389" { + t.Errorf("expected port 3389, got %q", req.Parameters["port"]) + } + + json.NewEncoder(w).Encode(connectionResponse{ + Identifier: "42", + Name: req.Name, + ParentIdentifier: "ROOT", + Protocol: "rdp", + }) + })) + defer server.Close() + + client := New(server.URL) + + connID, err := client.CreateConnection("test-token", ConnectionConfig{ + Name: "wintest-container", + Hostname: 
"10.100.0.50", + Port: "3389", + Username: "Administrator", + Password: "secret123", + }) + if err != nil { + t.Fatalf("CreateConnection() error = %v", err) + } + if connID != "42" { + t.Errorf("CreateConnection() id = %q, want %q", connID, "42") + } +} + +func TestDeleteConnection(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodDelete { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + token := r.URL.Query().Get("token") + if token != "test-token" { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + + w.WriteHeader(http.StatusNoContent) + })) + defer server.Close() + + client := New(server.URL) + + err := client.DeleteConnection("test-token", "42") + if err != nil { + t.Fatalf("DeleteConnection() error = %v", err) + } + + // Wrong token should fail + err = client.DeleteConnection("wrong-token", "42") + if err == nil { + t.Error("DeleteConnection() with bad token should fail") + } +} + +func TestGetConnectionURL(t *testing.T) { + url := GetConnectionURL("42") + want := "/#/client/42" + if url != want { + t.Errorf("GetConnectionURL(\"42\") = %q, want %q", url, want) + } +} diff --git a/internal/incus/client.go b/internal/incus/client.go index 75cf2d4..246eda6 100644 --- a/internal/incus/client.go +++ b/internal/incus/client.go @@ -95,9 +95,10 @@ type ContainerConfig struct { Image string CPU string Memory string - Disk *DiskDevice // Root disk configuration - NIC *NICDevice // Network interface configuration - GPU *GPUDevice // GPU device configuration for passthrough + Disk *DiskDevice // Root disk configuration + NIC *NICDevice // Network interface configuration + GPU *GPUDevice // GPU device configuration for passthrough + InstanceType api.InstanceType // Container (LXC) or VM (QEMU/KVM). Defaults to Container. 
EnableNesting bool EnablePodmanPrivileged bool // Full Docker support (requires privileged container + AppArmor disabled) AutoStart bool @@ -120,6 +121,7 @@ const ( RoleCaddy Role = "core-caddy" RoleVictoriaMetrics Role = "core-victoriametrics" RoleSecurity Role = "core-security" + RoleGuacamole Role = "core-guacamole" ) // IsCoreRole returns true if the role represents a core container. @@ -129,17 +131,18 @@ func (r Role) IsCoreRole() bool { // ContainerInfo holds information about a container type ContainerInfo struct { - Name string - State string - IPAddress string - CPU string - Memory string - Disk string - GPU string // GPU device info (e.g., "nvidia.com/gpu" or GPU ID) - Labels map[string]string - Role Role // Core container role (e.g., RolePostgres, RoleCaddy), empty for user containers - CreatedAt time.Time - BackendID string // Backend this container runs on (populated by PeerPool fan-out) + Name string + State string + IPAddress string + CPU string + Memory string + Disk string + GPU string // GPU device info (e.g., "nvidia.com/gpu" or GPU ID) + InstanceType string // "container" or "virtual-machine" + Labels map[string]string + Role Role // Core container role (e.g., RolePostgres, RoleCaddy), empty for user containers + CreatedAt time.Time + BackendID string // Backend this container runs on (populated by PeerPool fan-out) } // ContainerMetrics holds runtime metrics for a container @@ -230,10 +233,14 @@ func (c *Client) CreateContainer(config ContainerConfig) error { // Debug: Log the image being used fmt.Printf("[DEBUG] CreateContainer - Image: '%s'\n", config.Image) - // Prepare container creation request + // Prepare instance creation request + instanceType := config.InstanceType + if instanceType == "" { + instanceType = api.InstanceTypeContainer + } req := api.InstancesPost{ Name: config.Name, - Type: api.InstanceTypeContainer, + Type: instanceType, } // Parse image source - handle remote images like "images:ubuntu/24.04" @@ -367,8 +374,8 @@ func 
(c *Client) DeleteContainer(name string) error { // ListContainers lists all containers func (c *Client) ListContainers() ([]ContainerInfo, error) { - // Get list of instance names first - names, err := c.server.GetInstanceNames(api.InstanceTypeContainer) + // Get list of instance names (both containers and VMs) + names, err := c.server.GetInstanceNames(api.InstanceTypeAny) if err != nil { return nil, fmt.Errorf("failed to list containers: %w", err) } @@ -382,11 +389,12 @@ func (c *Client) ListContainers() ([]ContainerInfo, error) { } info := ContainerInfo{ - Name: inst.Name, - State: inst.Status, - CreatedAt: inst.CreatedAt, - Labels: extractLabelsFromConfig(inst.Config), - Role: Role(inst.Config[RoleKey]), + Name: inst.Name, + State: inst.Status, + InstanceType: inst.Type, + CreatedAt: inst.CreatedAt, + Labels: extractLabelsFromConfig(inst.Config), + Role: Role(inst.Config[RoleKey]), } // Get CPU and memory limits from config diff --git a/internal/ospkg/ospkg.go b/internal/ospkg/ospkg.go index 20c8102..86c9b1f 100644 --- a/internal/ospkg/ospkg.go +++ b/internal/ospkg/ospkg.go @@ -38,10 +38,13 @@ type PackageManager interface { } // ForFamily returns the appropriate PackageManager for the given OS family. +// Panics for Windows โ€” Windows VMs do not use Linux package managers. func ForFamily(family ostype.OSFamily) PackageManager { switch family { case ostype.RHEL: return &rhelPkgMgr{} + case ostype.Windows: + panic("ospkg.ForFamily called with Windows family โ€” Windows VMs skip Linux package installation") default: return &debianPkgMgr{} } diff --git a/internal/ostype/ostype.go b/internal/ostype/ostype.go index 217be57..c8c0064 100644 --- a/internal/ostype/ostype.go +++ b/internal/ostype/ostype.go @@ -8,8 +8,9 @@ import ( type OSFamily string const ( - Debian OSFamily = "debian" - RHEL OSFamily = "rhel" + Debian OSFamily = "debian" + RHEL OSFamily = "rhel" + Windows OSFamily = "windows" ) // OSTypeLabelKey is the Incus label key used to store the OS type. 
@@ -24,6 +25,8 @@ func ImageForOSType(osType pb.OSType) string { return "images:rockylinux/9" case pb.OSType_OS_TYPE_RHEL_9: return "local:rhel9" + case pb.OSType_OS_TYPE_WINDOWS_2022: + return "local:windows-server-2022" default: return "images:ubuntu/24.04" } @@ -34,6 +37,8 @@ func FamilyForOSType(osType pb.OSType) OSFamily { switch osType { case pb.OSType_OS_TYPE_ROCKY_9, pb.OSType_OS_TYPE_RHEL_9: return RHEL + case pb.OSType_OS_TYPE_WINDOWS_2022: + return Windows default: return Debian } @@ -48,6 +53,8 @@ func LabelValue(osType pb.OSType) string { return "rocky_9" case pb.OSType_OS_TYPE_RHEL_9: return "rhel_9" + case pb.OSType_OS_TYPE_WINDOWS_2022: + return "windows_2022" default: return "ubuntu_2404" } @@ -58,6 +65,8 @@ func FamilyFromLabel(label string) OSFamily { switch label { case "rocky_9", "rhel_9": return RHEL + case "windows_2022": + return Windows default: return Debian } @@ -72,6 +81,8 @@ func OSTypeFromLabel(label string) pb.OSType { return pb.OSType_OS_TYPE_ROCKY_9 case "rhel_9": return pb.OSType_OS_TYPE_RHEL_9 + case "windows_2022": + return pb.OSType_OS_TYPE_WINDOWS_2022 default: return pb.OSType_OS_TYPE_UBUNTU_2404 } @@ -86,11 +97,18 @@ func OSTypeFromString(s string) pb.OSType { return pb.OSType_OS_TYPE_ROCKY_9 case "rhel9", "rhel-9", "redhat9": return pb.OSType_OS_TYPE_RHEL_9 + case "windows2022", "windows-2022", "win2022": + return pb.OSType_OS_TYPE_WINDOWS_2022 default: return pb.OSType_OS_TYPE_UNSPECIFIED } } +// IsWindows returns true if the OS type is a Windows variant. +func IsWindows(osType pb.OSType) bool { + return osType == pb.OSType_OS_TYPE_WINDOWS_2022 +} + // Execer is the interface for executing commands in a container. type Execer interface { Exec(containerName string, command []string) error @@ -99,6 +117,10 @@ type Execer interface { // DetectFamily probes a running container to determine its OS family. // Used for InstallStack on containers where the os-type label is missing. 
func DetectFamily(execer Execer, containerName string) OSFamily { + // Check for Windows (PowerShell exists) + if err := execer.Exec(containerName, []string{"powershell", "-Command", "echo ok"}); err == nil { + return Windows + } if err := execer.Exec(containerName, []string{"test", "-f", "/etc/redhat-release"}); err == nil { return RHEL } diff --git a/internal/ostype/ostype_test.go b/internal/ostype/ostype_test.go new file mode 100644 index 0000000..fad50dd --- /dev/null +++ b/internal/ostype/ostype_test.go @@ -0,0 +1,120 @@ +package ostype + +import ( + "testing" + + pb "github.com/footprintai/containarium/pkg/pb/containarium/v1" +) + +func TestIsWindows(t *testing.T) { + tests := []struct { + name string + osType pb.OSType + want bool + }{ + {"unspecified", pb.OSType_OS_TYPE_UNSPECIFIED, false}, + {"ubuntu", pb.OSType_OS_TYPE_UBUNTU_2404, false}, + {"rocky", pb.OSType_OS_TYPE_ROCKY_9, false}, + {"rhel", pb.OSType_OS_TYPE_RHEL_9, false}, + {"windows 2022", pb.OSType_OS_TYPE_WINDOWS_2022, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsWindows(tt.osType); got != tt.want { + t.Errorf("IsWindows(%v) = %v, want %v", tt.osType, got, tt.want) + } + }) + } +} + +func TestImageForOSType_Windows(t *testing.T) { + got := ImageForOSType(pb.OSType_OS_TYPE_WINDOWS_2022) + want := "local:windows-server-2022" + if got != want { + t.Errorf("ImageForOSType(WINDOWS_2022) = %q, want %q", got, want) + } +} + +func TestFamilyForOSType_Windows(t *testing.T) { + got := FamilyForOSType(pb.OSType_OS_TYPE_WINDOWS_2022) + if got != Windows { + t.Errorf("FamilyForOSType(WINDOWS_2022) = %q, want %q", got, Windows) + } +} + +func TestLabelValue_Windows(t *testing.T) { + got := LabelValue(pb.OSType_OS_TYPE_WINDOWS_2022) + want := "windows_2022" + if got != want { + t.Errorf("LabelValue(WINDOWS_2022) = %q, want %q", got, want) + } +} + +func TestFamilyFromLabel_Windows(t *testing.T) { + got := FamilyFromLabel("windows_2022") + if got != Windows { + 
t.Errorf("FamilyFromLabel(\"windows_2022\") = %q, want %q", got, Windows) + } +} + +func TestOSTypeFromLabel_Windows(t *testing.T) { + got := OSTypeFromLabel("windows_2022") + want := pb.OSType_OS_TYPE_WINDOWS_2022 + if got != want { + t.Errorf("OSTypeFromLabel(\"windows_2022\") = %v, want %v", got, want) + } +} + +func TestOSTypeFromString_Windows(t *testing.T) { + tests := []struct { + input string + want pb.OSType + }{ + {"windows2022", pb.OSType_OS_TYPE_WINDOWS_2022}, + {"windows-2022", pb.OSType_OS_TYPE_WINDOWS_2022}, + {"win2022", pb.OSType_OS_TYPE_WINDOWS_2022}, + } + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + if got := OSTypeFromString(tt.input); got != tt.want { + t.Errorf("OSTypeFromString(%q) = %v, want %v", tt.input, got, tt.want) + } + }) + } +} + +// TestRoundTrip verifies label serialization round-trips for all OS types. +func TestRoundTrip(t *testing.T) { + osTypes := []pb.OSType{ + pb.OSType_OS_TYPE_UBUNTU_2404, + pb.OSType_OS_TYPE_ROCKY_9, + pb.OSType_OS_TYPE_RHEL_9, + pb.OSType_OS_TYPE_WINDOWS_2022, + } + for _, osType := range osTypes { + label := LabelValue(osType) + got := OSTypeFromLabel(label) + if got != osType { + t.Errorf("round-trip failed: OSType %v -> label %q -> OSType %v", osType, label, got) + } + } +} + +// TestFamilyConsistency verifies FamilyForOSType and FamilyFromLabel agree. 
+func TestFamilyConsistency(t *testing.T) { + osTypes := []pb.OSType{ + pb.OSType_OS_TYPE_UBUNTU_2404, + pb.OSType_OS_TYPE_ROCKY_9, + pb.OSType_OS_TYPE_RHEL_9, + pb.OSType_OS_TYPE_WINDOWS_2022, + } + for _, osType := range osTypes { + label := LabelValue(osType) + fromEnum := FamilyForOSType(osType) + fromLabel := FamilyFromLabel(label) + if fromEnum != fromLabel { + t.Errorf("family mismatch for %v: FamilyForOSType=%q, FamilyFromLabel(%q)=%q", + osType, fromEnum, label, fromLabel) + } + } +} diff --git a/internal/pentest/store.go b/internal/pentest/store.go index b007c39..93e567d 100644 --- a/internal/pentest/store.go +++ b/internal/pentest/store.go @@ -465,6 +465,23 @@ type FindingSummary struct { ByCategory map[string]int32 } +// GetFindingByID returns a single finding by its ID +func (s *Store) GetFindingByID(ctx context.Context, id int64) (*FindingRecord, error) { + row := s.pool.QueryRow(ctx, `SELECT id, fingerprint, category, severity, title, description, + target, evidence, cve_ids, remediation, status, target_type, + first_seen_at, last_seen_at, suppressed, suppressed_reason + FROM pentest_findings WHERE id = $1`, id) + + var r FindingRecord + err := row.Scan(&r.ID, &r.Fingerprint, &r.Category, &r.Severity, &r.Title, &r.Description, + &r.Target, &r.Evidence, &r.CVEIDs, &r.Remediation, &r.Status, &r.TargetType, + &r.FirstSeenAt, &r.LastSeenAt, &r.Suppressed, &r.SuppressedReason) + if err != nil { + return nil, fmt.Errorf("finding %d not found: %w", id, err) + } + return &r, nil +} + // SuppressFinding marks a finding as suppressed func (s *Store) SuppressFinding(ctx context.Context, findingID int64, reason string) error { result, err := s.pool.Exec(ctx, ` diff --git a/internal/server/container_server.go b/internal/server/container_server.go index 144bfd0..051b676 100644 --- a/internal/server/container_server.go +++ b/internal/server/container_server.go @@ -13,6 +13,7 @@ import ( "github.com/footprintai/containarium/internal/app" 
"github.com/footprintai/containarium/internal/container" "github.com/footprintai/containarium/internal/events" + "github.com/footprintai/containarium/internal/guacamole" "github.com/footprintai/containarium/internal/incus" "github.com/footprintai/containarium/internal/ostype" pb "github.com/footprintai/containarium/pkg/pb/containarium/v1" @@ -51,6 +52,10 @@ type ContainerServer struct { coreServices *CoreServices daemonConfigStore *app.DaemonConfigStore peerPool *PeerPool + // Guacamole integration for Windows VM RDP access + guacamoleClient *guacamole.Client + guacamoleUser string // Guacamole admin username + guacamolePass string // Guacamole admin password } // NewContainerServer creates a new container server @@ -219,21 +224,37 @@ func (s *ContainerServer) CreateContainer(ctx context.Context, req *pb.CreateCon // Emit container created event s.emitter.EmitContainerCreated(protoContainer) - // Create host-level jump server account so SSH via sshpiper works. - // This is idempotent โ€” skips if the account already exists. 
- go func() { - if err := container.EnsureJumpServerAccount(req.Username); err != nil { - log.Printf("Warning: failed to create jump server account for %s: %v", req.Username, err) - } else { - log.Printf("Jump server account ensured for %s", req.Username) - } - }() + resp := &pb.CreateContainerResponse{ + Container: protoContainer, + Message: fmt.Sprintf("Container %s created successfully", info.Name), + } - return &pb.CreateContainerResponse{ - Container: protoContainer, - Message: fmt.Sprintf("Container %s created successfully", info.Name), - SshCommand: fmt.Sprintf("ssh %s@%s", req.Username, info.IPAddress), - }, nil + if ostype.IsWindows(req.OsType) { + // Windows VM: return RDP address, skip jump server account + resp.RdpAddress = protoContainer.RdpAddress + + // Register RDP connection in Guacamole (best-effort, runs in background) + go func() { + rdpPassword := info.Labels["rdp-password"] + connID := s.registerGuacamoleConnection(info.Name, info.IPAddress, "Administrator", rdpPassword) + if connID != "" { + // Store connection ID as a label for cleanup on delete + _ = s.manager.AddLabel(req.Username, guacamoleConnectionIDLabel, connID) + } + }() + } else { + // Linux container: return SSH command and ensure jump server account + resp.SshCommand = fmt.Sprintf("ssh %s@%s", req.Username, info.IPAddress) + go func() { + if err := container.EnsureJumpServerAccount(req.Username); err != nil { + log.Printf("Warning: failed to create jump server account for %s: %v", req.Username, err) + } else { + log.Printf("Jump server account ensured for %s", req.Username) + } + }() + } + + return resp, nil } // ListContainers lists all containers @@ -405,6 +426,9 @@ func (s *ContainerServer) DeleteContainer(ctx context.Context, req *pb.DeleteCon containerName := fmt.Sprintf("%s-container", req.Username) + // Before deleting, deregister Guacamole connection if this is a Windows VM + s.deregisterGuacamoleConnection(req.Username) + err := s.manager.Delete(req.Username, req.Force) 
if err != nil { // Not found locally โ€” try peers @@ -527,13 +551,15 @@ func (s *ContainerServer) ResizeContainer(ctx context.Context, req *pb.ResizeCon containerName := fmt.Sprintf("%s-container", req.Username) - // Perform resize + // Perform resize โ€” try local first, then peer if err := s.manager.Resize(containerName, req.Cpu, req.Memory, req.Disk, false); err != nil { - // Try peer + // Container not found locally โ€” check peers if s.peerPool != nil { authToken := extractAuthToken(ctx) + log.Printf("[resize] container %s not local, searching peers (token len=%d)", containerName, len(authToken)) peer := s.peerPool.FindContainerPeer(req.Username, authToken) if peer != nil { + log.Printf("[resize] found %s on peer %s, forwarding", containerName, peer.ID) body, _ := json.Marshal(map[string]string{ "cpu": req.Cpu, "memory": req.Memory, @@ -630,6 +656,15 @@ func (s *ContainerServer) InstallStack(ctx context.Context, req *pb.InstallStack return nil, fmt.Errorf("stack_id is required") } + // Reject stack installation on Windows VMs + if containerInfo, getErr := s.manager.Get(req.Username); getErr == nil { + if osLabel, ok := containerInfo.Labels[ostype.OSTypeLabelKey]; ok { + if ostype.IsWindows(ostype.OSTypeFromLabel(osLabel)) { + return nil, fmt.Errorf("stack installation is not supported on Windows VMs") + } + } + } + if err := s.manager.InstallStack(req.Username, req.StackId); err != nil { return nil, fmt.Errorf("failed to install stack: %w", err) } @@ -960,6 +995,16 @@ func toProtoContainer(info *incus.ContainerInfo) *pb.Container { osTypeEnum = ostype.OSTypeFromLabel(osLabel) } + // Determine access type based on OS + accessType := pb.AccessType_ACCESS_TYPE_SSH + var rdpAddress string + if ostype.IsWindows(osTypeEnum) { + accessType = pb.AccessType_ACCESS_TYPE_RDP + if info.IPAddress != "" { + rdpAddress = fmt.Sprintf("%s:3389", info.IPAddress) + } + } + return &pb.Container{ Name: info.Name, Username: username, @@ -979,6 +1024,8 @@ func toProtoContainer(info 
*incus.ContainerInfo) *pb.Container { GpuDevice: info.GPU, BackendId: info.BackendID, OsType: osTypeEnum, + AccessType: accessType, + RdpAddress: rdpAddress, } } @@ -1196,3 +1243,73 @@ func (s *ContainerServer) GetMonitoringInfo(ctx context.Context, req *pb.GetMoni VictoriaMetricsUrl: s.victoriaMetricsURL, }, nil } + +// guacamoleConnectionIDLabel is the Incus label key for storing the Guacamole connection ID. +const guacamoleConnectionIDLabel = "guacamole-connection-id" + +// SetGuacamoleClient sets the Guacamole client for Windows VM RDP registration. +func (s *ContainerServer) SetGuacamoleClient(client *guacamole.Client, adminUser, adminPass string) { + s.guacamoleClient = client + s.guacamoleUser = adminUser + s.guacamolePass = adminPass +} + +// registerGuacamoleConnection registers a Windows VM's RDP connection in Guacamole. +// Returns the connection ID, or "" if Guacamole is not configured. +func (s *ContainerServer) registerGuacamoleConnection(containerName, hostname, rdpUser, rdpPassword string) string { + if s.guacamoleClient == nil { + return "" + } + + token, err := s.guacamoleClient.Authenticate(s.guacamoleUser, s.guacamolePass) + if err != nil { + log.Printf("Warning: Guacamole auth failed, skipping RDP registration: %v", err) + return "" + } + + connID, err := s.guacamoleClient.CreateConnection(token, guacamole.ConnectionConfig{ + Name: containerName, + Hostname: hostname, + Port: "3389", + Username: rdpUser, + Password: rdpPassword, + }) + if err != nil { + log.Printf("Warning: Failed to register Guacamole connection for %s: %v", containerName, err) + return "" + } + + log.Printf("Guacamole RDP connection registered for %s (id=%s)", containerName, connID) + return connID +} + +// deregisterGuacamoleConnection removes a Windows VM's RDP connection from Guacamole. 
+func (s *ContainerServer) deregisterGuacamoleConnection(username string) { + if s.guacamoleClient == nil { + return + } + + // Look up the connection ID from container labels + info, err := s.manager.Get(username) + if err != nil { + return + } + + connID, ok := info.Labels[guacamoleConnectionIDLabel] + if !ok || connID == "" { + return + } + + token, err := s.guacamoleClient.Authenticate(s.guacamoleUser, s.guacamolePass) + if err != nil { + log.Printf("Warning: Guacamole auth failed during deregistration: %v", err) + return + } + + if err := s.guacamoleClient.DeleteConnection(token, connID); err != nil { + log.Printf("Warning: Failed to deregister Guacamole connection %s: %v", connID, err) + return + } + + log.Printf("Guacamole RDP connection removed for %s (id=%s)", username, connID) +} diff --git a/internal/server/core_services.go b/internal/server/core_services.go index 575ac77..5d7fe0b 100644 --- a/internal/server/core_services.go +++ b/internal/server/core_services.go @@ -889,6 +889,20 @@ func (cs *CoreServices) GetGrafanaURL() string { return fmt.Sprintf("http://%s:%d", cs.victoriaMetricsIP, DefaultGrafanaPort) } +// GetGuacamoleIP returns the IP of the running Guacamole core container, or "" if no running Guacamole container is found (listing failures also yield ""). +func (cs *CoreServices) GetGuacamoleIP() string { + containers, err := cs.incusClient.ListContainers() + if err != nil { + return "" + } + for _, c := range containers { + if c.Role == incus.RoleGuacamole && c.State == "Running" { + return c.IPAddress + } + } + return "" + } + + // SetupAlerting installs vmalert and Alertmanager inside the VictoriaMetrics // container. It writes default rules, configures Alertmanager with the // webhook URL, creates systemd services, and starts both. 
diff --git a/internal/server/dual_server.go b/internal/server/dual_server.go index c9c27d1..bc4d44b 100644 --- a/internal/server/dual_server.go +++ b/internal/server/dual_server.go @@ -26,6 +26,7 @@ import ( "github.com/footprintai/containarium/internal/container" "github.com/footprintai/containarium/internal/events" "github.com/footprintai/containarium/internal/gateway" + "github.com/footprintai/containarium/internal/guacamole" "github.com/footprintai/containarium/internal/incus" "github.com/footprintai/containarium/internal/metrics" "github.com/footprintai/containarium/internal/mtls" @@ -812,6 +813,20 @@ skipAppHosting: } } + // Wire Guacamole reverse proxy and API client if core Guacamole container is running + if coreServices != nil { + if guacIP := coreServices.GetGuacamoleIP(); guacIP != "" { + guacBackend := fmt.Sprintf("http://%s:8080", guacIP) + gatewayServer.SetGuacamoleBackendURL(guacBackend) + + // Wire Guacamole client into container server for auto-registration. NOTE(review): default guacadmin/guacadmin credentials are hard-coded here — confirm they are rotated in deployment or sourced from config/secrets. + guacClient := guacamole.New(guacBackend) + containerServer.SetGuacamoleClient(guacClient, "guacadmin", "guacadmin") + + log.Printf("Guacamole reverse proxy and API client configured: %s", guacBackend) + } + } + + // Wire alert relay config for HMAC signing + if config.AlertWebhookURL != "" { + gatewayServer.SetAlertRelayConfig(config.AlertWebhookURL, config.AlertWebhookSecret) diff --git a/internal/server/peer.go b/internal/server/peer.go index 0fb4f7a..7e0e6aa 100644 --- a/internal/server/peer.go +++ b/internal/server/peer.go @@ -588,10 +588,12 @@ func (pp *PeerPool) FindContainerPeer(username, authToken string) *PeerClient { containerName := username + "-container" for _, peer := range pp.Peers() { if !peer.Healthy { + log.Printf("[FindContainerPeer] skipping unhealthy peer %s", peer.ID) continue } containers, err := peer.fetchContainers(authToken) if err != nil { + log.Printf("[FindContainerPeer] peer %s fetchContainers failed: %v", peer.ID, err) continue } for _, c := range containers { diff 
--git a/internal/server/pentest_server.go b/internal/server/pentest_server.go index 60a3a10..67fad14 100644 --- a/internal/server/pentest_server.go +++ b/internal/server/pentest_server.go @@ -3,9 +3,11 @@ package server import ( "context" "fmt" + "log" "strings" "time" + "github.com/footprintai/containarium/internal/incus" "github.com/footprintai/containarium/internal/pentest" pb "github.com/footprintai/containarium/pkg/pb/containarium/v1" ) @@ -13,17 +15,20 @@ import ( // PentestServer implements the PentestService gRPC service type PentestServer struct { pb.UnimplementedPentestServiceServer - store *pentest.Store - manager *pentest.Manager - installer *pentest.Installer + store *pentest.Store + manager *pentest.Manager + installer *pentest.Installer + incusClient *incus.Client } // NewPentestServer creates a new pentest server func NewPentestServer(store *pentest.Store, manager *pentest.Manager) *PentestServer { + client, _ := incus.New() return &PentestServer{ - store: store, - manager: manager, - installer: pentest.NewInstaller(), + store: store, + manager: manager, + installer: pentest.NewInstaller(), + incusClient: client, } } @@ -217,6 +222,131 @@ func (s *PentestServer) InstallPentestTool(ctx context.Context, req *pb.InstallP }, nil } +// RemediatePentestFinding upgrades the vulnerable package in the affected container. +// The finding's Target field has format "container-name (path/to/binary)". +// We detect which OS package provides the binary and upgrade it. NOTE(review): the upgrade step below is Debian-only (apt-get / dpkg-query) — packages identified via rpm on RHEL/Rocky containers will fail to upgrade; route the RHEL family through dnf (e.g. "dnf upgrade -y <pkg>" and "rpm -q --qf '%{VERSION}-%{RELEASE}'") before shipping. 
+func (s *PentestServer) RemediatePentestFinding(ctx context.Context, req *pb.RemediatePentestFindingRequest) (*pb.RemediatePentestFindingResponse, error) { + if s.store == nil { + return nil, fmt.Errorf("pentest store not available") + } + if s.incusClient == nil { + return nil, fmt.Errorf("incus client not available") + } + + // Get the finding + finding, err := s.store.GetFindingByID(ctx, req.FindingId) + if err != nil { + return nil, fmt.Errorf("finding not found: %w", err) + } + + if finding.Category != "trivy" { + return nil, fmt.Errorf("remediation only supported for trivy findings, got %q", finding.Category) + } + + // Parse container name and binary path from Target: "container-name (path/to/binary)" + containerName, binaryPath, err := parseTrivyTarget(finding.Target) + if err != nil { + return nil, fmt.Errorf("cannot parse target: %w", err) + } + + log.Printf("[remediate] finding %d: container=%s binary=%s", req.FindingId, containerName, binaryPath) + + // Detect which OS package provides the binary + // Try dpkg first (Debian/Ubuntu), then rpm (RHEL/Rocky) + pkgName := "" + + dpkgOut, _, _ := s.incusClient.ExecWithOutput(containerName, []string{ + "dpkg", "-S", "/" + binaryPath, + }) + if dpkgOut != "" && !strings.Contains(dpkgOut, "no path found") { + // dpkg -S output: "docker-ce-cli: /usr/bin/dockerd" + parts := strings.SplitN(strings.TrimSpace(dpkgOut), ":", 2) + if len(parts) >= 1 { + pkgName = strings.TrimSpace(parts[0]) + } + } + + if pkgName == "" { + // Try rpm + rpmOut, _, _ := s.incusClient.ExecWithOutput(containerName, []string{ + "rpm", "-qf", "/" + binaryPath, + }) + if rpmOut != "" && !strings.Contains(rpmOut, "not owned") { + pkgName = strings.TrimSpace(rpmOut) + // rpm -qf returns "package-version-release.arch", extract name + if idx := strings.LastIndex(pkgName, "-"); idx > 0 { + if idx2 := strings.LastIndex(pkgName[:idx], "-"); idx2 > 0 { + pkgName = pkgName[:idx2] + } + } + } + } + + if pkgName == "" { + return 
&pb.RemediatePentestFindingResponse{ + Success: false, + Message: fmt.Sprintf("Cannot determine OS package for %s in %s. The binary may not be from a package manager.", binaryPath, containerName), + }, nil + } + + // Get current version before upgrade + oldVerOut, _, _ := s.incusClient.ExecWithOutput(containerName, []string{ + "dpkg-query", "-W", "-f", "${Version}", pkgName, + }) + oldVersion := strings.TrimSpace(oldVerOut) + + log.Printf("[remediate] upgrading %s in %s (current: %s)", pkgName, containerName, oldVersion) + + // Run apt-get update + upgrade + if err := s.incusClient.Exec(containerName, []string{"apt-get", "update"}); err != nil { + return nil, fmt.Errorf("apt-get update failed: %w", err) + } + + upgradeOut, _, upgradeErr := s.incusClient.ExecWithOutput(containerName, []string{ + "apt-get", "install", "--only-upgrade", "-y", pkgName, + }) + if upgradeErr != nil { + return &pb.RemediatePentestFindingResponse{ + Success: false, + Message: fmt.Sprintf("Failed to upgrade %s: %v\n%s", pkgName, upgradeErr, upgradeOut), + }, nil + } + + // Get new version + newVerOut, _, _ := s.incusClient.ExecWithOutput(containerName, []string{ + "dpkg-query", "-W", "-f", "${Version}", pkgName, + }) + newVersion := strings.TrimSpace(newVerOut) + + msg := fmt.Sprintf("Upgraded %s from %s to %s in %s", pkgName, oldVersion, newVersion, containerName) + if oldVersion == newVersion { + msg = fmt.Sprintf("Package %s is already at latest version %s in %s", pkgName, newVersion, containerName) + } + + log.Printf("[remediate] %s", msg) + + return &pb.RemediatePentestFindingResponse{ + Success: true, + Message: msg, + PackageName: pkgName, + OldVersion: oldVersion, + NewVersion: newVersion, + }, nil +} + +// parseTrivyTarget extracts container name and binary path from Trivy target string. 
+// Format: "container-name (path/to/binary)" +func parseTrivyTarget(target string) (containerName, binaryPath string, err error) { + // Find the last " (" separator + idx := strings.LastIndex(target, " (") + if idx < 0 || !strings.HasSuffix(target, ")") { + return "", "", fmt.Errorf("unexpected target format: %q (expected 'container-name (path)')", target) + } + containerName = target[:idx] + binaryPath = target[idx+2 : len(target)-1] // strip " (" and ")" + return containerName, binaryPath, nil +} + // scanRunToProto converts a store ScanRun to a proto ScanRun func scanRunToProto(run *pentest.ScanRun) *pb.PentestScanRun { pbRun := &pb.PentestScanRun{ diff --git a/internal/stacks/stacks.yaml b/internal/stacks/stacks.yaml index 113eca0..0af4a5e 100644 --- a/internal/stacks/stacks.yaml +++ b/internal/stacks/stacks.yaml @@ -233,3 +233,49 @@ stacks: post_install: - echo 'export PATH=/usr/local/cuda/bin:$PATH' >> ~/.bashrc - echo 'export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc + + - id: android + name: Android Development (Headless) + description: Android SDK, emulator with KVM acceleration, ADB, Gradle for headless builds and testing + icon: android + pre_install: + - "apt-get install -y openjdk-17-jdk-headless unzip wget cpu-checker qemu-kvm" + - "mkdir -p /opt/android-sdk/cmdline-tools" + - "wget -q -O /tmp/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-11076708_latest.zip" + - "unzip -q /tmp/cmdline-tools.zip -d /opt/android-sdk/cmdline-tools" + - "mv /opt/android-sdk/cmdline-tools/cmdline-tools /opt/android-sdk/cmdline-tools/latest" + - "rm /tmp/cmdline-tools.zip" + - "yes | /opt/android-sdk/cmdline-tools/latest/bin/sdkmanager --licenses >/dev/null 2>&1 || true" + - "/opt/android-sdk/cmdline-tools/latest/bin/sdkmanager 'platform-tools' 'build-tools;35.0.0' 'platforms;android-35' 'emulator' 'system-images;android-35;google_apis;x86_64'" + packages: [] + post_install: + - echo 'export 
ANDROID_HOME=/opt/android-sdk' >> ~/.bashrc + - echo 'export ANDROID_SDK_ROOT=/opt/android-sdk' >> ~/.bashrc + - echo 'export PATH=$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/platform-tools:$ANDROID_HOME/emulator:$PATH' >> ~/.bashrc + - /opt/android-sdk/cmdline-tools/latest/bin/avdmanager create avd -n default -k "system-images;android-35;google_apis;x86_64" -d pixel_6 --force + + - id: android-studio + name: Android Development (GUI) + description: Android Studio with VNC access, emulator, full IDE for app development + icon: android + pre_install: + - "apt-get install -y openjdk-17-jdk unzip wget cpu-checker qemu-kvm" + - "apt-get install -y tigervnc-standalone-server dbus-x11 xfce4 xfce4-terminal fonts-noto" + - "mkdir -p /opt/android-sdk/cmdline-tools" + - "wget -q -O /tmp/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-11076708_latest.zip" + - "unzip -q /tmp/cmdline-tools.zip -d /opt/android-sdk/cmdline-tools" + - "mv /opt/android-sdk/cmdline-tools/cmdline-tools /opt/android-sdk/cmdline-tools/latest" + - "rm /tmp/cmdline-tools.zip" + - "wget -q -O /tmp/android-studio.tar.gz https://redirector.gvt1.com/edgedl/android/studio/ide-zips/2024.3.2.16/android-studio-2024.3.2.16-linux.tar.gz" + - "tar xzf /tmp/android-studio.tar.gz -C /opt/" + - "rm /tmp/android-studio.tar.gz" + - "yes | /opt/android-sdk/cmdline-tools/latest/bin/sdkmanager --licenses >/dev/null 2>&1 || true" + - "/opt/android-sdk/cmdline-tools/latest/bin/sdkmanager 'platform-tools' 'build-tools;35.0.0' 'platforms;android-35' 'emulator' 'system-images;android-35;google_apis;x86_64'" + packages: [] + post_install: + - echo 'export ANDROID_HOME=/opt/android-sdk' >> ~/.bashrc + - echo 'export ANDROID_SDK_ROOT=/opt/android-sdk' >> ~/.bashrc + - echo 'export PATH=$ANDROID_HOME/cmdline-tools/latest/bin:$ANDROID_HOME/platform-tools:$ANDROID_HOME/emulator:/opt/android-studio/bin:$PATH' >> ~/.bashrc + - /opt/android-sdk/cmdline-tools/latest/bin/avdmanager create avd -n 
default -k "system-images;android-35;google_apis;x86_64" -d pixel_6 --force + - mkdir -p ~/.vnc && echo "containarium" | vncpasswd -f > ~/.vnc/passwd && chmod 600 ~/.vnc/passwd + - printf '#!/bin/bash\nstartxfce4 &\n' > ~/.vnc/xstartup && chmod +x ~/.vnc/xstartup diff --git a/pkg/pb/containarium/v1/container.pb.go b/pkg/pb/containarium/v1/container.pb.go index 91c2e1b..d8d2fd3 100644 --- a/pkg/pb/containarium/v1/container.pb.go +++ b/pkg/pb/containarium/v1/container.pb.go @@ -34,6 +34,8 @@ const ( OSType_OS_TYPE_ROCKY_9 OSType = 2 // Red Hat Enterprise Linux 9 (for production, requires subscription) OSType_OS_TYPE_RHEL_9 OSType = 3 + // Windows Server 2022 (runs as QEMU/KVM VM, RDP access) + OSType_OS_TYPE_WINDOWS_2022 OSType = 4 ) // Enum value maps for OSType. @@ -43,12 +45,14 @@ var ( 1: "OS_TYPE_UBUNTU_2404", 2: "OS_TYPE_ROCKY_9", 3: "OS_TYPE_RHEL_9", + 4: "OS_TYPE_WINDOWS_2022", } OSType_value = map[string]int32{ - "OS_TYPE_UNSPECIFIED": 0, - "OS_TYPE_UBUNTU_2404": 1, - "OS_TYPE_ROCKY_9": 2, - "OS_TYPE_RHEL_9": 3, + "OS_TYPE_UNSPECIFIED": 0, + "OS_TYPE_UBUNTU_2404": 1, + "OS_TYPE_ROCKY_9": 2, + "OS_TYPE_RHEL_9": 3, + "OS_TYPE_WINDOWS_2022": 4, } ) @@ -79,6 +83,55 @@ func (OSType) EnumDescriptor() ([]byte, []int) { return file_containarium_v1_container_proto_rawDescGZIP(), []int{0} } +// AccessType indicates how to connect to an instance +type AccessType int32 + +const ( + // SSH access (default for Linux containers) + AccessType_ACCESS_TYPE_SSH AccessType = 0 + // RDP access (for Windows VMs, via Guacamole browser-based client) + AccessType_ACCESS_TYPE_RDP AccessType = 1 +) + +// Enum value maps for AccessType. 
+var ( + AccessType_name = map[int32]string{ + 0: "ACCESS_TYPE_SSH", + 1: "ACCESS_TYPE_RDP", + } + AccessType_value = map[string]int32{ + "ACCESS_TYPE_SSH": 0, + "ACCESS_TYPE_RDP": 1, + } +) + +func (x AccessType) Enum() *AccessType { + p := new(AccessType) + *p = x + return p +} + +func (x AccessType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AccessType) Descriptor() protoreflect.EnumDescriptor { + return file_containarium_v1_container_proto_enumTypes[1].Descriptor() +} + +func (AccessType) Type() protoreflect.EnumType { + return &file_containarium_v1_container_proto_enumTypes[1] +} + +func (x AccessType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AccessType.Descriptor instead. +func (AccessType) EnumDescriptor() ([]byte, []int) { + return file_containarium_v1_container_proto_rawDescGZIP(), []int{1} +} + // ContainerState represents the current state of a container type ContainerState int32 @@ -132,11 +185,11 @@ func (x ContainerState) String() string { } func (ContainerState) Descriptor() protoreflect.EnumDescriptor { - return file_containarium_v1_container_proto_enumTypes[1].Descriptor() + return file_containarium_v1_container_proto_enumTypes[2].Descriptor() } func (ContainerState) Type() protoreflect.EnumType { - return &file_containarium_v1_container_proto_enumTypes[1] + return &file_containarium_v1_container_proto_enumTypes[2] } func (x ContainerState) Number() protoreflect.EnumNumber { @@ -145,7 +198,7 @@ func (x ContainerState) Number() protoreflect.EnumNumber { // Deprecated: Use ContainerState.Descriptor instead. 
func (ContainerState) EnumDescriptor() ([]byte, []int) { - return file_containarium_v1_container_proto_rawDescGZIP(), []int{1} + return file_containarium_v1_container_proto_rawDescGZIP(), []int{2} } // ResourceLimits defines resource constraints for a container @@ -326,7 +379,11 @@ type Container struct { // Backend ID this container runs on (e.g., "gcp-spot", "tunnel-fts-5900x-gpu") BackendId string `protobuf:"bytes,14,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` // Operating system type of the container - OsType OSType `protobuf:"varint,15,opt,name=os_type,json=osType,proto3,enum=containarium.v1.OSType" json:"os_type,omitempty"` + OsType OSType `protobuf:"varint,15,opt,name=os_type,json=osType,proto3,enum=containarium.v1.OSType" json:"os_type,omitempty"` + // How to connect to this instance (SSH for Linux, RDP for Windows) + AccessType AccessType `protobuf:"varint,16,opt,name=access_type,json=accessType,proto3,enum=containarium.v1.AccessType" json:"access_type,omitempty"` + // RDP connection address (e.g., "10.100.0.50:3389") โ€” populated for Windows VMs + RdpAddress string `protobuf:"bytes,17,opt,name=rdp_address,json=rdpAddress,proto3" json:"rdp_address,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -466,6 +523,20 @@ func (x *Container) GetOsType() OSType { return OSType_OS_TYPE_UNSPECIFIED } +func (x *Container) GetAccessType() AccessType { + if x != nil { + return x.AccessType + } + return AccessType_ACCESS_TYPE_SSH +} + +func (x *Container) GetRdpAddress() string { + if x != nil { + return x.RdpAddress + } + return "" +} + // ContainerMetrics contains runtime metrics for a container type ContainerMetrics struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -731,8 +802,10 @@ type CreateContainerResponse struct { Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` // Human-readable message about the creation Message string 
`protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // SSH connection string for the user - SshCommand string `protobuf:"bytes,3,opt,name=ssh_command,json=sshCommand,proto3" json:"ssh_command,omitempty"` + // SSH connection string for the user (Linux containers) + SshCommand string `protobuf:"bytes,3,opt,name=ssh_command,json=sshCommand,proto3" json:"ssh_command,omitempty"` + // RDP connection address (Windows VMs) + RdpAddress string `protobuf:"bytes,4,opt,name=rdp_address,json=rdpAddress,proto3" json:"rdp_address,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -788,6 +861,13 @@ func (x *CreateContainerResponse) GetSshCommand() string { return "" } +func (x *CreateContainerResponse) GetRdpAddress() string { + if x != nil { + return x.RdpAddress + } + return "" +} + // ListContainersRequest is the request to list containers type ListContainersRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2608,7 +2688,7 @@ const file_containarium_v1_container_proto_rawDesc = "" + "\vmac_address\x18\x02 \x01(\tR\n" + "macAddress\x12\x1c\n" + "\tinterface\x18\x03 \x01(\tR\tinterface\x12\x16\n" + - "\x06bridge\x18\x04 \x01(\tR\x06bridge\"\x80\x05\n" + + "\x06bridge\x18\x04 \x01(\tR\x06bridge\"\xdf\x05\n" + "\tContainer\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1a\n" + "\busername\x18\x02 \x01(\tR\busername\x125\n" + @@ -2629,7 +2709,11 @@ const file_containarium_v1_container_proto_rawDesc = "" + "gpu_device\x18\r \x01(\tR\tgpuDevice\x12\x1d\n" + "\n" + "backend_id\x18\x0e \x01(\tR\tbackendId\x120\n" + - "\aos_type\x18\x0f \x01(\x0e2\x17.containarium.v1.OSTypeR\x06osType\x1a9\n" + + "\aos_type\x18\x0f \x01(\x0e2\x17.containarium.v1.OSTypeR\x06osType\x12<\n" + + "\vaccess_type\x18\x10 \x01(\x0e2\x1b.containarium.v1.AccessTypeR\n" + + "accessType\x12\x1f\n" + + "\vrdp_address\x18\x11 \x01(\tR\n" + + "rdpAddress\x1a9\n" + "\vLabelsEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + 
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xcf\x02\n" + @@ -2659,12 +2743,14 @@ const file_containarium_v1_container_proto_rawDesc = "" + "\aos_type\x18\f \x01(\x0e2\x17.containarium.v1.OSTypeR\x06osType\x1a9\n" + "\vLabelsEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x8e\x01\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xaf\x01\n" + "\x17CreateContainerResponse\x128\n" + "\tcontainer\x18\x01 \x01(\v2\x1a.containarium.v1.ContainerR\tcontainer\x12\x18\n" + "\amessage\x18\x02 \x01(\tR\amessage\x12\x1f\n" + "\vssh_command\x18\x03 \x01(\tR\n" + - "sshCommand\"\x86\x02\n" + + "sshCommand\x12\x1f\n" + + "\vrdp_address\x18\x04 \x01(\tR\n" + + "rdpAddress\"\x86\x02\n" + "\x15ListContainersRequest\x125\n" + "\x05state\x18\x01 \x01(\x0e2\x1f.containarium.v1.ContainerStateR\x05state\x12\x1a\n" + "\busername\x18\x02 \x01(\tR\busername\x12Z\n" + @@ -2781,12 +2867,17 @@ const file_containarium_v1_container_proto_rawDesc = "" + "\aenabled\x18\x01 \x01(\bR\aenabled\x12\x1f\n" + "\vgrafana_url\x18\x02 \x01(\tR\n" + "grafanaUrl\x120\n" + - "\x14victoria_metrics_url\x18\x03 \x01(\tR\x12victoriaMetricsUrl*c\n" + + "\x14victoria_metrics_url\x18\x03 \x01(\tR\x12victoriaMetricsUrl*}\n" + "\x06OSType\x12\x17\n" + "\x13OS_TYPE_UNSPECIFIED\x10\x00\x12\x17\n" + "\x13OS_TYPE_UBUNTU_2404\x10\x01\x12\x13\n" + "\x0fOS_TYPE_ROCKY_9\x10\x02\x12\x12\n" + - "\x0eOS_TYPE_RHEL_9\x10\x03*\xc0\x02\n" + + "\x0eOS_TYPE_RHEL_9\x10\x03\x12\x18\n" + + "\x14OS_TYPE_WINDOWS_2022\x10\x04*6\n" + + "\n" + + "AccessType\x12\x13\n" + + "\x0fACCESS_TYPE_SSH\x10\x00\x12\x13\n" + + "\x0fACCESS_TYPE_RDP\x10\x01*\xc0\x02\n" + "\x0eContainerState\x12,\n" + "\x1bCONTAINER_STATE_UNSPECIFIED\x10\x00\x1a\v\x8a\xb5\x18\aUnknown\x12(\n" + "\x17CONTAINER_STATE_RUNNING\x10\x01\x1a\v\x8a\xb5\x18\aRunning\x12(\n" + @@ -2811,82 +2902,84 @@ func file_containarium_v1_container_proto_rawDescGZIP() []byte { return 
file_containarium_v1_container_proto_rawDescData } -var file_containarium_v1_container_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_containarium_v1_container_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_containarium_v1_container_proto_msgTypes = make([]protoimpl.MessageInfo, 40) var file_containarium_v1_container_proto_goTypes = []any{ (OSType)(0), // 0: containarium.v1.OSType - (ContainerState)(0), // 1: containarium.v1.ContainerState - (*ResourceLimits)(nil), // 2: containarium.v1.ResourceLimits - (*NetworkInfo)(nil), // 3: containarium.v1.NetworkInfo - (*Container)(nil), // 4: containarium.v1.Container - (*ContainerMetrics)(nil), // 5: containarium.v1.ContainerMetrics - (*CreateContainerRequest)(nil), // 6: containarium.v1.CreateContainerRequest - (*CreateContainerResponse)(nil), // 7: containarium.v1.CreateContainerResponse - (*ListContainersRequest)(nil), // 8: containarium.v1.ListContainersRequest - (*ListContainersResponse)(nil), // 9: containarium.v1.ListContainersResponse - (*GetContainerRequest)(nil), // 10: containarium.v1.GetContainerRequest - (*GetContainerResponse)(nil), // 11: containarium.v1.GetContainerResponse - (*DeleteContainerRequest)(nil), // 12: containarium.v1.DeleteContainerRequest - (*DeleteContainerResponse)(nil), // 13: containarium.v1.DeleteContainerResponse - (*StartContainerRequest)(nil), // 14: containarium.v1.StartContainerRequest - (*StartContainerResponse)(nil), // 15: containarium.v1.StartContainerResponse - (*StopContainerRequest)(nil), // 16: containarium.v1.StopContainerRequest - (*StopContainerResponse)(nil), // 17: containarium.v1.StopContainerResponse - (*AddSSHKeyRequest)(nil), // 18: containarium.v1.AddSSHKeyRequest - (*AddSSHKeyResponse)(nil), // 19: containarium.v1.AddSSHKeyResponse - (*RemoveSSHKeyRequest)(nil), // 20: containarium.v1.RemoveSSHKeyRequest - (*RemoveSSHKeyResponse)(nil), // 21: containarium.v1.RemoveSSHKeyResponse - (*GetMetricsRequest)(nil), // 22: 
containarium.v1.GetMetricsRequest - (*GetMetricsResponse)(nil), // 23: containarium.v1.GetMetricsResponse - (*ResizeContainerRequest)(nil), // 24: containarium.v1.ResizeContainerRequest - (*ResizeContainerResponse)(nil), // 25: containarium.v1.ResizeContainerResponse - (*Collaborator)(nil), // 26: containarium.v1.Collaborator - (*AddCollaboratorRequest)(nil), // 27: containarium.v1.AddCollaboratorRequest - (*AddCollaboratorResponse)(nil), // 28: containarium.v1.AddCollaboratorResponse - (*RemoveCollaboratorRequest)(nil), // 29: containarium.v1.RemoveCollaboratorRequest - (*RemoveCollaboratorResponse)(nil), // 30: containarium.v1.RemoveCollaboratorResponse - (*ListCollaboratorsRequest)(nil), // 31: containarium.v1.ListCollaboratorsRequest - (*ListCollaboratorsResponse)(nil), // 32: containarium.v1.ListCollaboratorsResponse - (*CleanupDiskRequest)(nil), // 33: containarium.v1.CleanupDiskRequest - (*CleanupDiskResponse)(nil), // 34: containarium.v1.CleanupDiskResponse - (*InstallStackRequest)(nil), // 35: containarium.v1.InstallStackRequest - (*InstallStackResponse)(nil), // 36: containarium.v1.InstallStackResponse - (*GetMonitoringInfoRequest)(nil), // 37: containarium.v1.GetMonitoringInfoRequest - (*GetMonitoringInfoResponse)(nil), // 38: containarium.v1.GetMonitoringInfoResponse - nil, // 39: containarium.v1.Container.LabelsEntry - nil, // 40: containarium.v1.CreateContainerRequest.LabelsEntry - nil, // 41: containarium.v1.ListContainersRequest.LabelFilterEntry - (*descriptorpb.EnumValueOptions)(nil), // 42: google.protobuf.EnumValueOptions + (AccessType)(0), // 1: containarium.v1.AccessType + (ContainerState)(0), // 2: containarium.v1.ContainerState + (*ResourceLimits)(nil), // 3: containarium.v1.ResourceLimits + (*NetworkInfo)(nil), // 4: containarium.v1.NetworkInfo + (*Container)(nil), // 5: containarium.v1.Container + (*ContainerMetrics)(nil), // 6: containarium.v1.ContainerMetrics + (*CreateContainerRequest)(nil), // 7: containarium.v1.CreateContainerRequest + 
(*CreateContainerResponse)(nil), // 8: containarium.v1.CreateContainerResponse + (*ListContainersRequest)(nil), // 9: containarium.v1.ListContainersRequest + (*ListContainersResponse)(nil), // 10: containarium.v1.ListContainersResponse + (*GetContainerRequest)(nil), // 11: containarium.v1.GetContainerRequest + (*GetContainerResponse)(nil), // 12: containarium.v1.GetContainerResponse + (*DeleteContainerRequest)(nil), // 13: containarium.v1.DeleteContainerRequest + (*DeleteContainerResponse)(nil), // 14: containarium.v1.DeleteContainerResponse + (*StartContainerRequest)(nil), // 15: containarium.v1.StartContainerRequest + (*StartContainerResponse)(nil), // 16: containarium.v1.StartContainerResponse + (*StopContainerRequest)(nil), // 17: containarium.v1.StopContainerRequest + (*StopContainerResponse)(nil), // 18: containarium.v1.StopContainerResponse + (*AddSSHKeyRequest)(nil), // 19: containarium.v1.AddSSHKeyRequest + (*AddSSHKeyResponse)(nil), // 20: containarium.v1.AddSSHKeyResponse + (*RemoveSSHKeyRequest)(nil), // 21: containarium.v1.RemoveSSHKeyRequest + (*RemoveSSHKeyResponse)(nil), // 22: containarium.v1.RemoveSSHKeyResponse + (*GetMetricsRequest)(nil), // 23: containarium.v1.GetMetricsRequest + (*GetMetricsResponse)(nil), // 24: containarium.v1.GetMetricsResponse + (*ResizeContainerRequest)(nil), // 25: containarium.v1.ResizeContainerRequest + (*ResizeContainerResponse)(nil), // 26: containarium.v1.ResizeContainerResponse + (*Collaborator)(nil), // 27: containarium.v1.Collaborator + (*AddCollaboratorRequest)(nil), // 28: containarium.v1.AddCollaboratorRequest + (*AddCollaboratorResponse)(nil), // 29: containarium.v1.AddCollaboratorResponse + (*RemoveCollaboratorRequest)(nil), // 30: containarium.v1.RemoveCollaboratorRequest + (*RemoveCollaboratorResponse)(nil), // 31: containarium.v1.RemoveCollaboratorResponse + (*ListCollaboratorsRequest)(nil), // 32: containarium.v1.ListCollaboratorsRequest + (*ListCollaboratorsResponse)(nil), // 33: 
containarium.v1.ListCollaboratorsResponse + (*CleanupDiskRequest)(nil), // 34: containarium.v1.CleanupDiskRequest + (*CleanupDiskResponse)(nil), // 35: containarium.v1.CleanupDiskResponse + (*InstallStackRequest)(nil), // 36: containarium.v1.InstallStackRequest + (*InstallStackResponse)(nil), // 37: containarium.v1.InstallStackResponse + (*GetMonitoringInfoRequest)(nil), // 38: containarium.v1.GetMonitoringInfoRequest + (*GetMonitoringInfoResponse)(nil), // 39: containarium.v1.GetMonitoringInfoResponse + nil, // 40: containarium.v1.Container.LabelsEntry + nil, // 41: containarium.v1.CreateContainerRequest.LabelsEntry + nil, // 42: containarium.v1.ListContainersRequest.LabelFilterEntry + (*descriptorpb.EnumValueOptions)(nil), // 43: google.protobuf.EnumValueOptions } var file_containarium_v1_container_proto_depIdxs = []int32{ - 1, // 0: containarium.v1.Container.state:type_name -> containarium.v1.ContainerState - 2, // 1: containarium.v1.Container.resources:type_name -> containarium.v1.ResourceLimits - 3, // 2: containarium.v1.Container.network:type_name -> containarium.v1.NetworkInfo - 39, // 3: containarium.v1.Container.labels:type_name -> containarium.v1.Container.LabelsEntry + 2, // 0: containarium.v1.Container.state:type_name -> containarium.v1.ContainerState + 3, // 1: containarium.v1.Container.resources:type_name -> containarium.v1.ResourceLimits + 4, // 2: containarium.v1.Container.network:type_name -> containarium.v1.NetworkInfo + 40, // 3: containarium.v1.Container.labels:type_name -> containarium.v1.Container.LabelsEntry 0, // 4: containarium.v1.Container.os_type:type_name -> containarium.v1.OSType - 2, // 5: containarium.v1.CreateContainerRequest.resources:type_name -> containarium.v1.ResourceLimits - 40, // 6: containarium.v1.CreateContainerRequest.labels:type_name -> containarium.v1.CreateContainerRequest.LabelsEntry - 0, // 7: containarium.v1.CreateContainerRequest.os_type:type_name -> containarium.v1.OSType - 4, // 8: 
containarium.v1.CreateContainerResponse.container:type_name -> containarium.v1.Container - 1, // 9: containarium.v1.ListContainersRequest.state:type_name -> containarium.v1.ContainerState - 41, // 10: containarium.v1.ListContainersRequest.label_filter:type_name -> containarium.v1.ListContainersRequest.LabelFilterEntry - 4, // 11: containarium.v1.ListContainersResponse.containers:type_name -> containarium.v1.Container - 4, // 12: containarium.v1.GetContainerResponse.container:type_name -> containarium.v1.Container - 5, // 13: containarium.v1.GetContainerResponse.metrics:type_name -> containarium.v1.ContainerMetrics - 4, // 14: containarium.v1.StartContainerResponse.container:type_name -> containarium.v1.Container - 4, // 15: containarium.v1.StopContainerResponse.container:type_name -> containarium.v1.Container - 5, // 16: containarium.v1.GetMetricsResponse.metrics:type_name -> containarium.v1.ContainerMetrics - 4, // 17: containarium.v1.ResizeContainerResponse.container:type_name -> containarium.v1.Container - 26, // 18: containarium.v1.AddCollaboratorResponse.collaborator:type_name -> containarium.v1.Collaborator - 26, // 19: containarium.v1.ListCollaboratorsResponse.collaborators:type_name -> containarium.v1.Collaborator - 4, // 20: containarium.v1.CleanupDiskResponse.container:type_name -> containarium.v1.Container - 4, // 21: containarium.v1.InstallStackResponse.container:type_name -> containarium.v1.Container - 42, // 22: containarium.v1.state_name:extendee -> google.protobuf.EnumValueOptions - 23, // [23:23] is the sub-list for method output_type - 23, // [23:23] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 22, // [22:23] is the sub-list for extension extendee - 0, // [0:22] is the sub-list for field type_name + 1, // 5: containarium.v1.Container.access_type:type_name -> containarium.v1.AccessType + 3, // 6: containarium.v1.CreateContainerRequest.resources:type_name -> containarium.v1.ResourceLimits + 41, // 
7: containarium.v1.CreateContainerRequest.labels:type_name -> containarium.v1.CreateContainerRequest.LabelsEntry + 0, // 8: containarium.v1.CreateContainerRequest.os_type:type_name -> containarium.v1.OSType + 5, // 9: containarium.v1.CreateContainerResponse.container:type_name -> containarium.v1.Container + 2, // 10: containarium.v1.ListContainersRequest.state:type_name -> containarium.v1.ContainerState + 42, // 11: containarium.v1.ListContainersRequest.label_filter:type_name -> containarium.v1.ListContainersRequest.LabelFilterEntry + 5, // 12: containarium.v1.ListContainersResponse.containers:type_name -> containarium.v1.Container + 5, // 13: containarium.v1.GetContainerResponse.container:type_name -> containarium.v1.Container + 6, // 14: containarium.v1.GetContainerResponse.metrics:type_name -> containarium.v1.ContainerMetrics + 5, // 15: containarium.v1.StartContainerResponse.container:type_name -> containarium.v1.Container + 5, // 16: containarium.v1.StopContainerResponse.container:type_name -> containarium.v1.Container + 6, // 17: containarium.v1.GetMetricsResponse.metrics:type_name -> containarium.v1.ContainerMetrics + 5, // 18: containarium.v1.ResizeContainerResponse.container:type_name -> containarium.v1.Container + 27, // 19: containarium.v1.AddCollaboratorResponse.collaborator:type_name -> containarium.v1.Collaborator + 27, // 20: containarium.v1.ListCollaboratorsResponse.collaborators:type_name -> containarium.v1.Collaborator + 5, // 21: containarium.v1.CleanupDiskResponse.container:type_name -> containarium.v1.Container + 5, // 22: containarium.v1.InstallStackResponse.container:type_name -> containarium.v1.Container + 43, // 23: containarium.v1.state_name:extendee -> google.protobuf.EnumValueOptions + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 23, // [23:24] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for 
field type_name } func init() { file_containarium_v1_container_proto_init() } @@ -2899,7 +2992,7 @@ func file_containarium_v1_container_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_containarium_v1_container_proto_rawDesc), len(file_containarium_v1_container_proto_rawDesc)), - NumEnums: 2, + NumEnums: 3, NumMessages: 40, NumExtensions: 1, NumServices: 0, diff --git a/pkg/pb/containarium/v1/pentest.pb.go b/pkg/pb/containarium/v1/pentest.pb.go index 798a494..bc6a67f 100644 --- a/pkg/pb/containarium/v1/pentest.pb.go +++ b/pkg/pb/containarium/v1/pentest.pb.go @@ -1390,6 +1390,134 @@ func (x *InstallPentestToolResponse) GetMessage() string { return "" } +// RemediatePentestFindingRequest requests remediation of a Trivy finding by +// upgrading the OS package that contains the vulnerable binary. +type RemediatePentestFindingRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Finding ID to remediate + FindingId int64 `protobuf:"varint,1,opt,name=finding_id,json=findingId,proto3" json:"finding_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemediatePentestFindingRequest) Reset() { + *x = RemediatePentestFindingRequest{} + mi := &file_containarium_v1_pentest_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemediatePentestFindingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemediatePentestFindingRequest) ProtoMessage() {} + +func (x *RemediatePentestFindingRequest) ProtoReflect() protoreflect.Message { + mi := &file_containarium_v1_pentest_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
RemediatePentestFindingRequest.ProtoReflect.Descriptor instead. +func (*RemediatePentestFindingRequest) Descriptor() ([]byte, []int) { + return file_containarium_v1_pentest_proto_rawDescGZIP(), []int{20} +} + +func (x *RemediatePentestFindingRequest) GetFindingId() int64 { + if x != nil { + return x.FindingId + } + return 0 +} + +// RemediatePentestFindingResponse returns the result of the remediation attempt. +type RemediatePentestFindingResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + // Human-readable result (e.g., "docker-ce upgraded from 27.0.1 to 27.0.3") + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Package that was upgraded + PackageName string `protobuf:"bytes,3,opt,name=package_name,json=packageName,proto3" json:"package_name,omitempty"` + // Version before upgrade + OldVersion string `protobuf:"bytes,4,opt,name=old_version,json=oldVersion,proto3" json:"old_version,omitempty"` + // Version after upgrade + NewVersion string `protobuf:"bytes,5,opt,name=new_version,json=newVersion,proto3" json:"new_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemediatePentestFindingResponse) Reset() { + *x = RemediatePentestFindingResponse{} + mi := &file_containarium_v1_pentest_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemediatePentestFindingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemediatePentestFindingResponse) ProtoMessage() {} + +func (x *RemediatePentestFindingResponse) ProtoReflect() protoreflect.Message { + mi := &file_containarium_v1_pentest_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} 
+ +// Deprecated: Use RemediatePentestFindingResponse.ProtoReflect.Descriptor instead. +func (*RemediatePentestFindingResponse) Descriptor() ([]byte, []int) { + return file_containarium_v1_pentest_proto_rawDescGZIP(), []int{21} +} + +func (x *RemediatePentestFindingResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *RemediatePentestFindingResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *RemediatePentestFindingResponse) GetPackageName() string { + if x != nil { + return x.PackageName + } + return "" +} + +func (x *RemediatePentestFindingResponse) GetOldVersion() string { + if x != nil { + return x.OldVersion + } + return "" +} + +func (x *RemediatePentestFindingResponse) GetNewVersion() string { + if x != nil { + return x.NewVersion + } + return "" +} + var File_containarium_v1_pentest_proto protoreflect.FileDescriptor const file_containarium_v1_pentest_proto_rawDesc = "" + @@ -1509,7 +1637,18 @@ const file_containarium_v1_pentest_proto_rawDesc = "" + "\ttool_name\x18\x01 \x01(\tR\btoolName\"P\n" + "\x1aInstallPentestToolResponse\x12\x18\n" + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage2\x87\x12\n" + + "\amessage\x18\x02 \x01(\tR\amessage\"?\n" + + "\x1eRemediatePentestFindingRequest\x12\x1d\n" + + "\n" + + "finding_id\x18\x01 \x01(\x03R\tfindingId\"\xba\x01\n" + + "\x1fRemediatePentestFindingResponse\x12\x18\n" + + "\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12!\n" + + "\fpackage_name\x18\x03 \x01(\tR\vpackageName\x12\x1f\n" + + "\vold_version\x18\x04 \x01(\tR\n" + + "oldVersion\x12\x1f\n" + + "\vnew_version\x18\x05 \x01(\tR\n" + + "newVersion2\x98\x15\n" + "\x0ePentestService\x12\x94\x02\n" + "\x12TriggerPentestScan\x12*.containarium.v1.TriggerPentestScanRequest\x1a+.containarium.v1.TriggerPentestScanResponse\"\xa4\x01\x92A\x85\x01\n" + "\aPentest\x12\x1dTrigger penetration test 
scan\x1a[Triggers an on-demand penetration test scan across all registered endpoints and containers.\x82\xd3\xe4\x93\x02\x15:\x01*\"\x10/v1/pentest/scan\x12\x81\x02\n" + @@ -1526,7 +1665,9 @@ const file_containarium_v1_pentest_proto_rawDesc = "" + "\x10GetPentestConfig\x12(.containarium.v1.GetPentestConfigRequest\x1a).containarium.v1.GetPentestConfigResponse\"\xb0\x01\x92A\x92\x01\n" + "\aPentest\x12\x19Get pentest configuration\x1alReturns the current penetration test configuration including enabled modules and external tool availability.\x82\xd3\xe4\x93\x02\x14\x12\x12/v1/pentest/config\x12\x8f\x02\n" + "\x12InstallPentestTool\x12*.containarium.v1.InstallPentestToolRequest\x1a+.containarium.v1.InstallPentestToolResponse\"\x9f\x01\x92Ax\n" + - "\aPentest\x12\x14Install pentest tool\x1aWDownloads and installs an external pentest tool (nuclei or trivy) from GitHub releases.\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/v1/pentest/tools/installBKZIgithub.com/footprintai/containarium/pkg/pb/containarium/v1;containariumv1b\x06proto3" + "\aPentest\x12\x14Install pentest tool\x1aWDownloads and installs an external pentest tool (nuclei or trivy) from GitHub releases.\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/v1/pentest/tools/install\x12\x8e\x03\n" + + "\x17RemediatePentestFinding\x12/.containarium.v1.RemediatePentestFindingRequest\x1a0.containarium.v1.RemediatePentestFindingResponse\"\x8f\x02\x92A\xd5\x01\n" + + "\aPentest\x12\x1bRemediate a pentest finding\x1a\xac\x01Upgrades the OS package that contains the vulnerable binary to the latest version. 
Only works for Trivy container findings where the binary belongs to an installed package.\x82\xd3\xe4\x93\x020:\x01*\"+/v1/pentest/findings/{finding_id}/remediateBKZIgithub.com/footprintai/containarium/pkg/pb/containarium/v1;containariumv1b\x06proto3" var ( file_containarium_v1_pentest_proto_rawDescOnce sync.Once @@ -1540,7 +1681,7 @@ func file_containarium_v1_pentest_proto_rawDescGZIP() []byte { return file_containarium_v1_pentest_proto_rawDescData } -var file_containarium_v1_pentest_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_containarium_v1_pentest_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_containarium_v1_pentest_proto_goTypes = []any{ (*PentestScanRun)(nil), // 0: containarium.v1.PentestScanRun (*PentestFinding)(nil), // 1: containarium.v1.PentestFinding @@ -1562,10 +1703,12 @@ var file_containarium_v1_pentest_proto_goTypes = []any{ (*GetPentestConfigResponse)(nil), // 17: containarium.v1.GetPentestConfigResponse (*InstallPentestToolRequest)(nil), // 18: containarium.v1.InstallPentestToolRequest (*InstallPentestToolResponse)(nil), // 19: containarium.v1.InstallPentestToolResponse - nil, // 20: containarium.v1.PentestFindingSummary.ByCategoryEntry + (*RemediatePentestFindingRequest)(nil), // 20: containarium.v1.RemediatePentestFindingRequest + (*RemediatePentestFindingResponse)(nil), // 21: containarium.v1.RemediatePentestFindingResponse + nil, // 22: containarium.v1.PentestFindingSummary.ByCategoryEntry } var file_containarium_v1_pentest_proto_depIdxs = []int32{ - 20, // 0: containarium.v1.PentestFindingSummary.by_category:type_name -> containarium.v1.PentestFindingSummary.ByCategoryEntry + 22, // 0: containarium.v1.PentestFindingSummary.by_category:type_name -> containarium.v1.PentestFindingSummary.ByCategoryEntry 0, // 1: containarium.v1.ListPentestScanRunsResponse.scan_runs:type_name -> containarium.v1.PentestScanRun 0, // 2: containarium.v1.GetPentestScanRunResponse.scan_run:type_name -> containarium.v1.PentestScanRun 1, 
// 3: containarium.v1.ListPentestFindingsResponse.findings:type_name -> containarium.v1.PentestFinding @@ -1579,16 +1722,18 @@ var file_containarium_v1_pentest_proto_depIdxs = []int32{ 14, // 11: containarium.v1.PentestService.SuppressPentestFinding:input_type -> containarium.v1.SuppressPentestFindingRequest 16, // 12: containarium.v1.PentestService.GetPentestConfig:input_type -> containarium.v1.GetPentestConfigRequest 18, // 13: containarium.v1.PentestService.InstallPentestTool:input_type -> containarium.v1.InstallPentestToolRequest - 5, // 14: containarium.v1.PentestService.TriggerPentestScan:output_type -> containarium.v1.TriggerPentestScanResponse - 7, // 15: containarium.v1.PentestService.ListPentestScanRuns:output_type -> containarium.v1.ListPentestScanRunsResponse - 9, // 16: containarium.v1.PentestService.GetPentestScanRun:output_type -> containarium.v1.GetPentestScanRunResponse - 11, // 17: containarium.v1.PentestService.ListPentestFindings:output_type -> containarium.v1.ListPentestFindingsResponse - 13, // 18: containarium.v1.PentestService.GetPentestFindingSummary:output_type -> containarium.v1.GetPentestFindingSummaryResponse - 15, // 19: containarium.v1.PentestService.SuppressPentestFinding:output_type -> containarium.v1.SuppressPentestFindingResponse - 17, // 20: containarium.v1.PentestService.GetPentestConfig:output_type -> containarium.v1.GetPentestConfigResponse - 19, // 21: containarium.v1.PentestService.InstallPentestTool:output_type -> containarium.v1.InstallPentestToolResponse - 14, // [14:22] is the sub-list for method output_type - 6, // [6:14] is the sub-list for method input_type + 20, // 14: containarium.v1.PentestService.RemediatePentestFinding:input_type -> containarium.v1.RemediatePentestFindingRequest + 5, // 15: containarium.v1.PentestService.TriggerPentestScan:output_type -> containarium.v1.TriggerPentestScanResponse + 7, // 16: containarium.v1.PentestService.ListPentestScanRuns:output_type -> 
containarium.v1.ListPentestScanRunsResponse + 9, // 17: containarium.v1.PentestService.GetPentestScanRun:output_type -> containarium.v1.GetPentestScanRunResponse + 11, // 18: containarium.v1.PentestService.ListPentestFindings:output_type -> containarium.v1.ListPentestFindingsResponse + 13, // 19: containarium.v1.PentestService.GetPentestFindingSummary:output_type -> containarium.v1.GetPentestFindingSummaryResponse + 15, // 20: containarium.v1.PentestService.SuppressPentestFinding:output_type -> containarium.v1.SuppressPentestFindingResponse + 17, // 21: containarium.v1.PentestService.GetPentestConfig:output_type -> containarium.v1.GetPentestConfigResponse + 19, // 22: containarium.v1.PentestService.InstallPentestTool:output_type -> containarium.v1.InstallPentestToolResponse + 21, // 23: containarium.v1.PentestService.RemediatePentestFinding:output_type -> containarium.v1.RemediatePentestFindingResponse + 15, // [15:24] is the sub-list for method output_type + 6, // [6:15] is the sub-list for method input_type 6, // [6:6] is the sub-list for extension type_name 6, // [6:6] is the sub-list for extension extendee 0, // [0:6] is the sub-list for field type_name @@ -1605,7 +1750,7 @@ func file_containarium_v1_pentest_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_containarium_v1_pentest_proto_rawDesc), len(file_containarium_v1_pentest_proto_rawDesc)), NumEnums: 0, - NumMessages: 21, + NumMessages: 23, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/pb/containarium/v1/pentest.pb.gw.go b/pkg/pb/containarium/v1/pentest.pb.gw.go index f0c5393..a598824 100644 --- a/pkg/pb/containarium/v1/pentest.pb.gw.go +++ b/pkg/pb/containarium/v1/pentest.pb.gw.go @@ -285,6 +285,51 @@ func local_request_PentestService_InstallPentestTool_0(ctx context.Context, mars return msg, metadata, err } +func request_PentestService_RemediatePentestFinding_0(ctx context.Context, marshaler runtime.Marshaler, client 
PentestServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RemediatePentestFindingRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["finding_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "finding_id") + } + protoReq.FindingId, err = runtime.Int64(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "finding_id", err) + } + msg, err := client.RemediatePentestFinding(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_PentestService_RemediatePentestFinding_0(ctx context.Context, marshaler runtime.Marshaler, server PentestServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq RemediatePentestFindingRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + val, ok := pathParams["finding_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "finding_id") + } + protoReq.FindingId, err = runtime.Int64(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "finding_id", err) + } + msg, err := server.RemediatePentestFinding(ctx, &protoReq) + return msg, metadata, err +} + // RegisterPentestServiceHandlerServer registers the http handlers for 
service PentestService to "mux". // UnaryRPC :call PentestServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -451,6 +496,26 @@ func RegisterPentestServiceHandlerServer(ctx context.Context, mux *runtime.Serve } forward_PentestService_InstallPentestTool_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) + mux.Handle(http.MethodPost, pattern_PentestService_RemediatePentestFinding_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/containarium.v1.PentestService/RemediatePentestFinding", runtime.WithHTTPPathPattern("/v1/pentest/findings/{finding_id}/remediate")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_PentestService_RemediatePentestFinding_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_PentestService_RemediatePentestFinding_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) return nil } @@ -627,6 +692,23 @@ func RegisterPentestServiceHandlerClient(ctx context.Context, mux *runtime.Serve } forward_PentestService_InstallPentestTool_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) + mux.Handle(http.MethodPost, pattern_PentestService_RemediatePentestFinding_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/containarium.v1.PentestService/RemediatePentestFinding", runtime.WithHTTPPathPattern("/v1/pentest/findings/{finding_id}/remediate")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PentestService_RemediatePentestFinding_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_PentestService_RemediatePentestFinding_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) return nil } @@ -639,6 +721,7 @@ var ( pattern_PentestService_SuppressPentestFinding_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "pentest", "findings", "finding_id", "suppress"}, "")) pattern_PentestService_GetPentestConfig_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "pentest", "config"}, "")) pattern_PentestService_InstallPentestTool_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "pentest", "tools", "install"}, "")) + pattern_PentestService_RemediatePentestFinding_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "pentest", "findings", "finding_id", "remediate"}, "")) ) var ( @@ -650,4 +733,5 @@ var ( forward_PentestService_SuppressPentestFinding_0 = runtime.ForwardResponseMessage forward_PentestService_GetPentestConfig_0 = runtime.ForwardResponseMessage forward_PentestService_InstallPentestTool_0 = runtime.ForwardResponseMessage + forward_PentestService_RemediatePentestFinding_0 = runtime.ForwardResponseMessage ) diff --git a/pkg/pb/containarium/v1/pentest_grpc.pb.go b/pkg/pb/containarium/v1/pentest_grpc.pb.go index da4d624..c5257c4 100644 --- a/pkg/pb/containarium/v1/pentest_grpc.pb.go +++ b/pkg/pb/containarium/v1/pentest_grpc.pb.go @@ -27,6 +27,7 @@ const ( PentestService_SuppressPentestFinding_FullMethodName = "/containarium.v1.PentestService/SuppressPentestFinding" PentestService_GetPentestConfig_FullMethodName = "/containarium.v1.PentestService/GetPentestConfig" PentestService_InstallPentestTool_FullMethodName = "/containarium.v1.PentestService/InstallPentestTool" + PentestService_RemediatePentestFinding_FullMethodName = "/containarium.v1.PentestService/RemediatePentestFinding" ) // PentestServiceClient is the client API for PentestService service. 
@@ -51,6 +52,8 @@ type PentestServiceClient interface { GetPentestConfig(ctx context.Context, in *GetPentestConfigRequest, opts ...grpc.CallOption) (*GetPentestConfigResponse, error) // InstallPentestTool downloads and installs an external pentest tool (nuclei or trivy) InstallPentestTool(ctx context.Context, in *InstallPentestToolRequest, opts ...grpc.CallOption) (*InstallPentestToolResponse, error) + // RemediatePentestFinding upgrades the vulnerable package in the affected container + RemediatePentestFinding(ctx context.Context, in *RemediatePentestFindingRequest, opts ...grpc.CallOption) (*RemediatePentestFindingResponse, error) } type pentestServiceClient struct { @@ -141,6 +144,16 @@ func (c *pentestServiceClient) InstallPentestTool(ctx context.Context, in *Insta return out, nil } +func (c *pentestServiceClient) RemediatePentestFinding(ctx context.Context, in *RemediatePentestFindingRequest, opts ...grpc.CallOption) (*RemediatePentestFindingResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RemediatePentestFindingResponse) + err := c.cc.Invoke(ctx, PentestService_RemediatePentestFinding_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + // PentestServiceServer is the server API for PentestService service. // All implementations must embed UnimplementedPentestServiceServer // for forward compatibility. 
@@ -163,6 +176,8 @@ type PentestServiceServer interface { GetPentestConfig(context.Context, *GetPentestConfigRequest) (*GetPentestConfigResponse, error) // InstallPentestTool downloads and installs an external pentest tool (nuclei or trivy) InstallPentestTool(context.Context, *InstallPentestToolRequest) (*InstallPentestToolResponse, error) + // RemediatePentestFinding upgrades the vulnerable package in the affected container + RemediatePentestFinding(context.Context, *RemediatePentestFindingRequest) (*RemediatePentestFindingResponse, error) mustEmbedUnimplementedPentestServiceServer() } @@ -197,6 +212,9 @@ func (UnimplementedPentestServiceServer) GetPentestConfig(context.Context, *GetP func (UnimplementedPentestServiceServer) InstallPentestTool(context.Context, *InstallPentestToolRequest) (*InstallPentestToolResponse, error) { return nil, status.Error(codes.Unimplemented, "method InstallPentestTool not implemented") } +func (UnimplementedPentestServiceServer) RemediatePentestFinding(context.Context, *RemediatePentestFindingRequest) (*RemediatePentestFindingResponse, error) { + return nil, status.Error(codes.Unimplemented, "method RemediatePentestFinding not implemented") +} func (UnimplementedPentestServiceServer) mustEmbedUnimplementedPentestServiceServer() {} func (UnimplementedPentestServiceServer) testEmbeddedByValue() {} @@ -362,6 +380,24 @@ func _PentestService_InstallPentestTool_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } +func _PentestService_RemediatePentestFinding_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemediatePentestFindingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PentestServiceServer).RemediatePentestFinding(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: PentestService_RemediatePentestFinding_FullMethodName, + } 
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PentestServiceServer).RemediatePentestFinding(ctx, req.(*RemediatePentestFindingRequest)) + } + return interceptor(ctx, in, info, handler) +} + // PentestService_ServiceDesc is the grpc.ServiceDesc for PentestService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -401,6 +437,10 @@ var PentestService_ServiceDesc = grpc.ServiceDesc{ MethodName: "InstallPentestTool", Handler: _PentestService_InstallPentestTool_Handler, }, + { + MethodName: "RemediatePentestFinding", + Handler: _PentestService_RemediatePentestFinding_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "containarium/v1/pentest.proto", diff --git a/proto/containarium/v1/container.proto b/proto/containarium/v1/container.proto index 7a17ab7..1115262 100644 --- a/proto/containarium/v1/container.proto +++ b/proto/containarium/v1/container.proto @@ -24,6 +24,18 @@ enum OSType { // Red Hat Enterprise Linux 9 (for production, requires subscription) OS_TYPE_RHEL_9 = 3; + + // Windows Server 2022 (runs as QEMU/KVM VM, RDP access) + OS_TYPE_WINDOWS_2022 = 4; +} + +// AccessType indicates how to connect to an instance +enum AccessType { + // SSH access (default for Linux containers) + ACCESS_TYPE_SSH = 0; + + // RDP access (for Windows VMs, via Guacamole browser-based client) + ACCESS_TYPE_RDP = 1; } // ContainerState represents the current state of a container @@ -126,6 +138,12 @@ message Container { // Operating system type of the container OSType os_type = 15; + + // How to connect to this instance (SSH for Linux, RDP for Windows) + AccessType access_type = 16; + + // RDP connection address (e.g., "10.100.0.50:3389") โ€” populated for Windows VMs + string rdp_address = 17; } // ContainerMetrics contains runtime metrics for a container @@ -206,8 +224,11 @@ message CreateContainerResponse { // Human-readable message about the 
creation string message = 2; - // SSH connection string for the user + // SSH connection string for the user (Linux containers) string ssh_command = 3; + + // RDP connection address (Windows VMs) + string rdp_address = 4; } // ListContainersRequest is the request to list containers diff --git a/proto/containarium/v1/pentest.proto b/proto/containarium/v1/pentest.proto index 645dbe0..83e6f6d 100644 --- a/proto/containarium/v1/pentest.proto +++ b/proto/containarium/v1/pentest.proto @@ -239,6 +239,26 @@ message InstallPentestToolResponse { string message = 2; } +// RemediatePentestFindingRequest requests remediation of a Trivy finding by +// upgrading the OS package that contains the vulnerable binary. +message RemediatePentestFindingRequest { + // Finding ID to remediate + int64 finding_id = 1; +} + +// RemediatePentestFindingResponse returns the result of the remediation attempt. +message RemediatePentestFindingResponse { + bool success = 1; + // Human-readable result (e.g., "docker-ce upgraded from 27.0.1 to 27.0.3") + string message = 2; + // Package that was upgraded + string package_name = 3; + // Version before upgrade + string old_version = 4; + // Version after upgrade + string new_version = 5; +} + // ============= Service Definition ============= // PentestService provides automated penetration testing and vulnerability scanning @@ -341,4 +361,17 @@ service PentestService { tags: "Pentest"; }; } + + // RemediatePentestFinding upgrades the vulnerable package in the affected container + rpc RemediatePentestFinding(RemediatePentestFindingRequest) returns (RemediatePentestFindingResponse) { + option (google.api.http) = { + post: "/v1/pentest/findings/{finding_id}/remediate" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + summary: "Remediate a pentest finding"; + description: "Upgrades the OS package that contains the vulnerable binary to the latest version. 
Only works for Trivy container findings where the binary belongs to an installed package."; + tags: "Pentest"; + }; + } } diff --git a/scripts/setup-gpu-host.sh b/scripts/setup-gpu-host.sh index 0575865..e67693d 100755 --- a/scripts/setup-gpu-host.sh +++ b/scripts/setup-gpu-host.sh @@ -437,40 +437,69 @@ cat > "$BACKUP_SCRIPT" <<'BACKUPEOF' # Usage: # containarium-zfs-backup # run backup # containarium-zfs-backup --list # list snapshots -# containarium-zfs-backup --prune 7 # keep only last N snapshots +# containarium-zfs-backup --prune 7 # keep only last N snapshots per dataset # -set -euo pipefail +# Note: pipefail is intentionally NOT set. We want replication failures to be +# non-fatal so that snapshot creation and pruning still happen. +set -uo pipefail MAIN_POOL="incus-local" BACKUP_POOL="incus-backup/snapshots" SNAP_PREFIX="backup" KEEP_COUNT="${CONTAINARIUM_BACKUP_KEEP:-7}" +# prune_snapshots keeps the N most recent backup snapshots PER dataset. +# Listing all snapshots in the pool sorted globally would prune wrong: +# 7 containers ร— 25 daily snapshots โ†’ "keep newest 7 globally" wipes +# everything except today's backups across all containers. 
+prune_snapshots() { + local pool="$1" + local keep="$2" + + # Get unique datasets that have backup snapshots + local datasets + datasets=$(zfs list -t snapshot -r "$pool" -o name -H 2>/dev/null \ + | grep "@${SNAP_PREFIX}-" \ + | sed 's/@.*//' \ + | sort -u) + + while IFS= read -r ds; do + [ -z "$ds" ] && continue + # List this dataset's backup snapshots, newest first + local snaps + snaps=$(zfs list -t snapshot -o name -H -S creation "$ds" 2>/dev/null \ + | grep "@${SNAP_PREFIX}-" || true) + local count=0 + while IFS= read -r snap; do + [ -z "$snap" ] && continue + count=$((count + 1)) + if [ "$count" -gt "$keep" ]; then + if zfs destroy "$snap" 2>/dev/null; then + echo " Destroyed $snap" + else + echo " Failed to destroy $snap (held or has clones, skipping)" + fi + fi + done <<< "$snaps" + done <<< "$datasets" +} + case "${1:-}" in --list) echo "=== Main pool snapshots ===" zfs list -t snapshot -r "$MAIN_POOL" -o name,creation,used 2>/dev/null || echo " (none)" echo "" echo "=== Backup pool snapshots ===" - zfs list -t snapshot -r "$BACKUP_POOL" 2>/dev/null && \ - zfs list -t snapshot -r "$BACKUP_POOL" -o name,creation,used || echo " (none)" + zfs list -t snapshot -r "$BACKUP_POOL" -o name,creation,used 2>/dev/null || echo " (none)" exit 0 ;; --prune) KEEP_COUNT="${2:-$KEEP_COUNT}" - echo "Pruning snapshots, keeping last $KEEP_COUNT..." - for dataset in "$MAIN_POOL" "$BACKUP_POOL"; do - SNAPS=$(zfs list -t snapshot -r "$dataset" -o name -H -S creation 2>/dev/null | grep "@${SNAP_PREFIX}-" || true) - COUNT=0 - while IFS= read -r snap; do - [ -z "$snap" ] && continue - COUNT=$((COUNT + 1)) - if [ "$COUNT" -gt "$KEEP_COUNT" ]; then - echo " Destroying $snap" - zfs destroy "$snap" - fi - done <<< "$SNAPS" - done + echo "Pruning snapshots, keeping last $KEEP_COUNT per dataset..." + prune_snapshots "$MAIN_POOL" "$KEEP_COUNT" + if zpool list incus-backup &>/dev/null; then + prune_snapshots "$BACKUP_POOL" "$KEEP_COUNT" + fi echo "Done." 
exit 0 ;; @@ -485,8 +514,14 @@ fi TIMESTAMP=$(date +%Y%m%d-%H%M%S) SNAP_NAME="${SNAP_PREFIX}-${TIMESTAMP}" +# Always prune at end, even if replication fails +trap 'echo "==> Pruning old snapshots (keeping last $KEEP_COUNT per dataset)..."; prune_snapshots "$MAIN_POOL" "$KEEP_COUNT"; if zpool list incus-backup &>/dev/null; then prune_snapshots "$BACKUP_POOL" "$KEEP_COUNT"; fi; echo "==> Backup complete."' EXIT + echo "==> Creating snapshot ${MAIN_POOL}@${SNAP_NAME}..." -zfs snapshot -r "${MAIN_POOL}@${SNAP_NAME}" +if ! zfs snapshot -r "${MAIN_POOL}@${SNAP_NAME}"; then + echo "ERROR: Snapshot creation failed" + exit 1 +fi if zpool list incus-backup &>/dev/null; then echo "==> Replicating to backup pool..." @@ -497,47 +532,58 @@ if zpool list incus-backup &>/dev/null; then | grep -v "@${SNAP_NAME}" \ | head -1 || true) - # Check if the previous snapshot exists on the backup pool + REPLICATION_OK=false + if [ -n "$PREV_SNAP" ]; then PREV_TAG="${PREV_SNAP#*@}" if zfs list "${BACKUP_POOL}/${MAIN_POOL}@${PREV_TAG}" &>/dev/null 2>&1; then - # Incremental send + # Incremental send โ€” try first echo " Incremental send from @${PREV_TAG} to @${SNAP_NAME}" - zfs send -R -i "${MAIN_POOL}@${PREV_TAG}" "${MAIN_POOL}@${SNAP_NAME}" \ - | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" + if zfs send -R -i "${MAIN_POOL}@${PREV_TAG}" "${MAIN_POOL}@${SNAP_NAME}" \ + | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" 2>&1; then + REPLICATION_OK=true + else + # Common failure: backup pool's per-dataset snapshot lineage + # diverged from main pool (e.g. a previous receive failed + # partway, leaving some datasets out of sync). Recover by + # rolling back the backup pool to the last shared snapshot. + echo " Incremental failed โ€” attempting to repair divergent backup lineage" + # Find datasets where backup lacks the previous snapshot and + # roll back to the latest shared one. Easier: destroy backup + # and do full re-send. This is heavy but reliable. 
+ echo " Falling back to full re-send (this will overwrite backup)" + if zfs send -R "${MAIN_POOL}@${SNAP_NAME}" \ + | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" 2>&1; then + REPLICATION_OK=true + fi + fi else # Previous snapshot not on backup โ€” full send echo " Full send (previous snapshot not found on backup)" - zfs send -R "${MAIN_POOL}@${SNAP_NAME}" \ - | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" + if zfs send -R "${MAIN_POOL}@${SNAP_NAME}" \ + | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" 2>&1; then + REPLICATION_OK=true + fi fi else # First backup โ€” full send echo " Full send (first backup)" - zfs send -R "${MAIN_POOL}@${SNAP_NAME}" \ - | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" + if zfs send -R "${MAIN_POOL}@${SNAP_NAME}" \ + | zfs receive -F "${BACKUP_POOL}/${MAIN_POOL}" 2>&1; then + REPLICATION_OK=true + fi fi - echo " Backup replicated to ${BACKUP_POOL}/${MAIN_POOL}@${SNAP_NAME}" + if $REPLICATION_OK; then + echo " Backup replicated to ${BACKUP_POOL}/${MAIN_POOL}@${SNAP_NAME}" + else + echo " WARNING: Replication failed โ€” local snapshot kept, but backup pool is out of date" + fi else echo " Backup pool not available โ€” snapshot only (no replication)" fi -# Auto-prune old snapshots -echo "==> Pruning old snapshots (keeping last $KEEP_COUNT)..." -for dataset in "$MAIN_POOL" "$BACKUP_POOL"; do - SNAPS=$(zfs list -t snapshot -r "$dataset" -o name -H -S creation 2>/dev/null | grep "@${SNAP_PREFIX}-" || true) - COUNT=0 - while IFS= read -r snap; do - [ -z "$snap" ] && continue - COUNT=$((COUNT + 1)) - if [ "$COUNT" -gt "$KEEP_COUNT" ]; then - zfs destroy "$snap" 2>/dev/null || true - fi - done <<< "$SNAPS" -done - -echo "==> Backup complete." 
+# Note: prune runs via the EXIT trap above, so it executes even if we exit early BACKUPEOF chmod +x "$BACKUP_SCRIPT" echo " Installed $BACKUP_SCRIPT" diff --git a/web-ui/src/components/containers/ContainerListView.tsx b/web-ui/src/components/containers/ContainerListView.tsx index af0e568..683d016 100644 --- a/web-ui/src/components/containers/ContainerListView.tsx +++ b/web-ui/src/components/containers/ContainerListView.tsx @@ -19,6 +19,7 @@ import DeleteIcon from '@mui/icons-material/Delete'; import PlayArrowIcon from '@mui/icons-material/PlayArrow'; import StopIcon from '@mui/icons-material/Stop'; import TerminalIcon from '@mui/icons-material/Terminal'; +import DesktopWindowsIcon from '@mui/icons-material/DesktopWindows'; import SecurityIcon from '@mui/icons-material/Security'; import LabelIcon from '@mui/icons-material/Label'; import TuneIcon from '@mui/icons-material/Tune'; @@ -287,7 +288,18 @@ export default function ContainerListView({ - {isRunning && onTerminal && ( + {isRunning && container.accessType === 'ACCESS_TYPE_RDP' && ( + + window.open('/guacamole/', '_blank')} + > + + + + )} + {isRunning && container.accessType !== 'ACCESS_TYPE_RDP' && onTerminal && ( onTerminal(username)}> diff --git a/web-ui/src/components/containers/ContainerNode.tsx b/web-ui/src/components/containers/ContainerNode.tsx index b0ad19f..c969cf4 100644 --- a/web-ui/src/components/containers/ContainerNode.tsx +++ b/web-ui/src/components/containers/ContainerNode.tsx @@ -14,6 +14,7 @@ import DeleteIcon from '@mui/icons-material/Delete'; import PlayArrowIcon from '@mui/icons-material/PlayArrow'; import StopIcon from '@mui/icons-material/Stop'; import TerminalIcon from '@mui/icons-material/Terminal'; +import DesktopWindowsIcon from '@mui/icons-material/DesktopWindows'; import MemoryIcon from '@mui/icons-material/Memory'; import StorageIcon from '@mui/icons-material/Storage'; import DnsIcon from '@mui/icons-material/Dns'; @@ -285,7 +286,18 @@ export default function ContainerNode({ 
container, metrics, onDelete, onStart, o )} - {isRunning && onTerminal && ( + {isRunning && container.accessType === 'ACCESS_TYPE_RDP' && ( + + window.open('/guacamole/', '_blank')} + > + + + + )} + {isRunning && container.accessType !== 'ACCESS_TYPE_RDP' && onTerminal && ( i.value === image); + const isWindows = !!(selectedImage && 'osType' in selectedImage); + + if (!isWindows && !autoGenerateKey && !sshPublicKey) { setError('Please enter an SSH public key or enable auto-generate'); return; } @@ -231,19 +235,20 @@ export default function CreateContainerDialog({ open, onClose, onSubmit, network const labels = parseLabels(labelsText); const request: CreateContainerRequest = { username, - image, + image: isWindows ? undefined : image, resources: { - cpu, - memory, - disk, + cpu: isWindows ? (cpu || '4') : cpu, + memory: isWindows ? (memory || '8GB') : memory, + disk: isWindows ? (disk || '50GB') : disk, }, - sshKeys: [publicKey], + sshKeys: isWindows ? undefined : [publicKey], labels: Object.keys(labels).length > 0 ? labels : undefined, - enablePodman, - stack: stack || undefined, + enablePodman: isWindows ? false : enablePodman, + stack: isWindows ? undefined : (stack || undefined), staticIp: staticIp || undefined, gpu: gpu || undefined, backendId: backendId || undefined, + osType: selectedImage && 'osType' in selectedImage ? 
(selectedImage as { osType: number }).osType : undefined, }; const container = await onSubmit(request, (prog) => { diff --git a/web-ui/src/lib/api/client.ts b/web-ui/src/lib/api/client.ts index 60ea770..88b61c3 100644 --- a/web-ui/src/lib/api/client.ts +++ b/web-ui/src/lib/api/client.ts @@ -86,6 +86,9 @@ function transformContainer(apiContainer: Record): Container { labels: (apiContainer.labels as Record) || {}, sshKeys: (apiContainer.sshKeys as string[]) || [], backendId: (apiContainer.backendId as string) || '', + osType: (apiContainer.osType as string) || '', + accessType: (apiContainer.accessType as Container['accessType']) || undefined, + rdpAddress: (apiContainer.rdpAddress as string) || '', }; } @@ -199,6 +202,7 @@ export class ContaineriumClient { static_ip: request.staticIp || '', gpu: request.gpu || '', backend_id: request.backendId || '', + os_type: request.osType || 0, async: async, }, { timeout: async ? 30000 : 300000, // 30s for async, 5min for sync diff --git a/web-ui/src/types/container.ts b/web-ui/src/types/container.ts index c4018fb..40625c3 100644 --- a/web-ui/src/types/container.ts +++ b/web-ui/src/types/container.ts @@ -6,6 +6,11 @@ export type ContainerState = 'Running' | 'Stopped' | 'Frozen' | 'Creating' | 'Pr /** * Container information from Containarium API */ +/** + * Access type enum โ€” how to connect to an instance + */ +export type AccessType = 'ACCESS_TYPE_SSH' | 'ACCESS_TYPE_RDP'; + export interface Container { name: string; username: string; @@ -23,6 +28,9 @@ export interface Container { labels: Record; sshKeys: string[]; backendId?: string; + osType?: string; + accessType?: AccessType; + rdpAddress?: string; } /** @@ -43,6 +51,7 @@ export interface CreateContainerRequest { staticIp?: string; // Static IP address (e.g., "10.100.0.100") - empty for DHCP gpu?: string; // GPU device ID for passthrough (e.g., "0" for first GPU) backendId?: string; // Target backend for creation (empty = primary) + osType?: number; // OS type enum (4 = 
Windows Server 2022) } /** @@ -97,6 +106,7 @@ export interface CreateContainerResponse { container: Container; message: string; sshCommand: string; + rdpAddress?: string; } /**