diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e5fa0b..da9c728 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,8 +8,11 @@ on: jobs: test: - name: Test - runs-on: ubuntu-latest + name: Test (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, macos-latest] steps: - uses: actions/checkout@v4 - name: Install Nix @@ -26,7 +29,6 @@ jobs: e2e: name: E2E runs-on: ubuntu-latest - continue-on-error: true timeout-minutes: 30 steps: - uses: actions/checkout@v4 diff --git a/README.md b/README.md index ce00617..e5f2211 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # nixbox -Cloud-hypervisor microVM sandbox for running AI agents (eg: Claude Code with `--dangerously-skip-permissions`) in full isolation. Host protected by KVM boundary + egress filtering. Directories shared via virtiofs bind mounts. +MicroVM sandbox for running AI agents (e.g. Claude Code with `--dangerously-skip-permissions`) in full isolation. Host protected by hypervisor boundary + egress filtering. Directories shared via virtiofs bind mounts. Supports Linux (cloud-hypervisor) and macOS on Apple Silicon (vfkit / Virtualization.framework). Multiple VMs can run concurrently (up to 64), each with isolated slot-based networking. Mount your entire workspace (e.g. `~/workspace`) rather than a single project — this lets you switch between projects inside the VM without restarting it. 
@@ -15,10 +15,18 @@ Multiple VMs can run concurrently (up to 64), each with isolated slot-based netw ## Prerequisites -- Linux with KVM (`/dev/kvm`) +### Linux + +- KVM (`/dev/kvm`) - [Nix](https://nixos.org/download/) with flakes enabled - `dnsmasq`, `nftables`, `e2fsprogs` (for `mke2fs`), `virtiofsd` +### macOS (Apple Silicon) + +- macOS 13+ (Ventura) with Virtualization.framework +- [Nix](https://nixos.org/download/) with flakes enabled +- `e2fsprogs` (for `mke2fs`) + ## Install ```bash @@ -258,6 +266,14 @@ The JDK is specified separately via `nix.packages` so you control the version. S - **virtiofs + `O_TMPFILE`** — virtiofs does not support `O_TMPFILE`. Tools that hit this (e.g. Node.js/Claude Code) need tmpfs overlays on affected dirs — the `claude-code` plugin handles this automatically. - **Claude Code conversations** — Claude Code stores conversations under `~/.claude/projects/` keyed by the workspace's absolute path. Since the workspace path differs between host and guest (e.g. `/home/you/workspace` vs `/home/vmuser/workspace`), conversations don't carry over between the two. Workaround: symlink the guest-side conversation directory to the host's. +### macOS-specific limitations + +- **Network filtering** — only `open` mode is implemented on macOS; both `off` and `filtered` will error, so guest egress is currently unrestricted. +- **Hot-plug mounts** — `nixbox mount` / `nixbox unmount` are not supported. Declare mounts in `config.nix` and restart the VM. +- **Guest IP discovery** — uses ARP scan after boot instead of static assignment. Boot may be slightly slower. + +See [ADR 015](docs/decisions/015-macos-vfkit-support.md) for full platform comparison. 
+ ## Acknowledgments - Built on [microvm.nix](https://github.com/microvm-nix/microvm.nix) diff --git a/bin/nixbox b/bin/nixbox index a9b779a..e46143e 100755 --- a/bin/nixbox +++ b/bin/nixbox @@ -2,22 +2,43 @@ set -euo pipefail -_self="$(readlink -f "$0")" +_portable_realpath() { + local path="$1" + if command -v realpath &>/dev/null; then + realpath "$path" + elif command -v readlink &>/dev/null && readlink -f "$path" 2>/dev/null; then + : + else + # POSIX fallback + cd "$(dirname "$path")" && echo "$(pwd -P)/$(basename "$path")" + fi +} +_self="$(_portable_realpath "$0")" _self_dir="$(dirname "$_self")" if [ -d "${_self_dir}/../share/nixbox" ]; then NIXBOX_SRC="${_self_dir}/../share/nixbox" else NIXBOX_SRC="$(cd "${_self_dir}/.." && pwd)" fi -NIXBOX_RUNTIME_DIR="${XDG_RUNTIME_DIR:-/run/user/$(id -u)}/nixbox" -SLOTS_DIR="$NIXBOX_RUNTIME_DIR/slots" -# shellcheck disable=SC2034 # used by lib/functions.bash -BYDIR_DIR="$NIXBOX_RUNTIME_DIR/by-dir" + +NIXBOX_PLATFORM="$(uname -s | tr '[:upper:]' '[:lower:]')" # "linux" or "darwin" + +if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + ACTIVE_FILE="${TMPDIR:-/tmp}/nixbox-active" + GUEST_IP="" # discovered at runtime via ARP +else + NIXBOX_RUNTIME_DIR="${XDG_RUNTIME_DIR:-/run/user/$(id -u)}/nixbox" + SLOTS_DIR="$NIXBOX_RUNTIME_DIR/slots" + # shellcheck disable=SC2034 # used by lib/functions.bash + BYDIR_DIR="$NIXBOX_RUNTIME_DIR/by-dir" +fi NIXBOX_DIR="" -# Network variables — set by derive_network() per VM slot -# shellcheck disable=SC2034 -TAP_DEV="" TAP_HOST_IP="" TAP_GUEST_IP="" TAP_SUBNET="" TAP_CIDR="" TAP_MAC="" NFT_TABLE="" VSOCK_CID="" +# Network variables — set by derive_network() per VM slot (Linux only) +if [ "$NIXBOX_PLATFORM" != "darwin" ]; then + # shellcheck disable=SC2034 + TAP_DEV="" TAP_HOST_IP="" TAP_GUEST_IP="" TAP_SUBNET="" TAP_CIDR="" TAP_MAC="" NFT_TABLE="" VSOCK_CID="" +fi # shellcheck disable=SC2034 SSH_KEY="" @@ -42,16 +63,24 @@ do_destroy() { return 0 fi - local vm_slot - vm_slot=$(cat 
"$state_dir/slot" 2>/dev/null || echo "0") - derive_network "$vm_slot" "$name" + if [ "$NIXBOX_PLATFORM" != "darwin" ]; then + local vm_slot + vm_slot=$(cat "$state_dir/slot" 2>/dev/null || echo "0") + derive_network "$vm_slot" "$name" + fi if [ -f "$state_dir/pid" ]; then local pid pid=$(cat "$state_dir/pid") if kill -0 "$pid" 2>/dev/null; then log "==> Requesting graceful shutdown..." - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "sudo poweroff" 2>/dev/null || true + local target_ip + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + target_ip=$(cat "$state_dir/guest_ip" 2>/dev/null || echo "") + else + target_ip="$TAP_GUEST_IP" + fi + [ -n "$target_ip" ] && ssh "${SSH_OPTS[@]}" ${VM_USER}@"$target_ip" "sudo poweroff" 2>/dev/null || true for _ in $(seq 1 10); do kill -0 "$pid" 2>/dev/null || break @@ -74,27 +103,33 @@ do_destroy() { fi fi - for pidfile in "$state_dir"/virtiofsd_*_pid; do - [ -f "$pidfile" ] || continue - kill "$(cat "$pidfile")" 2>/dev/null || true - done + if [ "$NIXBOX_PLATFORM" != "darwin" ]; then + for pidfile in "$state_dir"/virtiofsd_*_pid; do + [ -f "$pidfile" ] || continue + kill "$(cat "$pidfile")" 2>/dev/null || true + done - sudo nft delete table inet $NFT_TABLE 2>/dev/null || true - if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then - sudo iptables -D DOCKER-USER -i $TAP_DEV -j ACCEPT 2>/dev/null || true - sudo iptables -D DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT 2>/dev/null || true - fi + sudo nft delete table inet $NFT_TABLE 2>/dev/null || true + if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then + sudo iptables -D DOCKER-USER -i $TAP_DEV -j ACCEPT 2>/dev/null || true + sudo iptables -D DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT 2>/dev/null || true + fi - if [ -f "$state_dir/dnsmasq.pid" ]; then - sudo kill "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null || true - fi + if [ -f "$state_dir/dnsmasq.pid" ]; then + sudo kill "$(cat "$state_dir/dnsmasq.pid")" 
2>/dev/null || true + fi - sudo ip link del "$TAP_DEV" 2>/dev/null || true + sudo ip link del "$TAP_DEV" 2>/dev/null || true + fi rm -f "$run_dir/env.img" "$run_dir/api.sock" "$run_dir/microvm-run" rm -f "$run_dir"/virtiofs-*.sock - sudo rm -rf "$state_dir" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + rm -rf "$state_dir" + else + sudo rm -rf "$state_dir" + fi log "==> VM '$name' destroyed." } @@ -114,10 +149,20 @@ do_create() { local network vcpus mem_mb network=$(jq_field '.network.mode') vcpus=$(jq_field '.resources.vcpus') - if [ "$vcpus" -eq 0 ]; then vcpus=$(( $(nproc) / 2 )); [ "$vcpus" -lt 1 ] && vcpus=1; fi + if [ "$vcpus" -eq 0 ]; then + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + vcpus=$(sysctl -n hw.logicalcpu) + else + vcpus=$(( $(nproc) / 2 )); [ "$vcpus" -lt 1 ] && vcpus=1 + fi + fi mem_mb=$(jq_field '.resources.memoryMB') if [ "$mem_mb" -eq 0 ]; then - mem_mb=$(( $(grep MemTotal /proc/meminfo | awk '{print $2}') / 2 / 1024 )) + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + mem_mb=$(( $(sysctl -n hw.memsize) / 1024 / 1024 / 2 )) + else + mem_mb=$(( $(grep MemTotal /proc/meminfo | awk '{print $2}') / 2 / 1024 )) + fi [ "$mem_mb" -lt 4096 ] && mem_mb=4096 fi local allowed_ports @@ -140,7 +185,9 @@ do_create() { trap cleanup_on_failure EXIT mkdir -p "$state_dir" "$run_dir" - echo "$VM_SLOT" > "$state_dir/slot" + if [ "$NIXBOX_PLATFORM" != "darwin" ]; then + echo "$VM_SLOT" > "$state_dir/slot" + fi # --- Generate env disk --- log "==> Generating env disk..." @@ -158,27 +205,166 @@ do_create() { mke2fs -F -t ext4 -d "$env_dir" -L env "$env_img" 32M 2>/dev/null rm -rf "$env_dir" - # --- Create TAP device --- - # Use multi_queue only when vcpus > 2; cloud-hypervisor uses IFF_MULTI_QUEUE - # iff num_queues > 2, and opening a multi_queue TAP without IFF_MULTI_QUEUE fails. - log "==> Configuring network..." 
- local mq_flag="" - [ "$vcpus" -gt 2 ] && mq_flag="multi_queue" - # shellcheck disable=SC2086 - sudo ip tuntap add dev "$TAP_DEV" mode tap $mq_flag user "$(whoami)" - sudo ip addr add "$TAP_CIDR" dev "$TAP_DEV" - sudo ip link set "$TAP_DEV" up - - # Resolve real upstream DNS servers (avoids forwarding to stub 127.0.0.53) - local upstream - local -a upstream_dns - mapfile -t upstream_dns < <(get_upstream_dns_servers) - - # --- Network mode --- - case "$network" in - off) - log_sub "Mode: strict (no forwarding)" - sudo nft -f - < Booting VM '$name'..." + + cp "$runner/bin/microvm-run" "$run_dir/microvm-run" + chmod +x "$run_dir/microvm-run" + + # Patch CPU/RAM in vfkit runner + sed -i '' "s/--cpus [0-9]*/--cpus $vcpus/" "$run_dir/microvm-run" + sed -i '' "s/--memory-size [0-9]*[mMgG]*/--memory-size ${mem_mb}/" "$run_dir/microvm-run" + + # Append env disk and virtiofs mounts to the runner script + local extra_args="" + local env_img_abs + env_img_abs=$(realpath "$run_dir/env.img") + extra_args="--device virtio-blk,path=${env_img_abs}" + + local mount_count + mount_count=$(echo "$mounts_json" | jq 'length') + if [ "$mount_count" -gt 0 ]; then + log "==> Configuring $mount_count virtiofs mount(s)..." + for i in $(seq 0 $((mount_count - 1))); do + local src + src=$(echo "$mounts_json" | jq -r ".[$i].source") + src="${src/#\~/$HOME}" + [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" + [ ! 
-d "$src" ] && die "Mount source does not exist: $src" + extra_args="$extra_args --device virtio-fs,sharedDir=${src},mountTag=mount-${i}" + done + fi + + # Patch runner: replace ${runtime_args:-} or append to exec line + if grep -q '${runtime_args:-}' "$run_dir/microvm-run" 2>/dev/null; then + sed -i '' "s|\${runtime_args:-}|${extra_args}|" "$run_dir/microvm-run" + else + # Append extra args to the last exec line + sed -i '' "s|^exec vfkit\(.*\)|exec vfkit\1 ${extra_args}|" "$run_dir/microvm-run" + fi + + cd "$run_dir" + "$run_dir/microvm-run" >> "$run_dir/vm.log" 2>&1 & + local vm_pid=$! + echo "$vm_pid" > "$state_dir/pid" + echo "$name" > "$state_dir/name" + date +%s > "$state_dir/start_time" + + # --- Discover guest IP via ARP --- + log "==> Discovering guest IP..." + GUEST_IP="" + for _ in $(seq 1 60); do + GUEST_IP=$(arp -an | grep -i '2:0:0:0:0:1\|02:00:00:00:00:01' | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1) + [ -n "$GUEST_IP" ] && break + if ! kill -0 "$vm_pid" 2>/dev/null; then + log "==> VM process died. Log tail:" >&2 + tail -20 "$run_dir/vm.log" >&2 + die "VM crashed before IP was discovered" + fi + sleep 1 + done + [ -z "$GUEST_IP" ] && die "Could not discover guest IP after 60s" + echo "$GUEST_IP" > "$state_dir/guest_ip" + log_sub "Guest IP: $GUEST_IP" + + # --- Wait for SSH --- + log "==> Waiting for SSH readiness..." + for _ in $(seq 1 60); do + if ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "true" 2>/dev/null; then + break + fi + sleep 1 + done + if ! ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "true" 2>/dev/null; then + die "SSH not ready after 60s" + fi + + # --- Inject VM SSH key for outbound connections (e.g. GitHub) --- + log "==> Injecting SSH identity..." 
+ ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "mkdir -p ~/.ssh && chmod 700 ~/.ssh" + scp -q "${SSH_OPTS[@]}" "$SSH_KEY" "${VM_USER}@${GUEST_IP}:~/.ssh/id_ed25519" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "chmod 600 ~/.ssh/id_ed25519" + log_sub "SSH key: $(cat "${SSH_KEY}.pub")" + + # --- Mount virtiofs inside guest --- + if [ "$mount_count" -gt 0 ]; then + for i in $(seq 0 $((mount_count - 1))); do + local tgt ro src + tgt=$(echo "$mounts_json" | jq -r ".[$i].target") + tgt="${tgt/#\~//home/${VM_USER}}" + ro=$(echo "$mounts_json" | jq -r ".[$i].readonly") + src=$(echo "$mounts_json" | jq -r ".[$i].source") + src="${src/#\~/$HOME}" + [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" + + local ro_flag="" + [ "$ro" = "true" ] && ro_flag="-o ro" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" \ + "sudo mkdir -p '$tgt' && sudo mount -t virtiofs $ro_flag mount-$i '$tgt' && sudo chown ${VM_USER}:users '$tgt' 2>/dev/null || true" + log_sub "Mounted $src → $tgt" + + local ro_val=0; [ "$ro" = "true" ] && ro_val=1 + echo "$i|$src|$tgt|$ro_val" > "$state_dir/mount_$i" + done + fi + + # --- Run user scripts --- + local scripts_json + scripts_json=$(echo "$CONFIG_JSON" | jq -r '.scripts // []') + local script_count + script_count=$(echo "$scripts_json" | jq 'length') + if [ "$script_count" -gt 0 ]; then + log "==> Running setup scripts..." + for i in $(seq 0 $((script_count - 1))); do + local script_path + script_path=$(echo "$scripts_json" | jq -r ".[$i]") + [[ "$script_path" != /* ]] && script_path="$(realpath "${project_dir}/${script_path}")" + [ ! 
-f "$script_path" ] && { log_sub "WARNING: script not found: $script_path"; continue; } + local script_name + script_name=$(basename "$script_path") + scp -q "${SSH_OPTS[@]}" "$script_path" "${VM_USER}@$GUEST_IP:/tmp/$script_name" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "chmod +x /tmp/$script_name && /tmp/$script_name && rm /tmp/$script_name" + log_sub "Ran $script_path" + done + fi + + # --- Connectivity check --- + log "==> Connectivity check..." + if ssh "${SSH_OPTS[@]}" ${VM_USER}@"$GUEST_IP" "curl -sf --max-time 5 https://cache.nixos.org >/dev/null 2>&1"; then + log_sub "Nix cache: OK" + else + log_sub "WARNING: Nix cache unreachable" + fi + + else + # --- Linux boot flow (cloud-hypervisor + TAP + nftables) --- + + # --- Create TAP device --- + # Use multi_queue only when vcpus > 2; cloud-hypervisor uses IFF_MULTI_QUEUE + # iff num_queues > 2, and opening a multi_queue TAP without IFF_MULTI_QUEUE fails. + log "==> Configuring network..." + local mq_flag="" + [ "$vcpus" -gt 2 ] && mq_flag="multi_queue" + # shellcheck disable=SC2086 + sudo ip tuntap add dev "$TAP_DEV" mode tap $mq_flag user "$(whoami)" + sudo ip addr add "$TAP_CIDR" dev "$TAP_DEV" + sudo ip link set "$TAP_DEV" up + + # Resolve real upstream DNS servers (avoids forwarding to stub 127.0.0.53) + local upstream + local -a upstream_dns + mapfile -t upstream_dns < <(get_upstream_dns_servers) + + # --- Network mode --- + case "$network" in + off) + log_sub "Mode: strict (no forwarding)" + sudo nft -f - <>"$run_dir/dnsmasq.log" & - echo $! 
> "$state_dir/dnsmasq.pid" - sleep 0.3 - kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ - || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" - ;; - filtered) - log_sub "Mode: filtered (domain allowlist)" - local dnsmasq_args=( - --no-resolv --no-hosts --bind-interfaces - "--listen-address=$TAP_HOST_IP" - --except-interface=lo - --keep-in-foreground - "--dhcp-range=$TAP_GUEST_IP,$TAP_GUEST_IP,infinite" - "--dhcp-option=option:router,$TAP_HOST_IP" - "--dhcp-option=option:dns-server,$TAP_HOST_IP" - "--dhcp-host=$TAP_MAC,$TAP_GUEST_IP" - --dhcp-leasefile=/dev/null - --log-facility=- --log-dhcp - ) - while IFS= read -r domain; do - [ -z "$domain" ] && continue - domain="${domain#\*.}" - for upstream in "${upstream_dns[@]}"; do - dnsmasq_args+=("--server=/$domain/$upstream") - done - done < "$domains_file" - sudo dnsmasq "${dnsmasq_args[@]}" 2>>"$run_dir/dnsmasq.log" & - echo $! > "$state_dir/dnsmasq.pid" - sleep 0.3 - kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ - || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" - - local ports_rule="" - [ -n "$allowed_ports" ] && \ - ports_rule="iifname \"$TAP_DEV\" tcp dport { $allowed_ports } accept" - sudo nft -f - <>"$run_dir/dnsmasq.log" & + echo $! 
> "$state_dir/dnsmasq.pid" + sleep 0.3 + kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ + || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" + ;; + filtered) + log_sub "Mode: filtered (domain allowlist)" + local dnsmasq_args=( + --no-resolv --no-hosts --bind-interfaces + "--listen-address=$TAP_HOST_IP" + --except-interface=lo + --keep-in-foreground + "--dhcp-range=$TAP_GUEST_IP,$TAP_GUEST_IP,infinite" + "--dhcp-option=option:router,$TAP_HOST_IP" + "--dhcp-option=option:dns-server,$TAP_HOST_IP" + "--dhcp-host=$TAP_MAC,$TAP_GUEST_IP" + --dhcp-leasefile=/dev/null + --log-facility=- --log-dhcp + ) + while IFS= read -r domain; do + [ -z "$domain" ] && continue + domain="${domain#\*.}" + for upstream in "${upstream_dns[@]}"; do + dnsmasq_args+=("--server=/$domain/$upstream") + done + done < "$domains_file" + sudo dnsmasq "${dnsmasq_args[@]}" 2>>"$run_dir/dnsmasq.log" & + echo $! > "$state_dir/dnsmasq.pid" + sleep 0.3 + kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ + || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" + + local ports_rule="" + [ -n "$allowed_ports" ] && \ + ports_rule="iifname \"$TAP_DEV\" tcp dport { $allowed_ports } accept" + sudo nft -f - </dev/null - # Docker's iptables FORWARD chain (policy DROP) intercepts forwarded - # packets. Add ACCEPT rules in DOCKER-USER so they're not dropped. 
- if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then - sudo iptables -I DOCKER-USER -i $TAP_DEV -j ACCEPT - sudo iptables -I DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT - fi - ;; - open) - log_sub "Mode: open (all traffic forwarded)" - local open_args=( - --no-resolv --no-hosts --bind-interfaces - "--listen-address=$TAP_HOST_IP" - --except-interface=lo - --keep-in-foreground - "--dhcp-range=$TAP_GUEST_IP,$TAP_GUEST_IP,infinite" - "--dhcp-option=option:router,$TAP_HOST_IP" - "--dhcp-option=option:dns-server,$TAP_HOST_IP" - "--dhcp-host=$TAP_MAC,$TAP_GUEST_IP" - --dhcp-leasefile=/dev/null - --log-facility=- --log-dhcp - ) - for upstream in "${upstream_dns[@]}"; do - open_args+=("--server=$upstream") - done - sudo dnsmasq "${open_args[@]}" 2>>"$run_dir/dnsmasq.log" & - echo $! > "$state_dir/dnsmasq.pid" - sleep 0.3 - kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ - || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" - sudo nft -f - </dev/null + # Docker's iptables FORWARD chain (policy DROP) intercepts forwarded + # packets. Add ACCEPT rules in DOCKER-USER so they're not dropped. + if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then + sudo iptables -I DOCKER-USER -i $TAP_DEV -j ACCEPT + sudo iptables -I DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + fi + ;; + open) + log_sub "Mode: open (all traffic forwarded)" + local open_args=( + --no-resolv --no-hosts --bind-interfaces + "--listen-address=$TAP_HOST_IP" + --except-interface=lo + --keep-in-foreground + "--dhcp-range=$TAP_GUEST_IP,$TAP_GUEST_IP,infinite" + "--dhcp-option=option:router,$TAP_HOST_IP" + "--dhcp-option=option:dns-server,$TAP_HOST_IP" + "--dhcp-host=$TAP_MAC,$TAP_GUEST_IP" + --dhcp-leasefile=/dev/null + --log-facility=- --log-dhcp + ) + for upstream in "${upstream_dns[@]}"; do + open_args+=("--server=$upstream") + done + sudo dnsmasq "${open_args[@]}" 2>>"$run_dir/dnsmasq.log" & + echo $! 
> "$state_dir/dnsmasq.pid" + sleep 0.3 + kill -0 "$(cat "$state_dir/dnsmasq.pid")" 2>/dev/null \ + || die "dnsmasq failed to start (see $run_dir/dnsmasq.log)" + sudo nft -f - </dev/null - if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then - sudo iptables -I DOCKER-USER -i $TAP_DEV -j ACCEPT - sudo iptables -I DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT - fi - ;; - *) - die "Unknown network mode: $network" - ;; - esac - - # --- Start virtiofsd for nix-store share (required by microvm config) --- - log "==> Starting virtiofsd for nix-store..." - local nix_store_sock="$run_dir/nixbox-virtiofs-nix-store.sock" - virtiofsd --socket-path="$nix_store_sock" --shared-dir="/nix/store" --sandbox=none --translate-uid="map:1000:$(id -u):1" --translate-gid="map:100:$(id -g):1" --cache=auto 2>"$run_dir/virtiofsd-nix-store.log" & - echo "$!" > "$state_dir/virtiofsd_nix_store_pid" - for _ in $(seq 1 10); do [ -S "$nix_store_sock" ] && break; sleep 0.2; done - [ -S "$nix_store_sock" ] || die "virtiofsd socket for nix-store did not appear" - - # --- Start virtiofsd for each user mount --- - local mount_count - mount_count=$(echo "$mounts_json" | jq 'length') - if [ "$mount_count" -gt 0 ]; then - log "==> Starting virtiofsd for $mount_count mount(s)..." - for i in $(seq 0 $((mount_count - 1))); do - local src - src=$(echo "$mounts_json" | jq -r ".[$i].source") - src="${src/#\~/$HOME}" - [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" - [ ! -d "$src" ] && die "Mount source does not exist: $src" - - local virtiofs_sock="$run_dir/virtiofs-${i}.sock" - virtiofsd --socket-path="$virtiofs_sock" --shared-dir="$src" --sandbox=none --translate-uid="map:1000:$(id -u):1" --translate-gid="map:100:$(id -g):1" --cache=auto 2>"$run_dir/virtiofsd-${i}.log" & - echo "$!" 
> "$state_dir/virtiofsd_${i}_pid" - for _ in $(seq 1 10); do [ -S "$virtiofs_sock" ] && break; sleep 0.2; done - [ -S "$virtiofs_sock" ] || die "virtiofsd socket did not appear for mount $i" + sudo sysctl -w net.ipv4.ip_forward=1 >/dev/null + if sudo iptables -L DOCKER-USER -n >/dev/null 2>&1; then + sudo iptables -I DOCKER-USER -i $TAP_DEV -j ACCEPT + sudo iptables -I DOCKER-USER -o $TAP_DEV -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT + fi + ;; + *) + die "Unknown network mode: $network" + ;; + esac + + # --- Start virtiofsd for nix-store share (required by microvm config) --- + log "==> Starting virtiofsd for nix-store..." + local nix_store_sock="$run_dir/nixbox-virtiofs-nix-store.sock" + virtiofsd --socket-path="$nix_store_sock" --shared-dir="/nix/store" --sandbox=none --translate-uid="map:1000:$(id -u):1" --translate-gid="map:100:$(id -g):1" --cache=auto 2>"$run_dir/virtiofsd-nix-store.log" & + echo "$!" > "$state_dir/virtiofsd_nix_store_pid" + for _ in $(seq 1 10); do [ -S "$nix_store_sock" ] && break; sleep 0.2; done + [ -S "$nix_store_sock" ] || die "virtiofsd socket for nix-store did not appear" + + # --- Start virtiofsd for each user mount --- + local mount_count + mount_count=$(echo "$mounts_json" | jq 'length') + if [ "$mount_count" -gt 0 ]; then + log "==> Starting virtiofsd for $mount_count mount(s)..." + for i in $(seq 0 $((mount_count - 1))); do + local src + src=$(echo "$mounts_json" | jq -r ".[$i].source") + src="${src/#\~/$HOME}" + [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" + [ ! -d "$src" ] && die "Mount source does not exist: $src" + + local virtiofs_sock="$run_dir/virtiofs-${i}.sock" + virtiofsd --socket-path="$virtiofs_sock" --shared-dir="$src" --sandbox=none --translate-uid="map:1000:$(id -u):1" --translate-gid="map:100:$(id -g):1" --cache=auto 2>"$run_dir/virtiofsd-${i}.log" & + echo "$!" 
> "$state_dir/virtiofsd_${i}_pid" + for _ in $(seq 1 10); do [ -S "$virtiofs_sock" ] && break; sleep 0.2; done + [ -S "$virtiofs_sock" ] || die "virtiofsd socket did not appear for mount $i" + done + fi + + # --- Boot VM (patched runner for vCPU/RAM) --- + log "==> Booting VM '$name'..." + + rm -f "$run_dir/api.sock" + + cp "$runner/bin/microvm-run" "$run_dir/microvm-run" + chmod +x "$run_dir/microvm-run" + sed -i "s/boot=[0-9]*/boot=$vcpus/" "$run_dir/microvm-run" + sed -i "s/shared=on,size=[0-9]*M/shared=on,size=${mem_mb}M/" "$run_dir/microvm-run" + sed -i "s/num_queues=512/num_queues=$vcpus/" "$run_dir/microvm-run" + sed -i "s/num_queues=256/num_queues=$vcpus/" "$run_dir/microvm-run" + sed -i "s/tap=vmtap0/tap=$TAP_DEV/" "$run_dir/microvm-run" + sed -i "s/mac=02:00:00:00:00:01/mac=$TAP_MAC/" "$run_dir/microvm-run" + sed -i "s/'cid=[0-9]*/'cid=$VSOCK_CID/" "$run_dir/microvm-run" + + cd "$run_dir" + ulimit -n 65536 + "$run_dir/microvm-run" >> "$run_dir/vm.log" 2>&1 & + local vm_pid=$! + echo "$vm_pid" > "$state_dir/pid" + echo "$name" > "$state_dir/name" + date +%s > "$state_dir/start_time" + + # --- Wait for API socket --- + log "==> Waiting for cloud-hypervisor API..." + local api_sock="$run_dir/api.sock" + for _ in $(seq 1 30); do + [ -S "$api_sock" ] && break + sleep 1 done - fi + if [ ! -S "$api_sock" ]; then + die "API socket did not appear after 30s" + fi - # --- Boot VM (patched runner for vCPU/RAM) --- - log "==> Booting VM '$name'..." 
- - rm -f "$run_dir/api.sock" - - cp "$runner/bin/microvm-run" "$run_dir/microvm-run" - chmod +x "$run_dir/microvm-run" - sed -i "s/boot=[0-9]*/boot=$vcpus/" "$run_dir/microvm-run" - sed -i "s/shared=on,size=[0-9]*M/shared=on,size=${mem_mb}M/" "$run_dir/microvm-run" - sed -i "s/num_queues=512/num_queues=$vcpus/" "$run_dir/microvm-run" - sed -i "s/num_queues=256/num_queues=$vcpus/" "$run_dir/microvm-run" - sed -i "s/tap=vmtap0/tap=$TAP_DEV/" "$run_dir/microvm-run" - sed -i "s/mac=02:00:00:00:00:01/mac=$TAP_MAC/" "$run_dir/microvm-run" - sed -i "s/'cid=[0-9]*/'cid=$VSOCK_CID/" "$run_dir/microvm-run" - - cd "$run_dir" - ulimit -n 65536 - "$run_dir/microvm-run" >> "$run_dir/vm.log" 2>&1 & - local vm_pid=$! - echo "$vm_pid" > "$state_dir/pid" - echo "$name" > "$state_dir/name" - date +%s > "$state_dir/start_time" - - # --- Wait for API socket --- - log "==> Waiting for cloud-hypervisor API..." - local api_sock="$run_dir/api.sock" - for _ in $(seq 1 30); do - [ -S "$api_sock" ] && break - sleep 1 - done - if [ ! -S "$api_sock" ]; then - die "API socket did not appear after 30s" - fi + # Wait for API to actually respond + for _ in $(seq 1 30); do + if curl -s --max-time 2 --unix-socket "$api_sock" "http://localhost/api/v1/vm.info" >/dev/null 2>&1; then + break + fi + sleep 1 + done - # Wait for API to actually respond - for _ in $(seq 1 30); do - if curl -s --max-time 2 --unix-socket "$api_sock" "http://localhost/api/v1/vm.info" >/dev/null 2>&1; then - break + # Check VM is still alive + if ! kill -0 "$vm_pid" 2>/dev/null; then + log "==> VM process died. Log tail:" >&2 + tail -20 "$run_dir/vm.log" >&2 + die "VM crashed before API became ready" fi - sleep 1 - done - # Check VM is still alive - if ! kill -0 "$vm_pid" 2>/dev/null; then - log "==> VM process died. Log tail:" >&2 - tail -20 "$run_dir/vm.log" >&2 - die "VM crashed before API became ready" - fi + # --- Hot-plug env disk --- + log "==> Hot-plugging env disk..." 
+ local env_img_abs + env_img_abs=$(realpath "$run_dir/env.img") + local disk_response + disk_response=$(curl -s --max-time 30 --unix-socket "$api_sock" -X PUT \ + "http://localhost/api/v1/vm.add-disk" \ + -H "Content-Type: application/json" \ + -d "{\"path\": \"$env_img_abs\", \"readonly\": true, \"id\": \"env-disk\"}" 2>&1) \ + || die "env disk hot-plug curl failed (exit $?): $disk_response" + [ -n "$disk_response" ] && log_sub "env-disk: $disk_response" + + # --- Hot-plug virtiofs for each mount --- + if [ "$mount_count" -gt 0 ]; then + for i in $(seq 0 $((mount_count - 1))); do + local virtiofs_sock_abs + virtiofs_sock_abs=$(realpath "$run_dir/virtiofs-${i}.sock") + curl -s --unix-socket "$api_sock" -X PUT \ + "http://localhost/api/v1/vm.add-fs" \ + -H "Content-Type: application/json" \ + -d "{\"tag\": \"mount-$i\", \"socket\": \"$virtiofs_sock_abs\", \"num_queues\": 1, \"queue_size\": 1024, \"id\": \"mount-$i\"}" + done + echo "" + fi - # --- Hot-plug env disk --- - log "==> Hot-plugging env disk..." 
- local env_img_abs - env_img_abs=$(realpath "$run_dir/env.img") - local disk_response - disk_response=$(curl -s --max-time 30 --unix-socket "$api_sock" -X PUT \ - "http://localhost/api/v1/vm.add-disk" \ - -H "Content-Type: application/json" \ - -d "{\"path\": \"$env_img_abs\", \"readonly\": true, \"id\": \"env-disk\"}" 2>&1) \ - || die "env disk hot-plug curl failed (exit $?): $disk_response" - [ -n "$disk_response" ] && log_sub "env-disk: $disk_response" - - # --- Hot-plug virtiofs for each mount --- - if [ "$mount_count" -gt 0 ]; then - for i in $(seq 0 $((mount_count - 1))); do - local virtiofs_sock_abs - virtiofs_sock_abs=$(realpath "$run_dir/virtiofs-${i}.sock") - curl -s --unix-socket "$api_sock" -X PUT \ - "http://localhost/api/v1/vm.add-fs" \ - -H "Content-Type: application/json" \ - -d "{\"tag\": \"mount-$i\", \"socket\": \"$virtiofs_sock_abs\", \"num_queues\": 1, \"queue_size\": 1024, \"id\": \"mount-$i\"}" + # --- Wait for SSH --- + local ssh_timeout="${NIXBOX_SSH_TIMEOUT:-120}" + log "==> Waiting for SSH readiness..." + for _ in $(seq 1 "$ssh_timeout"); do + if ssh "${SSH_OPTS[@]}" -o ConnectTimeout=2 ${VM_USER}@"$TAP_GUEST_IP" "true" 2>>"$run_dir/ssh-wait.log"; then + break + fi + sleep 1 done - echo "" - fi - - # --- Wait for SSH --- - local ssh_timeout="${NIXBOX_SSH_TIMEOUT:-120}" - log "==> Waiting for SSH readiness..." - for _ in $(seq 1 "$ssh_timeout"); do - if ssh "${SSH_OPTS[@]}" -o ConnectTimeout=2 ${VM_USER}@"$TAP_GUEST_IP" "true" 2>>"$run_dir/ssh-wait.log"; then - break + if ! 
ssh "${SSH_OPTS[@]}" -o ConnectTimeout=5 ${VM_USER}@"$TAP_GUEST_IP" "true" 2>>"$run_dir/ssh-wait.log"; then + # Save pre-cleanup diagnostics before do_destroy wipes state + { + echo "=== Ping test ===" + ping -c1 -W1 "$TAP_GUEST_IP" 2>&1 || true + echo "=== TAP traffic counters ===" + ip -s link show "$TAP_DEV" 2>&1 || echo "(no TAP)" + echo "=== Route to guest IP ===" + ip route get "$TAP_GUEST_IP" 2>&1 || echo "(no route)" + echo "=== ARP/neighbor state ===" + ip neigh show dev "$TAP_DEV" 2>&1 || echo "(no neigh)" + echo "=== cloud-hypervisor net arg ===" + grep -E 'net |tap=' "$run_dir/microvm-run" 2>/dev/null | head -3 || echo "(no microvm-run)" + echo "=== iptables INPUT chain ===" + sudo -n iptables -L INPUT -n -v 2>&1 || echo "(no iptables)" + echo "=== nftables full ruleset ===" + sudo -n nft list ruleset 2>/dev/null || echo "(no rules)" + } > "$run_dir/ssh-fail-diag.log" 2>&1 + die "SSH not ready after ${ssh_timeout}s" fi - sleep 1 - done - if ! ssh "${SSH_OPTS[@]}" -o ConnectTimeout=5 ${VM_USER}@"$TAP_GUEST_IP" "true" 2>>"$run_dir/ssh-wait.log"; then - # Save pre-cleanup diagnostics before do_destroy wipes state - { - echo "=== Ping test ===" - ping -c1 -W1 "$TAP_GUEST_IP" 2>&1 || true - echo "=== TAP traffic counters ===" - ip -s link show "$TAP_DEV" 2>&1 || echo "(no TAP)" - echo "=== Route to guest IP ===" - ip route get "$TAP_GUEST_IP" 2>&1 || echo "(no route)" - echo "=== ARP/neighbor state ===" - ip neigh show dev "$TAP_DEV" 2>&1 || echo "(no neigh)" - echo "=== cloud-hypervisor net arg ===" - grep -E 'net |tap=' "$run_dir/microvm-run" 2>/dev/null | head -3 || echo "(no microvm-run)" - echo "=== iptables INPUT chain ===" - sudo -n iptables -L INPUT -n -v 2>&1 || echo "(no iptables)" - echo "=== nftables full ruleset ===" - sudo -n nft list ruleset 2>/dev/null || echo "(no rules)" - } > "$run_dir/ssh-fail-diag.log" 2>&1 - die "SSH not ready after ${ssh_timeout}s" - fi - # --- Inject VM SSH key for outbound connections (e.g. 
GitHub) --- - log "==> Injecting SSH identity..." - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "mkdir -p ~/.ssh && chmod 700 ~/.ssh" - scp -q "${SSH_OPTS[@]}" "$SSH_KEY" "${VM_USER}@${TAP_GUEST_IP}:~/.ssh/id_ed25519" - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "chmod 600 ~/.ssh/id_ed25519" - log_sub "SSH key: $(cat "${SSH_KEY}.pub")" - - # --- Mount virtiofs inside guest --- - if [ "$mount_count" -gt 0 ]; then - for i in $(seq 0 $((mount_count - 1))); do - local tgt ro src - tgt=$(echo "$mounts_json" | jq -r ".[$i].target") - tgt="${tgt/#\~//home/${VM_USER}}" - ro=$(echo "$mounts_json" | jq -r ".[$i].readonly") - src=$(echo "$mounts_json" | jq -r ".[$i].source") - src="${src/#\~/$HOME}" - [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" - - local ro_flag="" - [ "$ro" = "true" ] && ro_flag="-o ro" - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" \ - "sudo mkdir -p '$tgt' && sudo mount -t virtiofs $ro_flag mount-$i '$tgt' && sudo chown ${VM_USER}:users '$tgt' 2>/dev/null || true" - log_sub "Mounted $src → $tgt" - - local ro_val=0; [ "$ro" = "true" ] && ro_val=1 - echo "$i|$src|$tgt|$ro_val" > "$state_dir/mount_$i" - done + # --- Inject VM SSH key for outbound connections (e.g. GitHub) --- + log "==> Injecting SSH identity..." 
+ ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "mkdir -p ~/.ssh && chmod 700 ~/.ssh" + scp -q "${SSH_OPTS[@]}" "$SSH_KEY" "${VM_USER}@${TAP_GUEST_IP}:~/.ssh/id_ed25519" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "chmod 600 ~/.ssh/id_ed25519" + log_sub "SSH key: $(cat "${SSH_KEY}.pub")" + + # --- Mount virtiofs inside guest --- + if [ "$mount_count" -gt 0 ]; then + for i in $(seq 0 $((mount_count - 1))); do + local tgt ro src + tgt=$(echo "$mounts_json" | jq -r ".[$i].target") + tgt="${tgt/#\~//home/${VM_USER}}" + ro=$(echo "$mounts_json" | jq -r ".[$i].readonly") + src=$(echo "$mounts_json" | jq -r ".[$i].source") + src="${src/#\~/$HOME}" + [[ "$src" != /* ]] && src="$(realpath "${project_dir}/${src}")" + + local ro_flag="" + [ "$ro" = "true" ] && ro_flag="-o ro" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" \ + "sudo mkdir -p '$tgt' && sudo mount -t virtiofs $ro_flag mount-$i '$tgt' && sudo chown ${VM_USER}:users '$tgt' 2>/dev/null || true" + log_sub "Mounted $src → $tgt" + + local ro_val=0; [ "$ro" = "true" ] && ro_val=1 + echo "$i|$src|$tgt|$ro_val" > "$state_dir/mount_$i" + done - fi + fi - # --- Run user scripts --- - local scripts_json - scripts_json=$(echo "$CONFIG_JSON" | jq -r '.scripts // []') - local script_count - script_count=$(echo "$scripts_json" | jq 'length') - if [ "$script_count" -gt 0 ]; then - log "==> Running setup scripts..." - for i in $(seq 0 $((script_count - 1))); do - local script_path - script_path=$(echo "$scripts_json" | jq -r ".[$i]") - [[ "$script_path" != /* ]] && script_path="$(realpath "${project_dir}/${script_path}")" - [ ! 
-f "$script_path" ] && { log_sub "WARNING: script not found: $script_path"; continue; } - local script_name - script_name=$(basename "$script_path") - scp -q "${SSH_OPTS[@]}" "$script_path" "${VM_USER}@$TAP_GUEST_IP:/tmp/$script_name" - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "chmod +x /tmp/$script_name && /tmp/$script_name && rm /tmp/$script_name" - log_sub "Ran $script_path" - done - fi + # --- Run user scripts --- + local scripts_json + scripts_json=$(echo "$CONFIG_JSON" | jq -r '.scripts // []') + local script_count + script_count=$(echo "$scripts_json" | jq 'length') + if [ "$script_count" -gt 0 ]; then + log "==> Running setup scripts..." + for i in $(seq 0 $((script_count - 1))); do + local script_path + script_path=$(echo "$scripts_json" | jq -r ".[$i]") + [[ "$script_path" != /* ]] && script_path="$(realpath "${project_dir}/${script_path}")" + [ ! -f "$script_path" ] && { log_sub "WARNING: script not found: $script_path"; continue; } + local script_name + script_name=$(basename "$script_path") + scp -q "${SSH_OPTS[@]}" "$script_path" "${VM_USER}@$TAP_GUEST_IP:/tmp/$script_name" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "chmod +x /tmp/$script_name && /tmp/$script_name && rm /tmp/$script_name" + log_sub "Ran $script_path" + done + fi - # --- Connectivity check (skip for off) --- - if [ "$network" != "off" ]; then - log "==> Connectivity check..." - if ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "curl -sf --max-time 5 https://cache.nixos.org >/dev/null 2>&1"; then - log_sub "Nix cache: OK" - else - log_sub "WARNING: Nix cache unreachable" + # --- Connectivity check (skip for off) --- + if [ "$network" != "off" ]; then + log "==> Connectivity check..." + if ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" "curl -sf --max-time 5 https://cache.nixos.org >/dev/null 2>&1"; then + log_sub "Nix cache: OK" + else + log_sub "WARNING: Nix cache unreachable" + fi fi fi @@ -514,11 +701,18 @@ do_ssh() { [ ! 
-d "$state_dir" ] && die "VM '$name' not found" + local target_ip + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + target_ip=$(cat "$state_dir/guest_ip" 2>/dev/null) || die "Guest IP not found. Is the VM running?" + else + target_ip="$TAP_GUEST_IP" + fi + if [ -z "$cmd" ]; then - exec ssh -t "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" + exec ssh -t "${SSH_OPTS[@]}" ${VM_USER}@"$target_ip" else local escaped_cmd="${cmd//\'/\'\\\'\'}" - ssh "${SSH_OPTS[@]}" ${VM_USER}@"$TAP_GUEST_IP" -- "bash -lc '${escaped_cmd}'" + ssh "${SSH_OPTS[@]}" ${VM_USER}@"$target_ip" -- "bash -lc '${escaped_cmd}'" fi } @@ -529,6 +723,8 @@ do_mount() { local run_dir="$NIXBOX_DIR/run" local api_sock="$run_dir/api.sock" + [ "$NIXBOX_PLATFORM" = "darwin" ] && die "Hot-plug mount/unmount not supported on macOS. Add mounts to config.nix and restart the VM." + [ ! -d "$state_dir" ] && die "VM '$name' not found" parse_mount_spec "$spec" @@ -579,6 +775,8 @@ do_unmount() { local run_dir="$NIXBOX_DIR/run" local api_sock="$run_dir/api.sock" + [ "$NIXBOX_PLATFORM" = "darwin" ] && die "Hot-plug mount/unmount not supported on macOS. Add mounts to config.nix and restart the VM." + [ ! 
-d "$state_dir" ] && die "VM '$name' not found" local mount_idx="" @@ -614,35 +812,76 @@ do_unmount() { } do_list() { - local found=0 - printf "%-15s %-10s %-12s %-10s %-6s\n" "NAME" "PID" "UPTIME" "STATUS" "SLOT" - printf "%-15s %-10s %-12s %-10s %-6s\n" "----" "---" "------" "------" "----" - mkdir -p "$SLOTS_DIR" - for slot_file in "$SLOTS_DIR"/*; do - [ -f "$slot_file" ] || continue - local nixbox_dir slot state_dir name pid start status uptime - nixbox_dir=$(cat "$slot_file") - slot=$(basename "$slot_file") - state_dir="$nixbox_dir/state" - [ -d "$state_dir" ] || continue + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + # Darwin: single-VM mode via ACTIVE_FILE + local nixbox_dir="" + if [ -f "$ACTIVE_FILE" ]; then + nixbox_dir="$(cat "$ACTIVE_FILE")" + elif nixbox_dir="$(find_nixbox_dir 2>/dev/null)"; then + : + fi + + if [ -z "$nixbox_dir" ] || [ ! -d "$nixbox_dir/state" ] || [ ! -f "$nixbox_dir/state/pid" ]; then + echo "No VMs found." + return 0 + fi + + local state_dir="$nixbox_dir/state" + + printf "%-15s %-10s %-12s %-10s\n" "NAME" "PID" "UPTIME" "STATUS" + printf "%-15s %-10s %-12s %-10s\n" "----" "---" "------" "------" + + local name pid start status uptime name=$(cat "$state_dir/name" 2>/dev/null || basename "$(dirname "$nixbox_dir")") pid=$(cat "$state_dir/pid" 2>/dev/null || echo "-") start=$(cat "$state_dir/start_time" 2>/dev/null || echo "0") + if [ "$pid" != "-" ] && kill -0 "$pid" 2>/dev/null; then status="running" + local now uptime_secs hours mins + now=$(date +%s) + uptime_secs=$((now - start)) + hours=$((uptime_secs / 3600)) + mins=$(( (uptime_secs % 3600) / 60 )) + uptime="${hours}h${mins}m" else status="dead" + uptime="-" fi - local now elapsed hours mins - now=$(date +%s) - elapsed=$((now - start)) - hours=$((elapsed / 3600)) - mins=$(( (elapsed % 3600) / 60 )) - uptime="${hours}h${mins}m" - printf "%-15s %-10s %-12s %-10s %-6s\n" "$name" "$pid" "$uptime" "$status" "$slot" - found=1 - done - [ "$found" -eq 0 ] && echo "No VMs running." 
+ + printf "%-15s %-10s %-12s %-10s\n" "$name" "$pid" "$uptime" "$status" + else + # Linux: multi-slot mode + local found=0 + printf "%-15s %-10s %-12s %-10s %-6s\n" "NAME" "PID" "UPTIME" "STATUS" "SLOT" + printf "%-15s %-10s %-12s %-10s %-6s\n" "----" "---" "------" "------" "----" + mkdir -p "$SLOTS_DIR" + for slot_file in "$SLOTS_DIR"/*; do + [ -f "$slot_file" ] || continue + local nixbox_dir slot state_dir name pid start status uptime + nixbox_dir=$(cat "$slot_file") + slot=$(basename "$slot_file") + state_dir="$nixbox_dir/state" + [ -d "$state_dir" ] || continue + name=$(cat "$state_dir/name" 2>/dev/null || basename "$(dirname "$nixbox_dir")") + pid=$(cat "$state_dir/pid" 2>/dev/null || echo "-") + start=$(cat "$state_dir/start_time" 2>/dev/null || echo "0") + if [ "$pid" != "-" ] && kill -0 "$pid" 2>/dev/null; then + status="running" + else + status="dead" + fi + local now elapsed hours mins + now=$(date +%s) + elapsed=$((now - start)) + hours=$((elapsed / 3600)) + mins=$(( (elapsed % 3600) / 60 )) + uptime="${hours}h${mins}m" + printf "%-15s %-10s %-12s %-10s %-6s\n" "$name" "$pid" "$uptime" "$status" "$slot" + found=1 + done + [ "$found" -eq 0 ] && echo "No VMs running." + fi } # --------------------------------------------------------------------------- @@ -650,10 +889,16 @@ do_list() { # --------------------------------------------------------------------------- ensure_setup() { - for cmd in nix jq dnsmasq nft mke2fs virtiofsd; do - command -v "$cmd" &>/dev/null || die "$cmd not found. Install it first." - done - [ -e /dev/kvm ] || die "/dev/kvm not found. KVM is required." + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + for cmd in nix jq mke2fs; do + command -v "$cmd" &>/dev/null || die "$cmd not found. Install it first." + done + else + for cmd in nix jq dnsmasq nft mke2fs virtiofsd; do + command -v "$cmd" &>/dev/null || die "$cmd not found. Install it first." + done + [ -e /dev/kvm ] || die "/dev/kvm not found. KVM is required." 
+ fi mkdir -p "$NIXBOX_DIR"/{ssh,state,run,tmp} if [ ! -f "$SSH_KEY" ]; then log "==> Generating SSH key pair..." @@ -661,6 +906,18 @@ ensure_setup() { fi } +_nix_system() { + local arch + arch="$(uname -m)" + # Nix uses "aarch64", Apple tools report "arm64" + [ "$arch" = "arm64" ] && arch="aarch64" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + echo "${arch}-darwin" + else + echo "${arch}-linux" + fi +} + ensure_build() { local config_path="$1" local hash_file="$NIXBOX_DIR/state/.build-hash" @@ -685,7 +942,9 @@ ensure_build() { cp -r "$NIXBOX_SRC/plugins" "$build_dir/" cp "$config_path" "$build_dir/project-config.nix" cp "$ssh_pub" "$build_dir/ssh_key.pub" - echo "{ username = \"$VM_USER\"; }" > "$build_dir/host-info.nix" + local nix_system + nix_system="$(_nix_system)" + echo "{ username = \"$VM_USER\"; hostSystem = \"$nix_system\"; }" > "$build_dir/host-info.nix" # Use project-pinned flake.lock if available; otherwise nix resolves latest local user_lock="$NIXBOX_DIR/flake.lock" @@ -696,12 +955,12 @@ ensure_build() { (cd "$build_dir" && git init -q && git add -A && git commit -q -m "build" --allow-empty) log "==> Building VM runner..." - (cd "$build_dir" && nix build .#vm-runner) + (cd "$build_dir" && nix build ".#packages.${nix_system}.vm-runner") # Persist the resolved flake.lock back to the project cp "$build_dir/flake.lock" "$NIXBOX_DIR/flake.lock" - ln -sfn "$(readlink -f "$build_dir/result")" "$runner" + ln -sfn "$(_portable_realpath "$build_dir/result")" "$runner" echo "$current_hash" > "$hash_file" log "==> Build complete." 
} @@ -776,14 +1035,22 @@ cmd_up() { echo "$CONFIG_JSON" | jq -r '.network.domains[]' > "$domains_file" fi - VM_SLOT=$(allocate_slot "$NIXBOX_DIR") - derive_network "$VM_SLOT" "$project_name" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + check_no_conflicting_vm + else + VM_SLOT=$(allocate_slot "$NIXBOX_DIR") + derive_network "$VM_SLOT" "$project_name" + fi local vcpus mem_mb vcpus="$(jq_field '.resources.vcpus')" mem_mb="$(jq_field '.resources.memoryMB')" log "==> Starting VM '$project_name' [network=$network, vcpus=$vcpus, mem=${mem_mb}MB]" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + echo "$NIXBOX_DIR" > "$ACTIVE_FILE" + fi + run_hooks "pre-up" do_create "$project_name" "$mounts_json" "$domains_file" "$project_dir" [ -n "$domains_file" ] && rm -f "$domains_file" @@ -797,13 +1064,19 @@ cmd_down() { local project_name project_name="$(get_project_name)" - local vm_slot - vm_slot=$(cat "$NIXBOX_DIR/state/slot" 2>/dev/null || echo "0") - derive_network "$vm_slot" "$project_name" + if [ "$NIXBOX_PLATFORM" != "darwin" ]; then + local vm_slot + vm_slot=$(cat "$NIXBOX_DIR/state/slot" 2>/dev/null || echo "0") + derive_network "$vm_slot" "$project_name" + fi run_hooks "pre-down" do_destroy "$project_name" - release_slot "$NIXBOX_DIR" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + rm -f "$ACTIVE_FILE" + else + release_slot "$NIXBOX_DIR" + fi run_hooks "post-down" } @@ -840,6 +1113,10 @@ cmd_unmount() { cmd_status() { do_list + if [ "$NIXBOX_PLATFORM" = "darwin" ] && [ -f "$ACTIVE_FILE" ]; then + echo "" + echo "Active workspace: $(dirname "$(cat "$ACTIVE_FILE")")" + fi } cmd_config() { @@ -853,20 +1130,31 @@ cmd_doctor() { local errors=0 log "==> Checking prerequisites..." 
- for cmd in nix jq dnsmasq nft mke2fs virtiofsd; do - if command -v "$cmd" &>/dev/null; then - log_sub "$cmd: OK" + if [ "$NIXBOX_PLATFORM" = "darwin" ]; then + for cmd in nix jq mke2fs; do + if command -v "$cmd" &>/dev/null; then + log_sub "$cmd: OK" + else + log_sub "$cmd: MISSING" + errors=$((errors + 1)) + fi + done + else + for cmd in nix jq dnsmasq nft mke2fs virtiofsd; do + if command -v "$cmd" &>/dev/null; then + log_sub "$cmd: OK" + else + log_sub "$cmd: MISSING" + errors=$((errors + 1)) + fi + done + + if [ -e /dev/kvm ]; then + log_sub "/dev/kvm: OK" else - log_sub "$cmd: MISSING" + log_sub "/dev/kvm: MISSING" errors=$((errors + 1)) fi - done - - if [ -e /dev/kvm ]; then - log_sub "/dev/kvm: OK" - else - log_sub "/dev/kvm: MISSING" - errors=$((errors + 1)) fi echo "" diff --git a/docs/decisions/015-macos-vfkit-support.md b/docs/decisions/015-macos-vfkit-support.md new file mode 100644 index 0000000..8eb2991 --- /dev/null +++ b/docs/decisions/015-macos-vfkit-support.md @@ -0,0 +1,48 @@ +# 015: macOS support via vfkit hypervisor + +**Date:** 2026-04-07 +**Status:** accepted + +## Problem + +nixbox was Linux-only (cloud-hypervisor + TAP + nftables + dnsmasq). macOS users — including those on Apple Silicon — could not run nixbox at all. Supporting macOS requires a different hypervisor, networking stack, and several platform-conditional code paths. + +## Decision + +Add macOS (aarch64-darwin) as a supported host platform using [vfkit](https://github.com/crc-org/vfkit) as the hypervisor and macOS vmnet for networking. 
+ +### Platform differences + +| Concern | Linux | macOS | +|---|---|---| +| Hypervisor | cloud-hypervisor | vfkit (Virtualization.framework) | +| Networking | TAP + nftables + dnsmasq | vmnet NAT (DHCP from macOS) | +| Guest IP discovery | Static (slot-based `172.16.{slot*4}.2`) | ARP scan after boot | +| Filesystem sharing | virtiofs (virtiofsd) | virtiofs (vfkit built-in) | +| CPU detection | `nproc` | `sysctl -n hw.ncpu` | +| Network filtering | nftables allowlist + dnsmasq DNS | Not yet implemented | +| Hot-plug mounts | cloud-hypervisor HTTP API | Not supported — restart required | +| Credential disk | virtio-blk hot-plug via HTTP API | virtio-blk attached at boot | + +### Architecture mapping + +Apple tools report `arm64` for the CPU architecture, but Nix uses `aarch64`. The CLI maps `arm64` → `aarch64` in `_nix_system()` to produce correct Nix system triples (e.g. `aarch64-darwin`). + +The guest is always Linux. On darwin hosts it is `aarch64-linux` — Virtualization.framework runs ARM Linux guests natively on Apple Silicon. On Linux hosts the guest matches the host architecture (`x86_64-linux` or `aarch64-linux`), per the `guestSystem` mapping in the flake. + +### CI compromise + +macOS GitHub Actions runners cannot build `aarch64-linux` derivations (no Linux builder available). Therefore: + +- **Unit tests** (ShellCheck, BATS, Nix eval) run on both Linux and macOS — they don't build guest images. +- **E2E tests** (full VM lifecycle) run on Linux only — they require building the NixOS guest rootfs. + +macOS E2E testing requires either local hardware or a CI provider with Linux builder support for cross-compilation. + +## Consequences + +- macOS users on Apple Silicon can run nixbox with `vfkit` as the hypervisor. +- Network filtering (`filtered` mode) is not yet available on macOS — only `open` mode works. Implementing pfctl-based filtering is future work. +- Hot-plug mount/unmount is not supported on macOS — mounts must be declared in config and require a VM restart. +- E2E CI coverage is Linux-only; macOS regressions in the boot path won't be caught until tested locally.
+- The `microvm.interfaces` config requires an `id` field for all interface types, even when the ID has no physical meaning (e.g. vfkit user networking). diff --git a/docs/decisions/README.md b/docs/decisions/README.md index cc66a5d..fe92fe3 100644 --- a/docs/decisions/README.md +++ b/docs/decisions/README.md @@ -23,3 +23,5 @@ Each file: `NNN-short-title.md` with sections **Problem**, **Decision**, **Conse | [011](011-guest-setup-scripts.md) | Guest setup via user-provided scripts | 2026-03-24 | accepted | | [012](012-per-workspace-nixbox-directory.md) | Per-workspace `.nixbox/` directory | 2026-03-24 | accepted | | [013](013-plugin-env-transparency.md) | Plugins must not inject env vars | 2026-03-24 | accepted | +| [014](014-vm-ssh-key-injection.md) | Inject VM SSH key for outbound authentication | 2026-03-27 | accepted | +| [015](015-macos-vfkit-support.md) | macOS support via vfkit hypervisor | 2026-04-07 | accepted | diff --git a/flake.nix b/flake.nix index d6a6ada..95fd27f 100644 --- a/flake.nix +++ b/flake.nix @@ -16,11 +16,31 @@ microvm, }: let - system = "x86_64-linux"; + lib = nixpkgs.lib; + + supportedSystems = [ + "x86_64-linux" + "aarch64-linux" + "aarch64-darwin" + ]; + + # Host → guest mapping (darwin hosts run linux guests) + guestSystem = { + "x86_64-linux" = "x86_64-linux"; + "aarch64-linux" = "aarch64-linux"; + "aarch64-darwin" = "aarch64-linux"; + }; + + isDarwin = hostSystem: builtins.match ".*-darwin" hostSystem != null; + + # --- Constants --- + vcpus = 256; # Headroom ceiling — actual boot value patched by CLI at launch (defaults to nproc) memMB = 65536; # 64GB headroom ceiling — patched at launch; balloon returns unused pages rootDiskGB = 64; + # --- Host-independent config (resolved from build dir files) --- + projectConfig = let path = ./project-config.nix; @@ -39,214 +59,281 @@ vmUser = hostInfo.username or "user"; - pkgs = import nixpkgs { - inherit system; - config.allowUnfree = true; - }; - in - { - nixosConfigurations.nixbox = 
nixpkgs.lib.nixosSystem { - inherit system; - modules = [ - microvm.nixosModules.microvm - ( - { - config, - pkgs, - lib, - ... - }: - { - nixpkgs.config.allowUnfree = true; - - microvm = { - hypervisor = "cloud-hypervisor"; - vcpu = vcpus; - mem = memMB; - socket = "api.sock"; - vsock.cid = 3; - balloon = true; - - interfaces = [ - { - type = "tap"; - id = "vmtap0"; - mac = "02:00:00:00:00:01"; - } - ]; + # --- Per-host NixOS configuration builder --- - volumes = [ + mkNixboxConfig = + hostSystem: + let + guest = guestSystem.${hostSystem}; + darwin = isDarwin hostSystem; + in + nixpkgs.lib.nixosSystem { + system = guest; + modules = [ + microvm.nixosModules.microvm + ( + { + config, + pkgs, + lib, + ... + }: + { + nixpkgs.config.allowUnfree = true; + + microvm = { - image = "root.img"; - mountPoint = "/"; - size = rootDiskGB * 1024; - autoCreate = true; + vcpu = vcpus; + mem = memMB; + socket = "api.sock"; + + volumes = [ + { + image = "root.img"; + mountPoint = "/"; + size = rootDiskGB * 1024; + autoCreate = true; + } + ]; + + shares = [ + { + proto = "virtiofs"; + tag = "nix-store"; + source = "/nix/store"; + mountPoint = "/nix/.ro-store"; + } + ]; } - ]; + // ( + if darwin then + { + hypervisor = "vfkit"; + balloon = false; + vsock.cid = null; + vmHostPackages = import nixpkgs { system = hostSystem; }; + interfaces = [ + { + type = "user"; + id = "usernet0"; + mac = "02:00:00:00:00:01"; + } + ]; + } + else + { + hypervisor = "cloud-hypervisor"; + balloon = true; + vsock.cid = 3; + interfaces = [ + { + type = "tap"; + id = "vmtap0"; + mac = "02:00:00:00:00:01"; + } + ]; + } + ); + + # Workaround: nixpkgs removed the default fsType="auto" (NixOS/nixpkgs#444829) + # and microvm.nix's bind mount for /nix/store doesn't set it (astro/microvm.nix#500). + # Remove once microvm.nix merges astro/microvm.nix#502. 
+ fileSystems."/nix/store".fsType = lib.mkDefault "none"; + + # --- Packages --- + + environment.systemPackages = + let + basePackages = with pkgs; [ + curl + git + htop + jq + openssh + python3 + tmux + vim + ]; + extraPackages = map (name: pkgs.${name}) ((projectConfig.nix or { }).packages or [ ]); + in + basePackages ++ extraPackages; + + # --- Environment --- + + environment.shellInit = '' + [ -f "$HOME/.env" ] && set -a && . "$HOME/.env" && set +a + ''; - shares = [ - { - proto = "virtiofs"; - tag = "nix-store"; - source = "/nix/store"; - mountPoint = "/nix/.ro-store"; - } - ]; - }; - - # Workaround: nixpkgs removed the default fsType="auto" (NixOS/nixpkgs#444829) - # and microvm.nix's bind mount for /nix/store doesn't set it (astro/microvm.nix#500). - # Remove once microvm.nix merges astro/microvm.nix#502. - fileSystems."/nix/store".fsType = lib.mkDefault "none"; - - # --- Packages --- - - environment.systemPackages = - let - basePackages = with pkgs; [ - curl - git - htop - jq - openssh - python3 - tmux - vim + # --- User --- + + users.users.${vmUser} = { + isNormalUser = true; + uid = 1000; + home = "/home/${vmUser}"; + extraGroups = [ + "wheel" + "docker" ]; - extraPackages = map (name: pkgs.${name}) ((projectConfig.nix or { }).packages or [ ]); - in - basePackages ++ extraPackages; + openssh.authorizedKeys.keyFiles = [ ./ssh_key.pub ]; + }; - # --- Environment --- + security.sudo.wheelNeedsPassword = false; - environment.shellInit = '' - [ -f "$HOME/.env" ] && set -a && . 
"$HOME/.env" && set +a - ''; + # --- Services --- - # --- User --- + services.openssh = { + enable = true; + settings = { + PasswordAuthentication = false; + PermitRootLogin = "no"; + }; + }; - users.users.${vmUser} = { - isNormalUser = true; - uid = 1000; - home = "/home/${vmUser}"; - extraGroups = [ - "wheel" - "docker" - ]; - openssh.authorizedKeys.keyFiles = [ ./ssh_key.pub ]; - }; + virtualisation.docker = { + enable = true; + storageDriver = "overlay2"; + }; - security.sudo.wheelNeedsPassword = false; + # --- Networking --- - # --- Services --- + # Use traditional interface names (eth0) instead of predictable names (enp0s*) + boot.kernelParams = [ "net.ifnames=0" ]; - services.openssh = { - enable = true; - settings = { - PasswordAuthentication = false; - PermitRootLogin = "no"; - }; - }; - - virtualisation.docker = { - enable = true; - storageDriver = "overlay2"; - }; - - # --- Networking --- - - # net.ifnames=0 ensures the virtio-net NIC is always named eth0 - boot.kernelParams = [ "net.ifnames=0" ]; - - networking = { - hostName = "nixbox"; - firewall.enable = false; - useNetworkd = true; - }; - - systemd.network.networks."10-vm" = { - matchConfig.Name = "eth0"; - networkConfig.DHCP = "ipv4"; - }; - - # --- systemd: Inject environment from host via hot-plugged disk --- - - systemd.services.inject-env = { - description = "Inject environment from host"; - before = [ "sshd.service" ]; - wantedBy = [ "multi-user.target" ]; - serviceConfig = { - Type = "oneshot"; - RemainAfterExit = true; + networking = + { + hostName = "nixbox"; + firewall.enable = false; + useDHCP = false; + } + // ( + if darwin then + { + interfaces.eth0.useDHCP = true; + nameservers = [ + "8.8.8.8" + "8.8.4.4" + ]; + } + else + { + interfaces.eth0 = { + useDHCP = false; + ipv4.addresses = [ + { + address = "172.16.0.2"; + prefixLength = 30; + } + ]; + }; + defaultGateway = { + address = "172.16.0.1"; + interface = "eth0"; + }; + nameservers = [ "172.16.0.1" ]; + } + ); + + # --- systemd: 
Inject environment from host via hot-plugged disk --- + + systemd.services.inject-env = { + description = "Inject environment from host"; + before = [ "sshd.service" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + }; + path = with pkgs; [ + util-linux + coreutils + ]; + script = '' + set -euo pipefail + timeout=10 + while [ ! -b /dev/vdb ] && [ "$timeout" -gt 0 ]; do + sleep 1; timeout=$((timeout - 1)) + done + [ ! -b /dev/vdb ] && { echo "WARNING: env disk not found"; exit 0; } + mkdir -p /mnt/env-disk + mount -o ro /dev/vdb /mnt/env-disk + VM_HOME=/home/${vmUser} + + # Environment file + [ -f /mnt/env-disk/env ] && cp /mnt/env-disk/env "$VM_HOME/.env" + + chown -R ${vmUser}:users "$VM_HOME" + umount /mnt/env-disk; rmdir /mnt/env-disk + ''; }; - path = with pkgs; [ - util-linux - coreutils - ]; - script = '' - set -euo pipefail - timeout=10 - while [ ! -b /dev/vdb ] && [ "$timeout" -gt 0 ]; do - sleep 1; timeout=$((timeout - 1)) - done - [ ! 
-b /dev/vdb ] && { echo "WARNING: env disk not found"; exit 0; } - mkdir -p /mnt/env-disk - mount -o ro /dev/vdb /mnt/env-disk - VM_HOME=/home/${vmUser} - - # Environment file - [ -f /mnt/env-disk/env ] && cp /mnt/env-disk/env "$VM_HOME/.env" - - chown -R ${vmUser}:users "$VM_HOME" - umount /mnt/env-disk; rmdir /mnt/env-disk - ''; - }; - system.stateVersion = "25.05"; - } - ) - ]; - }; + system.stateVersion = "25.05"; + } + ) + ]; + }; + in + { + nixosConfigurations = lib.genAttrs (map (s: "nixbox-${s}") supportedSystems) ( + name: + let + hostSystem = lib.removePrefix "nixbox-" name; + in + mkNixboxConfig hostSystem + ); - packages.${system} = + packages = lib.genAttrs supportedSystems ( + hostSystem: let + darwin = isDarwin hostSystem; + pkgs = import nixpkgs { + system = hostSystem; + config.allowUnfree = true; + }; + hypervisor = if darwin then "vfkit" else "cloud-hypervisor"; + nixbox = pkgs.stdenvNoCC.mkDerivation { pname = "nixbox"; version = "0.1.0"; src = ./.; nativeBuildInputs = [ pkgs.makeWrapper ]; - installPhase = '' - mkdir -p $out/bin $out/share/nixbox - cp bin/nixbox $out/bin/nixbox - cp flake.nix flake.lock $out/share/nixbox/ - cp -r lib $out/share/nixbox/ - cp -r plugins $out/share/nixbox/ - cp config.example.nix $out/share/nixbox/ - - wrapProgram $out/bin/nixbox \ - --prefix PATH : ${ - pkgs.lib.makeBinPath [ - pkgs.jq - pkgs.e2fsprogs - pkgs.virtiofsd - pkgs.openssh - pkgs.curl - pkgs.git - pkgs.gnused - ] - } - ''; + installPhase = + let + commonDeps = with pkgs; [ + jq + e2fsprogs + openssh + curl + git + gnused + ]; + linuxDeps = [ pkgs.virtiofsd ]; + wrapDeps = commonDeps ++ (if darwin then [ ] else linuxDeps); + in + '' + mkdir -p $out/bin $out/share/nixbox + cp bin/nixbox $out/bin/nixbox + cp flake.nix flake.lock $out/share/nixbox/ + cp -r lib $out/share/nixbox/ + cp -r plugins $out/share/nixbox/ + cp config.example.nix $out/share/nixbox/ + + wrapProgram $out/bin/nixbox \ + --prefix PATH : ${pkgs.lib.makeBinPath wrapDeps} + ''; }; in { 
inherit nixbox; - vm-runner = self.nixosConfigurations.nixbox.config.microvm.runner.cloud-hypervisor; + vm-runner = self.nixosConfigurations."nixbox-${hostSystem}".config.microvm.runner.${hypervisor}; default = nixbox; - }; + } + ); - apps.${system}.default = { - type = "app"; - program = "${self.packages.${system}.nixbox}/bin/nixbox"; - }; + apps = lib.genAttrs supportedSystems (hostSystem: { + default = { + type = "app"; + program = "${self.packages.${hostSystem}.nixbox}/bin/nixbox"; + }; + }); }; } diff --git a/lib/functions.bash b/lib/functions.bash index 4d45a5b..01fad96 100644 --- a/lib/functions.bash +++ b/lib/functions.bash @@ -208,6 +208,8 @@ init_nixbox_dir() { init_nixbox_dir_or_active() { if NIXBOX_DIR="$(find_nixbox_dir 2>/dev/null)"; then : + elif [ "${NIXBOX_PLATFORM:-linux}" = "darwin" ] && [ -f "$ACTIVE_FILE" ]; then + NIXBOX_DIR="$(cat "$ACTIVE_FILE")" else local running=() mkdir -p "$SLOTS_DIR" @@ -234,12 +236,34 @@ init_nixbox_dir_or_active() { SSH_KEY="$NIXBOX_DIR/ssh/vm_key" SSH_OPTS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -i "$SSH_KEY") - # Derive network from persisted slot - local vm_slot - vm_slot=$(cat "$NIXBOX_DIR/state/slot" 2>/dev/null || echo "0") - local name - name=$(cat "$NIXBOX_DIR/state/name" 2>/dev/null || basename "$(dirname "$NIXBOX_DIR")") - derive_network "$vm_slot" "$name" + # Derive network from persisted slot (Linux only) + if [ "${NIXBOX_PLATFORM:-linux}" != "darwin" ]; then + local vm_slot + vm_slot=$(cat "$NIXBOX_DIR/state/slot" 2>/dev/null || echo "0") + local name + name=$(cat "$NIXBOX_DIR/state/name" 2>/dev/null || basename "$(dirname "$NIXBOX_DIR")") + derive_network "$vm_slot" "$name" + fi +} + +# --------------------------------------------------------------------------- +# Darwin: single-VM conflict guard +# --------------------------------------------------------------------------- + +check_no_conflicting_vm() { + if [ -f "$ACTIVE_FILE" ]; then + local active_nixbox_dir + 
active_nixbox_dir=$(cat "$ACTIVE_FILE") + if [ "$active_nixbox_dir" != "$NIXBOX_DIR" ]; then + local pid_file="${active_nixbox_dir}/state/pid" + if [ -f "$pid_file" ] && kill -0 "$(cat "$pid_file")" 2>/dev/null; then + local active_name + active_name=$(basename "$(dirname "$active_nixbox_dir")") + die "VM for '${active_name}' ($(dirname "$active_nixbox_dir")) is running. Run 'nixbox down' there first." + fi + rm -f "$ACTIVE_FILE" + fi + fi } # --------------------------------------------------------------------------- @@ -249,6 +273,8 @@ init_nixbox_dir_or_active() { compute_build_hash() { local config_path="$1" local lock_file="$NIXBOX_DIR/flake.lock" + local hash_cmd="sha256sum" + command -v sha256sum &>/dev/null || hash_cmd="shasum -a 256" ( cat "$NIXBOX_SRC/flake.nix" \ "$NIXBOX_SRC/lib/resolve.nix" \ @@ -261,5 +287,5 @@ compute_build_hash() { echo "__no_lock__" fi find "$NIXBOX_SRC/plugins" -name '*.nix' -exec cat {} + 2>/dev/null || true - ) | sha256sum | cut -d' ' -f1 + ) | $hash_cmd | cut -d' ' -f1 }