diff --git a/src/runtime/incus.rs b/src/runtime/incus.rs index 2d4e8ff..47c0cc9 100644 --- a/src/runtime/incus.rs +++ b/src/runtime/incus.rs @@ -150,13 +150,18 @@ impl Runtime for IncusRuntime { ); } - // Ensure the base image is available (auto-download if missing). - Self::ensure_image(&opts.image).await?; + // Determine which image to launch from: cached or base + let image = if let Some(cached) = &opts.cached_image { + println!("Launching from cached image '{cached}'..."); + cached.clone() + } else { + Self::ensure_image(&opts.image).await?; + Self::image_alias(&opts.image).to_string() + }; // Launch the VM println!("Creating Incus VM '{vm}'..."); - let image = Self::image_alias(&opts.image); - let mut launch_args = vec!["launch", image, &vm, "--vm", "-c", "security.secureboot=false"]; + let mut launch_args = vec!["launch", &image, &vm, "--vm", "-c", "security.secureboot=false"]; let cpu_str; if opts.cpu > 0 { @@ -175,13 +180,14 @@ impl Runtime for IncusRuntime { run_ok("incus", &launch_args).await?; - // Expand the root disk to 20GB (Incus default is 10GB, too small for - // NixOS with dev tools). Must be done after launch, before provisioning. - let _ = run_ok( - "incus", - &["config", "device", "override", &vm, "root", "size=20GiB"], - ) - .await; + // Expand disk only for base images (cached images already have 20GB) + if opts.cached_image.is_none() { + let _ = run_ok( + "incus", + &["config", "device", "override", &vm, "root", "size=20GiB"], + ) + .await; + } // Wait for the VM agent to be ready before provisioning. // The guest agent takes time to start after boot. 
@@ -384,6 +390,44 @@ impl Runtime for IncusRuntime { async fn update_mounts(&self, _name: &str, _mounts: &[super::Mount]) -> Result<()> { bail!("Updating mounts is not supported for the Incus runtime") } + + async fn cached_image(&self, cache_key: &str) -> Option<String> { + let alias = format!("devbox-cache-{cache_key}"); + let result = run_cmd( + "incus", + &["image", "list", &format!("local:{alias}"), "--format", "json"], + ) + .await + .ok()?; + if result.exit_code == 0 { + if let Ok(arr) = serde_json::from_str::<Vec<serde_json::Value>>(&result.stdout) { + if !arr.is_empty() { + return Some(alias); + } + } + } + None + } + + async fn cache_image(&self, name: &str, cache_key: &str) -> Result<()> { + let vm = Self::vm_name(name); + let alias = format!("devbox-cache-{cache_key}"); + + println!("Caching provisioned image as '{alias}'..."); + + // Stop VM before publishing (required by incus publish) + let _ = run_cmd("incus", &["stop", &vm]).await; + + // Publish the VM as a reusable image + run_ok("incus", &["publish", &vm, "--alias", &alias]).await?; + + // Restart the VM + run_ok("incus", &["start", &vm]).await?; + Self::wait_for_agent(&vm).await?; + + println!("Image cached successfully."); + Ok(()) + } } fn chrono_now() -> String { diff --git a/src/runtime/lima.rs b/src/runtime/lima.rs index 0378c0d..401e5f8 100644 --- a/src/runtime/lima.rs +++ b/src/runtime/lima.rs @@ -176,8 +176,13 @@ impl Runtime for LimaRuntime { } else { "NixOS" }; - println!("Creating {image_label} VM '{vm}'..."); - println!(" (first run downloads {image_label} image, this may take a few minutes)"); + + if opts.cached_image.is_some() { + println!("Creating {image_label} VM '{vm}' from cache..."); + } else { + println!("Creating {image_label} VM '{vm}'..."); + println!(" (first run downloads {image_label} image, this may take a few minutes)"); + } run_ok( "limactl", &[ @@ -190,6 +195,21 @@ impl Runtime for LimaRuntime { ) .await?; + // If we have a cached disk, copy it over before first start. 
+ // Lima creates the disk structure during `create`; we overwrite + // `diffdisk` with the cached provisioned state before `start` boots it. + if let Some(cached_disk) = &opts.cached_image { + let home = dirs::home_dir().unwrap_or_default(); + let diffdisk = home.join(format!(".lima/{vm}/diffdisk")); + println!("Restoring cached disk image..."); + // Use cp -c for APFS clone (instant, zero-cost on macOS) + let _ = run_cmd( + "cp", + &["-c", cached_disk, &diffdisk.to_string_lossy()], + ) + .await; + } + println!("Starting {image_label} VM '{vm}'..."); run_ok("limactl", &["start", &vm]).await?; @@ -377,6 +397,57 @@ impl Runtime for LimaRuntime { Ok(()) } + + async fn cached_image(&self, cache_key: &str) -> Option<String> { + let cache_path = dirs::home_dir()? + .join(format!(".devbox/cache/lima-{cache_key}.disk")); + if cache_path.exists() { + Some(cache_path.to_string_lossy().to_string()) + } else { + None + } + } + + async fn cache_image(&self, name: &str, cache_key: &str) -> Result<()> { + let vm = Self::vm_name(name); + let home = dirs::home_dir().unwrap_or_default(); + let diffdisk = home.join(format!(".lima/{vm}/diffdisk")); + + if !diffdisk.exists() { + bail!("Lima disk not found at {}", diffdisk.display()); + } + + let cache_dir = home.join(".devbox/cache"); + std::fs::create_dir_all(&cache_dir)?; + let cache_path = cache_dir.join(format!("lima-{cache_key}.disk")); + + println!("Caching provisioned image..."); + + // Stop VM before copying disk for consistency + let _ = run_cmd("limactl", &["stop", &vm]).await; + + // Use cp -c for APFS clone (instant, zero-cost on macOS) + let result = run_cmd( + "cp", + &["-c", &diffdisk.to_string_lossy(), &cache_path.to_string_lossy()], + ) + .await; + + // Fall back to regular copy if APFS clone fails (non-APFS filesystem) + if result.is_err() || result.as_ref().is_ok_and(|r| r.exit_code != 0) { + let _ = run_cmd( + "cp", + &[&diffdisk.to_string_lossy(), &cache_path.to_string_lossy()], + ) + .await; + } + + // Restart the VM + 
run_ok("limactl", &["start", &vm]).await?; + + println!("Image cached successfully."); + Ok(()) + } } fn chrono_now() -> String { @@ -412,6 +483,7 @@ mod tests { bare: false, writable: false, image: "nixos".to_string(), + cached_image: None, }; let yaml = LimaRuntime::generate_yaml(&opts); assert!(yaml.contains("cpus: 4")); @@ -440,6 +512,7 @@ bare: false, writable: false, image: "nixos".to_string(), + cached_image: None, }; let yaml = LimaRuntime::generate_yaml(&opts); assert!(yaml.contains("cpus: 4")); @@ -477,6 +550,7 @@ bare: false, writable: false, image: "ubuntu".to_string(), + cached_image: None, }; let yaml = LimaRuntime::generate_yaml(&opts); assert!(yaml.contains("ubuntu-24.04")); @@ -502,6 +576,7 @@ bare: false, writable: false, image: "nixos".to_string(), + cached_image: None, }; let yaml = LimaRuntime::generate_yaml(&opts); assert!(yaml.contains("nixos-lima")); diff --git a/src/runtime/mod.rs b/src/runtime/mod.rs index 7f66723..b559950 100644 --- a/src/runtime/mod.rs +++ b/src/runtime/mod.rs @@ -55,6 +55,9 @@ pub struct CreateOpts { pub writable: bool, /// Base image type: "nixos" or "ubuntu" pub image: String, + /// If set, create from this cached image instead of the base image. + /// For Incus: an image alias; for Lima: a path to a cached disk file. + pub cached_image: Option<String>, } /// A host-to-VM mount point. @@ -141,15 +144,15 @@ pub trait Runtime: Send + Sync { false } - /// Check if a cached provisioned image exists for the given tool set. - /// Returns the image alias if found. - async fn cached_image(&self, _image: &str, _sets: &[String], _languages: &[String]) -> Option<String> { + /// Check if a cached provisioned image exists for the given cache key. + /// Returns the image alias/path if found. + async fn cached_image(&self, _cache_key: &str) -> Option<String> { None } /// Cache the current VM as a provisioned image for reuse. /// Called after successful provisioning to speed up future creates. 
- async fn cache_image(&self, _name: &str, _image: &str, _sets: &[String], _languages: &[String]) -> Result<()> { + async fn cache_image(&self, _name: &str, _cache_key: &str) -> Result<()> { Ok(()) } diff --git a/src/sandbox/mod.rs b/src/sandbox/mod.rs index ae4b972..7cb8391 100644 --- a/src/sandbox/mod.rs +++ b/src/sandbox/mod.rs @@ -119,6 +119,14 @@ impl SandboxManager { .collect(); mounts.extend_from_slice(extra_mounts); + // Check for a cached provisioned image + let active_sets = config.active_sets(); + let active_langs = config.active_languages(); + let image = config.sandbox.image.as_str(); + let mount_mode = &config.sandbox.mount_mode; + let key = provision::cache_key(image, &active_sets, &active_langs, mount_mode); + let cached = runtime.cached_image(&key).await; + let opts = CreateOpts { name: name.to_string(), mounts, @@ -132,28 +140,46 @@ impl SandboxManager { bare, writable: config.sandbox.mount_mode == "writable", image: config.sandbox.image.clone(), + cached_image: cached.clone(), }; // Create via runtime let info = runtime.create(&opts).await?; - // Provision tools in the VM based on selected sets - let active_sets = config.active_sets(); - let active_langs = config.active_languages(); - let image = config.sandbox.image.as_str(); - // Provision tools — pass mount_mode so NixOS module sets up overlay - let mount_mode = &config.sandbox.mount_mode; - if let Err(e) = provision::provision_vm_with_mode( - runtime, - name, - &active_sets, - &active_langs, - image, - mount_mode, - ) - .await - { - eprintln!("Warning: provisioning incomplete: {e}"); + if cached.is_some() { + // Launched from cached image — skip full provisioning, + // only apply host-specific config (git, devbox binary, state file). 
+ println!("Using cached image — skipping provisioning."); + if let Err(e) = provision::post_cache_setup( + runtime, + name, + &active_sets, + &active_langs, + mount_mode, + ) + .await + { + eprintln!("Warning: post-cache setup incomplete: {e}"); + } + } else { + // No cache — full provisioning + if let Err(e) = provision::provision_vm_with_mode( + runtime, + name, + &active_sets, + &active_langs, + image, + mount_mode, + ) + .await + { + eprintln!("Warning: provisioning incomplete: {e}"); + } + + // Cache the provisioned image for future creates + if let Err(e) = runtime.cache_image(name, &key).await { + eprintln!("Warning: could not cache image: {e}"); + } } // Save state diff --git a/src/sandbox/provision.rs b/src/sandbox/provision.rs index 922cb8f..a77fb27 100644 --- a/src/sandbox/provision.rs +++ b/src/sandbox/provision.rs @@ -195,6 +195,40 @@ fn nix_packages_for_set(set: &str) -> Vec<&'static str> { } } +// ── Cache Key ─────────────────────────────────────────────── + +/// Hash of all embedded nix configuration files. +/// Changes when any nix set file or module is updated → automatic cache invalidation. +fn config_version() -> u64 { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + for (_, content) in NIX_SET_FILES { + content.hash(&mut hasher); + } + NIX_DEVBOX_MODULE.hash(&mut hasher); + hasher.finish() +} + +/// Compute a deterministic cache key for a given image + tool set combination. +/// Same inputs always produce the same key. Changes to nix set files +/// automatically invalidate the cache (via config_version). 
+pub fn cache_key(image: &str, sets: &[String], languages: &[String], mount_mode: &str) -> String { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + image.hash(&mut hasher); + mount_mode.hash(&mut hasher); + let mut sorted_sets: Vec<&String> = sets.iter().collect(); + sorted_sets.sort(); + sorted_sets.hash(&mut hasher); + let mut sorted_langs: Vec<&String> = languages.iter().collect(); + sorted_langs.sort(); + sorted_langs.hash(&mut hasher); + config_version().hash(&mut hasher); + format!("{:016x}", hasher.finish()) +} + // ── Public API ────────────────────────────────────────────── /// Provision a VM with tools based on active sets and languages. @@ -224,6 +258,39 @@ pub async fn provision_vm_with_mode( } } +/// Lightweight setup after launching from a cached image. +/// Applies host-specific config (git, devbox binary) without re-provisioning. +pub async fn post_cache_setup( + runtime: &dyn Runtime, + name: &str, + sets: &[String], + languages: &[String], + mount_mode: &str, +) -> Result<()> { + let username = whoami(); + + // Wait for VM to be reachable + wait_for_network(runtime, name).await?; + + // Detect VM user/home + let vm_user = detect_vm_username(runtime, name).await; + let vm_home = detect_vm_home(runtime, name, &vm_user).await; + + // Update state file with current sandbox metadata + let state_toml = generate_state_toml(sets, languages, &username, mount_mode); + write_file_to_vm(runtime, name, "/etc/devbox/devbox-state.toml", &state_toml).await?; + + // Copy current host git config (host-specific, may have changed) + setup_git_config(runtime, name, &vm_user, &vm_home).await?; + + // Update devbox binary + help files (may have been updated since cache was created) + copy_devbox_to_vm(runtime, name).await?; + setup_help_in_vm(runtime, name).await?; + setup_management_script(runtime, name).await?; + + Ok(()) +} + // ── NixOS Provisioning ───────────────────────────────────── /// 
Provision a NixOS VM: push nix config files + nixos-rebuild switch. @@ -1529,4 +1596,47 @@ mod tests { assert!(!pkgs.is_empty(), "set '{set}' should have packages"); } } + + #[test] + fn cache_key_deterministic() { + let sets = vec!["system".to_string(), "shell".to_string()]; + let langs = vec!["go".to_string()]; + let k1 = cache_key("nixos", &sets, &langs, "overlay"); + let k2 = cache_key("nixos", &sets, &langs, "overlay"); + assert_eq!(k1, k2); + assert_eq!(k1.len(), 16); // 16-char hex string + } + + #[test] + fn cache_key_order_independent() { + let sets_a = vec!["shell".to_string(), "system".to_string()]; + let sets_b = vec!["system".to_string(), "shell".to_string()]; + let langs = vec!["go".to_string()]; + let k1 = cache_key("nixos", &sets_a, &langs, "overlay"); + let k2 = cache_key("nixos", &sets_b, &langs, "overlay"); + assert_eq!(k1, k2, "cache key should be order-independent"); + } + + #[test] + fn cache_key_differs_on_inputs() { + let sets = vec!["system".to_string()]; + let langs = vec![]; + let k_nixos = cache_key("nixos", &sets, &langs, "overlay"); + let k_ubuntu = cache_key("ubuntu", &sets, &langs, "overlay"); + assert_ne!(k_nixos, k_ubuntu, "different image → different key"); + + let k_overlay = cache_key("nixos", &sets, &langs, "overlay"); + let k_writable = cache_key("nixos", &sets, &langs, "writable"); + assert_ne!(k_overlay, k_writable, "different mount_mode → different key"); + + let sets2 = vec!["system".to_string(), "ai-code".to_string()]; + let k_more = cache_key("nixos", &sets2, &langs, "overlay"); + assert_ne!(k_nixos, k_more, "different sets → different key"); + } + + #[test] + fn config_version_nonzero() { + let v = config_version(); + assert_ne!(v, 0, "config_version should be non-zero"); + } }