diff --git a/.github/scripts/install-zig.sh b/.github/scripts/install-zig.sh new file mode 100755 index 0000000..5a977d0 --- /dev/null +++ b/.github/scripts/install-zig.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [ "$#" -ne 1 ]; then + echo "usage: $0 <version>" >&2 + exit 1 +fi + +version="$1" + +python_bin="${PYTHON:-python3}" +if ! command -v "$python_bin" >/dev/null 2>&1; then + python_bin="python" +fi +if ! command -v "$python_bin" >/dev/null 2>&1; then + echo "python is required to install Zig" >&2 + exit 1 +fi + +runner_os="${RUNNER_OS:-$(uname -s)}" +runner_arch="${RUNNER_ARCH:-$(uname -m)}" + +case "$runner_os" in + Linux | linux) + zig_os="linux" + ;; + Darwin | macOS) + zig_os="macos" + ;; + Windows | MINGW* | MSYS* | CYGWIN*) + zig_os="windows" + ;; + *) + echo "unsupported runner OS: $runner_os" >&2 + exit 1 + ;; +esac + +case "$runner_arch" in + X64 | x86_64 | amd64) + zig_arch="x86_64" + ;; + ARM64 | arm64 | aarch64) + zig_arch="aarch64" + ;; + *) + echo "unsupported runner architecture: $runner_arch" >&2 + exit 1 + ;; +esac + +host_key="${zig_arch}-${zig_os}" +tool_root="${RUNNER_TEMP:-${TMPDIR:-/tmp}}/nullboiler-zig" +install_dir="${tool_root}/${version}/${host_key}" +zig_bin="zig" +if [ "$zig_os" = "windows" ]; then + zig_bin="zig.exe" +fi + +if [ ! 
-x "${install_dir}/${zig_bin}" ]; then + mkdir -p "$(dirname "$install_dir")" + + zig_metadata="$( + "$python_bin" - "$version" "$host_key" <<'PY' +import json +import sys +import urllib.request + +version = sys.argv[1] +host_key = sys.argv[2] + +with urllib.request.urlopen("https://ziglang.org/download/index.json") as response: + data = json.load(response) + +host = data.get(version, {}).get(host_key) +if not host: + raise SystemExit(f"missing Zig download metadata for version={version!r} host={host_key!r}") + +archive_url = host.get("tarball") or host.get("zip") +checksum = host.get("shasum") or "" +if not archive_url: + raise SystemExit(f"missing archive URL for version={version!r} host={host_key!r}") + +print(archive_url) +print(checksum) +PY + )" + + archive_url="$(printf '%s\n' "$zig_metadata" | sed -n '1p')" + expected_sha="$(printf '%s\n' "$zig_metadata" | sed -n '2p')" + if [ -z "$archive_url" ]; then + echo "failed to resolve Zig download URL" >&2 + exit 1 + fi + + archive_name="${archive_url##*/}" + archive_dir="$(mktemp -d "${RUNNER_TEMP:-${TMPDIR:-/tmp}}/zig-archive.XXXXXX")" + archive_path="${archive_dir}/${archive_name}" + extract_dir="$(mktemp -d "${RUNNER_TEMP:-${TMPDIR:-/tmp}}/zig-extract.XXXXXX")" + trap 'rm -rf "$archive_dir"; rm -rf "$extract_dir"' EXIT + + curl -fsSL --retry 3 --retry-all-errors "$archive_url" -o "$archive_path" + + "$python_bin" - "$archive_path" "$expected_sha" <<'PY' +import hashlib +import sys + +path = sys.argv[1] +expected = sys.argv[2].strip().lower() +if not expected: + raise SystemExit(0) + +digest = hashlib.sha256() +with open(path, "rb") as handle: + for chunk in iter(lambda: handle.read(1024 * 1024), b""): + digest.update(chunk) + +actual = digest.hexdigest().lower() +if actual != expected: + raise SystemExit(f"checksum mismatch for {path}: expected {expected}, got {actual}") +PY + + "$python_bin" - "$archive_path" "$extract_dir" <<'PY' +import pathlib +import sys +import tarfile +import zipfile + +archive = 
pathlib.Path(sys.argv[1]) +destination = pathlib.Path(sys.argv[2]) +destination.mkdir(parents=True, exist_ok=True) + +def ensure_within_destination(relative_name: str) -> None: + target = (destination / relative_name).resolve() + if destination.resolve() not in target.parents and target != destination.resolve(): + raise SystemExit(f"archive entry escapes destination: {relative_name}") + +if archive.suffix == ".zip": + with zipfile.ZipFile(archive) as handle: + for member in handle.namelist(): + ensure_within_destination(member) + handle.extractall(destination) +else: + with tarfile.open(archive, "r:*") as handle: + for member in handle.getnames(): + ensure_within_destination(member) + handle.extractall(destination) +PY + + extracted_dir="$(find "$extract_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)" + if [ -z "$extracted_dir" ]; then + echo "failed to extract Zig archive: $archive_url" >&2 + exit 1 + fi + + rm -rf "$install_dir" + mv "$extracted_dir" "$install_dir" +fi + +if [ -n "${GITHUB_PATH:-}" ]; then + printf '%s\n' "$install_dir" >> "$GITHUB_PATH" +else + echo "GITHUB_PATH is not set; add this directory to PATH manually: $install_dir" >&2 +fi + +"${install_dir}/${zig_bin}" version diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2a6e45..45a7291 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,9 @@ name: CI +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + ZIG_VERSION: "0.16.0" + on: push: branches: [main] @@ -28,18 +32,18 @@ jobs: zig_target: x86_64-windows steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - name: Install Zig 0.15.2 - uses: mlugg/setup-zig@v2 - with: - version: 0.15.2 + - name: Install Zig 0.16.0 + run: bash .github/scripts/install-zig.sh "${ZIG_VERSION}" - - name: Cache .zig-cache - uses: actions/cache@v4 + - name: Cache Zig build outputs + uses: actions/cache@v5 with: - path: .zig-cache - key: zig-${{ matrix.target }}-${{ hashFiles('src/**/*.zig', 'build.zig') 
}} + path: | + .zig-cache + ~/.cache/zig + key: zig-${{ matrix.target }}-${{ hashFiles('src/**/*.zig', 'build.zig', 'build.zig.zon', 'deps/**') }} restore-keys: zig-${{ matrix.target }}- - name: Run tests @@ -79,7 +83,7 @@ jobs: - name: Upload binary if: success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: nullboiler-${{ matrix.target }} path: zig-out/bin/nullboiler${{ runner.os == 'Windows' && '.exe' || '' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 020be8d..7a0604c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,9 @@ name: Release +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + ZIG_VERSION: "0.16.0" + on: push: tags: ['v*'] @@ -44,18 +48,16 @@ jobs: ext: ".exe" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - name: Install Zig 0.15.2 - uses: mlugg/setup-zig@v2 - with: - version: 0.15.2 + - name: Install Zig 0.16.0 + run: bash .github/scripts/install-zig.sh "${ZIG_VERSION}" - name: Build ReleaseSmall run: zig build -Doptimize=ReleaseSmall ${{ matrix.zig_target && format('-Dtarget={0}', matrix.zig_target) || '' }} - name: Upload artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: nullboiler-${{ matrix.target }} path: zig-out/bin/nullboiler${{ matrix.ext }} @@ -67,7 +69,7 @@ jobs: contents: write steps: - - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v8 - name: Rename binaries run: | @@ -100,16 +102,16 @@ jobs: packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Log in to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -117,7 +119,7 @@ 
jobs: - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: ghcr.io/${{ github.repository }} tags: | @@ -125,7 +127,7 @@ jobs: type=raw,value=latest - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: context: . platforms: linux/amd64,linux/arm64 diff --git a/CLAUDE.md b/CLAUDE.md index 8989137..ab961ee 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ Graph-based workflow orchestrator with unified state model for NullClaw AI bot a ## Tech Stack -- **Language**: Zig 0.15.2 +- **Language**: Zig 0.16.0 - **Database**: SQLite (vendored in `deps/sqlite/`), WAL mode - **Protocol**: HTTP/1.1 REST API with JSON payloads - **Dispatch**: HTTP (webhook/api_chat/openai_chat/a2a), MQTT, Redis Streams diff --git a/Dockerfile b/Dockerfile index 2d3a0cd..8d6d0b6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,23 @@ # Build on the native builder architecture and cross-compile with Zig. 
FROM --platform=$BUILDPLATFORM alpine:3.23 AS builder -RUN apk add --no-cache zig musl-dev +ARG ZIG_VERSION=0.16.0 + +RUN apk add --no-cache bash curl musl-dev python3 tar xz + +COPY .github/scripts/install-zig.sh /tmp/install-zig.sh +RUN set -eu; \ + export GITHUB_PATH=/tmp/zig-path; \ + export RUNNER_OS=Linux; \ + case "$(uname -m)" in \ + x86_64) export RUNNER_ARCH=X64 ;; \ + aarch64|arm64) export RUNNER_ARCH=ARM64 ;; \ + *) echo "Unsupported host arch: $(uname -m)" >&2; exit 1 ;; \ + esac; \ + bash /tmp/install-zig.sh "${ZIG_VERSION}"; \ + zig_dir="$(cat /tmp/zig-path)"; \ + ln -sf "${zig_dir}/zig" /usr/local/bin/zig; \ + zig version WORKDIR /app COPY build.zig build.zig.zon ./ @@ -12,7 +28,9 @@ COPY src/ src/ COPY deps/ deps/ ARG TARGETARCH -RUN set -eu; \ +RUN --mount=type=cache,target=/root/.cache/zig \ + --mount=type=cache,target=/app/.zig-cache \ + set -eu; \ arch="${TARGETARCH:-}"; \ if [ -z "${arch}" ]; then \ case "$(uname -m)" in \ diff --git a/build.zig b/build.zig index 1b331b9..b02cbbb 100644 --- a/build.zig +++ b/build.zig @@ -30,9 +30,9 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }), }); - exe.linkLibrary(sqlite3_lib); - exe.linkLibrary(hiredis_lib); - exe.linkLibrary(mosquitto_lib); + exe.root_module.linkLibrary(sqlite3_lib); + exe.root_module.linkLibrary(hiredis_lib); + exe.root_module.linkLibrary(mosquitto_lib); b.installArtifact(exe); const run_cmd = b.addRunArtifact(exe); @@ -50,9 +50,9 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }), }); - exe_unit_tests.linkLibrary(sqlite3_lib); - exe_unit_tests.linkLibrary(hiredis_lib); - exe_unit_tests.linkLibrary(mosquitto_lib); + exe_unit_tests.root_module.linkLibrary(sqlite3_lib); + exe_unit_tests.root_module.linkLibrary(hiredis_lib); + exe_unit_tests.root_module.linkLibrary(mosquitto_lib); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_exe_unit_tests.step); diff --git 
a/build.zig.zon b/build.zig.zon index c1c8b0f..13e89ce 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -2,7 +2,7 @@ .name = .nullboiler, .fingerprint = 0xa2fe02b8f872821f, .version = "2026.3.2", - .minimum_zig_version = "0.15.2", + .minimum_zig_version = "0.16.0", .dependencies = .{ .sqlite3 = .{ .path = "deps/sqlite", diff --git a/deps/hiredis/build.zig.zon b/deps/hiredis/build.zig.zon index 7fb75d9..4dcb38d 100644 --- a/deps/hiredis/build.zig.zon +++ b/deps/hiredis/build.zig.zon @@ -1,7 +1,8 @@ .{ .name = .hiredis, + .fingerprint = 0xb7c9a464c4191c3c, .version = "1.2.0", - .minimum_zig_version = "0.15.2", + .minimum_zig_version = "0.16.0", .paths = .{ "build.zig", "build.zig.zon", diff --git a/deps/mosquitto/build.zig.zon b/deps/mosquitto/build.zig.zon index 0287d69..ef77060 100644 --- a/deps/mosquitto/build.zig.zon +++ b/deps/mosquitto/build.zig.zon @@ -1,7 +1,8 @@ .{ .name = .mosquitto, + .fingerprint = 0xedaa031912870053, .version = "2.0.18", - .minimum_zig_version = "0.15.2", + .minimum_zig_version = "0.16.0", .paths = .{ "build.zig", "build.zig.zon", diff --git a/deps/sqlite/build.zig.zon b/deps/sqlite/build.zig.zon index b6914df..237ec0b 100644 --- a/deps/sqlite/build.zig.zon +++ b/deps/sqlite/build.zig.zon @@ -1,7 +1,8 @@ .{ .name = .sqlite3, + .fingerprint = 0x6edcd711240901f7, .version = "3.51.2", - .minimum_zig_version = "0.15.2", + .minimum_zig_version = "0.16.0", .paths = .{ "build.zig", "build.zig.zon", diff --git a/src/api.zig b/src/api.zig index 0ce34a9..221e023 100644 --- a/src/api.zig +++ b/src/api.zig @@ -432,7 +432,7 @@ fn handleRegisterWorker(ctx: *Context, body: []const u8) HttpResponse { return jsonResponse(400, "{\"error\":{\"code\":\"bad_request\",\"message\":\"tags must be an array of strings\"}}"); } } - var out: std.io.Writer.Allocating = .init(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); var jw: std.json.Stringify = .{ .writer = &out.writer }; jw.write(tags_val) catch return jsonResponse(500, 
"{\"error\":{\"code\":\"internal\",\"message\":\"failed to serialize tags\"}}"); break :blk out.toOwnedSlice() catch return jsonResponse(500, "{\"error\":{\"code\":\"internal\",\"message\":\"out of memory\"}}"); @@ -1943,7 +1943,7 @@ fn getJsonString(obj: std.json.ObjectMap, key: []const u8) ?[]const u8 { fn serializeJsonValue(allocator: std.mem.Allocator, val: ?std.json.Value) ![]const u8 { const v = val orelse return "{}"; if (v == .null) return "{}"; - var out: std.io.Writer.Allocating = .init(allocator); + var out: std.Io.Writer.Allocating = .init(allocator); var jw: std.json.Stringify = .{ .writer = &out.writer }; try jw.write(v); return try out.toOwnedSlice(); diff --git a/src/async_dispatch.zig b/src/async_dispatch.zig index 3be9359..49749b7 100644 --- a/src/async_dispatch.zig +++ b/src/async_dispatch.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.async_dispatch); // ── Types ───────────────────────────────────────────────────────────── @@ -17,7 +18,7 @@ pub const AsyncResponse = struct { pub const ResponseQueue = struct { allocator: std.mem.Allocator, map: std.StringArrayHashMapUnmanaged(AsyncResponse), - mutex: std.Thread.Mutex, + mutex: std_compat.sync.Mutex, pub fn init(allocator: std.mem.Allocator) ResponseQueue { return .{ diff --git a/src/callbacks.zig b/src/callbacks.zig index 30b814a..648173c 100644 --- a/src/callbacks.zig +++ b/src/callbacks.zig @@ -5,6 +5,7 @@ /// /// Callbacks are fire-and-forget: errors are logged but never propagated. const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.callbacks); const ids = @import("ids.zig"); const metrics_mod = @import("metrics.zig"); @@ -137,7 +138,7 @@ fn appendCustomHeaders(allocator: std.mem.Allocator, cb_obj: std.json.ObjectMap, /// Internal: POST payload to a URL with custom headers. Fire-and-forget. 
fn postCallback(allocator: std.mem.Allocator, url: []const u8, payload: []const u8, custom_headers: []const std.http.Header) bool { - var client: std.http.Client = .{ .allocator = allocator }; + var client: std.http.Client = .{ .allocator = allocator, .io = std_compat.io() }; defer client.deinit(); _ = client.fetch(.{ diff --git a/src/compat.zig b/src/compat.zig new file mode 100644 index 0000000..e9ac140 --- /dev/null +++ b/src/compat.zig @@ -0,0 +1,216 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const shared = @import("compat/shared.zig"); +const Allocator = std.mem.Allocator; + +pub fn io() std.Io { + return shared.io(); +} + +pub fn initProcess(init: std.process.Init) void { + shared.initProcess(init); +} + +pub fn initProcessMinimal(init: std.process.Init.Minimal) void { + shared.initProcessMinimal(init); +} + +pub const fs = @import("compat/fs.zig"); + +pub const process = struct { + pub const EnvMap = std.process.Environ.Map; + pub const GetEnvVarOwnedError = error{ + EnvironmentVariableNotFound, + } || Allocator.Error || error{ InvalidWtf8, Unexpected }; + + pub fn getEnvVarOwned(allocator: Allocator, name: []const u8) GetEnvVarOwnedError![]u8 { + return shared.environ().getAlloc(allocator, name) catch |err| switch (err) { + error.EnvironmentVariableMissing => error.EnvironmentVariableNotFound, + else => |e| e, + }; + } + + pub fn argsAlloc(allocator: Allocator) ![]const [:0]const u8 { + return shared.argsAlloc(allocator); + } + + pub fn argsFree(allocator: Allocator, args: []const [:0]const u8) void { + shared.argsFree(allocator, args); + } + + pub const Child = struct { + allocator: Allocator, + argv: []const []const u8, + env_map: ?*const EnvMap = null, + cwd: ?[]const u8 = null, + stdin_behavior: StdIo = .Inherit, + stdout_behavior: StdIo = .Inherit, + stderr_behavior: StdIo = .Inherit, + request_resource_usage_statistics: bool = false, + pgid: ?std.posix.pid_t = null, + create_no_window: bool = true, + id: Id = undefined, + 
thread_handle: if (builtin.os.tag == .windows) std.os.windows.HANDLE else void = if (builtin.os.tag == .windows) undefined else {}, + stdin: ?fs.File = null, + stdout: ?fs.File = null, + stderr: ?fs.File = null, + term: ?Term = null, + + pub const Id = std.process.Child.Id; + pub const Term = std.process.Child.Term; + pub const StdIo = enum { Inherit, Ignore, Pipe, Close }; + + pub fn init(argv: []const []const u8, allocator: Allocator) Child { + return .{ + .allocator = allocator, + .argv = argv, + }; + } + + fn mapStdIo(kind: StdIo) std.process.SpawnOptions.StdIo { + return switch (kind) { + .Inherit => .inherit, + .Ignore => .ignore, + .Pipe => .pipe, + .Close => .close, + }; + } + + fn spawnCwd(self: *const Child) std.process.Child.Cwd { + return if (self.cwd) |path_value| .{ .path = path_value } else .inherit; + } + + fn toInner(self: *const Child) std.process.Child { + return .{ + .id = self.id, + .thread_handle = self.thread_handle, + .stdin = if (self.stdin) |file| file.toInner() else null, + .stdout = if (self.stdout) |file| file.toInner() else null, + .stderr = if (self.stderr) |file| file.toInner() else null, + .resource_usage_statistics = .{}, + .request_resource_usage_statistics = self.request_resource_usage_statistics, + }; + } + + fn syncFromInner(self: *Child, inner: std.process.Child) void { + self.stdin = if (inner.stdin) |file| fs.File.wrap(file) else null; + self.stdout = if (inner.stdout) |file| fs.File.wrap(file) else null; + self.stderr = if (inner.stderr) |file| fs.File.wrap(file) else null; + } + + pub fn spawn(self: *Child) !void { + const inner = try std.process.spawn(io(), .{ + .argv = self.argv, + .cwd = self.spawnCwd(), + .environ_map = self.env_map, + .stdin = mapStdIo(self.stdin_behavior), + .stdout = mapStdIo(self.stdout_behavior), + .stderr = mapStdIo(self.stderr_behavior), + .request_resource_usage_statistics = self.request_resource_usage_statistics, + .pgid = self.pgid, + .create_no_window = self.create_no_window, + }); + self.id 
= inner.id.?; + self.thread_handle = inner.thread_handle; + self.syncFromInner(inner); + self.term = null; + } + + pub fn wait(self: *Child) !Term { + if (self.term) |term| return term; + + var inner = self.toInner(); + const term = try inner.wait(io()); + self.syncFromInner(inner); + self.term = term; + return term; + } + + pub fn kill(self: *Child) !Term { + if (self.term) |term| return term; + + var inner = self.toInner(); + inner.kill(io()); + self.syncFromInner(inner); + + const term: Term = if (builtin.os.tag == .windows) + .{ .exited = 1 } + else + .{ .signal = std.posix.SIG.KILL }; + + self.term = term; + return term; + } + }; +}; + +pub const mem = struct { + pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { + return std.mem.trimStart(T, slice, values_to_strip); + } + + pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { + return std.mem.trimEnd(T, slice, values_to_strip); + } +}; + +pub const thread = struct { + pub fn sleep(nanoseconds: u64) void { + std.Io.sleep(io(), .fromNanoseconds(@intCast(nanoseconds)), .awake) catch {}; + } +}; + +pub const crypto = struct { + pub const random = struct { + pub fn bytes(buffer: []u8) void { + std.Io.randomSecure(io(), buffer) catch std.Io.random(io(), buffer); + } + }; +}; + +pub const sync = struct { + pub const Mutex = struct { + inner: std.Io.Mutex = .init, + + pub fn tryLock(self: *Mutex) bool { + return self.inner.tryLock(); + } + + pub fn lock(self: *Mutex) void { + self.inner.lockUncancelable(io()); + } + + pub fn unlock(self: *Mutex) void { + self.inner.unlock(io()); + } + }; +}; + +pub const time = struct { + fn nowNanoseconds() i128 { + return switch (builtin.os.tag) { + .windows => blk: { + const epoch_ns = std.time.epoch.windows * std.time.ns_per_s; + break :blk @as(i128, std.os.windows.ntdll.RtlGetSystemTimePrecise()) * 100 + epoch_ns; + }, + .wasi => blk: { + var ts: std.os.wasi.timestamp_t = undefined; + if 
(std.os.wasi.clock_time_get(.REALTIME, 1, &ts) == .SUCCESS) { + break :blk @intCast(ts); + } + break :blk 0; + }, + else => blk: { + var ts: std.posix.timespec = undefined; + switch (std.posix.errno(std.posix.system.clock_gettime(.REALTIME, &ts))) { + .SUCCESS => break :blk @as(i128, ts.sec) * std.time.ns_per_s + ts.nsec, + else => break :blk 0, + } + }, + }; + } + + pub fn milliTimestamp() i64 { + return @intCast(@divTrunc(nowNanoseconds(), std.time.ns_per_ms)); + } +}; diff --git a/src/compat/fs.zig b/src/compat/fs.zig new file mode 100644 index 0000000..2be980d --- /dev/null +++ b/src/compat/fs.zig @@ -0,0 +1,307 @@ +const std = @import("std"); +const shared = @import("shared.zig"); + +const Io = std.Io; +const Allocator = std.mem.Allocator; + +pub const path = struct { + pub const basename = std.fs.path.basename; + pub const delimiter = std.fs.path.delimiter; + pub const dirname = std.fs.path.dirname; + pub const extension = std.fs.path.extension; + pub const isAbsolute = std.fs.path.isAbsolute; + pub const isSep = std.fs.path.isSep; + pub const join = std.fs.path.join; + pub const sep = std.fs.path.sep; + pub const sep_str = std.fs.path.sep_str; +}; + +pub const File = struct { + handle: Io.File.Handle, + flags: Io.File.Flags, + + pub const Reader = Io.File.Reader; + pub const Writer = Io.File.Writer; + pub const Mode = if (@import("builtin").os.tag == .windows) u32 else std.posix.mode_t; + pub const Stat = struct { + inode: Io.File.INode, + nlink: Io.File.NLink, + size: u64, + mode: Mode, + kind: Io.File.Kind, + atime: ?i128, + mtime: i128, + ctime: i128, + block_size: Io.File.BlockSize, + }; + + pub fn wrap(inner: Io.File) File { + return .{ + .handle = inner.handle, + .flags = inner.flags, + }; + } + + pub fn toInner(self: File) Io.File { + return .{ + .handle = self.handle, + .flags = self.flags, + }; + } + + fn convertStat(inner: Io.File.Stat) Stat { + return .{ + .inode = inner.inode, + .nlink = inner.nlink, + .size = inner.size, + .mode = if 
(@hasDecl(@TypeOf(inner.permissions), "toMode")) inner.permissions.toMode() else 0, + .kind = inner.kind, + .atime = if (inner.atime) |ts| ts.nanoseconds else null, + .mtime = inner.mtime.nanoseconds, + .ctime = inner.ctime.nanoseconds, + .block_size = inner.block_size, + }; + } + + pub fn stdout() File { + return wrap(Io.File.stdout()); + } + + pub fn stderr() File { + return wrap(Io.File.stderr()); + } + + pub fn stdin() File { + return wrap(Io.File.stdin()); + } + + pub fn close(self: File) void { + self.toInner().close(shared.io()); + } + + pub fn stat(self: File) Io.File.StatError!Stat { + return convertStat(try self.toInner().stat(shared.io())); + } + + pub fn seekTo(self: File, offset: u64) Io.File.SeekError!void { + try shared.io().vtable.fileSeekTo(shared.io().userdata, self.toInner(), offset); + } + + pub fn seekFromEnd(self: File, offset: i64) !void { + const file_stat = try self.stat(); + const end_offset = @as(i128, @intCast(file_stat.size)) + offset; + if (end_offset < 0) return error.Unseekable; + try self.seekTo(@intCast(end_offset)); + } + + pub fn writer(self: File, buffer: []u8) Writer { + return self.toInner().writer(shared.io(), buffer); + } + + pub fn reader(self: File, buffer: []u8) Reader { + return self.toInner().reader(shared.io(), buffer); + } + + pub fn read(self: File, buffer: []u8) Io.File.ReadStreamingError!usize { + return self.toInner().readStreaming(shared.io(), &.{buffer}) catch |err| switch (err) { + error.EndOfStream => 0, + else => |e| return e, + }; + } + + pub fn readAll(self: File, buffer: []u8) Io.File.ReadStreamingError!usize { + var filled: usize = 0; + while (filled < buffer.len) { + const amt = try self.read(buffer[filled..]); + if (amt == 0) break; + filled += amt; + } + return filled; + } + + pub fn writeAll(self: File, bytes: []const u8) Io.File.Writer.Error!void { + try self.toInner().writeStreamingAll(shared.io(), bytes); + } + + pub fn readToEndAlloc(self: File, allocator: Allocator, max_bytes: usize) ![]u8 { + 
var stream_buf: [4096]u8 = undefined; + var file_reader = self.toInner().readerStreaming(shared.io(), &stream_buf); + return try file_reader.interface.allocRemaining(allocator, .limited(max_bytes)); + } +}; + +pub const Dir = struct { + handle: Io.Dir.Handle, + + pub const OpenDirOptions = Io.Dir.OpenOptions; + pub const OpenFileOptions = Io.Dir.OpenFileOptions; + pub const CreateFileOptions = Io.Dir.CreateFileOptions; + pub const WriteFileOptions = Io.Dir.WriteFileOptions; + pub const AccessOptions = Io.Dir.AccessOptions; + pub const CopyFileOptions = Io.Dir.CopyFileOptions; + pub const SymLinkFlags = Io.Dir.SymLinkFlags; + pub const Entry = Io.Dir.Entry; + pub const Iterator = struct { + inner: Io.Dir.Iterator, + + pub fn next(self: *Iterator) Io.Dir.Iterator.Error!?Entry { + return self.inner.next(shared.io()); + } + }; + + pub fn wrap(inner: Io.Dir) Dir { + return .{ .handle = inner.handle }; + } + + fn toInner(self: Dir) Io.Dir { + return .{ .handle = self.handle }; + } + + pub fn cwd() Dir { + return wrap(Io.Dir.cwd()); + } + + pub fn close(self: Dir) void { + self.toInner().close(shared.io()); + } + + pub fn iterate(self: Dir) Iterator { + return .{ .inner = self.toInner().iterate() }; + } + + pub fn openDir(self: Dir, sub_path: []const u8, options: OpenDirOptions) Io.Dir.OpenError!Dir { + return wrap(try self.toInner().openDir(shared.io(), sub_path, options)); + } + + pub fn openFile(self: Dir, sub_path: []const u8, options: OpenFileOptions) Io.File.OpenError!File { + return File.wrap(try self.toInner().openFile(shared.io(), sub_path, options)); + } + + pub fn createFile(self: Dir, sub_path: []const u8, options: CreateFileOptions) Io.File.OpenError!File { + return File.wrap(try self.toInner().createFile(shared.io(), sub_path, options)); + } + + pub fn writeFile(self: Dir, options: WriteFileOptions) Io.Dir.WriteFileError!void { + try self.toInner().writeFile(shared.io(), options); + } + + pub fn readFileAlloc(self: Dir, allocator: Allocator, sub_path: 
[]const u8, max_bytes: usize) ![]u8 { + return try self.toInner().readFileAlloc(shared.io(), sub_path, allocator, .limited(max_bytes)); + } + + pub fn access(self: Dir, sub_path: []const u8, options: AccessOptions) Io.Dir.AccessError!void { + try self.toInner().access(shared.io(), sub_path, options); + } + + pub fn makeDir(self: Dir, sub_path: []const u8) Io.Dir.CreateDirError!void { + try self.toInner().createDir(shared.io(), sub_path, .default_dir); + } + + pub fn deleteFile(self: Dir, sub_path: []const u8) Io.Dir.DeleteFileError!void { + try self.toInner().deleteFile(shared.io(), sub_path); + } + + pub fn deleteTree(self: Dir, sub_path: []const u8) Io.Dir.DeleteTreeError!void { + try self.toInner().deleteTree(shared.io(), sub_path); + } + + pub fn rename(self: Dir, old_sub_path: []const u8, new_sub_path: []const u8) Io.Dir.RenameError!void { + try self.toInner().rename(old_sub_path, self.toInner(), new_sub_path, shared.io()); + } + + pub fn realpathAlloc(self: Dir, allocator: Allocator, sub_path: []const u8) Io.Dir.RealPathFileAllocError![]u8 { + const path_z = try self.toInner().realPathFileAlloc(shared.io(), sub_path, allocator); + defer allocator.free(path_z); + return try allocator.dupe(u8, path_z); + } + + pub fn statFile(self: Dir, sub_path: []const u8) !File.Stat { + return File.convertStat(try self.toInner().statFile(shared.io(), sub_path, .{})); + } + + pub fn makePath(self: Dir, sub_path: []const u8) !void { + if (sub_path.len == 0) return; + if (path.isAbsolute(sub_path)) { + makeDirAbsolute(sub_path) catch |err| switch (err) { + error.PathAlreadyExists => return, + else => |e| return e, + }; + return; + } + + var cursor = self; + var opened: ?Dir = null; + defer if (opened) |dir| dir.close(); + + var index: usize = 0; + while (index < sub_path.len) { + while (index < sub_path.len and path.isSep(sub_path[index])) : (index += 1) {} + if (index >= sub_path.len) break; + + const start = index; + while (index < sub_path.len and 
!path.isSep(sub_path[index])) : (index += 1) {} + const component = sub_path[start..index]; + if (component.len == 0 or std.mem.eql(u8, component, ".")) continue; + if (std.mem.eql(u8, component, "..")) return error.BadPathName; + + cursor.makeDir(component) catch |err| switch (err) { + error.PathAlreadyExists => {}, + else => |e| return e, + }; + + const next = try cursor.openDir(component, .{}); + if (opened) |dir| dir.close(); + opened = next; + cursor = next; + } + } +}; + +pub fn cwd() Dir { + return Dir.cwd(); +} + +pub fn openDirAbsolute(absolute_path: []const u8, options: Dir.OpenDirOptions) Io.Dir.OpenError!Dir { + return Dir.wrap(try Io.Dir.openDirAbsolute(shared.io(), absolute_path, options)); +} + +pub fn openFileAbsolute(absolute_path: []const u8, options: Dir.OpenFileOptions) Io.File.OpenError!File { + return File.wrap(try Io.Dir.openFileAbsolute(shared.io(), absolute_path, options)); +} + +pub fn createFileAbsolute(absolute_path: []const u8, options: Dir.CreateFileOptions) Io.File.OpenError!File { + return File.wrap(try Io.Dir.createFileAbsolute(shared.io(), absolute_path, options)); +} + +pub fn accessAbsolute(absolute_path: []const u8, options: Dir.AccessOptions) Io.Dir.AccessError!void { + try Io.Dir.accessAbsolute(shared.io(), absolute_path, options); +} + +pub fn makeDirAbsolute(absolute_path: []const u8) Io.Dir.CreateDirError!void { + try Io.Dir.createDirAbsolute(shared.io(), absolute_path, .default_dir); +} + +pub fn deleteFileAbsolute(absolute_path: []const u8) Io.Dir.DeleteFileError!void { + try Io.Dir.deleteFileAbsolute(shared.io(), absolute_path); +} + +pub fn deleteTreeAbsolute(absolute_path: []const u8) (Io.Dir.DeleteTreeError || error{FileNotFound})!void { + const dir_path = path.dirname(absolute_path) orelse return error.FileNotFound; + const base_name = path.basename(absolute_path); + var dir = try openDirAbsolute(dir_path, .{}); + defer dir.close(); + try dir.deleteTree(base_name); +} + +pub fn renameAbsolute(old_path: []const u8, 
new_path: []const u8) Io.Dir.RenameError!void { + try Io.Dir.renameAbsolute(old_path, new_path, shared.io()); +} + +pub fn realpathAlloc(allocator: Allocator, file_path: []const u8) ![]u8 { + if (path.isAbsolute(file_path)) { + const path_z = try Io.Dir.realPathFileAbsoluteAlloc(shared.io(), file_path, allocator); + defer allocator.free(path_z); + return try allocator.dupe(u8, path_z); + } + return try cwd().realpathAlloc(allocator, file_path); +} diff --git a/src/compat/shared.zig b/src/compat/shared.zig new file mode 100644 index 0000000..8c7082e --- /dev/null +++ b/src/compat/shared.zig @@ -0,0 +1,69 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; + +pub const Io = std.Io; + +var fallback_threaded: Io.Threaded = .init_single_threaded; +var process_io: ?Io = null; +var process_args: ?std.process.Args = null; +var process_environ: ?std.process.Environ = null; + +pub fn initProcess(init: std.process.Init) void { + process_io = init.io; + process_args = init.minimal.args; + process_environ = init.minimal.environ; +} + +pub fn initProcessMinimal(init: std.process.Init.Minimal) void { + process_args = init.args; + process_environ = init.environ; +} + +pub fn io() Io { + if (builtin.is_test) return std.testing.io; + if (process_io) |current| return current; + return fallback_threaded.io(); +} + +pub fn environ() std.process.Environ { + if (process_environ) |env| return env; + return switch (builtin.os.tag) { + .windows, .freestanding, .other => .{ .block = .global }, + .wasi, .emscripten => if (builtin.link_libc) blk: { + const c_environ = std.c.environ; + var env_count: usize = 0; + while (c_environ[env_count] != null) : (env_count += 1) {} + break :blk .{ .block = .{ .slice = c_environ[0..env_count :null] } }; + } else .{ .block = .global }, + else => blk: { + const c_environ = std.c.environ; + var env_count: usize = 0; + while (c_environ[env_count] != null) : (env_count += 1) {} + break :blk .{ .block = .{ 
.slice = c_environ[0..env_count :null] } }; + }, + }; +} + +pub fn argsAlloc(allocator: Allocator) ![]const [:0]const u8 { + const args = process_args orelse return error.MissingProcessContext; + var iter = try args.iterateAllocator(allocator); + defer iter.deinit(); + + var list: std.ArrayList([:0]const u8) = .empty; + errdefer { + for (list.items) |arg| allocator.free(arg); + list.deinit(allocator); + } + + while (iter.next()) |arg| { + try list.append(allocator, try allocator.dupeZ(u8, arg)); + } + + return try list.toOwnedSlice(allocator); +} + +pub fn argsFree(allocator: Allocator, args: []const [:0]const u8) void { + for (args) |arg| allocator.free(arg); + allocator.free(args); +} diff --git a/src/config.zig b/src/config.zig index 0d3406e..26826dd 100644 --- a/src/config.zig +++ b/src/config.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const std_compat = @import("compat.zig"); pub const home_env_var = "NULLBOILER_HOME"; pub const home_dir_name = ".nullboiler"; @@ -93,7 +94,7 @@ pub fn resolveConfigPath(allocator: std.mem.Allocator, override_path: ?[]const u } pub fn resolveHomeDir(allocator: std.mem.Allocator) ![]const u8 { - if (std.process.getEnvVarOwned(allocator, home_env_var)) |env_home| { + if (std_compat.process.getEnvVarOwned(allocator, home_env_var)) |env_home| { return env_home; } else |err| switch (err) { error.EnvironmentVariableNotFound => {}, @@ -108,7 +109,7 @@ pub fn resolveHomeDir(allocator: std.mem.Allocator) ![]const u8 { /// Load configuration from a JSON file. If the file does not exist, /// return a default Config. 
pub fn loadFromFile(allocator: std.mem.Allocator, path: []const u8) !Config { - const file = std.fs.cwd().openFile(path, .{}) catch |err| { + const file = std_compat.fs.cwd().openFile(path, .{}) catch |err| { if (err == error.FileNotFound) { return Config{}; } @@ -144,10 +145,10 @@ fn resolveRelativePath(allocator: std.mem.Allocator, config_path: []const u8, va } fn getHomeDirOwned(allocator: std.mem.Allocator) ![]u8 { - return std.process.getEnvVarOwned(allocator, "HOME") catch |err| switch (err) { + return std_compat.process.getEnvVarOwned(allocator, "HOME") catch |err| switch (err) { error.EnvironmentVariableNotFound => { if (builtin.os.tag == .windows) { - return std.process.getEnvVarOwned(allocator, "USERPROFILE") catch error.HomeNotSet; + return std_compat.process.getEnvVarOwned(allocator, "USERPROFILE") catch error.HomeNotSet; } return error.HomeNotSet; }, @@ -205,12 +206,12 @@ test "loadFromFile reads configured host and worker URL from JSON file" { \\} ; - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "config.json", .data = cfg_json, }); - const cfg_path = try tmp.dir.realpathAlloc(std.testing.allocator, "config.json"); + const cfg_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "config.json"); defer std.testing.allocator.free(cfg_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -258,7 +259,7 @@ test "resolveRelativePaths anchors tracker paths to config directory" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.makePath("configs"); + try std_compat.fs.Dir.wrap(tmp.dir).makePath("configs"); const cfg_json = \\{ \\ "db": "data/nullboiler.db", @@ -276,12 +277,12 @@ test "resolveRelativePaths anchors tracker paths to config directory" { \\} ; - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "configs/config.json", .data = cfg_json, }); - const cfg_path = try 
tmp.dir.realpathAlloc(std.testing.allocator, "configs/config.json"); + const cfg_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "configs/config.json"); defer std.testing.allocator.free(cfg_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); diff --git a/src/dispatch.zig b/src/dispatch.zig index 65fdadd..107b978 100644 --- a/src/dispatch.zig +++ b/src/dispatch.zig @@ -2,6 +2,7 @@ /// Provides worker selection (filtering by status, capacity, and tags) and /// request/response adapters for webhook/api_chat/openai_chat protocols. const std = @import("std"); +const std_compat = @import("compat.zig"); const ids = @import("ids.zig"); const worker_protocol = @import("worker_protocol.zig"); const worker_response = @import("worker_response.zig"); @@ -191,10 +192,10 @@ pub fn dispatchStepWithOpts( } defer if (auth_header) |ah| allocator.free(ah); - var client: std.http.Client = .{ .allocator = allocator }; + var client: std.http.Client = .{ .allocator = allocator, .io = std_compat.io() }; defer client.deinit(); - var response_body: std.io.Writer.Allocating = .init(allocator); + var response_body: std.Io.Writer.Allocating = .init(allocator); defer response_body.deinit(); var headers_buf: [1]std.http.Header = undefined; @@ -253,10 +254,10 @@ pub fn probeWorker( const url = worker_protocol.buildRequestUrl(allocator, worker_url, protocol) catch return false; defer allocator.free(url); - var client: std.http.Client = .{ .allocator = allocator }; + var client: std.http.Client = .{ .allocator = allocator, .io = std_compat.io() }; defer client.deinit(); - var response_body: std.io.Writer.Allocating = .init(allocator); + var response_body: std.Io.Writer.Allocating = .init(allocator); defer response_body.deinit(); const result = client.fetch(.{ diff --git a/src/engine.zig b/src/engine.zig index e2727c4..d467bb1 100644 --- a/src/engine.zig +++ b/src/engine.zig @@ -23,6 +23,7 @@ /// - Configurable runs: config stored as state.__config, 
accessible via templates /// - Reconciliation: check nulltickets task status between steps const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.engine); const json = std.json; @@ -223,7 +224,7 @@ pub const Engine = struct { self.tick() catch |err| { log.err("engine tick error: {}", .{err}); }; - std.Thread.sleep(self.poll_interval_ns); + std_compat.thread.sleep(self.poll_interval_ns); } log.info("engine stopped", .{}); } @@ -1656,7 +1657,7 @@ pub const Engine = struct { /// Merge async_pending + correlation_id into existing input_json. fn mergeAsyncState(alloc: std.mem.Allocator, existing_input: []const u8, correlation_id: []const u8) ![]const u8 { - var obj = json.ObjectMap.init(alloc); + var obj: json.ObjectMap = .empty; if (existing_input.len > 0) { const p = json.parseFromSlice(json.Value, alloc, existing_input, .{}) catch null; @@ -1664,14 +1665,14 @@ pub const Engine = struct { if (parsed.value == .object) { var it = parsed.value.object.iterator(); while (it.next()) |entry| { - try obj.put(entry.key_ptr.*, entry.value_ptr.*); + try obj.put(alloc, entry.key_ptr.*, entry.value_ptr.*); } } } } - try obj.put("async_pending", .{ .bool = true }); - try obj.put("correlation_id", .{ .string = correlation_id }); + try obj.put(alloc, "async_pending", .{ .bool = true }); + try obj.put(alloc, "correlation_id", .{ .string = correlation_id }); return json.Stringify.valueAlloc(alloc, json.Value{ .object = obj }, .{}); } @@ -1738,7 +1739,7 @@ fn findReadyNodesFromRoot( var entry = inbound.getPtr(target); if (entry == null) { - try inbound.put(target, std.ArrayListUnmanaged(EdgeInfo){}); + try inbound.put(target, .empty); entry = inbound.getPtr(target); } try entry.?.append(alloc, .{ @@ -1976,7 +1977,8 @@ fn encodePathSegment(allocator: std.mem.Allocator, value: []const u8) ![]const u { try buf.append(allocator, byte); } else { - try buf.writer(allocator).print("%{X:0>2}", .{byte}); + const upper = "0123456789ABCDEF"; + try 
buf.appendSlice(allocator, &.{ '%', upper[(byte >> 4) & 0x0F], upper[byte & 0x0F] }); } } @@ -2024,7 +2026,7 @@ fn getNodeTags(alloc: std.mem.Allocator, node_json: []const u8) []const []const // ── JSON / Serialization Helpers ──────────────────────────────────── fn serializeJsonValue(alloc: std.mem.Allocator, value: json.Value) ![]const u8 { - var out: std.io.Writer.Allocating = .init(alloc); + var out: std.Io.Writer.Allocating = .init(alloc); var jw: json.Stringify = .{ .writer = &out.writer }; try jw.write(value); return try out.toOwnedSlice(); @@ -2070,14 +2072,14 @@ fn buildTaskStateUpdates(alloc: std.mem.Allocator, node_json: []const u8, output defer arena.deinit(); const arena_alloc = arena.allocator(); - var result = json.ObjectMap.init(arena_alloc); + var result: json.ObjectMap = .empty; const parsed_output = json.parseFromSlice(json.Value, arena_alloc, output, .{}) catch null; if (output_key) |key| { if (parsed_output) |parsed| { - try result.put(key, parsed.value); + try result.put(arena_alloc, key, parsed.value); } else { - try result.put(key, .{ .string = output }); + try result.put(arena_alloc, key, .{ .string = output }); } } @@ -2092,7 +2094,7 @@ fn buildTaskStateUpdates(alloc: std.mem.Allocator, node_json: []const u8, output const raw_val = state_mod.getStateValue(arena_alloc, output, source_path) catch null; if (raw_val) |value_json| { const parsed_value = json.parseFromSlice(json.Value, arena_alloc, value_json, .{}) catch continue; - try result.put(entry.key_ptr.*, parsed_value.value); + try result.put(arena_alloc, entry.key_ptr.*, parsed_value.value); } } } @@ -2120,19 +2122,19 @@ fn serializeRouteResults(alloc: std.mem.Allocator, route_results: *std.StringHas fn serializeRouteResultsWithVersion(alloc: std.mem.Allocator, route_results: *std.StringHashMap([]const u8), wf_version: ?i64) !?[]const u8 { if (route_results.count() == 0 and wf_version == null) return null; - var obj = json.ObjectMap.init(alloc); + var obj: json.ObjectMap = .empty; if 
(route_results.count() > 0) { - var rr_obj = json.ObjectMap.init(alloc); + var rr_obj: json.ObjectMap = .empty; var it = route_results.iterator(); while (it.next()) |entry| { - try rr_obj.put(entry.key_ptr.*, .{ .string = entry.value_ptr.* }); + try rr_obj.put(alloc, entry.key_ptr.*, .{ .string = entry.value_ptr.* }); } - try obj.put("route_results", .{ .object = rr_obj }); + try obj.put(alloc, "route_results", .{ .object = rr_obj }); } if (wf_version) |v| { - try obj.put("workflow_version", .{ .integer = v }); + try obj.put(alloc, "workflow_version", .{ .integer = v }); } return try serializeJsonValue(alloc, .{ .object = obj }); @@ -2344,11 +2346,11 @@ fn stripMeta(alloc: std.mem.Allocator, state_json: []const u8) ![]const u8 { const parsed = json.parseFromSlice(json.Value, alloc, state_json, .{}) catch return try alloc.dupe(u8, state_json); if (parsed.value != .object) return try alloc.dupe(u8, state_json); - var result_obj = json.ObjectMap.init(alloc); + var result_obj: json.ObjectMap = .empty; var it = parsed.value.object.iterator(); while (it.next()) |entry| { if (!std.mem.eql(u8, entry.key_ptr.*, "__meta")) { - try result_obj.put(entry.key_ptr.*, entry.value_ptr.*); + try result_obj.put(alloc, entry.key_ptr.*, entry.value_ptr.*); } } return serializeJsonValue(alloc, .{ .object = result_obj }); @@ -2360,7 +2362,7 @@ fn buildSubgraphInput(alloc: std.mem.Allocator, parent_state: []const u8, input_ const mapping_parsed = json.parseFromSlice(json.Value, alloc, input_mapping_json, .{}) catch return try alloc.dupe(u8, "{}"); if (mapping_parsed.value != .object) return try alloc.dupe(u8, "{}"); - var result = json.ObjectMap.init(alloc); + var result: json.ObjectMap = .empty; var it = mapping_parsed.value.object.iterator(); while (it.next()) |entry| { const child_key = entry.key_ptr.*; @@ -2369,7 +2371,7 @@ fn buildSubgraphInput(alloc: std.mem.Allocator, parent_state: []const u8, input_ // Resolve the value from parent state if (state_mod.getStateValue(alloc, 
parent_state, parent_path) catch null) |value_str| { const val_parsed = json.parseFromSlice(json.Value, alloc, value_str, .{}) catch continue; - try result.put(child_key, val_parsed.value); + try result.put(alloc, child_key, val_parsed.value); } } @@ -2385,10 +2387,10 @@ fn reconcileWithTracker(alloc: std.mem.Allocator, tracker_url: []const u8, track const url = std.fmt.allocPrint(alloc, "{s}/tasks/{s}", .{ tracker_url, task_id_enc }) catch return true; defer alloc.free(url); - var client: std.http.Client = .{ .allocator = alloc }; + var client: std.http.Client = .{ .allocator = alloc, .io = std_compat.io() }; defer client.deinit(); - var response_body: std.io.Writer.Allocating = .init(alloc); + var response_body: std.Io.Writer.Allocating = .init(alloc); defer response_body.deinit(); var auth_header: ?[]const u8 = null; @@ -2564,12 +2566,12 @@ fn processUiMessages(hub: *sse_mod.SseHub, alloc: std.mem.Allocator, run_id: []c }; // Add step_id to the event data - var event_obj = json.ObjectMap.init(alloc); + var event_obj: json.ObjectMap = .empty; var it = msg.object.iterator(); while (it.next()) |entry| { - event_obj.put(entry.key_ptr.*, entry.value_ptr.*) catch continue; + event_obj.put(alloc, entry.key_ptr.*, entry.value_ptr.*) catch continue; } - event_obj.put("step_id", .{ .string = step_id }) catch {}; + event_obj.put(alloc, "step_id", .{ .string = step_id }) catch {}; const event_data = serializeJsonValue(alloc, .{ .object = event_obj }) catch continue; if (is_remove) { @@ -2619,13 +2621,13 @@ fn processStreamMessages(hub: *sse_mod.SseHub, alloc: std.mem.Allocator, run_id: if (msg != .object) continue; // Build enriched message with step context - var event_obj = json.ObjectMap.init(alloc); + var event_obj: json.ObjectMap = .empty; var it = msg.object.iterator(); while (it.next()) |entry| { - event_obj.put(entry.key_ptr.*, entry.value_ptr.*) catch continue; + event_obj.put(alloc, entry.key_ptr.*, entry.value_ptr.*) catch continue; } - event_obj.put("step_id", 
.{ .string = step_id }) catch {}; - event_obj.put("node_type", .{ .string = node_type }) catch {}; + event_obj.put(alloc, "step_id", .{ .string = step_id }) catch {}; + event_obj.put(alloc, "node_type", .{ .string = node_type }) catch {}; const event_data = serializeJsonValue(alloc, .{ .object = event_obj }) catch continue; hub.broadcast(run_id, .{ .event_type = "message", .data = event_data, .mode = .custom }); diff --git a/src/export_manifest.zig b/src/export_manifest.zig index b6fc7eb..109c5d6 100644 --- a/src/export_manifest.zig +++ b/src/export_manifest.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); pub fn run() !void { const manifest = @@ -19,7 +20,7 @@ pub fn run() !void { \\ "aarch64-windows": { "asset": "nullboiler-windows-aarch64.exe", "binary": "nullboiler.exe" } \\ }, \\ "build_from_source": { - \\ "zig_version": "0.15.2", + \\ "zig_version": "0.16.0", \\ "command": "zig build -Doptimize=ReleaseSmall", \\ "output": "zig-out/bin/nullboiler" \\ }, @@ -56,7 +57,7 @@ pub fn run() !void { \\ ] \\} ; - const stdout = std.fs.File.stdout(); + const stdout = std_compat.fs.File.stdout(); try stdout.writeAll(manifest); try stdout.writeAll("\n"); } diff --git a/src/from_json.zig b/src/from_json.zig index 8d0411d..07b8aee 100644 --- a/src/from_json.zig +++ b/src/from_json.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const std_compat = @import("compat.zig"); const config_mod = @import("config.zig"); pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void { @@ -109,7 +110,7 @@ pub fn run(allocator: std.mem.Allocator, args: []const []const u8) !void { } if (!builtin.is_test) { - const stdout = std.fs.File.stdout(); + const stdout = std_compat.fs.File.stdout(); try stdout.writeAll("{\"status\":\"ok\"}\n"); } } @@ -166,17 +167,14 @@ fn getU32(obj: std.json.ObjectMap, key: []const u8) ?u32 { fn ensureHome(home: []const u8) !void { if (std.fs.path.isAbsolute(home)) { - 
std.fs.makeDirAbsolute(home) catch |err| switch (err) { + std_compat.fs.makeDirAbsolute(home) catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; return; } - std.fs.cwd().makePath(home) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => return err, - }; + try std_compat.fs.cwd().makePath(home); } fn writeFileAtHome(allocator: std.mem.Allocator, home: []const u8, name: []const u8, contents: []const u8) !void { @@ -185,14 +183,14 @@ fn writeFileAtHome(allocator: std.mem.Allocator, home: []const u8, name: []const try ensureParentDir(home, name); if (std.fs.path.isAbsolute(home)) { - const file = try std.fs.createFileAbsolute(path, .{}); + const file = try std_compat.fs.createFileAbsolute(path, .{}); defer file.close(); try file.writeAll(contents); try file.writeAll("\n"); return; } - const file = try std.fs.cwd().createFile(path, .{}); + const file = try std_compat.fs.cwd().createFile(path, .{}); defer file.close(); try file.writeAll(contents); try file.writeAll("\n"); @@ -203,7 +201,7 @@ fn ensureParentDir(home: []const u8, name: []const u8) !void { if (std.fs.path.isAbsolute(home)) { const full_parent = try std.fs.path.join(std.heap.page_allocator, &.{ home, parent }); defer std.heap.page_allocator.free(full_parent); - std.fs.makeDirAbsolute(full_parent) catch |err| switch (err) { + std_compat.fs.makeDirAbsolute(full_parent) catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; @@ -212,10 +210,7 @@ fn ensureParentDir(home: []const u8, name: []const u8) !void { const full_parent = try std.fs.path.join(std.heap.page_allocator, &.{ home, parent }); defer std.heap.page_allocator.free(full_parent); - std.fs.cwd().makePath(full_parent) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => return err, - }; + try std_compat.fs.cwd().makePath(full_parent); } fn defaultWorkflowFileName(allocator: std.mem.Allocator, pipeline_id: []const u8) ![]const u8 { @@ -244,7 +239,7 @@ test "run 
writes tracker config and workflow with advanced settings" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const home = try tmp.dir.realpathAlloc(std.testing.allocator, "."); + const home = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "."); defer std.testing.allocator.free(home); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -280,7 +275,7 @@ test "run writes tracker config and workflow with advanced settings" { const config_path = try std.fs.path.join(std.testing.allocator, &.{ home, "config.json" }); defer std.testing.allocator.free(config_path); - const config_file = try std.fs.openFileAbsolute(config_path, .{}); + const config_file = try std_compat.fs.openFileAbsolute(config_path, .{}); defer config_file.close(); const config_bytes = try config_file.readToEndAlloc(arena.allocator(), 64 * 1024); @@ -342,7 +337,7 @@ test "run writes tracker config and workflow with advanced settings" { const workflow_path = try std.fs.path.join(std.testing.allocator, &.{ home, "workflows", "pipeline.dev.json" }); defer std.testing.allocator.free(workflow_path); - const workflow_file = try std.fs.openFileAbsolute(workflow_path, .{}); + const workflow_file = try std_compat.fs.openFileAbsolute(workflow_path, .{}); defer workflow_file.close(); const workflow_bytes = try workflow_file.readToEndAlloc(arena.allocator(), 16 * 1024); diff --git a/src/ids.zig b/src/ids.zig index 267af62..37334fd 100644 --- a/src/ids.zig +++ b/src/ids.zig @@ -1,9 +1,10 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); /// Generate a UUID v4 string (36 chars: 8-4-4-4-12) pub fn generateId() [36]u8 { var bytes: [16]u8 = undefined; - std.crypto.random.bytes(&bytes); + std_compat.crypto.random.bytes(&bytes); // Set version 4 bytes[6] = (bytes[6] & 0x0f) | 0x40; @@ -27,5 +28,5 @@ pub fn generateId() [36]u8 { /// Current time in milliseconds since epoch pub fn nowMs() i64 { - return std.time.milliTimestamp(); + return 
std_compat.time.milliTimestamp(); } diff --git a/src/main.zig b/src/main.zig index 45590f0..8ea8451 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,4 +1,6 @@ const std = @import("std"); +const builtin = @import("builtin"); +const std_compat = @import("compat.zig"); const Store = @import("store.zig").Store; const api = @import("api.zig"); const config = @import("config.zig"); @@ -27,22 +29,13 @@ fn onSignal(sig: c_int) callconv(.c) void { shutdown_requested.store(true, .seq_cst); } -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + std_compat.initProcess(init); + const allocator = std.heap.smp_allocator; - var args = try std.process.argsWithAllocator(allocator); - defer args.deinit(); - _ = args.next(); // skip program name - - // Collect all args into a slice for manifest protocol checks - var arg_list: std.ArrayListUnmanaged([:0]const u8) = .empty; - defer arg_list.deinit(allocator); - while (args.next()) |a| { - try arg_list.append(allocator, a); - } - const all_args = arg_list.items; + const args = try std_compat.process.argsAlloc(allocator); + defer std_compat.process.argsFree(allocator, args); + const all_args = if (args.len > 0) args[1..] 
else &.{}; // Check for manifest protocol flags first (early exit, no config needed) if (all_args.len >= 1) { @@ -228,15 +221,15 @@ pub fn main() !void { } } - const addr = std.net.Address.resolveIp(bind_host, port) catch |err| { + const addr = std.Io.net.IpAddress.resolve(std_compat.io(), bind_host, port) catch |err| { std.debug.print("failed to resolve bind address {s}:{d}: {}\n", .{ bind_host, port, err }); return; }; - var server = addr.listen(.{ .reuse_address = true }) catch |err| { + var server = addr.listen(std_compat.io(), .{ .reuse_address = true }) catch |err| { std.debug.print("failed to listen on {s}:{d}: {}\n", .{ bind_host, port, err }); return; }; - defer server.deinit(); + defer server.deinit(std_compat.io()); // SIGINT/SIGTERM should switch process into drain/shutdown mode. _ = c.signal(c.SIGINT, onSignal); @@ -367,30 +360,32 @@ pub fn main() !void { break; } - var poll_fds = [_]std.posix.pollfd{ - .{ - .fd = server.stream.handle, - .events = std.posix.POLL.IN, - .revents = 0, - }, - }; - const ready = std.posix.poll(&poll_fds, 50) catch { - std.Thread.sleep(50 * std.time.ns_per_ms); - continue; - }; - if (ready == 0) continue; + if (builtin.os.tag != .windows) { + var poll_fds = [_]std.posix.pollfd{ + .{ + .fd = server.socket.handle, + .events = std.posix.POLL.IN, + .revents = 0, + }, + }; + const ready = std.posix.poll(&poll_fds, 50) catch { + std_compat.thread.sleep(50 * std.time.ns_per_ms); + continue; + }; + if (ready == 0) continue; + } - var conn = server.accept() catch |err| { + var conn = server.accept(std_compat.io()) catch |err| { std.debug.print("accept error: {}\n", .{err}); continue; }; - defer conn.stream.close(); + defer conn.close(std_compat.io()); var arena = std.heap.ArenaAllocator.init(allocator); defer arena.deinit(); const req_alloc = arena.allocator(); - const request = readHttpRequest(req_alloc, &conn.stream, max_request_size) catch |err| { + const request = readHttpRequest(req_alloc, &conn, max_request_size) catch |err| { 
std.debug.print("request read error: {}\n", .{err}); continue; } orelse continue; @@ -415,8 +410,11 @@ pub fn main() !void { const header = std.fmt.allocPrint(req_alloc, "HTTP/1.1 {s}\r\nContent-Type: {s}\r\nX-Request-Id: {s}\r\nContent-Length: {d}\r\nConnection: close\r\n\r\n", .{ response.status, response.content_type, request.request_id, response.body.len }) catch continue; - conn.stream.writeAll(header) catch continue; - conn.stream.writeAll(response.body) catch continue; + var write_buffer: [4096]u8 = undefined; + var writer = conn.writer(std_compat.io(), &write_buffer); + writer.interface.writeAll(header) catch continue; + writer.interface.writeAll(response.body) catch continue; + writer.interface.flush() catch continue; } const drain_deadline = ids.nowMs() + @as(i64, @intCast(cfg.engine.shutdown_grace_ms)); @@ -425,7 +423,7 @@ pub fn main() !void { defer drain_arena.deinit(); const active = store.getActiveRuns(drain_arena.allocator()) catch break; if (active.len == 0) break; - std.Thread.sleep(200 * std.time.ns_per_ms); + std_compat.thread.sleep(200 * std.time.ns_per_ms); } } @@ -436,17 +434,14 @@ fn ensureParentDirForFile(path: []const u8) !void { if (parent.len == 0) return; if (std.fs.path.isAbsolute(parent)) { - std.fs.makeDirAbsolute(parent) catch |err| switch (err) { + std_compat.fs.makeDirAbsolute(parent) catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; return; } - std.fs.cwd().makePath(parent) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => return err, - }; + try std_compat.fs.cwd().makePath(parent); } fn serializeTagsJson(allocator: std.mem.Allocator, tags: []const []const u8) ![]const u8 { @@ -463,16 +458,18 @@ const ParsedHttpRequest = struct { traceparent: ?[]const u8, }; -fn readHttpRequest(allocator: std.mem.Allocator, stream: *std.net.Stream, max_bytes: usize) !?ParsedHttpRequest { +fn readHttpRequest(allocator: std.mem.Allocator, stream: *std.Io.net.Stream, max_bytes: usize) 
!?ParsedHttpRequest { var buffer: std.ArrayListUnmanaged(u8) = .empty; defer buffer.deinit(allocator); var header_end: ?usize = null; var content_len: usize = 0; + var read_buffer: [request_read_chunk]u8 = undefined; + var reader = stream.reader(std_compat.io(), &read_buffer); var chunk: [request_read_chunk]u8 = undefined; while (true) { - const n = try stream.read(&chunk); + const n = try reader.interface.readSliceShort(&chunk); if (n == 0) return null; try buffer.appendSlice(allocator, chunk[0..n]); diff --git a/src/mqtt_client.zig b/src/mqtt_client.zig index dd32814..e468682 100644 --- a/src/mqtt_client.zig +++ b/src/mqtt_client.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.mqtt_client); const c = @cImport(@cInclude("mosquitto.h")); const async_dispatch = @import("async_dispatch.zig"); @@ -66,7 +67,7 @@ pub fn runListener( _ = configs; while (!shutdown.load(.acquire)) { - std.Thread.sleep(100 * std.time.ns_per_ms); + std_compat.thread.sleep(100 * std.time.ns_per_ms); } log.info("mqtt listener stopped", .{}); } diff --git a/src/redis_client.zig b/src/redis_client.zig index 087c53f..cc2b7c7 100644 --- a/src/redis_client.zig +++ b/src/redis_client.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.redis_client); const c = @cImport(@cInclude("hiredis.h")); const async_dispatch = @import("async_dispatch.zig"); @@ -55,7 +56,7 @@ pub fn runListener( _ = configs; while (!shutdown.load(.acquire)) { - std.Thread.sleep(100 * std.time.ns_per_ms); + std_compat.thread.sleep(100 * std.time.ns_per_ms); } log.info("redis listener stopped", .{}); } diff --git a/src/sse.zig b/src/sse.zig index 0c64344..20d61d7 100644 --- a/src/sse.zig +++ b/src/sse.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const Allocator = std.mem.Allocator; pub const StreamMode = enum { @@ -38,7 +39,7 @@ pub const 
EventSnapshot = struct { pub const RunEventQueue = struct { events: std.ArrayListUnmanaged(SseEvent), alloc: Allocator, - mutex: std.Thread.Mutex, + mutex: std_compat.sync.Mutex, closed: std.atomic.Value(bool), next_seq: u64, @@ -167,7 +168,7 @@ pub const RunEventQueue = struct { /// Central hub managing per-run event queues. pub const SseHub = struct { queues: std.StringHashMap(*RunEventQueue), - mutex: std.Thread.Mutex, + mutex: std_compat.sync.Mutex, alloc: Allocator, pub fn init(alloc: Allocator) SseHub { diff --git a/src/state.zig b/src/state.zig index 5c266a6..7db83f2 100644 --- a/src/state.zig +++ b/src/state.zig @@ -12,7 +12,7 @@ const json = std.json; /// Serialize a std.json.Value to an allocated JSON string. fn serializeValue(alloc: Allocator, value: json.Value) ![]const u8 { - var out: std.io.Writer.Allocating = .init(alloc); + var out: std.Io.Writer.Allocating = .init(alloc); var jw: json.Stringify = .{ .writer = &out.writer }; try jw.write(value); return try out.toOwnedSlice(); @@ -95,19 +95,19 @@ pub fn applyUpdates(alloc: Allocator, state_json: []const u8, updates_json: []co const arena_alloc = arena.allocator(); const state_parsed = try json.parseFromSlice(json.Value, arena_alloc, state_json, .{}); - const state_obj = if (state_parsed.value == .object) state_parsed.value.object else json.ObjectMap.init(arena_alloc); + const state_obj: json.ObjectMap = if (state_parsed.value == .object) state_parsed.value.object else .empty; const updates_parsed = try json.parseFromSlice(json.Value, arena_alloc, updates_json, .{}); if (updates_parsed.value != .object) return try alloc.dupe(u8, state_json); const schema_parsed = try json.parseFromSlice(json.Value, arena_alloc, schema_json, .{}); - const schema_obj = if (schema_parsed.value == .object) schema_parsed.value.object else json.ObjectMap.init(arena_alloc); + const schema_obj: json.ObjectMap = if (schema_parsed.value == .object) schema_parsed.value.object else .empty; // Start with a copy of all existing 
state keys - var result_obj = json.ObjectMap.init(arena_alloc); + var result_obj: json.ObjectMap = .empty; var state_it = state_obj.iterator(); while (state_it.next()) |entry| { - try result_obj.put(entry.key_ptr.*, entry.value_ptr.*); + try result_obj.put(arena_alloc, entry.key_ptr.*, entry.value_ptr.*); } // For each update key, apply the reducer (with overwrite bypass, Gap 5) @@ -119,7 +119,7 @@ pub fn applyUpdates(alloc: Allocator, state_json: []const u8, updates_json: []co // Gap 5: Check for overwrite bypass if (isOverwrite(update_value)) { const raw_val = extractOverwriteValue(update_value); - try result_obj.put(key, raw_val); + try result_obj.put(arena_alloc, key, raw_val); continue; } @@ -153,7 +153,7 @@ pub fn applyUpdates(alloc: Allocator, state_json: []const u8, updates_json: []co // Parse the result back into a json.Value and put in result const new_parsed = try json.parseFromSlice(json.Value, arena_alloc, new_str, .{}); - try result_obj.put(key, new_parsed.value); + try result_obj.put(arena_alloc, key, new_parsed.value); } // Serialize the result into the caller's allocator @@ -171,12 +171,12 @@ pub fn initState(alloc: Allocator, input_json: []const u8, schema_json: []const const arena_alloc = arena.allocator(); const input_parsed = try json.parseFromSlice(json.Value, arena_alloc, input_json, .{}); - const input_obj = if (input_parsed.value == .object) input_parsed.value.object else json.ObjectMap.init(arena_alloc); + const input_obj: json.ObjectMap = if (input_parsed.value == .object) input_parsed.value.object else .empty; const schema_parsed = try json.parseFromSlice(json.Value, arena_alloc, schema_json, .{}); if (schema_parsed.value != .object) return try alloc.dupe(u8, input_json); - var result_obj = json.ObjectMap.init(arena_alloc); + var result_obj: json.ObjectMap = .empty; var schema_it = schema_parsed.value.object.iterator(); while (schema_it.next()) |entry| { @@ -184,7 +184,7 @@ pub fn initState(alloc: Allocator, input_json: []const u8, 
schema_json: []const const schema_entry = entry.value_ptr.*; if (input_obj.get(key)) |input_val| { - try result_obj.put(key, input_val); + try result_obj.put(arena_alloc, key, input_val); } else { const type_str = blk: { if (schema_entry == .object) { @@ -206,11 +206,11 @@ pub fn initState(alloc: Allocator, input_json: []const u8, schema_json: []const else if (std.mem.eql(u8, type_str, "boolean")) .{ .bool = false } else if (std.mem.eql(u8, type_str, "object")) - .{ .object = json.ObjectMap.init(arena_alloc) } + .{ .object = .empty } else .null; - try result_obj.put(key, default_val); + try result_obj.put(arena_alloc, key, default_val); } } @@ -397,12 +397,12 @@ fn deepMerge(alloc: Allocator, base: json.Value, overlay: json.Value) !json.Valu return overlay; } - var result = json.ObjectMap.init(alloc); + var result: json.ObjectMap = .empty; // Copy all base keys var base_it = base.object.iterator(); while (base_it.next()) |entry| { - try result.put(entry.key_ptr.*, entry.value_ptr.*); + try result.put(alloc, entry.key_ptr.*, entry.value_ptr.*); } // Apply overlay keys, recursively merging nested objects @@ -414,12 +414,12 @@ fn deepMerge(alloc: Allocator, base: json.Value, overlay: json.Value) !json.Valu if (result.get(key)) |existing| { if (existing == .object and overlay_val == .object) { const merged = try deepMerge(alloc, existing, overlay_val); - try result.put(key, merged); + try result.put(alloc, key, merged); } else { - try result.put(key, overlay_val); + try result.put(alloc, key, overlay_val); } } else { - try result.put(key, overlay_val); + try result.put(alloc, key, overlay_val); } } @@ -577,13 +577,13 @@ fn applyAddMessages(alloc: Allocator, old_json: ?[]const u8, update_json: []cons } } else { // No id — generate one and append - var msg_copy = json.ObjectMap.init(arena_alloc); + var msg_copy: json.ObjectMap = .empty; var it = msg.object.iterator(); while (it.next()) |entry| { - try msg_copy.put(entry.key_ptr.*, entry.value_ptr.*); + try 
msg_copy.put(arena_alloc, entry.key_ptr.*, entry.value_ptr.*); } const gen_id = try std.fmt.allocPrint(arena_alloc, "msg_{d}", .{result_msgs.items.len}); - try msg_copy.put("id", json.Value{ .string = gen_id }); + try msg_copy.put(arena_alloc, "id", json.Value{ .string = gen_id }); try result_msgs.append(json.Value{ .object = msg_copy }); } } @@ -625,11 +625,11 @@ pub fn stripEphemeralKeys(alloc: Allocator, state_json: []const u8, schema_json: const state_parsed = try json.parseFromSlice(json.Value, arena_alloc, state_json, .{}); if (state_parsed.value != .object) return try alloc.dupe(u8, state_json); - var result_obj = json.ObjectMap.init(arena_alloc); + var result_obj: json.ObjectMap = .empty; var state_it = state_parsed.value.object.iterator(); while (state_it.next()) |entry| { if (ephemeral_keys.get(entry.key_ptr.*) == null) { - try result_obj.put(entry.key_ptr.*, entry.value_ptr.*); + try result_obj.put(arena_alloc, entry.key_ptr.*, entry.value_ptr.*); } } diff --git a/src/store.zig b/src/store.zig index 5b072cf..d9236e0 100644 --- a/src/store.zig +++ b/src/store.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.store); const ids = @import("ids.zig"); const types = @import("types.zig"); @@ -1901,7 +1902,7 @@ test "Store: reopens legacy schema with user_version reset to zero" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const root = try tmp.dir.realpathAlloc(allocator, "."); + const root = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(root); const db_path = try std.fs.path.join(allocator, &.{ root, "legacy.db" }); defer allocator.free(db_path); diff --git a/src/strategy.zig b/src/strategy.zig index 93330d0..a3a9e2c 100644 --- a/src/strategy.zig +++ b/src/strategy.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); // ── Types ───────────────────────────────────────────────────────────── @@ -27,7 
+28,7 @@ pub const StrategyError = error{ pub fn loadStrategies(allocator: std.mem.Allocator, dir_path: []const u8) StrategyMap { var map = StrategyMap{}; - var dir = std.fs.cwd().openDir(dir_path, .{ .iterate = true }) catch { + var dir = std_compat.fs.cwd().openDir(dir_path, .{ .iterate = true }) catch { return map; }; defer dir.close(); @@ -125,23 +126,23 @@ fn expandStepNested( // Build a new step object: copy all fields except strategy/steps, // set type=sub_workflow, add workflow object with expanded steps - var new_obj = std.json.ObjectMap.init(allocator); + var new_obj: std.json.ObjectMap = .empty; // Copy existing fields for (step_obj.keys(), step_obj.values()) |key, val| { if (std.mem.eql(u8, key, "strategy")) continue; if (std.mem.eql(u8, key, "steps")) continue; - new_obj.put(key, val) catch return StrategyError.OutOfMemory; + new_obj.put(allocator, key, val) catch return StrategyError.OutOfMemory; } // Override type to sub_workflow - new_obj.put("type", std.json.Value{ .string = "sub_workflow" }) catch + new_obj.put(allocator, "type", std.json.Value{ .string = "sub_workflow" }) catch return StrategyError.OutOfMemory; // Build workflow object with expanded steps - var workflow_obj = std.json.ObjectMap.init(allocator); - workflow_obj.put("steps", nested_steps) catch return StrategyError.OutOfMemory; - new_obj.put("workflow", std.json.Value{ .object = workflow_obj }) catch + var workflow_obj: std.json.ObjectMap = .empty; + workflow_obj.put(allocator, "steps", nested_steps) catch return StrategyError.OutOfMemory; + new_obj.put(allocator, "workflow", std.json.Value{ .object = workflow_obj }) catch return StrategyError.OutOfMemory; return std.json.Value{ .object = new_obj }; @@ -162,7 +163,7 @@ fn applyChain(allocator: std.mem.Allocator, steps: []std.json.Value) !void { var deps = try std.json.Array.initCapacity(allocator, 1); try deps.append(std.json.Value{ .string = prev_id_val.string }); - try step_val.object.put("depends_on", std.json.Value{ .array = 
deps }); + try step_val.object.put(allocator, "depends_on", std.json.Value{ .array = deps }); } } @@ -172,19 +173,19 @@ fn buildReduceStep( reduce_obj: std.json.ObjectMap, steps: []const std.json.Value, ) !std.json.Value { - var new_obj = std.json.ObjectMap.init(allocator); + var new_obj: std.json.ObjectMap = .empty; // Copy all fields from the reduce config for (reduce_obj.keys(), reduce_obj.values()) |key, val| { - try new_obj.put(key, val); + try new_obj.put(allocator, key, val); } // Set type to reduce - try new_obj.put("type", std.json.Value{ .string = "reduce" }); + try new_obj.put(allocator, "type", std.json.Value{ .string = "reduce" }); // Ensure it has an id; default to "__reduce" if not provided if (new_obj.get("id") == null) { - try new_obj.put("id", std.json.Value{ .string = "__reduce" }); + try new_obj.put(allocator, "id", std.json.Value{ .string = "__reduce" }); } // Build depends_on array from all step ids @@ -196,7 +197,7 @@ fn buildReduceStep( try deps.append(std.json.Value{ .string = id_val.string }); } - try new_obj.put("depends_on", std.json.Value{ .array = deps }); + try new_obj.put(allocator, "depends_on", std.json.Value{ .array = deps }); return std.json.Value{ .object = new_obj }; } @@ -237,25 +238,25 @@ test "loadStrategies: loads from directory" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "mychain.json", .data = \\{"name":"mychain","description":"test chain","build":"chain"} , }); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "mypar.json", .data = \\{"name":"mypar","description":"test parallel","build":"independent"} , }); // Non-json file should be ignored - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "readme.txt", .data = "not json", }); - const dir_path = try tmp.dir.realpathAlloc(std.testing.allocator, "."); + const dir_path = try 
std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "."); defer std.testing.allocator.free(dir_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); diff --git a/src/subprocess.zig b/src/subprocess.zig index 83b3300..42d883e 100644 --- a/src/subprocess.zig +++ b/src/subprocess.zig @@ -1,6 +1,7 @@ /// NullClaw subprocess manager — spawns, health-checks, and kills agent child processes. /// Used in pull-mode execution where NullBoiler spawns NullClaw as a child process per task. const std = @import("std"); +const std_compat = @import("compat.zig"); const ids = @import("ids.zig"); // ── Types ───────────────────────────────────────────────────────────── @@ -32,7 +33,7 @@ pub const SubprocessInfo = struct { pipeline_id: []const u8, agent_role: []const u8, port: u16, - child: ?std.process.Child, + child: ?std_compat.process.Child, current_turn: u32, max_turns: u32, started_at_ms: i64, @@ -56,7 +57,7 @@ pub fn spawnNullClaw( args: []const []const u8, port: u16, workspace_path: []const u8, -) !std.process.Child { +) !std_compat.process.Child { // Build full argv: command + args + --port + port_str + --workdir + workspace_path const total_len = 1 + args.len + 4; const argv = try allocator.alloc([]const u8, total_len); @@ -74,7 +75,7 @@ pub fn spawnNullClaw( argv[1 + args.len + 2] = "--workdir"; argv[1 + args.len + 3] = workspace_path; - var child = std.process.Child.init(argv, allocator); + var child = std_compat.process.Child.init(argv, allocator); child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.cwd = workspace_path; @@ -98,13 +99,13 @@ pub fn waitForHealth( var attempt: u32 = 0; while (attempt < max_retries) : (attempt += 1) { if (attempt > 0) { - std.Thread.sleep(500 * std.time.ns_per_ms); + std_compat.thread.sleep(500 * std.time.ns_per_ms); } - var client: std.http.Client = .{ .allocator = allocator }; + var client: std.http.Client = .{ .allocator = allocator, .io = std_compat.io() }; defer client.deinit(); - var 
response_body: std.io.Writer.Allocating = .init(allocator); + var response_body: std.Io.Writer.Allocating = .init(allocator); defer response_body.deinit(); const result = client.fetch(.{ @@ -138,10 +139,10 @@ pub fn sendPrompt( }, .{}); defer allocator.free(body); - var client: std.http.Client = .{ .allocator = allocator }; + var client: std.http.Client = .{ .allocator = allocator, .io = std_compat.io() }; defer client.deinit(); - var response_body: std.io.Writer.Allocating = .init(allocator); + var response_body: std.Io.Writer.Allocating = .init(allocator); defer response_body.deinit(); const result = client.fetch(.{ @@ -167,7 +168,7 @@ pub fn sendPrompt( // ── Kill ────────────────────────────────────────────────────────────── /// Kill a subprocess and wait for it to exit. Errors are silently ignored. -pub fn killSubprocess(child: *std.process.Child) void { +pub fn killSubprocess(child: *std_compat.process.Child) void { _ = child.kill() catch null; _ = child.wait() catch null; } diff --git a/src/templates.zig b/src/templates.zig index 0332f02..dea3057 100644 --- a/src/templates.zig +++ b/src/templates.zig @@ -338,8 +338,8 @@ fn jsonValueToString(allocator: std.mem.Allocator, val: std.json.Value) RenderEr return allocator.dupe(u8, "null") catch return error.OutOfMemory; }, .object, .array => { - // Serialize back to JSON string using Zig 0.15 Stringify API - var out: std.io.Writer.Allocating = .init(allocator); + // Serialize back to JSON string using Zig 0.16 Stringify API + var out: std.Io.Writer.Allocating = .init(allocator); errdefer out.deinit(); var jw: std.json.Stringify = .{ .writer = &out.writer }; jw.write(val) catch return error.OutOfMemory; diff --git a/src/tracker.zig b/src/tracker.zig index c61950b..f886ac6 100644 --- a/src/tracker.zig +++ b/src/tracker.zig @@ -9,6 +9,7 @@ /// 5. Poll NullTickets and claim new tasks (respecting concurrency limits) /// 6. 
Clean expired cooldowns const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.tracker); const config = @import("config.zig"); @@ -58,7 +59,7 @@ pub const CooldownEntry = struct { // ── TrackerState ──────────────────────────────────────────────────── pub const TrackerState = struct { - mutex: std.Thread.Mutex, + mutex: std_compat.sync.Mutex, running: std.StringArrayHashMapUnmanaged(RunningTask), completed_count: u64, failed_count: u64, @@ -281,7 +282,7 @@ pub const Tracker = struct { self.tick() catch |err| { log.err("tracker tick error: {}", .{err}); }; - std.Thread.sleep(poll_ns); + std_compat.thread.sleep(poll_ns); } log.info("tracker shutting down, killing subprocesses", .{}); @@ -1097,7 +1098,7 @@ pub const Tracker = struct { _ = workspace_mod.runHook(tick_alloc, hook, task.workspace_path, @as(u64, self.cfg.workspace.hook_timeout_ms)) catch {}; } // Remove workspace (will be recreated on next claim) - std.fs.cwd().deleteTree(task.workspace_path) catch |err| { + std_compat.fs.cwd().deleteTree(task.workspace_path) catch |err| { log.warn("workspace: failed to remove {s}: {}", .{ task.workspace_path, err }); }; diff --git a/src/tracker_client.zig b/src/tracker_client.zig index 212e926..b87ebef 100644 --- a/src/tracker_client.zig +++ b/src/tracker_client.zig @@ -1,5 +1,6 @@ /// HTTP client for NullTickets API used by the pull-mode tracker runtime. 
const std = @import("std"); +const std_compat = @import("compat.zig"); pub const TransitionInfo = struct { trigger: []const u8 = "", @@ -287,10 +288,10 @@ pub const TrackerClient = struct { body: ?[]const u8, bearer_override: ?[]const u8, ) !HttpResult { - var client: std.http.Client = .{ .allocator = self.allocator }; + var client: std.http.Client = .{ .allocator = self.allocator, .io = std_compat.io() }; defer client.deinit(); - var response_body: std.io.Writer.Allocating = .init(self.allocator); + var response_body: std.Io.Writer.Allocating = .init(self.allocator); defer response_body.deinit(); const token = bearer_override orelse self.api_token; @@ -340,7 +341,8 @@ fn encodePathSegment(allocator: std.mem.Allocator, value: []const u8) ![]const u try buf.append(allocator, ch); continue; } - try buf.writer(allocator).print("%{X:0>2}", .{ch}); + const upper = "0123456789ABCDEF"; + try buf.appendSlice(allocator, &.{ '%', upper[(ch >> 4) & 0x0F], upper[ch & 0x0F] }); } return try buf.toOwnedSlice(allocator); diff --git a/src/worker_protocol.zig b/src/worker_protocol.zig index 59be6a6..7de898e 100644 --- a/src/worker_protocol.zig +++ b/src/worker_protocol.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); pub const Protocol = enum { webhook, @@ -45,7 +46,7 @@ pub fn buildRequestUrl( worker_url: []const u8, protocol: Protocol, ) ![]const u8 { - const trimmed = std.mem.trimRight(u8, worker_url, "/"); + const trimmed = std_compat.mem.trimRight(u8, worker_url, "/"); if (requiresExplicitPath(protocol) and !hasExplicitPath(trimmed)) { return error.WebhookUrlPathRequired; } @@ -56,7 +57,7 @@ pub fn buildRequestUrl( } pub fn hasExplicitPath(url: []const u8) bool { - const trimmed = std.mem.trimRight(u8, url, "/"); + const trimmed = std_compat.mem.trimRight(u8, url, "/"); if (std.mem.startsWith(u8, trimmed, "/")) return true; const scheme_idx = std.mem.indexOf(u8, trimmed, "://") orelse return false; diff --git a/src/workflow_loader.zig 
b/src/workflow_loader.zig index f5d2fe0..24084d4 100644 --- a/src/workflow_loader.zig +++ b/src/workflow_loader.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const ids = @import("ids.zig"); const Store = @import("store.zig").Store; const log = std.log.scoped(.workflow_loader); @@ -54,9 +55,9 @@ pub const WorkflowMap = std.StringArrayHashMapUnmanaged(WorkflowDef); pub fn loadWorkflows(allocator: std.mem.Allocator, dir_path: []const u8) WorkflowMap { var map = WorkflowMap{}; var dir = if (std.fs.path.isAbsolute(dir_path)) - std.fs.openDirAbsolute(dir_path, .{ .iterate = true }) catch return map + std_compat.fs.openDirAbsolute(dir_path, .{ .iterate = true }) catch return map else - std.fs.cwd().openDir(dir_path, .{ .iterate = true }) catch return map; + std_compat.fs.cwd().openDir(dir_path, .{ .iterate = true }) catch return map; defer dir.close(); var iter = dir.iterate(); @@ -80,7 +81,7 @@ test "loadWorkflows: supports absolute workflow directories" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "current.json", .data = \\{ @@ -92,7 +93,7 @@ test "loadWorkflows: supports absolute workflow directories" { , }); - const dir_path = try tmp.dir.realpathAlloc(std.testing.allocator, "."); + const dir_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "."); defer std.testing.allocator.free(dir_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -137,9 +138,9 @@ pub const WorkflowWatcher = struct { self.last_check_ms = now; var dir = if (std.fs.path.isAbsolute(self.dir_path)) - std.fs.openDirAbsolute(self.dir_path, .{ .iterate = true }) catch return + std_compat.fs.openDirAbsolute(self.dir_path, .{ .iterate = true }) catch return else - std.fs.cwd().openDir(self.dir_path, .{ .iterate = true }) catch return; + std_compat.fs.cwd().openDir(self.dir_path, .{ .iterate = true }) catch 
return; defer dir.close(); var iter = dir.iterate(); @@ -242,7 +243,7 @@ test "loadWorkflows: loads JSON files from directory" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "code_review.json", .data = \\{ @@ -262,7 +263,7 @@ test "loadWorkflows: loads JSON files from directory" { , }); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "deploy.json", .data = \\{ @@ -279,12 +280,12 @@ test "loadWorkflows: loads JSON files from directory" { }); // Non-json file should be ignored - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "readme.txt", .data = "not json", }); - const dir_path = try tmp.dir.realpathAlloc(std.testing.allocator, "."); + const dir_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "."); defer std.testing.allocator.free(dir_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -313,14 +314,14 @@ test "loadWorkflows: skips files with empty pipeline_id" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "no_pipeline.json", .data = \\{"id": "wf-nope", "pipeline_id": ""} , }); - const dir_path = try tmp.dir.realpathAlloc(std.testing.allocator, "."); + const dir_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(std.testing.allocator, "."); defer std.testing.allocator.free(dir_path); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -407,7 +408,7 @@ test "WorkflowWatcher: detects file changes" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const dir_path = try tmp.dir.realpathAlloc(allocator, "."); + const dir_path = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(dir_path); var watcher = WorkflowWatcher.init(allocator, dir_path, &s); @@ -417,7 +418,7 @@ 
test "WorkflowWatcher: detects file changes" { watcher.last_check_ms = 0; // Write a workflow file - try tmp.dir.writeFile(.{ + try std_compat.fs.Dir.wrap(tmp.dir).writeFile(.{ .sub_path = "test_wf.json", .data = \\{"id":"wf-test","name":"Test WF","nodes":{}} diff --git a/src/workspace.zig b/src/workspace.zig index b7ecd9b..07e9b0f 100644 --- a/src/workspace.zig +++ b/src/workspace.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.workspace); /// Sanitize an ID for safe use as a directory name. @@ -28,13 +29,13 @@ pub fn validateWorkspacePath(allocator: std.mem.Allocator, workspace_root: []con } // Canonicalize both paths (resolves symlinks) - const canon_root = std.fs.cwd().realpathAlloc(allocator, workspace_root) catch { + const canon_root = std_compat.fs.cwd().realpathAlloc(allocator, workspace_root) catch { log.warn("workspace: cannot resolve root {s}", .{workspace_root}); return false; }; defer allocator.free(canon_root); - const canon_path = std.fs.cwd().realpathAlloc(allocator, workspace_path) catch { + const canon_path = std_compat.fs.cwd().realpathAlloc(allocator, workspace_path) catch { log.warn("workspace: cannot resolve path {s}", .{workspace_path}); return false; }; @@ -79,14 +80,14 @@ pub const Workspace = struct { const path = try std.fs.path.join(allocator, &.{ root, safe_id }); // Ensure root directory exists - std.fs.cwd().makePath(root) catch |err| { + std_compat.fs.cwd().makePath(root) catch |err| { log.warn("workspace: failed to create root {s}: {}", .{ root, err }); return err; }; // Try to create the workspace directory; track whether it already existed var created = true; - std.fs.cwd().makePath(path) catch |err| { + std_compat.fs.cwd().makePath(path) catch |err| { log.warn("workspace: failed to create workspace dir {s}: {}", .{ path, err }); return err; }; @@ -99,7 +100,7 @@ pub const Workspace = struct { } // If the directory already had contents it was not freshly 
created - var dir = try std.fs.cwd().openDir(path, .{ .iterate = true }); + var dir = try std_compat.fs.cwd().openDir(path, .{ .iterate = true }); defer dir.close(); var iter = dir.iterate(); @@ -119,7 +120,7 @@ pub const Workspace = struct { /// Remove the workspace directory tree. Logs a warning on failure. pub fn remove(self: *const Workspace) void { - std.fs.cwd().deleteTree(self.path) catch |err| { + std_compat.fs.cwd().deleteTree(self.path) catch |err| { log.warn("workspace: failed to remove {s}: {}", .{ self.path, err }); return; }; @@ -130,7 +131,7 @@ pub const Workspace = struct { /// Remove all subdirectories under the workspace root. /// Used for startup cleanup — workspaces are ephemeral and will be recreated by hooks. pub fn cleanAll(root: []const u8) void { - var dir = std.fs.cwd().openDir(root, .{ .iterate = true }) catch |err| { + var dir = std_compat.fs.cwd().openDir(root, .{ .iterate = true }) catch |err| { log.warn("workspace: cannot open root {s} for cleanup: {}", .{ root, err }); return; }; @@ -173,7 +174,7 @@ pub fn runHook(allocator: std.mem.Allocator, command: []const u8, cwd: []const u else [_][]const u8{ "/bin/sh", "-lc", command }; - var child = std.process.Child.init(&argv, allocator); + var child = std_compat.process.Child.init(&argv, allocator); child.cwd = cwd; // We don't need to capture output for hooks — inherit parent stdio @@ -197,14 +198,14 @@ pub fn runHook(allocator: std.mem.Allocator, command: []const u8, cwd: []const u return false; }; - const success = term == .Exited and term.Exited == 0; + const success = term == .exited and term.exited == 0; if (!success) { log.warn("hook exited non-zero: {s}", .{command}); } return success; } -fn killAfterTimeout(child: *std.process.Child, timeout_ms: u64, done: *std.atomic.Value(bool)) void { +fn killAfterTimeout(child: *std_compat.process.Child, timeout_ms: u64, done: *std.atomic.Value(bool)) void { // Poll in 100ms increments so we exit promptly once the child finishes const poll_ns: 
u64 = 100 * std.time.ns_per_ms; var elapsed_ns: u64 = 0; @@ -212,7 +213,7 @@ fn killAfterTimeout(child: *std.process.Child, timeout_ms: u64, done: *std.atomi while (elapsed_ns < deadline_ns) { if (done.load(.acquire)) return; - std.Thread.sleep(poll_ns); + std_compat.thread.sleep(poll_ns); elapsed_ns += poll_ns; } @@ -250,7 +251,7 @@ test "Workspace create and remove" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const root = try tmp.dir.realpathAlloc(allocator, "."); + const root = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(root); const ws = try Workspace.create(allocator, root, "test-task"); @@ -260,14 +261,14 @@ test "Workspace create and remove" { try std.testing.expect(ws.created); // Directory should exist - var dir = try std.fs.cwd().openDir(ws.path, .{}); + var dir = try std_compat.fs.cwd().openDir(ws.path, .{}); dir.close(); // Remove it ws.remove(); // Directory should no longer exist - const open_result = std.fs.cwd().openDir(ws.path, .{}); + const open_result = std_compat.fs.cwd().openDir(ws.path, .{}); try std.testing.expectError(error.FileNotFound, open_result); } @@ -279,7 +280,7 @@ test "runHook executes shell command" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const cwd = try tmp.dir.realpathAlloc(allocator, "."); + const cwd = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(cwd); // Run a command that creates a file @@ -287,7 +288,7 @@ test "runHook executes shell command" { try std.testing.expect(ok); // Verify the file was created - const contents = try tmp.dir.readFileAlloc(allocator, "test.txt", 1024); + const contents = try std_compat.fs.Dir.wrap(tmp.dir).readFileAlloc(allocator, "test.txt", 1024); defer allocator.free(contents); try std.testing.expectEqualStrings("hello\n", contents); } @@ -300,7 +301,7 @@ test "runHook returns false for failing command" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const cwd = 
try tmp.dir.realpathAlloc(allocator, "."); + const cwd = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(cwd); const ok = try runHook(allocator, "exit 1", cwd, 5000); @@ -312,18 +313,18 @@ test "cleanAll removes all subdirectories" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const root = try tmp.dir.realpathAlloc(allocator, "."); + const root = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(root); // Create some fake workspace dirs - try tmp.dir.makeDir("task-001"); - try tmp.dir.makeDir("task-002"); + try std_compat.fs.Dir.wrap(tmp.dir).makeDir("task-001"); + try std_compat.fs.Dir.wrap(tmp.dir).makeDir("task-002"); cleanAll(root); // Verify they're gone - try std.testing.expectError(error.FileNotFound, tmp.dir.openDir("task-001", .{})); - try std.testing.expectError(error.FileNotFound, tmp.dir.openDir("task-002", .{})); + try std.testing.expectError(error.FileNotFound, std_compat.fs.Dir.wrap(tmp.dir).openDir("task-001", .{})); + try std.testing.expectError(error.FileNotFound, std_compat.fs.Dir.wrap(tmp.dir).openDir("task-002", .{})); } test "validateWorkspacePath accepts safe path" { @@ -331,11 +332,11 @@ test "validateWorkspacePath accepts safe path" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const root = try tmp.dir.realpathAlloc(allocator, "."); + const root = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); defer allocator.free(root); // Create a subdirectory - try tmp.dir.makeDir("safe-task"); + try std_compat.fs.Dir.wrap(tmp.dir).makeDir("safe-task"); const sub_path = try std.fs.path.join(allocator, &.{ root, "safe-task" }); defer allocator.free(sub_path); @@ -347,7 +348,7 @@ test "validateWorkspacePath rejects path outside root" { var tmp = std.testing.tmpDir(.{}); defer tmp.cleanup(); - const root = try tmp.dir.realpathAlloc(allocator, "."); + const root = try std_compat.fs.Dir.wrap(tmp.dir).realpathAlloc(allocator, "."); 
defer allocator.free(root); // /tmp is definitely not under the test temp dir