From c002ae18f1c9713794778af1b800b8a101f5fef8 Mon Sep 17 00:00:00 2001 From: Igor Somov Date: Fri, 17 Apr 2026 00:32:53 -0300 Subject: [PATCH] chore: migrate nulltickets to zig 0.16 --- .github/scripts/install-zig.sh | 166 ++++++++++++++++++ .github/workflows/ci.yml | 22 ++- .github/workflows/release.yml | 28 +-- AGENTS.md | 2 +- Dockerfile | 39 ++++- README.md | 2 +- build.zig | 4 +- build.zig.zon | 2 +- deps/sqlite/build.zig.zon | 3 +- src/api.zig | 181 +++++++++---------- src/compat.zig | 198 +++++++++++++++++++++ src/compat/fs.zig | 307 +++++++++++++++++++++++++++++++++ src/compat/shared.zig | 69 ++++++++ src/config.zig | 9 +- src/export_manifest.zig | 5 +- src/from_json.zig | 14 +- src/ids.zig | 7 +- src/main.zig | 144 +++++++++------- src/store.zig | 7 +- 19 files changed, 1006 insertions(+), 203 deletions(-) create mode 100644 .github/scripts/install-zig.sh create mode 100644 src/compat.zig create mode 100644 src/compat/fs.zig create mode 100644 src/compat/shared.zig diff --git a/.github/scripts/install-zig.sh b/.github/scripts/install-zig.sh new file mode 100644 index 0000000..afa6668 --- /dev/null +++ b/.github/scripts/install-zig.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [ "$#" -ne 1 ]; then + echo "usage: $0 " >&2 + exit 1 +fi + +version="$1" + +python_bin="${PYTHON:-python3}" +if ! command -v "$python_bin" >/dev/null 2>&1; then + python_bin="python" +fi +if ! 
command -v "$python_bin" >/dev/null 2>&1; then + echo "python is required to install Zig" >&2 + exit 1 +fi + +runner_os="${RUNNER_OS:-$(uname -s)}" +runner_arch="${RUNNER_ARCH:-$(uname -m)}" + +case "$runner_os" in + Linux | linux) + zig_os="linux" + ;; + Darwin | macOS) + zig_os="macos" + ;; + Windows | MINGW* | MSYS* | CYGWIN*) + zig_os="windows" + ;; + *) + echo "unsupported runner OS: $runner_os" >&2 + exit 1 + ;; +esac + +case "$runner_arch" in + X64 | x86_64 | amd64) + zig_arch="x86_64" + ;; + ARM64 | arm64 | aarch64) + zig_arch="aarch64" + ;; + *) + echo "unsupported runner architecture: $runner_arch" >&2 + exit 1 + ;; +esac + +host_key="${zig_arch}-${zig_os}" +tool_root="${RUNNER_TEMP:-${TMPDIR:-/tmp}}/nulltickets-zig" +install_dir="${tool_root}/${version}/${host_key}" +zig_bin="zig" +if [ "$zig_os" = "windows" ]; then + zig_bin="zig.exe" +fi + +if [ ! -x "${install_dir}/${zig_bin}" ]; then + mkdir -p "$(dirname "$install_dir")" + + zig_metadata="$( + "$python_bin" - "$version" "$host_key" <<'PY' +import json +import sys +import urllib.request + +version = sys.argv[1] +host_key = sys.argv[2] + +with urllib.request.urlopen("https://ziglang.org/download/index.json") as response: + data = json.load(response) + +host = data.get(version, {}).get(host_key) +if not host: + raise SystemExit(f"missing Zig download metadata for version={version!r} host={host_key!r}") + +archive_url = host.get("tarball") or host.get("zip") +checksum = host.get("shasum") or "" +if not archive_url: + raise SystemExit(f"missing archive URL for version={version!r} host={host_key!r}") + +print(archive_url) +print(checksum) +PY + )" + + archive_url="$(printf '%s\n' "$zig_metadata" | sed -n '1p')" + expected_sha="$(printf '%s\n' "$zig_metadata" | sed -n '2p')" + if [ -z "$archive_url" ]; then + echo "failed to resolve Zig download URL" >&2 + exit 1 + fi + + archive_name="${archive_url##*/}" + archive_dir="$(mktemp -d "${RUNNER_TEMP:-${TMPDIR:-/tmp}}/zig-archive.XXXXXX")" + 
archive_path="${archive_dir}/${archive_name}" + extract_dir="$(mktemp -d "${RUNNER_TEMP:-${TMPDIR:-/tmp}}/zig-extract.XXXXXX")" + trap 'rm -rf "$archive_dir"; rm -rf "$extract_dir"' EXIT + + curl -fsSL --retry 3 --retry-all-errors "$archive_url" -o "$archive_path" + + "$python_bin" - "$archive_path" "$expected_sha" <<'PY' +import hashlib +import sys + +path = sys.argv[1] +expected = sys.argv[2].strip().lower() +if not expected: + raise SystemExit(0) + +digest = hashlib.sha256() +with open(path, "rb") as handle: + for chunk in iter(lambda: handle.read(1024 * 1024), b""): + digest.update(chunk) + +actual = digest.hexdigest().lower() +if actual != expected: + raise SystemExit(f"checksum mismatch for {path}: expected {expected}, got {actual}") +PY + + "$python_bin" - "$archive_path" "$extract_dir" <<'PY' +import pathlib +import sys +import tarfile +import zipfile + +archive = pathlib.Path(sys.argv[1]) +destination = pathlib.Path(sys.argv[2]) +destination.mkdir(parents=True, exist_ok=True) + +def ensure_within_destination(relative_name: str) -> None: + target = (destination / relative_name).resolve() + if destination.resolve() not in target.parents and target != destination.resolve(): + raise SystemExit(f"archive entry escapes destination: {relative_name}") + +if archive.suffix == ".zip": + with zipfile.ZipFile(archive) as handle: + for member in handle.namelist(): + ensure_within_destination(member) + handle.extractall(destination) +else: + with tarfile.open(archive, "r:*") as handle: + for member in handle.getnames(): + ensure_within_destination(member) + handle.extractall(destination) +PY + + extracted_dir="$(find "$extract_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)" + if [ -z "$extracted_dir" ]; then + echo "failed to extract Zig archive: $archive_url" >&2 + exit 1 + fi + + rm -rf "$install_dir" + mv "$extracted_dir" "$install_dir" +fi + +if [ -n "${GITHUB_PATH:-}" ]; then + printf '%s\n' "$install_dir" >> "$GITHUB_PATH" +else + echo "GITHUB_PATH is not set; 
add this directory to PATH manually: $install_dir" >&2 +fi + +"${install_dir}/${zig_bin}" version diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0be3fe..cc8060f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,9 @@ name: CI +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + ZIG_VERSION: "0.16.0" + on: push: branches: [main] @@ -28,17 +32,17 @@ jobs: zig_target: x86_64-windows steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - name: Install Zig 0.15.2 - uses: mlugg/setup-zig@v2 - with: - version: 0.15.2 + - name: Install Zig 0.16.0 + run: bash .github/scripts/install-zig.sh "${ZIG_VERSION}" - - name: Cache .zig-cache - uses: actions/cache@v4 + - name: Cache Zig build outputs + uses: actions/cache@v5 with: - path: .zig-cache + path: | + .zig-cache + ~/.cache/zig key: zig-${{ matrix.target }}-${{ hashFiles('src/**/*.zig', 'build.zig', 'build.zig.zon', 'deps/sqlite/**') }} restore-keys: zig-${{ matrix.target }}- @@ -87,7 +91,7 @@ jobs: - name: Upload binary if: success() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: nulltickets-${{ matrix.target }} path: zig-out/bin/nulltickets${{ runner.os == 'Windows' && '.exe' || '' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8659642..f953866 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,9 @@ name: Release +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + ZIG_VERSION: "0.16.0" + on: push: tags: ['v*'] @@ -44,18 +48,16 @@ jobs: ext: ".exe" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - - name: Install Zig 0.15.2 - uses: mlugg/setup-zig@v2 - with: - version: 0.15.2 + - name: Install Zig 0.16.0 + run: bash .github/scripts/install-zig.sh "${ZIG_VERSION}" - name: Build ReleaseSmall run: zig build -Doptimize=ReleaseSmall ${{ matrix.zig_target && format('-Dtarget={0}', matrix.zig_target) || '' }} - name: Upload artifact 
- uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: nulltickets-${{ matrix.target }} path: zig-out/bin/nulltickets${{ matrix.ext }} @@ -67,7 +69,7 @@ jobs: contents: write steps: - - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v8 - name: Rename binaries run: | @@ -100,16 +102,16 @@ jobs: packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Log in to ghcr.io - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.repository_owner }} @@ -117,7 +119,7 @@ jobs: - name: Extract metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: ghcr.io/${{ github.repository }} tags: | @@ -125,7 +127,7 @@ jobs: type=raw,value=latest - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: context: . platforms: linux/amd64,linux/arm64 diff --git a/AGENTS.md b/AGENTS.md index dedce25..08fa7e8 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -72,7 +72,7 @@ bash tests/test_e2e.sh ## 4) Zig + SQLite Rules -- Zig baseline: `0.15.2`. +- Zig baseline: `0.16.0`. - Use `std.ArrayListUnmanaged(...)=.empty` correctly with allocator on each call. - Do not rely on allocator leaks for correctness. - Use `SQLITE_STATIC` (`null`) for sqlite text/blob binds in this codebase. 
diff --git a/Dockerfile b/Dockerfile index 61a62e2..e36eae1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,49 @@ # syntax=docker/dockerfile:1 # -- Stage 1: Build --------------------------------------------------------- -FROM alpine:3.23 AS builder +FROM --platform=$BUILDPLATFORM alpine:3.23 AS builder -RUN apk add --no-cache zig musl-dev +ARG ZIG_VERSION=0.16.0 + +RUN apk add --no-cache bash curl musl-dev python3 tar xz + +COPY .github/scripts/install-zig.sh /tmp/install-zig.sh +RUN set -eu; \ + export GITHUB_PATH=/tmp/zig-path; \ + export RUNNER_OS=Linux; \ + case "$(uname -m)" in \ + x86_64) export RUNNER_ARCH=X64 ;; \ + aarch64|arm64) export RUNNER_ARCH=ARM64 ;; \ + *) echo "Unsupported host arch: $(uname -m)" >&2; exit 1 ;; \ + esac; \ + bash /tmp/install-zig.sh "${ZIG_VERSION}"; \ + zig_dir="$(cat /tmp/zig-path)"; \ + ln -sf "${zig_dir}/zig" /usr/local/bin/zig; \ + zig version WORKDIR /app COPY build.zig build.zig.zon ./ COPY src/ src/ COPY deps/ deps/ -RUN zig build -Doptimize=ReleaseSmall +ARG TARGETARCH +RUN --mount=type=cache,target=/root/.cache/zig \ + --mount=type=cache,target=/app/.zig-cache \ + set -eu; \ + arch="${TARGETARCH:-}"; \ + if [ -z "${arch}" ]; then \ + case "$(uname -m)" in \ + x86_64) arch="amd64" ;; \ + aarch64|arm64) arch="arm64" ;; \ + *) echo "Unsupported host arch: $(uname -m)" >&2; exit 1 ;; \ + esac; \ + fi; \ + case "${arch}" in \ + amd64) zig_target="x86_64-linux-musl" ;; \ + arm64) zig_target="aarch64-linux-musl" ;; \ + *) echo "Unsupported TARGETARCH: ${arch}" >&2; exit 1 ;; \ + esac; \ + zig build -Dtarget="${zig_target}" -Doptimize=ReleaseSmall # -- Stage 2: Runtime Base (shared) ---------------------------------------- FROM alpine:3.23 AS release-base diff --git a/README.md b/README.md index f18587b..e3be2c6 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ You do not have to use all three components. 
## Tech Stack -- Zig `0.15.2` +- Zig `0.16.0` - SQLite (vendored, static dependency) - JSON over HTTP/1.1 diff --git a/build.zig b/build.zig index 8400235..f62e145 100644 --- a/build.zig +++ b/build.zig @@ -19,7 +19,7 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }), }); - exe.linkLibrary(sqlite3_lib); + exe.root_module.linkLibrary(sqlite3_lib); b.installArtifact(exe); // Run step @@ -39,7 +39,7 @@ pub fn build(b: *std.Build) void { .optimize = optimize, }), }); - exe_unit_tests.linkLibrary(sqlite3_lib); + exe_unit_tests.root_module.linkLibrary(sqlite3_lib); const run_exe_unit_tests = b.addRunArtifact(exe_unit_tests); const test_step = b.step("test", "Run unit tests"); test_step.dependOn(&run_exe_unit_tests.step); diff --git a/build.zig.zon b/build.zig.zon index 0701b4f..c3e1988 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -2,7 +2,7 @@ .name = .nulltickets, .version = "2026.3.2", .fingerprint = 0x8d7cc7c0ca874218, - .minimum_zig_version = "0.15.2", + .minimum_zig_version = "0.16.0", .dependencies = .{ .sqlite3 = .{ .path = "deps/sqlite", diff --git a/deps/sqlite/build.zig.zon b/deps/sqlite/build.zig.zon index b6914df..0d4c926 100644 --- a/deps/sqlite/build.zig.zon +++ b/deps/sqlite/build.zig.zon @@ -1,7 +1,8 @@ .{ .name = .sqlite3, .version = "3.51.2", - .minimum_zig_version = "0.15.2", + .fingerprint = 0x6edcd71193075138, + .minimum_zig_version = "0.16.0", .paths = .{ "build.zig", "build.zig.zon", diff --git a/src/api.zig b/src/api.zig index 6003202..161011e 100644 --- a/src/api.zig +++ b/src/api.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const store_mod = @import("store.zig"); const Store = store_mod.Store; const ids = @import("ids.zig"); @@ -357,24 +358,24 @@ fn handleHealth(ctx: *Context) HttpResponse { }; defer ctx.store.freeHealthStats(&stats); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + 
const w = &out.writer; w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "status", "ok") catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "status", "ok") catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "version", version) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "version", version) catch return serverError(ctx.allocator); w.writeAll(",\"tasks_by_stage\":[") catch return serverError(ctx.allocator); for (stats.tasks_by_stage, 0..) |sc, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "stage", sc.stage) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "stage", sc.stage) catch return serverError(ctx.allocator); w.print(",\"count\":{d}", .{sc.count}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } w.print("],\"active_leases\":{d}", .{stats.active_leases}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleOpenApi() HttpResponse { @@ -584,16 +585,16 @@ fn handleListPipelines(ctx: *Context) HttpResponse { const pipelines = ctx.store.listPipelines() catch return serverError(ctx.allocator); defer ctx.store.freePipelineRows(pipelines); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("[") catch return serverError(ctx.allocator); for (pipelines, 0..) 
|p, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); - writePipelineJson(&w, ctx.allocator, p) catch return serverError(ctx.allocator); + writePipelineJson(w, ctx.allocator, p) catch return serverError(ctx.allocator); } w.writeAll("]") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleGetPipeline(ctx: *Context, id: []const u8) HttpResponse { @@ -602,10 +603,10 @@ fn handleGetPipeline(ctx: *Context, id: []const u8) HttpResponse { }; defer ctx.store.freePipelineRow(p); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); - writePipelineJson(&w, ctx.allocator, p) catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; + writePipelineJson(w, ctx.allocator, p) catch return serverError(ctx.allocator); + return .{ .status = "200 OK", .body = out.written() }; } fn handleCreateTask(ctx: *Context, body: []const u8) HttpResponse { @@ -707,8 +708,8 @@ fn handleBulkCreateTasks(ctx: *Context, body: []const u8) HttpResponse { ctx.store.execSimple("COMMIT;") catch return serverError(ctx.allocator); should_rollback = false; - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"ids\":[") catch return serverError(ctx.allocator); for (created_ids.items, 0..) 
|id, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); @@ -717,7 +718,7 @@ fn handleBulkCreateTasks(ctx: *Context, body: []const u8) HttpResponse { } w.writeAll("]}") catch return serverError(ctx.allocator); - return .{ .status = "201 Created", .body = buf.items, .status_code = 201 }; + return .{ .status = "201 Created", .body = out.written(), .status_code = 201 }; } fn handleListTasks(ctx: *Context, query: ?[]const u8) HttpResponse { @@ -743,12 +744,12 @@ fn handleListTasks(ctx: *Context, query: ?[]const u8) HttpResponse { const page = ctx.store.listTasksPage(stage, pipeline_id, cursor_created_at_ms, cursor_id, limit) catch return serverError(ctx.allocator); defer ctx.store.freeTaskPage(page); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"items\":[") catch return serverError(ctx.allocator); for (page.items, 0..) |t, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); - writeTaskJson(&w, ctx.allocator, t) catch return serverError(ctx.allocator); + writeTaskJson(w, ctx.allocator, t) catch return serverError(ctx.allocator); } w.writeAll("],\"next_cursor\":") catch return serverError(ctx.allocator); if (page.next_cursor) |next_cursor| { @@ -759,7 +760,7 @@ fn handleListTasks(ctx: *Context, query: ?[]const u8) HttpResponse { } w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleGetTask(ctx: *Context, id: []const u8) HttpResponse { @@ -768,24 +769,24 @@ fn handleGetTask(ctx: *Context, id: []const u8) HttpResponse { }; defer ctx.store.freeTaskDetails(details); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{") catch return 
serverError(ctx.allocator); - writeTaskJsonFields(&w, ctx.allocator, details.task) catch return serverError(ctx.allocator); + writeTaskJsonFields(w, ctx.allocator, details.task) catch return serverError(ctx.allocator); // Latest run if (details.latest_run) |run| { w.writeAll(",\"latest_run\":{") catch return serverError(ctx.allocator); - writeRunFields(&w, ctx.allocator, run) catch return serverError(ctx.allocator); + writeRunFields(w, ctx.allocator, run) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } - writeDependencyRows(&w, ctx.allocator, details.dependencies) catch return serverError(ctx.allocator); - writeAssignmentRows(&w, ctx.allocator, details.assignments) catch return serverError(ctx.allocator); - writeTaskTransitions(&w, ctx.allocator, details.available_transitions) catch return serverError(ctx.allocator); + writeDependencyRows(w, ctx.allocator, details.dependencies) catch return serverError(ctx.allocator); + writeAssignmentRows(w, ctx.allocator, details.assignments) catch return serverError(ctx.allocator); + writeTaskTransitions(w, ctx.allocator, details.available_transitions) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleClaim(ctx: *Context, body: []const u8) HttpResponse { @@ -813,21 +814,21 @@ fn handleClaim(ctx: *Context, body: []const u8) HttpResponse { if (result) |claim| { defer ctx.store.freeClaimResult(claim); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"task\":") catch return serverError(ctx.allocator); - writeTaskJson(&w, ctx.allocator, claim.task) catch return serverError(ctx.allocator); + writeTaskJson(w, ctx.allocator, claim.task) catch return serverError(ctx.allocator); 
w.writeAll(",\"run\":{") catch return serverError(ctx.allocator); - writeRunFields(&w, ctx.allocator, claim.run) catch return serverError(ctx.allocator); + writeRunFields(w, ctx.allocator, claim.run) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "lease_id", claim.lease_id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "lease_id", claim.lease_id) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "lease_token", claim.lease_token) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "lease_token", claim.lease_token) catch return serverError(ctx.allocator); w.print(",\"expires_at_ms\":{d}", .{claim.expires_at_ms}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } else { return .{ .status = "204 No Content", .body = "", .status_code = 204 }; } @@ -892,16 +893,16 @@ fn handleListEvents(ctx: *Context, run_id: []const u8, query: ?[]const u8) HttpR const page = ctx.store.listEventsPage(run_id, cursor_id, limit) catch return serverError(ctx.allocator); defer ctx.store.freeEventPage(page); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"items\":[") catch return serverError(ctx.allocator); for (page.items, 0..) 
|e, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); w.print("\"id\":{d},", .{e.id}) catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "run_id", e.run_id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "run_id", e.run_id) catch return serverError(ctx.allocator); w.print(",\"ts_ms\":{d},", .{e.ts_ms}) catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "kind", e.kind) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "kind", e.kind) catch return serverError(ctx.allocator); w.print(",\"data\":{s}", .{e.data_json}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } @@ -914,7 +915,7 @@ fn handleListEvents(ctx: *Context, run_id: []const u8, query: ?[]const u8) HttpR } w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleTransition(ctx: *Context, run_id: []const u8, body: []const u8, raw_request: []const u8) HttpResponse { @@ -955,16 +956,16 @@ fn handleTransition(ctx: *Context, run_id: []const u8, body: []const u8, raw_req }; defer ctx.store.freeTransitionResult(result); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "previous_stage", result.previous_stage) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "previous_stage", result.previous_stage) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "new_stage", result.new_stage) catch return serverError(ctx.allocator); + 
writeStringField(w, ctx.allocator, "new_stage", result.new_stage) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "trigger", result.trigger) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "trigger", result.trigger) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - const resp = buf.items; + const resp = out.written(); return .{ .status = "200 OK", .body = resp }; } @@ -1040,23 +1041,23 @@ fn handleListArtifacts(ctx: *Context, query: ?[]const u8) HttpResponse { const page = ctx.store.listArtifactsPage(task_id, run_id, cursor_created_at_ms, cursor_id, limit) catch return serverError(ctx.allocator); defer ctx.store.freeArtifactPage(page); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"items\":[") catch return serverError(ctx.allocator); for (page.items, 0..) 
|a, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "id", a.id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "id", a.id) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeNullableStringField(&w, ctx.allocator, "task_id", a.task_id) catch return serverError(ctx.allocator); + writeNullableStringField(w, ctx.allocator, "task_id", a.task_id) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeNullableStringField(&w, ctx.allocator, "run_id", a.run_id) catch return serverError(ctx.allocator); + writeNullableStringField(w, ctx.allocator, "run_id", a.run_id) catch return serverError(ctx.allocator); w.print(",\"created_at_ms\":{d},", .{a.created_at_ms}) catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "kind", a.kind) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "kind", a.kind) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "uri", a.uri) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "uri", a.uri) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeNullableStringField(&w, ctx.allocator, "sha256", a.sha256_hex) catch return serverError(ctx.allocator); + writeNullableStringField(w, ctx.allocator, "sha256", a.sha256_hex) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); if (a.size_bytes) |sb| { w.print("\"size_bytes\":{d}", .{sb}) catch return serverError(ctx.allocator); @@ -1075,7 +1076,7 @@ fn handleListArtifacts(ctx: *Context, query: ?[]const u8) HttpResponse { } w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = 
"200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleAddTaskDependency(ctx: *Context, task_id: []const u8, body: []const u8) HttpResponse { @@ -1103,18 +1104,18 @@ fn handleListTaskDependencies(ctx: *Context, task_id: []const u8) HttpResponse { const deps = ctx.store.listTaskDependencies(task_id) catch return serverError(ctx.allocator); defer ctx.store.freeDependencyRows(deps); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("[") catch return serverError(ctx.allocator); for (deps, 0..) |dep, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "depends_on_task_id", dep.depends_on_task_id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "depends_on_task_id", dep.depends_on_task_id) catch return serverError(ctx.allocator); w.print(",\"resolved\":{s}", .{if (dep.resolved) "true" else "false"}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } w.writeAll("]") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleAssignTask(ctx: *Context, task_id: []const u8, body: []const u8) HttpResponse { @@ -1140,17 +1141,17 @@ fn handleListTaskAssignments(ctx: *Context, task_id: []const u8) HttpResponse { const rows = ctx.store.listTaskAssignments(task_id) catch return serverError(ctx.allocator); defer ctx.store.freeAssignmentRows(rows); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("[") catch return serverError(ctx.allocator); for (rows, 0..) 
|row, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "task_id", row.task_id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "task_id", row.task_id) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "agent_id", row.agent_id) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "agent_id", row.agent_id) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeNullableStringField(&w, ctx.allocator, "assigned_by", row.assigned_by) catch return serverError(ctx.allocator); + writeNullableStringField(w, ctx.allocator, "assigned_by", row.assigned_by) catch return serverError(ctx.allocator); w.print(",\"active\":{s},\"created_at_ms\":{d},\"updated_at_ms\":{d}", .{ if (row.active) "true" else "false", row.created_at_ms, @@ -1159,7 +1160,7 @@ fn handleListTaskAssignments(ctx: *Context, task_id: []const u8) HttpResponse { w.writeAll("}") catch return serverError(ctx.allocator); } w.writeAll("]") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleUnassignTask(ctx: *Context, task_id: []const u8, agent_id: []const u8) HttpResponse { @@ -1185,13 +1186,13 @@ fn handleQueueOps(ctx: *Context, query: ?[]const u8) HttpResponse { const rows = ctx.store.getQueueRoleStats(near_expiry_ms, stuck_ms) catch return serverError(ctx.allocator); defer ctx.store.freeQueueRoleStats(rows); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{\"roles\":[") catch return serverError(ctx.allocator); for (rows, 0..) 
|row, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "role", row.role) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "role", row.role) catch return serverError(ctx.allocator); w.print(",\"claimable_count\":{d}", .{row.claimable_count}) catch return serverError(ctx.allocator); w.writeAll(",\"oldest_claimable_age_ms\":") catch return serverError(ctx.allocator); if (row.oldest_claimable_age_ms) |age| { @@ -1207,9 +1208,9 @@ fn handleQueueOps(ctx: *Context, query: ?[]const u8) HttpResponse { w.writeAll("}") catch return serverError(ctx.allocator); } w.writeAll("],") catch return serverError(ctx.allocator); - w.print("\"generated_at_ms\":{d}", .{std.time.milliTimestamp()}) catch return serverError(ctx.allocator); + w.print("\"generated_at_ms\":{d}", .{std_compat.time.milliTimestamp()}) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } // ===== JSON helpers ===== @@ -1377,16 +1378,16 @@ fn handleStorePut(ctx: *Context, namespace: []const u8, key: []const u8, body: [ fn handleStoreGet(ctx: *Context, namespace: []const u8, key: []const u8) HttpResponse { const entry = ctx.store.storeGet(ctx.allocator, namespace, key) catch return serverError(ctx.allocator); if (entry) |e| { - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - 
writeStringField(&w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); w.print(",\"value\":{s}", .{e.value_json}) catch return serverError(ctx.allocator); w.print(",\"created_at_ms\":{d},\"updated_at_ms\":{d}", .{ e.created_at_ms, e.updated_at_ms }) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } else { return respondError(ctx.allocator, 404, "not_found", "Key not found"); } @@ -1395,21 +1396,21 @@ fn handleStoreGet(ctx: *Context, namespace: []const u8, key: []const u8) HttpRes fn handleStoreList(ctx: *Context, namespace: []const u8) HttpResponse { const entries = ctx.store.storeList(ctx.allocator, namespace) catch return serverError(ctx.allocator); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("[") catch return serverError(ctx.allocator); for (entries, 0..) 
|e, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); w.print(",\"value\":{s}", .{e.value_json}) catch return serverError(ctx.allocator); w.print(",\"created_at_ms\":{d},\"updated_at_ms\":{d}", .{ e.created_at_ms, e.updated_at_ms }) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } w.writeAll("]") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn handleStoreDelete(ctx: *Context, namespace: []const u8, key: []const u8) HttpResponse { @@ -1425,8 +1426,8 @@ fn handleStoreDeleteNamespace(ctx: *Context, namespace: []const u8) HttpResponse fn sanitizeFts5Query(allocator: std.mem.Allocator, raw: []const u8) ?[]const u8 { // Split on whitespace, wrap each token in double quotes (escaping internal quotes). // This turns arbitrary user input into safe FTS5 literal phrases. 
- var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(allocator); + var out: std.Io.Writer.Allocating = .init(allocator); + const w = &out.writer; var first = true; var it = std.mem.tokenizeAny(u8, raw, " \t\n\r"); while (it.next()) |token| { @@ -1443,7 +1444,7 @@ fn sanitizeFts5Query(allocator: std.mem.Allocator, raw: []const u8) ?[]const u8 w.writeAll("\"") catch return null; } if (first) return null; // all whitespace / empty - return buf.items; + return out.written(); } fn handleStoreSearch(ctx: *Context, query: ?[]const u8) HttpResponse { @@ -1465,21 +1466,21 @@ fn handleStoreSearch(ctx: *Context, query: ?[]const u8) HttpResponse { const entries = ctx.store.storeSearch(ctx.allocator, namespace, sanitized, limit, filter_path, filter_value) catch return serverError(ctx.allocator); - var buf: std.ArrayListUnmanaged(u8) = .empty; - var w = buf.writer(ctx.allocator); + var out: std.Io.Writer.Allocating = .init(ctx.allocator); + const w = &out.writer; w.writeAll("[") catch return serverError(ctx.allocator); for (entries, 0..) 
|e, i| { if (i > 0) w.writeAll(",") catch return serverError(ctx.allocator); w.writeAll("{") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "namespace", e.namespace) catch return serverError(ctx.allocator); w.writeAll(",") catch return serverError(ctx.allocator); - writeStringField(&w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); + writeStringField(w, ctx.allocator, "key", e.key) catch return serverError(ctx.allocator); w.print(",\"value\":{s}", .{e.value_json}) catch return serverError(ctx.allocator); w.print(",\"created_at_ms\":{d},\"updated_at_ms\":{d}", .{ e.created_at_ms, e.updated_at_ms }) catch return serverError(ctx.allocator); w.writeAll("}") catch return serverError(ctx.allocator); } w.writeAll("]") catch return serverError(ctx.allocator); - return .{ .status = "200 OK", .body = buf.items }; + return .{ .status = "200 OK", .body = out.written() }; } fn jsonStringify(allocator: std.mem.Allocator, value: std.json.Value) ![]const u8 { @@ -1652,7 +1653,7 @@ pub fn extractHeader(raw: []const u8, name: []const u8) ?[]const u8 { if (std.mem.indexOfScalar(u8, line, ':')) |colon| { const hdr_key = line[0..colon]; if (std.ascii.eqlIgnoreCase(hdr_key, name)) { - return std.mem.trimLeft(u8, line[colon + 1 ..], " "); + return std_compat.mem.trimLeft(u8, line[colon + 1 ..], " "); } } } diff --git a/src/compat.zig b/src/compat.zig new file mode 100644 index 0000000..8c6339a --- /dev/null +++ b/src/compat.zig @@ -0,0 +1,198 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const shared = @import("compat/shared.zig"); +const Allocator = std.mem.Allocator; + +pub fn io() std.Io { + return shared.io(); +} + +pub fn initProcess(init: std.process.Init) void { + shared.initProcess(init); +} + +pub fn initProcessMinimal(init: std.process.Init.Minimal) void { + shared.initProcessMinimal(init); +} + 
+pub const fs = @import("compat/fs.zig"); + +pub const process = struct { + pub const EnvMap = std.process.Environ.Map; + pub const GetEnvVarOwnedError = error{ + EnvironmentVariableNotFound, + } || Allocator.Error || error{ InvalidWtf8, Unexpected }; + + pub fn getEnvVarOwned(allocator: Allocator, name: []const u8) GetEnvVarOwnedError![]u8 { + return shared.environ().getAlloc(allocator, name) catch |err| switch (err) { + error.EnvironmentVariableMissing => error.EnvironmentVariableNotFound, + else => |e| e, + }; + } + + pub fn argsAlloc(allocator: Allocator) ![]const [:0]const u8 { + return shared.argsAlloc(allocator); + } + + pub fn argsFree(allocator: Allocator, args: []const [:0]const u8) void { + shared.argsFree(allocator, args); + } + + pub const Child = struct { + allocator: Allocator, + argv: []const []const u8, + env_map: ?*const EnvMap = null, + cwd: ?[]const u8 = null, + stdin_behavior: StdIo = .Inherit, + stdout_behavior: StdIo = .Inherit, + stderr_behavior: StdIo = .Inherit, + request_resource_usage_statistics: bool = false, + pgid: ?std.posix.pid_t = null, + create_no_window: bool = true, + id: Id = undefined, + thread_handle: if (builtin.os.tag == .windows) std.os.windows.HANDLE else void = if (builtin.os.tag == .windows) undefined else {}, + stdin: ?fs.File = null, + stdout: ?fs.File = null, + stderr: ?fs.File = null, + term: ?Term = null, + + pub const Id = std.process.Child.Id; + pub const Term = std.process.Child.Term; + pub const StdIo = enum { Inherit, Ignore, Pipe, Close }; + + pub fn init(argv: []const []const u8, allocator: Allocator) Child { + return .{ + .allocator = allocator, + .argv = argv, + }; + } + + fn mapStdIo(kind: StdIo) std.process.SpawnOptions.StdIo { + return switch (kind) { + .Inherit => .inherit, + .Ignore => .ignore, + .Pipe => .pipe, + .Close => .close, + }; + } + + fn spawnCwd(self: *const Child) std.process.Child.Cwd { + return if (self.cwd) |path_value| .{ .path = path_value } else .inherit; + } + + fn toInner(self: 
*const Child) std.process.Child { + return .{ + .id = self.id, + .thread_handle = self.thread_handle, + .stdin = if (self.stdin) |file| file.toInner() else null, + .stdout = if (self.stdout) |file| file.toInner() else null, + .stderr = if (self.stderr) |file| file.toInner() else null, + .resource_usage_statistics = .{}, + .request_resource_usage_statistics = self.request_resource_usage_statistics, + }; + } + + fn syncFromInner(self: *Child, inner: std.process.Child) void { + self.stdin = if (inner.stdin) |file| fs.File.wrap(file) else null; + self.stdout = if (inner.stdout) |file| fs.File.wrap(file) else null; + self.stderr = if (inner.stderr) |file| fs.File.wrap(file) else null; + } + + pub fn spawn(self: *Child) !void { + const inner = try std.process.spawn(io(), .{ + .argv = self.argv, + .cwd = self.spawnCwd(), + .environ_map = self.env_map, + .stdin = mapStdIo(self.stdin_behavior), + .stdout = mapStdIo(self.stdout_behavior), + .stderr = mapStdIo(self.stderr_behavior), + .request_resource_usage_statistics = self.request_resource_usage_statistics, + .pgid = self.pgid, + .create_no_window = self.create_no_window, + }); + self.id = inner.id.?; + self.thread_handle = inner.thread_handle; + self.syncFromInner(inner); + self.term = null; + } + + pub fn wait(self: *Child) !Term { + if (self.term) |term| return term; + + var inner = self.toInner(); + const term = try inner.wait(io()); + self.syncFromInner(inner); + self.term = term; + return term; + } + + pub fn kill(self: *Child) !Term { + if (self.term) |term| return term; + + var inner = self.toInner(); + inner.kill(io()); + self.syncFromInner(inner); + + const term: Term = if (builtin.os.tag == .windows) + .{ .exited = 1 } + else + .{ .signal = std.posix.SIG.KILL }; + + self.term = term; + return term; + } + }; +}; + +pub const mem = struct { + pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { + return std.mem.trimStart(T, slice, values_to_strip); + } + + pub fn 
trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { + return std.mem.trimEnd(T, slice, values_to_strip); + } +}; + +pub const thread = struct { + pub fn sleep(nanoseconds: u64) void { + std.Io.sleep(io(), .fromNanoseconds(@intCast(nanoseconds)), .awake) catch {}; + } +}; + +pub const crypto = struct { + pub const random = struct { + pub fn bytes(buffer: []u8) void { + std.Io.randomSecure(io(), buffer) catch std.Io.random(io(), buffer); + } + }; +}; + +pub const time = struct { + fn nowNanoseconds() i128 { + return switch (builtin.os.tag) { + .windows => blk: { + const epoch_ns = std.time.epoch.windows * std.time.ns_per_s; + break :blk @as(i128, std.os.windows.ntdll.RtlGetSystemTimePrecise()) * 100 + epoch_ns; + }, + .wasi => blk: { + var ts: std.os.wasi.timestamp_t = undefined; + if (std.os.wasi.clock_time_get(.REALTIME, 1, &ts) == .SUCCESS) { + break :blk @intCast(ts); + } + break :blk 0; + }, + else => blk: { + var ts: std.posix.timespec = undefined; + switch (std.posix.errno(std.posix.system.clock_gettime(.REALTIME, &ts))) { + .SUCCESS => break :blk @as(i128, ts.sec) * std.time.ns_per_s + ts.nsec, + else => break :blk 0, + } + }, + }; + } + + pub fn milliTimestamp() i64 { + return @intCast(@divTrunc(nowNanoseconds(), std.time.ns_per_ms)); + } +}; diff --git a/src/compat/fs.zig b/src/compat/fs.zig new file mode 100644 index 0000000..2be980d --- /dev/null +++ b/src/compat/fs.zig @@ -0,0 +1,307 @@ +const std = @import("std"); +const shared = @import("shared.zig"); + +const Io = std.Io; +const Allocator = std.mem.Allocator; + +pub const path = struct { + pub const basename = std.fs.path.basename; + pub const delimiter = std.fs.path.delimiter; + pub const dirname = std.fs.path.dirname; + pub const extension = std.fs.path.extension; + pub const isAbsolute = std.fs.path.isAbsolute; + pub const isSep = std.fs.path.isSep; + pub const join = std.fs.path.join; + pub const sep = std.fs.path.sep; + pub const sep_str = std.fs.path.sep_str; 
+}; + +pub const File = struct { + handle: Io.File.Handle, + flags: Io.File.Flags, + + pub const Reader = Io.File.Reader; + pub const Writer = Io.File.Writer; + pub const Mode = if (@import("builtin").os.tag == .windows) u32 else std.posix.mode_t; + pub const Stat = struct { + inode: Io.File.INode, + nlink: Io.File.NLink, + size: u64, + mode: Mode, + kind: Io.File.Kind, + atime: ?i128, + mtime: i128, + ctime: i128, + block_size: Io.File.BlockSize, + }; + + pub fn wrap(inner: Io.File) File { + return .{ + .handle = inner.handle, + .flags = inner.flags, + }; + } + + pub fn toInner(self: File) Io.File { + return .{ + .handle = self.handle, + .flags = self.flags, + }; + } + + fn convertStat(inner: Io.File.Stat) Stat { + return .{ + .inode = inner.inode, + .nlink = inner.nlink, + .size = inner.size, + .mode = if (@hasDecl(@TypeOf(inner.permissions), "toMode")) inner.permissions.toMode() else 0, + .kind = inner.kind, + .atime = if (inner.atime) |ts| ts.nanoseconds else null, + .mtime = inner.mtime.nanoseconds, + .ctime = inner.ctime.nanoseconds, + .block_size = inner.block_size, + }; + } + + pub fn stdout() File { + return wrap(Io.File.stdout()); + } + + pub fn stderr() File { + return wrap(Io.File.stderr()); + } + + pub fn stdin() File { + return wrap(Io.File.stdin()); + } + + pub fn close(self: File) void { + self.toInner().close(shared.io()); + } + + pub fn stat(self: File) Io.File.StatError!Stat { + return convertStat(try self.toInner().stat(shared.io())); + } + + pub fn seekTo(self: File, offset: u64) Io.File.SeekError!void { + try shared.io().vtable.fileSeekTo(shared.io().userdata, self.toInner(), offset); + } + + pub fn seekFromEnd(self: File, offset: i64) !void { + const file_stat = try self.stat(); + const end_offset = @as(i128, @intCast(file_stat.size)) + offset; + if (end_offset < 0) return error.Unseekable; + try self.seekTo(@intCast(end_offset)); + } + + pub fn writer(self: File, buffer: []u8) Writer { + return self.toInner().writer(shared.io(), buffer); + } 
+ + pub fn reader(self: File, buffer: []u8) Reader { + return self.toInner().reader(shared.io(), buffer); + } + + pub fn read(self: File, buffer: []u8) Io.File.ReadStreamingError!usize { + return self.toInner().readStreaming(shared.io(), &.{buffer}) catch |err| switch (err) { + error.EndOfStream => 0, + else => |e| return e, + }; + } + + pub fn readAll(self: File, buffer: []u8) Io.File.ReadStreamingError!usize { + var filled: usize = 0; + while (filled < buffer.len) { + const amt = try self.read(buffer[filled..]); + if (amt == 0) break; + filled += amt; + } + return filled; + } + + pub fn writeAll(self: File, bytes: []const u8) Io.File.Writer.Error!void { + try self.toInner().writeStreamingAll(shared.io(), bytes); + } + + pub fn readToEndAlloc(self: File, allocator: Allocator, max_bytes: usize) ![]u8 { + var stream_buf: [4096]u8 = undefined; + var file_reader = self.toInner().readerStreaming(shared.io(), &stream_buf); + return try file_reader.interface.allocRemaining(allocator, .limited(max_bytes)); + } +}; + +pub const Dir = struct { + handle: Io.Dir.Handle, + + pub const OpenDirOptions = Io.Dir.OpenOptions; + pub const OpenFileOptions = Io.Dir.OpenFileOptions; + pub const CreateFileOptions = Io.Dir.CreateFileOptions; + pub const WriteFileOptions = Io.Dir.WriteFileOptions; + pub const AccessOptions = Io.Dir.AccessOptions; + pub const CopyFileOptions = Io.Dir.CopyFileOptions; + pub const SymLinkFlags = Io.Dir.SymLinkFlags; + pub const Entry = Io.Dir.Entry; + pub const Iterator = struct { + inner: Io.Dir.Iterator, + + pub fn next(self: *Iterator) Io.Dir.Iterator.Error!?Entry { + return self.inner.next(shared.io()); + } + }; + + pub fn wrap(inner: Io.Dir) Dir { + return .{ .handle = inner.handle }; + } + + fn toInner(self: Dir) Io.Dir { + return .{ .handle = self.handle }; + } + + pub fn cwd() Dir { + return wrap(Io.Dir.cwd()); + } + + pub fn close(self: Dir) void { + self.toInner().close(shared.io()); + } + + pub fn iterate(self: Dir) Iterator { + return .{ .inner = 
self.toInner().iterate() }; + } + + pub fn openDir(self: Dir, sub_path: []const u8, options: OpenDirOptions) Io.Dir.OpenError!Dir { + return wrap(try self.toInner().openDir(shared.io(), sub_path, options)); + } + + pub fn openFile(self: Dir, sub_path: []const u8, options: OpenFileOptions) Io.File.OpenError!File { + return File.wrap(try self.toInner().openFile(shared.io(), sub_path, options)); + } + + pub fn createFile(self: Dir, sub_path: []const u8, options: CreateFileOptions) Io.File.OpenError!File { + return File.wrap(try self.toInner().createFile(shared.io(), sub_path, options)); + } + + pub fn writeFile(self: Dir, options: WriteFileOptions) Io.Dir.WriteFileError!void { + try self.toInner().writeFile(shared.io(), options); + } + + pub fn readFileAlloc(self: Dir, allocator: Allocator, sub_path: []const u8, max_bytes: usize) ![]u8 { + return try self.toInner().readFileAlloc(shared.io(), sub_path, allocator, .limited(max_bytes)); + } + + pub fn access(self: Dir, sub_path: []const u8, options: AccessOptions) Io.Dir.AccessError!void { + try self.toInner().access(shared.io(), sub_path, options); + } + + pub fn makeDir(self: Dir, sub_path: []const u8) Io.Dir.CreateDirError!void { + try self.toInner().createDir(shared.io(), sub_path, .default_dir); + } + + pub fn deleteFile(self: Dir, sub_path: []const u8) Io.Dir.DeleteFileError!void { + try self.toInner().deleteFile(shared.io(), sub_path); + } + + pub fn deleteTree(self: Dir, sub_path: []const u8) Io.Dir.DeleteTreeError!void { + try self.toInner().deleteTree(shared.io(), sub_path); + } + + pub fn rename(self: Dir, old_sub_path: []const u8, new_sub_path: []const u8) Io.Dir.RenameError!void { + try self.toInner().rename(old_sub_path, self.toInner(), new_sub_path, shared.io()); + } + + pub fn realpathAlloc(self: Dir, allocator: Allocator, sub_path: []const u8) Io.Dir.RealPathFileAllocError![]u8 { + const path_z = try self.toInner().realPathFileAlloc(shared.io(), sub_path, allocator); + defer allocator.free(path_z); + 
return try allocator.dupe(u8, path_z); + } + + pub fn statFile(self: Dir, sub_path: []const u8) !File.Stat { + return File.convertStat(try self.toInner().statFile(shared.io(), sub_path, .{})); + } + + pub fn makePath(self: Dir, sub_path: []const u8) !void { + if (sub_path.len == 0) return; + if (path.isAbsolute(sub_path)) { + makeDirAbsolute(sub_path) catch |err| switch (err) { + error.PathAlreadyExists => return, + else => |e| return e, + }; + return; + } + + var cursor = self; + var opened: ?Dir = null; + defer if (opened) |dir| dir.close(); + + var index: usize = 0; + while (index < sub_path.len) { + while (index < sub_path.len and path.isSep(sub_path[index])) : (index += 1) {} + if (index >= sub_path.len) break; + + const start = index; + while (index < sub_path.len and !path.isSep(sub_path[index])) : (index += 1) {} + const component = sub_path[start..index]; + if (component.len == 0 or std.mem.eql(u8, component, ".")) continue; + if (std.mem.eql(u8, component, "..")) return error.BadPathName; + + cursor.makeDir(component) catch |err| switch (err) { + error.PathAlreadyExists => {}, + else => |e| return e, + }; + + const next = try cursor.openDir(component, .{}); + if (opened) |dir| dir.close(); + opened = next; + cursor = next; + } + } +}; + +pub fn cwd() Dir { + return Dir.cwd(); +} + +pub fn openDirAbsolute(absolute_path: []const u8, options: Dir.OpenDirOptions) Io.Dir.OpenError!Dir { + return Dir.wrap(try Io.Dir.openDirAbsolute(shared.io(), absolute_path, options)); +} + +pub fn openFileAbsolute(absolute_path: []const u8, options: Dir.OpenFileOptions) Io.File.OpenError!File { + return File.wrap(try Io.Dir.openFileAbsolute(shared.io(), absolute_path, options)); +} + +pub fn createFileAbsolute(absolute_path: []const u8, options: Dir.CreateFileOptions) Io.File.OpenError!File { + return File.wrap(try Io.Dir.createFileAbsolute(shared.io(), absolute_path, options)); +} + +pub fn accessAbsolute(absolute_path: []const u8, options: Dir.AccessOptions) 
Io.Dir.AccessError!void { + try Io.Dir.accessAbsolute(shared.io(), absolute_path, options); +} + +pub fn makeDirAbsolute(absolute_path: []const u8) Io.Dir.CreateDirError!void { + try Io.Dir.createDirAbsolute(shared.io(), absolute_path, .default_dir); +} + +pub fn deleteFileAbsolute(absolute_path: []const u8) Io.Dir.DeleteFileError!void { + try Io.Dir.deleteFileAbsolute(shared.io(), absolute_path); +} + +pub fn deleteTreeAbsolute(absolute_path: []const u8) (Io.Dir.DeleteTreeError || error{FileNotFound})!void { + const dir_path = path.dirname(absolute_path) orelse return error.FileNotFound; + const base_name = path.basename(absolute_path); + var dir = try openDirAbsolute(dir_path, .{}); + defer dir.close(); + try dir.deleteTree(base_name); +} + +pub fn renameAbsolute(old_path: []const u8, new_path: []const u8) Io.Dir.RenameError!void { + try Io.Dir.renameAbsolute(old_path, new_path, shared.io()); +} + +pub fn realpathAlloc(allocator: Allocator, file_path: []const u8) ![]u8 { + if (path.isAbsolute(file_path)) { + const path_z = try Io.Dir.realPathFileAbsoluteAlloc(shared.io(), file_path, allocator); + defer allocator.free(path_z); + return try allocator.dupe(u8, path_z); + } + return try cwd().realpathAlloc(allocator, file_path); +} diff --git a/src/compat/shared.zig b/src/compat/shared.zig new file mode 100644 index 0000000..8c7082e --- /dev/null +++ b/src/compat/shared.zig @@ -0,0 +1,69 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Allocator = std.mem.Allocator; + +pub const Io = std.Io; + +var fallback_threaded: Io.Threaded = .init_single_threaded; +var process_io: ?Io = null; +var process_args: ?std.process.Args = null; +var process_environ: ?std.process.Environ = null; + +pub fn initProcess(init: std.process.Init) void { + process_io = init.io; + process_args = init.minimal.args; + process_environ = init.minimal.environ; +} + +pub fn initProcessMinimal(init: std.process.Init.Minimal) void { + process_args = init.args; + 
process_environ = init.environ; +} + +pub fn io() Io { + if (builtin.is_test) return std.testing.io; + if (process_io) |current| return current; + return fallback_threaded.io(); +} + +pub fn environ() std.process.Environ { + if (process_environ) |env| return env; + return switch (builtin.os.tag) { + .windows, .freestanding, .other => .{ .block = .global }, + .wasi, .emscripten => if (builtin.link_libc) blk: { + const c_environ = std.c.environ; + var env_count: usize = 0; + while (c_environ[env_count] != null) : (env_count += 1) {} + break :blk .{ .block = .{ .slice = c_environ[0..env_count :null] } }; + } else .{ .block = .global }, + else => blk: { + const c_environ = std.c.environ; + var env_count: usize = 0; + while (c_environ[env_count] != null) : (env_count += 1) {} + break :blk .{ .block = .{ .slice = c_environ[0..env_count :null] } }; + }, + }; +} + +pub fn argsAlloc(allocator: Allocator) ![]const [:0]const u8 { + const args = process_args orelse return error.MissingProcessContext; + var iter = try args.iterateAllocator(allocator); + defer iter.deinit(); + + var list: std.ArrayList([:0]const u8) = .empty; + errdefer { + for (list.items) |arg| allocator.free(arg); + list.deinit(allocator); + } + + while (iter.next()) |arg| { + try list.append(allocator, try allocator.dupeZ(u8, arg)); + } + + return try list.toOwnedSlice(allocator); +} + +pub fn argsFree(allocator: Allocator, args: []const [:0]const u8) void { + for (args) |arg| allocator.free(arg); + allocator.free(args); +} diff --git a/src/config.zig b/src/config.zig index 0602add..e124be4 100644 --- a/src/config.zig +++ b/src/config.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const std_compat = @import("compat.zig"); pub const home_env_var = "NULLTICKETS_HOME"; pub const home_dir_name = ".nulltickets"; @@ -19,7 +20,7 @@ pub fn resolveConfigPath(allocator: std.mem.Allocator, override_path: ?[]const u } pub fn resolveHomeDir(allocator: std.mem.Allocator) ![]const u8 { 
- if (std.process.getEnvVarOwned(allocator, home_env_var)) |env_home| { + if (std_compat.process.getEnvVarOwned(allocator, home_env_var)) |env_home| { return env_home; } else |err| switch (err) { error.EnvironmentVariableNotFound => {}, @@ -35,7 +36,7 @@ pub fn resolveHomeDir(allocator: std.mem.Allocator) ![]const u8 { /// The caller should provide an arena allocator since returned slices may point /// to parser-owned allocations. pub fn loadFromFile(allocator: std.mem.Allocator, path: []const u8) !Config { - const file = std.fs.cwd().openFile(path, .{}) catch |err| { + const file = std_compat.fs.cwd().openFile(path, .{}) catch |err| { if (err == error.FileNotFound) return Config{}; return err; }; @@ -58,10 +59,10 @@ fn resolveRelativePath(allocator: std.mem.Allocator, config_path: []const u8, va } fn getHomeDirOwned(allocator: std.mem.Allocator) ![]u8 { - return std.process.getEnvVarOwned(allocator, "HOME") catch |err| switch (err) { + return std_compat.process.getEnvVarOwned(allocator, "HOME") catch |err| switch (err) { error.EnvironmentVariableNotFound => { if (builtin.os.tag == .windows) { - return std.process.getEnvVarOwned(allocator, "USERPROFILE") catch error.HomeNotSet; + return std_compat.process.getEnvVarOwned(allocator, "USERPROFILE") catch error.HomeNotSet; } return error.HomeNotSet; }, diff --git a/src/export_manifest.zig b/src/export_manifest.zig index f17c063..07578c8 100644 --- a/src/export_manifest.zig +++ b/src/export_manifest.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); pub fn run() !void { const manifest = @@ -19,7 +20,7 @@ pub fn run() !void { \\ "aarch64-windows": { "asset": "nulltickets-windows-aarch64.exe", "binary": "nulltickets.exe" } \\ }, \\ "build_from_source": { - \\ "zig_version": "0.15.2", + \\ "zig_version": "0.16.0", \\ "command": "zig build -Doptimize=ReleaseSmall", \\ "output": "zig-out/bin/nulltickets" \\ }, @@ -35,7 +36,7 @@ pub fn run() !void { \\ "connects_to": [] \\} ; - const 
stdout = std.fs.File.stdout(); + const stdout = std_compat.fs.File.stdout(); try stdout.writeAll(manifest); try stdout.writeAll("\n"); } diff --git a/src/from_json.zig b/src/from_json.zig index 623e5dc..9e48ccf 100644 --- a/src/from_json.zig +++ b/src/from_json.zig @@ -1,5 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); +const std_compat = @import("compat.zig"); const config_mod = @import("config.zig"); pub fn run(allocator: std.mem.Allocator, json_str: []const u8) !void { @@ -38,7 +39,7 @@ pub fn run(allocator: std.mem.Allocator, json_str: []const u8) !void { try writeFileAtHome(allocator, home, "config.json", config_json); if (!builtin.is_test) { - const stdout = std.fs.File.stdout(); + const stdout = std_compat.fs.File.stdout(); try stdout.writeAll("{\"status\":\"ok\"}\n"); } } @@ -59,17 +60,14 @@ fn getU16(obj: std.json.ObjectMap, key: []const u8) ?u16 { fn ensureHome(home: []const u8) !void { if (std.fs.path.isAbsolute(home)) { - std.fs.makeDirAbsolute(home) catch |err| switch (err) { + std_compat.fs.makeDirAbsolute(home) catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; return; } - std.fs.cwd().makePath(home) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => return err, - }; + try std_compat.fs.cwd().makePath(home); } fn writeFileAtHome(allocator: std.mem.Allocator, home: []const u8, name: []const u8, contents: []const u8) !void { @@ -77,14 +75,14 @@ fn writeFileAtHome(allocator: std.mem.Allocator, home: []const u8, name: []const defer allocator.free(path); if (std.fs.path.isAbsolute(home)) { - const file = try std.fs.createFileAbsolute(path, .{}); + const file = try std_compat.fs.createFileAbsolute(path, .{}); defer file.close(); try file.writeAll(contents); try file.writeAll("\n"); return; } - const file = try std.fs.cwd().createFile(path, .{}); + const file = try std_compat.fs.cwd().createFile(path, .{}); defer file.close(); try file.writeAll(contents); try file.writeAll("\n"); diff 
--git a/src/ids.zig b/src/ids.zig index 487167c..bc81de6 100644 --- a/src/ids.zig +++ b/src/ids.zig @@ -1,9 +1,10 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); /// Generate a UUID v4 string (36 chars: 8-4-4-4-12) pub fn generateId() [36]u8 { var bytes: [16]u8 = undefined; - std.crypto.random.bytes(&bytes); + std_compat.crypto.random.bytes(&bytes); // Set version 4 bytes[6] = (bytes[6] & 0x0f) | 0x40; @@ -28,7 +29,7 @@ pub fn generateId() [36]u8 { /// Generate a 32-byte random token, return as 64-char hex string pub fn generateToken() [64]u8 { var bytes: [32]u8 = undefined; - std.crypto.random.bytes(&bytes); + std_compat.crypto.random.bytes(&bytes); return hexEncode(bytes); } @@ -60,5 +61,5 @@ fn hexEncode(bytes: [32]u8) [64]u8 { /// Current time in milliseconds since epoch pub fn nowMs() i64 { - return std.time.milliTimestamp(); + return std_compat.time.milliTimestamp(); } diff --git a/src/main.zig b/src/main.zig index fd85276..50a84f6 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,27 +1,28 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const Store = @import("store.zig").Store; const api = @import("api.zig"); const config = @import("config.zig"); const version = "2026.3.2"; -pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); - const allocator = gpa.allocator(); +pub fn main(init: std.process.Init) !void { + std_compat.initProcess(init); + const allocator = std.heap.smp_allocator; - var args = try std.process.argsWithAllocator(allocator); - defer args.deinit(); - _ = args.next(); // skip program name + const args = try std_compat.process.argsAlloc(allocator); + defer std_compat.process.argsFree(allocator, args); // Check for manifest protocol flags before normal arg parsing - if (args.next()) |first_arg| { + if (args.len > 1) { + const first_arg = args[1]; if (std.mem.eql(u8, first_arg, "--export-manifest")) { try @import("export_manifest.zig").run(); return; 
} if (std.mem.eql(u8, first_arg, "--from-json")) { - if (args.next()) |json_str| { + if (args.len > 2) { + const json_str = args[2]; try @import("from_json.zig").run(allocator, json_str); } else { std.debug.print("error: --from-json requires a JSON argument\n", .{}); @@ -31,34 +32,39 @@ pub fn main() !void { } } - // Re-parse all args for normal operation - var args2 = try std.process.argsWithAllocator(allocator); - defer args2.deinit(); - _ = args2.next(); // skip program name - var port_override: ?u16 = null; var db_override: ?[:0]const u8 = null; var token_override: ?[]const u8 = null; var config_path_override: ?[]const u8 = null; - while (args2.next()) |arg| { + var arg_index: usize = 1; + while (arg_index < args.len) : (arg_index += 1) { + const arg = args[arg_index]; if (std.mem.eql(u8, arg, "--port")) { - if (args2.next()) |val| { + if (arg_index + 1 < args.len) { + arg_index += 1; + const val = args[arg_index]; port_override = std.fmt.parseInt(u16, val, 10) catch { std.debug.print("invalid port: {s}\n", .{val}); return; }; } } else if (std.mem.eql(u8, arg, "--db")) { - if (args2.next()) |val| { + if (arg_index + 1 < args.len) { + arg_index += 1; + const val = args[arg_index]; db_override = val; } } else if (std.mem.eql(u8, arg, "--token")) { - if (args2.next()) |val| { + if (arg_index + 1 < args.len) { + arg_index += 1; + const val = args[arg_index]; token_override = val; } } else if (std.mem.eql(u8, arg, "--config")) { - if (args2.next()) |val| { + if (arg_index + 1 < args.len) { + arg_index += 1; + const val = args[arg_index]; config_path_override = val; } } else if (std.mem.eql(u8, arg, "--version")) { @@ -109,24 +115,24 @@ pub fn main() !void { var store = try Store.init(allocator, db_path); defer store.deinit(); - const addr = std.net.Address.resolveIp("127.0.0.1", port) catch |err| { + const addr = std.Io.net.IpAddress.resolve(std_compat.io(), "127.0.0.1", port) catch |err| { std.debug.print("failed to resolve address: {}\n", .{err}); return; }; - var 
server = addr.listen(.{ .reuse_address = true }) catch |err| { + var server = addr.listen(std_compat.io(), .{ .reuse_address = true }) catch |err| { std.debug.print("failed to listen on port {d}: {}\n", .{ port, err }); return; }; - defer server.deinit(); + defer server.deinit(std_compat.io()); std.debug.print("listening on http://127.0.0.1:{d}\n", .{port}); while (true) { - const conn = server.accept() catch |err| { + var conn = server.accept(std_compat.io()) catch |err| { std.debug.print("accept error: {}\n", .{err}); continue; }; - defer conn.stream.close(); + defer conn.close(std_compat.io()); // Per-request arena var arena = std.heap.ArenaAllocator.init(allocator); @@ -134,43 +140,15 @@ pub fn main() !void { const req_alloc = arena.allocator(); // Read request - var req_buf: [max_request_size]u8 = undefined; - const n = conn.stream.read(&req_buf) catch continue; - if (n == 0) continue; - const raw = req_buf[0..n]; + const full_request = readHttpRequest(req_alloc, &conn, max_request_size) catch continue orelse continue; // Parse request line - const first_line_end = std.mem.indexOf(u8, raw, "\r\n") orelse continue; - const first_line = raw[0..first_line_end]; + const first_line_end = std.mem.indexOf(u8, full_request, "\r\n") orelse continue; + const first_line = full_request[0..first_line_end]; var parts = std.mem.splitScalar(u8, first_line, ' '); const method = parts.next() orelse continue; const target = parts.next() orelse continue; - // Read remaining body if Content-Length indicates more data - var full_request = raw; - if (api.extractHeader(raw, "Content-Length")) |cl_str| { - const content_length = std.fmt.parseInt(usize, cl_str, 10) catch 0; - if (content_length > 0) { - const header_end_pos = std.mem.indexOf(u8, raw, "\r\n\r\n") orelse continue; - const body_start = header_end_pos + 4; - const body_received = n - body_start; - if (body_received < content_length) { - // Need to read more - const total_size = body_start + content_length; - if (total_size 
> max_request_size) continue; - const full_buf = req_alloc.alloc(u8, total_size) catch continue; - @memcpy(full_buf[0..n], raw); - var total_read = n; - while (total_read < total_size) { - const extra = conn.stream.read(full_buf[total_read..total_size]) catch break; - if (extra == 0) break; - total_read += extra; - } - full_request = full_buf[0..total_read]; - } - } - } - const body = api.extractBody(full_request); var ctx = api.Context{ @@ -187,8 +165,11 @@ pub fn main() !void { "HTTP/1.1 {s}\r\nContent-Type: application/json\r\nContent-Length: {d}\r\nConnection: close\r\n\r\n", .{ response.status, response.body.len }, ) catch continue; - _ = conn.stream.write(header) catch continue; - _ = conn.stream.write(response.body) catch continue; + var resp_write_buffer: [1024]u8 = undefined; + var writer = conn.writer(std_compat.io(), &resp_write_buffer); + writer.interface.writeAll(header) catch continue; + writer.interface.writeAll(response.body) catch continue; + writer.interface.flush() catch continue; } } @@ -199,17 +180,56 @@ fn ensureParentDirForFile(path: []const u8) !void { if (parent.len == 0) return; if (std.fs.path.isAbsolute(parent)) { - std.fs.makeDirAbsolute(parent) catch |err| switch (err) { + std_compat.fs.makeDirAbsolute(parent) catch |err| switch (err) { error.PathAlreadyExists => {}, else => return err, }; return; } - std.fs.cwd().makePath(parent) catch |err| switch (err) { - error.PathAlreadyExists => {}, - else => return err, - }; + try std_compat.fs.cwd().makePath(parent); } const max_request_size: usize = 65_536; +const request_read_chunk: usize = 4096; + +fn readHttpRequest(allocator: std.mem.Allocator, stream: *std.Io.net.Stream, max_bytes: usize) !?[]u8 { + var buffer: std.ArrayListUnmanaged(u8) = .empty; + defer buffer.deinit(allocator); + + var read_buffer: [request_read_chunk]u8 = undefined; + var reader = stream.reader(std_compat.io(), &read_buffer); + + while (true) { + const line = reader.interface.takeDelimiterInclusive('\n') catch |err| 
switch (err) { + error.EndOfStream => { + if (buffer.items.len == 0) return null; + return error.UnexpectedEof; + }, + else => |e| return e, + }; + + try buffer.appendSlice(allocator, line); + if (buffer.items.len > max_bytes) return error.RequestTooLarge; + + if (std.mem.eql(u8, line, "\r\n") or std.mem.eql(u8, line, "\n")) break; + } + + const header_end = std.mem.indexOf(u8, buffer.items, "\r\n\r\n") orelse return error.InvalidRequest; + const content_len = if (api.extractHeader(buffer.items[0 .. header_end + 4], "Content-Length")) |cl_str| + (std.fmt.parseInt(usize, cl_str, 10) catch return error.InvalidContentLength) + else + 0; + + const required = header_end + 4 + content_len; + if (required > max_bytes) return error.RequestTooLarge; + + if (content_len > 0) { + const body = try allocator.alloc(u8, content_len); + defer allocator.free(body); + try reader.interface.readSliceAll(body); + try buffer.appendSlice(allocator, body); + } + + return try allocator.dupe(u8, buffer.items[0..required]); +} diff --git a/src/store.zig b/src/store.zig index dd1b738..14bbe7d 100644 --- a/src/store.zig +++ b/src/store.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const std_compat = @import("compat.zig"); const log = std.log.scoped(.store); const ids = @import("ids.zig"); const domain = @import("domain.zig"); @@ -402,7 +403,7 @@ pub const Store = struct { const lease_sql = "SELECT COUNT(*) FROM leases WHERE expires_at_ms > ?;"; var lease_stmt: ?*c.sqlite3_stmt = null; - const now_ms: i64 = std.time.milliTimestamp(); + const now_ms: i64 = std_compat.time.milliTimestamp(); rc = c.sqlite3_prepare_v2(self.db, lease_sql, -1, &lease_stmt, null); if (rc != c.SQLITE_OK) return error.PrepareFailed; defer _ = c.sqlite3_finalize(lease_stmt); @@ -2420,9 +2421,9 @@ test "claim respects per-state concurrency limits" { defer store.freeOwnedString(t3); // Set per-state concurrency limit of 2 for "review" - var concurrency_map = std.json.ObjectMap.init(alloc); + var concurrency_map: 
std.json.ObjectMap = .empty;
-    defer concurrency_map.deinit();
-    try concurrency_map.put("review", .{ .integer = 2 });
+    defer concurrency_map.deinit(alloc);
+    try concurrency_map.put(alloc, "review", .{ .integer = 2 });
     const per_state: std.json.Value = .{ .object = concurrency_map };
 
     // Claim first two tasks — should succeed