diff --git a/ci/aarch64-linux-debug.sh b/ci/aarch64-linux-debug.sh
index b6cbb210e12f..17fef03c9c85 100755
--- a/ci/aarch64-linux-debug.sh
+++ b/ci/aarch64-linux-debug.sh
@@ -56,7 +56,8 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
- -Denable-superhtml
+ -Denable-superhtml \
+ --test-timeout-ms 60_000
stage3-debug/bin/zig build \
--prefix stage4-debug \
diff --git a/ci/aarch64-linux-release.sh b/ci/aarch64-linux-release.sh
index 64cfe1924ffc..f5bc24d4ff44 100755
--- a/ci/aarch64-linux-release.sh
+++ b/ci/aarch64-linux-release.sh
@@ -56,7 +56,8 @@ stage3-release/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
- -Denable-superhtml
+ -Denable-superhtml \
+ --test-timeout-ms 60_000
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \
diff --git a/ci/aarch64-macos-debug.sh b/ci/aarch64-macos-debug.sh
index 9649f69b91fa..29c1cf3e52d4 100755
--- a/ci/aarch64-macos-debug.sh
+++ b/ci/aarch64-macos-debug.sh
@@ -54,4 +54,5 @@ stage3-debug/bin/zig build test docs \
-Denable-macos-sdk \
-Dstatic-llvm \
-Dskip-non-native \
- --search-prefix "$PREFIX"
+ --search-prefix "$PREFIX" \
+ --test-timeout-ms 60_000
diff --git a/ci/aarch64-macos-release.sh b/ci/aarch64-macos-release.sh
index 7b512813a5d3..30a51d15e121 100755
--- a/ci/aarch64-macos-release.sh
+++ b/ci/aarch64-macos-release.sh
@@ -54,7 +54,8 @@ stage3-release/bin/zig build test docs \
-Denable-macos-sdk \
-Dstatic-llvm \
-Dskip-non-native \
- --search-prefix "$PREFIX"
+ --search-prefix "$PREFIX" \
+ --test-timeout-ms 60_000
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \
diff --git a/ci/aarch64-windows.ps1 b/ci/aarch64-windows.ps1
index f27ba0bec698..6cb5520d87b9 100644
--- a/ci/aarch64-windows.ps1
+++ b/ci/aarch64-windows.ps1
@@ -66,7 +66,8 @@ Write-Output "Main test suite..."
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `
-Dskip-non-native `
- -Denable-symlinks-windows
+ -Denable-symlinks-windows `
+ --test-timeout-ms 60_000
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.
diff --git a/ci/riscv64-linux-debug.sh b/ci/riscv64-linux-debug.sh
index c68952356c58..85e2ea756ec9 100755
--- a/ci/riscv64-linux-debug.sh
+++ b/ci/riscv64-linux-debug.sh
@@ -59,4 +59,5 @@ stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-sta
-Dskip-run-translated-c \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
- --zig-lib-dir "$PWD/../lib"
+ --zig-lib-dir "$PWD/../lib" \
+ --test-timeout-ms 60_000
diff --git a/ci/riscv64-linux-release.sh b/ci/riscv64-linux-release.sh
index 757dace2718c..153ee9e696f7 100755
--- a/ci/riscv64-linux-release.sh
+++ b/ci/riscv64-linux-release.sh
@@ -59,4 +59,5 @@ stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-s
-Dskip-run-translated-c \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
- --zig-lib-dir "$PWD/../lib"
+ --zig-lib-dir "$PWD/../lib" \
+ --test-timeout-ms 60_000
diff --git a/ci/x86_64-linux-debug-llvm.sh b/ci/x86_64-linux-debug-llvm.sh
index 335fe60bfb05..e46b0c8f6f84 100644
--- a/ci/x86_64-linux-debug-llvm.sh
+++ b/ci/x86_64-linux-debug-llvm.sh
@@ -67,4 +67,5 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
- -Denable-superhtml
+ -Denable-superhtml \
+ --test-timeout-ms 60_000
diff --git a/ci/x86_64-linux-debug.sh b/ci/x86_64-linux-debug.sh
index be5fe3944695..36966f4fa8bf 100755
--- a/ci/x86_64-linux-debug.sh
+++ b/ci/x86_64-linux-debug.sh
@@ -67,4 +67,5 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
- -Denable-superhtml
+ -Denable-superhtml \
+ --test-timeout-ms 60_000
diff --git a/ci/x86_64-linux-release.sh b/ci/x86_64-linux-release.sh
index 3352d06a46c8..5a36181eecd9 100755
--- a/ci/x86_64-linux-release.sh
+++ b/ci/x86_64-linux-release.sh
@@ -68,7 +68,8 @@ stage3-release/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
- -Denable-superhtml
+ -Denable-superhtml \
+ --test-timeout-ms 60_000
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \
diff --git a/ci/x86_64-macos-release.sh b/ci/x86_64-macos-release.sh
index 15c80c3d8a38..e9fccc6cbd37 100755
--- a/ci/x86_64-macos-release.sh
+++ b/ci/x86_64-macos-release.sh
@@ -56,7 +56,8 @@ stage3/bin/zig build test docs \
-Denable-macos-sdk \
-Dstatic-llvm \
-Dskip-non-native \
- --search-prefix "$PREFIX"
+ --search-prefix "$PREFIX" \
+ --test-timeout-ms 60_000
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3/bin/zig build \
diff --git a/ci/x86_64-windows-debug.ps1 b/ci/x86_64-windows-debug.ps1
index 8a5e0b42da8a..8a4474522ce1 100644
--- a/ci/x86_64-windows-debug.ps1
+++ b/ci/x86_64-windows-debug.ps1
@@ -67,7 +67,8 @@ Write-Output "Main test suite..."
-Dstatic-llvm `
-Dskip-non-native `
-Dskip-release `
- -Denable-symlinks-windows
+ -Denable-symlinks-windows `
+ --test-timeout-ms 60_000
CheckLastExitCode
Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
diff --git a/ci/x86_64-windows-release.ps1 b/ci/x86_64-windows-release.ps1
index 77ef64ccadf1..c9f3f6afd9e2 100644
--- a/ci/x86_64-windows-release.ps1
+++ b/ci/x86_64-windows-release.ps1
@@ -66,7 +66,8 @@ Write-Output "Main test suite..."
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `
-Dskip-non-native `
- -Denable-symlinks-windows
+ -Denable-symlinks-windows `
+ --test-timeout-ms 60_000
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.
diff --git a/lib/build-web/index.html b/lib/build-web/index.html
index bcdc2c7141d4..fd8f46db2085 100644
--- a/lib/build-web/index.html
+++ b/lib/build-web/index.html
@@ -139,6 +139,19 @@
+ <div id="runTestReport" class="hidden">
+ <table>
+ <thead>
+ <tr>
+ <th>Test Name</th>
+ <th>Duration</th>
+ </tr>
+ </thead>
+ <tbody id="runTestTableBody">
+ </tbody>
+ </table>
+ </div>
diff --git a/lib/build-web/main.js b/lib/build-web/main.js
index 481c33f024ba..801a89afc51e 100644
--- a/lib/build-web/main.js
+++ b/lib/build-web/main.js
@@ -46,8 +46,9 @@ WebAssembly.instantiateStreaming(wasm_promise, {
updateCoverage: fuzzUpdateCoverage,
},
time_report: {
- updateCompile: timeReportUpdateCompile,
updateGeneric: timeReportUpdateGeneric,
+ updateCompile: timeReportUpdateCompile,
+ updateRunTest: timeReportUpdateRunTest,
},
}).then(function(obj) {
setConnectionStatus("Connecting to WebSocket...", true);
@@ -248,6 +249,7 @@ function timeReportUpdateCompile(
shadow.getElementById("genericReport").classList.add("hidden");
shadow.getElementById("compileReport").classList.remove("hidden");
+ shadow.getElementById("runTestReport").classList.add("hidden");
if (!use_llvm) shadow.querySelector(":host > details").classList.add("no-llvm");
host.innerHTML = inner_html;
@@ -265,8 +267,26 @@ function timeReportUpdateGeneric(
shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
shadow.getElementById("genericReport").classList.remove("hidden");
shadow.getElementById("compileReport").classList.add("hidden");
+ shadow.getElementById("runTestReport").classList.add("hidden");
host.innerHTML = inner_html;
}
+function timeReportUpdateRunTest(
+ step_idx,
+ table_html_ptr,
+ table_html_len,
+) {
+ const table_html = decodeString(table_html_ptr, table_html_len);
+ const host = domTimeReportList.children.item(step_idx);
+ const shadow = host.shadowRoot;
+
+ shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
+
+ shadow.getElementById("genericReport").classList.add("hidden");
+ shadow.getElementById("compileReport").classList.add("hidden");
+ shadow.getElementById("runTestReport").classList.remove("hidden");
+
+ shadow.getElementById("runTestTableBody").innerHTML = table_html;
+}
const fuzz_entry_template = document.getElementById("fuzzEntryTemplate").content;
const domFuzz = document.getElementById("fuzz");
diff --git a/lib/build-web/main.zig b/lib/build-web/main.zig
index e971f6de48e7..6a91b5325b0c 100644
--- a/lib/build-web/main.zig
+++ b/lib/build-web/main.zig
@@ -94,6 +94,7 @@ export fn message_end() void {
.time_report_generic_result => return time_report.genericResultMessage(msg_bytes) catch @panic("OOM"),
.time_report_compile_result => return time_report.compileResultMessage(msg_bytes) catch @panic("OOM"),
+ .time_report_run_test_result => return time_report.runTestResultMessage(msg_bytes) catch @panic("OOM"),
}
}
diff --git a/lib/build-web/time_report.zig b/lib/build-web/time_report.zig
index aa37e501becf..da6b4b2f361e 100644
--- a/lib/build-web/time_report.zig
+++ b/lib/build-web/time_report.zig
@@ -27,6 +27,13 @@ const js = struct {
/// Whether the LLVM backend was used. If not, LLVM-specific statistics are hidden.
use_llvm: bool,
) void;
+ extern "time_report" fn updateRunTest(
+ /// The index of the step.
+ step_idx: u32,
+ /// The HTML which will populate the <tbody> of the test table.
+ table_html_ptr: [*]const u8,
+ table_html_len: usize,
+ ) void;
};
pub fn genericResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
@@ -236,3 +243,37 @@ pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
hdr.flags.use_llvm,
);
}
+
+pub fn runTestResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
+ if (msg_bytes.len < @sizeOf(abi.RunTestResult)) @panic("malformed RunTestResult message");
+ const hdr: *const abi.RunTestResult = @ptrCast(msg_bytes[0..@sizeOf(abi.RunTestResult)]);
+ if (hdr.step_idx >= step_list.*.len) @panic("malformed RunTestResult message");
+ const trailing = msg_bytes[@sizeOf(abi.RunTestResult)..];
+
+ const durations: []align(1) const u64 = @ptrCast(trailing[0 .. hdr.tests_len * 8]);
+ var offset: usize = hdr.tests_len * 8;
+
+ var table_html: std.ArrayListUnmanaged(u8) = .empty;
+ defer table_html.deinit(gpa);
+
+ for (durations) |test_ns| {
+ const test_name_len = std.mem.indexOfScalar(u8, trailing[offset..], 0) orelse @panic("malformed RunTestResult message");
+ const test_name = trailing[offset..][0..test_name_len];
+ offset += test_name_len + 1;
+ try table_html.print(gpa, "<tr><td>{f}</td>", .{fmtEscapeHtml(test_name)});
+ if (test_ns == std.math.maxInt(u64)) {
+ try table_html.appendSlice(gpa, "<td></td>"); // didn't run
+ } else {
+ try table_html.print(gpa, "<td>{D}</td>", .{test_ns});
+ }
+ try table_html.appendSlice(gpa, "</tr>\n");
+ }
+
+ if (offset != trailing.len) @panic("malformed RunTestResult message");
+
+ js.updateRunTest(
+ hdr.step_idx,
+ table_html.items.ptr,
+ table_html.items.len,
+ );
+}
diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig
index 41315457c124..91a78a34389e 100644
--- a/lib/compiler/build_runner.zig
+++ b/lib/compiler/build_runner.zig
@@ -103,11 +103,13 @@ pub fn main() !void {
var install_prefix: ?[]const u8 = null;
var dir_list = std.Build.DirList{};
+ var error_style: ErrorStyle = .verbose;
+ var multiline_errors: MultilineErrors = .indent;
var summary: ?Summary = null;
var max_rss: u64 = 0;
var skip_oom_steps = false;
+ var test_timeout_ms: ?u64 = null;
var color: Color = .auto;
- var prominent_compile_errors = false;
var help_menu = false;
var steps_menu = false;
var output_tmp_nonce: ?[16]u8 = null;
@@ -116,6 +118,18 @@ pub fn main() !void {
var debounce_interval_ms: u16 = 50;
var webui_listen: ?std.net.Address = null;
+ if (try std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(arena)) |str| {
+ if (std.meta.stringToEnum(ErrorStyle, str)) |style| {
+ error_style = style;
+ }
+ }
+
+ if (try std.zig.EnvVar.ZIG_BUILD_MULTILINE_ERRORS.get(arena)) |str| {
+ if (std.meta.stringToEnum(MultilineErrors, str)) |style| {
+ multiline_errors = style;
+ }
+ }
+
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg});
@@ -175,6 +189,14 @@ pub fn main() !void {
};
} else if (mem.eql(u8, arg, "--skip-oom-steps")) {
skip_oom_steps = true;
+ } else if (mem.eql(u8, arg, "--test-timeout-ms")) {
+ const millis_str = nextArgOrFatal(args, &arg_idx);
+ test_timeout_ms = std.fmt.parseInt(u64, millis_str, 10) catch |err| {
+ std.debug.print("invalid millisecond count: '{s}': {s}\n", .{
+ millis_str, @errorName(err),
+ });
+ process.exit(1);
+ };
} else if (mem.eql(u8, arg, "--search-prefix")) {
const search_prefix = nextArgOrFatal(args, &arg_idx);
builder.addSearchPrefix(search_prefix);
@@ -188,11 +210,23 @@ pub fn main() !void {
arg, next_arg,
});
};
+ } else if (mem.eql(u8, arg, "--error-style")) {
+ const next_arg = nextArg(args, &arg_idx) orelse
+ fatalWithHint("expected style after '{s}'", .{arg});
+ error_style = std.meta.stringToEnum(ErrorStyle, next_arg) orelse {
+ fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
+ };
+ } else if (mem.eql(u8, arg, "--multiline-errors")) {
+ const next_arg = nextArg(args, &arg_idx) orelse
+ fatalWithHint("expected style after '{s}'", .{arg});
+ multiline_errors = std.meta.stringToEnum(MultilineErrors, next_arg) orelse {
+ fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
+ };
} else if (mem.eql(u8, arg, "--summary")) {
const next_arg = nextArg(args, &arg_idx) orelse
- fatalWithHint("expected [all|new|failures|none] after '{s}'", .{arg});
+ fatalWithHint("expected [all|new|failures|line|none] after '{s}'", .{arg});
summary = std.meta.stringToEnum(Summary, next_arg) orelse {
- fatalWithHint("expected [all|new|failures|none] after '{s}', found '{s}'", .{
+ fatalWithHint("expected [all|new|failures|line|none] after '{s}', found '{s}'", .{
arg, next_arg,
});
};
@@ -264,8 +298,6 @@ pub fn main() !void {
builder.verbose_cc = true;
} else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) {
builder.verbose_llvm_cpu_features = true;
- } else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
- prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
} else if (mem.eql(u8, arg, "--time-report")) {
@@ -414,14 +446,20 @@ pub fn main() !void {
.max_rss_is_default = false,
.max_rss_mutex = .{},
.skip_oom_steps = skip_oom_steps,
+ .unit_test_timeout_ns = ns: {
+ const ms = test_timeout_ms orelse break :ns null;
+ break :ns std.math.mul(u64, ms, std.time.ns_per_ms) catch null;
+ },
+
.watch = watch,
.web_server = undefined, // set after `prepare`
.memory_blocked_steps = .empty,
.step_stack = .empty,
- .prominent_compile_errors = prominent_compile_errors,
.claimed_rss = 0,
- .summary = summary orelse if (watch) .new else .failures,
+ .error_style = error_style,
+ .multiline_errors = multiline_errors,
+ .summary = summary orelse if (watch or webui_listen != null) .line else .failures,
.ttyconf = ttyconf,
.stderr = stderr,
.thread_pool = undefined,
@@ -437,8 +475,14 @@ pub fn main() !void {
}
prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
- error.UncleanExit => process.exit(1),
- else => return err,
+ error.DependencyLoopDetected => {
+ // Perhaps in the future there could be an Advanced Options flag such as
+ // --debug-build-runner-leaks which would make this code return instead of
+ // calling exit.
+ std.debug.lockStdErr();
+ process.exit(1);
+ },
+ else => |e| return e,
};
var w: Watch = w: {
@@ -468,30 +512,24 @@ pub fn main() !void {
ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
}
- rebuild: while (true) {
+ rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
+ const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
+ defer std.debug.unlockStderrWriter();
+ try bw.writeAll("\x1B[2J\x1B[3J\x1B[H");
+ }) {
if (run.web_server) |*ws| ws.startBuild();
- runStepNames(
+ try runStepNames(
builder,
targets.items,
main_progress_node,
&run,
- ) catch |err| switch (err) {
- error.UncleanExit => {
- assert(!run.watch and run.web_server == null);
- process.exit(1);
- },
- else => return err,
- };
+ );
if (run.web_server) |*web_server| {
web_server.finishBuild(.{ .fuzz = fuzz });
}
- if (!watch and run.web_server == null) {
- return cleanExit();
- }
-
if (run.web_server) |*ws| {
assert(!watch); // fatal error after CLI parsing
while (true) switch (ws.wait()) {
@@ -565,24 +603,21 @@ const Run = struct {
max_rss_is_default: bool,
max_rss_mutex: std.Thread.Mutex,
skip_oom_steps: bool,
+ unit_test_timeout_ns: ?u64,
watch: bool,
web_server: if (!builtin.single_threaded) ?WebServer else ?noreturn,
/// Allocated into `gpa`.
memory_blocked_steps: std.ArrayListUnmanaged(*Step),
/// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
- prominent_compile_errors: bool,
thread_pool: std.Thread.Pool,
claimed_rss: usize,
+ error_style: ErrorStyle,
+ multiline_errors: MultilineErrors,
summary: Summary,
ttyconf: std.io.tty.Config,
stderr: File,
-
- fn cleanExit(run: Run) void {
- if (run.watch or run.web_server != null) return;
- return runner.cleanExit();
- }
};
fn prepare(
@@ -616,10 +651,7 @@ fn prepare(
rand.shuffle(*Step, starting_steps);
for (starting_steps) |s| {
- constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand) catch |err| switch (err) {
- error.DependencyLoopDetected => return uncleanExit(),
- else => |e| return e,
- };
+ try constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand);
}
{
@@ -678,10 +710,12 @@ fn runStepNames(
}
assert(run.memory_blocked_steps.items.len == 0);
+ var test_pass_count: usize = 0;
var test_skip_count: usize = 0;
var test_fail_count: usize = 0;
- var test_pass_count: usize = 0;
- var test_leak_count: usize = 0;
+ var test_crash_count: usize = 0;
+ var test_timeout_count: usize = 0;
+
var test_count: usize = 0;
var success_count: usize = 0;
@@ -691,10 +725,12 @@ fn runStepNames(
var total_compile_errors: usize = 0;
for (step_stack.keys()) |s| {
- test_fail_count += s.test_results.fail_count;
- test_skip_count += s.test_results.skip_count;
- test_leak_count += s.test_results.leak_count;
test_pass_count += s.test_results.passCount();
+ test_skip_count += s.test_results.skip_count;
+ test_fail_count += s.test_results.fail_count;
+ test_crash_count += s.test_results.crash_count;
+ test_timeout_count += s.test_results.timeout_count;
+
test_count += s.test_results.test_count;
switch (s.state) {
@@ -724,40 +760,76 @@ fn runStepNames(
}
}
- // A proper command line application defaults to silently succeeding.
- // The user may request verbose mode if they have a different preference.
- const failures_only = switch (run.summary) {
- .failures, .none => true,
- else => false,
- };
+ // Every test has a state
+ assert(test_pass_count + test_skip_count + test_fail_count + test_crash_count + test_timeout_count == test_count);
+
if (failure_count == 0) {
std.Progress.setStatus(.success);
- if (failures_only) return run.cleanExit();
} else {
std.Progress.setStatus(.failure);
}
const ttyconf = run.ttyconf;
- if (run.summary != .none) {
+ summary: {
+ switch (run.summary) {
+ .all, .new, .line => {},
+ .failures => if (failure_count == 0) break :summary,
+ .none => break :summary,
+ }
+
const w = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(w, .cyan) catch {};
- w.writeAll("\nBuild Summary:") catch {};
+ ttyconf.setColor(w, .bold) catch {};
+ w.writeAll("Build Summary: ") catch {};
ttyconf.setColor(w, .reset) catch {};
- w.print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
- if (skipped_count > 0) w.print("; {d} skipped", .{skipped_count}) catch {};
- if (failure_count > 0) w.print("; {d} failed", .{failure_count}) catch {};
+ w.print("{d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
+ {
+ ttyconf.setColor(w, .dim) catch {};
+ var first = true;
+ if (skipped_count > 0) {
+ w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {};
+ first = false;
+ }
+ if (failure_count > 0) {
+ w.print("{s}{d} failed", .{ if (first) " (" else ", ", failure_count }) catch {};
+ first = false;
+ }
+ if (!first) w.writeByte(')') catch {};
+ ttyconf.setColor(w, .reset) catch {};
+ }
- if (test_count > 0) w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
- if (test_skip_count > 0) w.print("; {d} skipped", .{test_skip_count}) catch {};
- if (test_fail_count > 0) w.print("; {d} failed", .{test_fail_count}) catch {};
- if (test_leak_count > 0) w.print("; {d} leaked", .{test_leak_count}) catch {};
+ if (test_count > 0) {
+ w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
+ ttyconf.setColor(w, .dim) catch {};
+ var first = true;
+ if (test_skip_count > 0) {
+ w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {};
+ first = false;
+ }
+ if (test_fail_count > 0) {
+ w.print("{s}{d} failed", .{ if (first) " (" else ", ", test_fail_count }) catch {};
+ first = false;
+ }
+ if (test_crash_count > 0) {
+ w.print("{s}{d} crashed", .{ if (first) " (" else ", ", test_crash_count }) catch {};
+ first = false;
+ }
+ if (test_timeout_count > 0) {
+ w.print("{s}{d} timed out", .{ if (first) " (" else ", ", test_timeout_count }) catch {};
+ first = false;
+ }
+ if (!first) w.writeByte(')') catch {};
+ ttyconf.setColor(w, .reset) catch {};
+ }
w.writeAll("\n") catch {};
+ if (run.summary == .line) break :summary;
+
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
defer step_stack_copy.deinit(gpa);
@@ -773,7 +845,7 @@ fn runStepNames(
i -= 1;
const step = b.top_level_steps.get(step_names[i]).?.step;
const found = switch (run.summary) {
- .all, .none => unreachable,
+ .all, .line, .none => unreachable,
.failures => step.state != .success,
.new => !step.result_cached,
};
@@ -790,28 +862,19 @@ fn runStepNames(
w.writeByte('\n') catch {};
}
- if (failure_count == 0) {
- return run.cleanExit();
- }
-
- // Finally, render compile errors at the bottom of the terminal.
- if (run.prominent_compile_errors and total_compile_errors > 0) {
- for (step_stack.keys()) |s| {
- if (s.result_error_bundle.errorMessageCount() > 0) {
- s.result_error_bundle.renderToStdErr(.{ .ttyconf = ttyconf });
- }
- }
+ if (run.watch or run.web_server != null) return;
- if (!run.watch and run.web_server == null) {
- // Signal to parent process that we have printed compile errors. The
- // parent process may choose to omit the "following command failed"
- // line in this case.
- std.debug.lockStdErr();
- process.exit(2);
- }
- }
+ // Perhaps in the future there could be an Advanced Options flag such as
+ // --debug-build-runner-leaks which would make this code return instead of
+ // calling exit.
- if (!run.watch and run.web_server == null) return uncleanExit();
+ const code: u8 = code: {
+ if (failure_count == 0) break :code 0; // success
+ if (run.error_style.verboseContext()) break :code 1; // failure; print build command
+ break :code 2; // failure; do not print build command
+ };
+ std.debug.lockStdErr();
+ process.exit(code);
}
const PrintNode = struct {
@@ -864,11 +927,16 @@ fn printStepStatus(
try stderr.writeAll(" cached");
} else if (s.test_results.test_count > 0) {
const pass_count = s.test_results.passCount();
- try stderr.print(" {d} passed", .{pass_count});
+ assert(s.test_results.test_count == pass_count + s.test_results.skip_count);
+ try stderr.print(" {d} pass", .{pass_count});
if (s.test_results.skip_count > 0) {
+ try ttyconf.setColor(stderr, .reset);
+ try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .yellow);
- try stderr.print(" {d} skipped", .{s.test_results.skip_count});
+ try stderr.print("{d} skip", .{s.test_results.skip_count});
}
+ try ttyconf.setColor(stderr, .reset);
+ try stderr.print(" ({d} total)", .{s.test_results.test_count});
} else {
try stderr.writeAll(" success");
}
@@ -916,7 +984,10 @@ fn printStepStatus(
try stderr.writeAll("\n");
try ttyconf.setColor(stderr, .reset);
},
- .failure => try printStepFailure(s, stderr, ttyconf),
+ .failure => {
+ try printStepFailure(s, stderr, ttyconf, false);
+ try ttyconf.setColor(stderr, .reset);
+ },
}
}
@@ -924,51 +995,87 @@ fn printStepFailure(
s: *Step,
stderr: *Writer,
ttyconf: std.io.tty.Config,
+ dim: bool,
) !void {
if (s.result_error_bundle.errorMessageCount() > 0) {
try ttyconf.setColor(stderr, .red);
try stderr.print(" {d} errors\n", .{
s.result_error_bundle.errorMessageCount(),
});
- try ttyconf.setColor(stderr, .reset);
} else if (!s.test_results.isSuccess()) {
- try stderr.print(" {d}/{d} passed", .{
- s.test_results.passCount(), s.test_results.test_count,
- });
+ // These first values include all of the test "statuses". Every test is either passed,
+ // skipped, failed, crashed, or timed out.
+ try ttyconf.setColor(stderr, .green);
+ try stderr.print(" {d} pass", .{s.test_results.passCount()});
+ try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
+ if (s.test_results.skip_count > 0) {
+ try stderr.writeAll(", ");
+ try ttyconf.setColor(stderr, .yellow);
+ try stderr.print("{d} skip", .{s.test_results.skip_count});
+ try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
+ }
if (s.test_results.fail_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} failed", .{
- s.test_results.fail_count,
- });
+ try stderr.print("{d} fail", .{s.test_results.fail_count});
try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
}
- if (s.test_results.skip_count > 0) {
+ if (s.test_results.crash_count > 0) {
try stderr.writeAll(", ");
- try ttyconf.setColor(stderr, .yellow);
- try stderr.print("{d} skipped", .{
- s.test_results.skip_count,
- });
+ try ttyconf.setColor(stderr, .red);
+ try stderr.print("{d} crash", .{s.test_results.crash_count});
try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
}
- if (s.test_results.leak_count > 0) {
+ if (s.test_results.timeout_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
- try stderr.print("{d} leaked", .{
- s.test_results.leak_count,
- });
+ try stderr.print("{d} timeout", .{s.test_results.timeout_count});
+ try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
+ }
+ try stderr.print(" ({d} total)", .{s.test_results.test_count});
+
+ // Memory leaks are intentionally written after the total, because it isn't a test *status*,
+ // but just a flag that any tests -- even passed ones -- can have. We also use a different
+ // separator, so it looks like:
+ // 2 pass, 1 skip, 2 fail (5 total); 2 leaks
+ if (s.test_results.leak_count > 0) {
+ try stderr.writeAll("; ");
+ try ttyconf.setColor(stderr, .red);
+ try stderr.print("{d} leaks", .{s.test_results.leak_count});
+ try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
+ }
+
+ // It's usually not helpful to know how many error logs there were because they tend to
+ // just come with other errors (e.g. crashes and leaks print stack traces, and clean
+ // failures print error traces). So only mention them if they're the only thing causing
+ // the failure.
+ const show_err_logs: bool = show: {
+ var alt_results = s.test_results;
+ alt_results.log_err_count = 0;
+ break :show alt_results.isSuccess();
+ };
+ if (show_err_logs) {
+ try stderr.writeAll("; ");
+ try ttyconf.setColor(stderr, .red);
+ try stderr.print("{d} error logs", .{s.test_results.log_err_count});
try ttyconf.setColor(stderr, .reset);
+ if (dim) try ttyconf.setColor(stderr, .dim);
}
+
try stderr.writeAll("\n");
} else if (s.result_error_msgs.items.len > 0) {
try ttyconf.setColor(stderr, .red);
try stderr.writeAll(" failure\n");
- try ttyconf.setColor(stderr, .reset);
} else {
assert(s.result_stderr.len > 0);
try ttyconf.setColor(stderr, .red);
try stderr.writeAll(" stderr\n");
- try ttyconf.setColor(stderr, .reset);
}
}
@@ -984,7 +1091,7 @@ fn printTreeStep(
const first = step_stack.swapRemove(s);
const summary = run.summary;
const skip = switch (summary) {
- .none => unreachable,
+ .none, .line => unreachable,
.all => false,
.new => s.result_cached,
.failures => s.state == .success,
@@ -1017,7 +1124,7 @@ fn printTreeStep(
const step = s.dependencies.items[i];
const found = switch (summary) {
- .all, .none => unreachable,
+ .all, .line, .none => unreachable,
.failures => step.state != .success,
.new => !step.result_cached,
};
@@ -1171,19 +1278,18 @@ fn workerMakeOneStep(
.thread_pool = thread_pool,
.watch = run.watch,
.web_server = if (run.web_server) |*ws| ws else null,
+ .unit_test_timeout_ns = run.unit_test_timeout_ns,
.gpa = run.gpa,
});
// No matter the result, we want to display error/warning messages.
- const show_compile_errors = !run.prominent_compile_errors and
- s.result_error_bundle.errorMessageCount() > 0;
+ const show_compile_errors = s.result_error_bundle.errorMessageCount() > 0;
const show_error_msgs = s.result_error_msgs.items.len > 0;
const show_stderr = s.result_stderr.len > 0;
-
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
- printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
+ printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.error_style, run.multiline_errors) catch {};
}
handle_result: {
@@ -1247,37 +1353,46 @@ pub fn printErrorMessages(
failing_step: *Step,
options: std.zig.ErrorBundle.RenderOptions,
stderr: *Writer,
- prominent_compile_errors: bool,
+ error_style: ErrorStyle,
+ multiline_errors: MultilineErrors,
) !void {
- // Provide context for where these error messages are coming from by
- // printing the corresponding Step subtree.
-
- var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
- defer step_stack.deinit(gpa);
- try step_stack.append(gpa, failing_step);
- while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
- try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]);
- }
-
- // Now, `step_stack` has the subtree that we want to print, in reverse order.
const ttyconf = options.ttyconf;
- try ttyconf.setColor(stderr, .dim);
- var indent: usize = 0;
- while (step_stack.pop()) |s| : (indent += 1) {
- if (indent > 0) {
- try stderr.splatByteAll(' ', (indent - 1) * 3);
- try printChildNodePrefix(stderr, ttyconf);
+
+ if (error_style.verboseContext()) {
+ // Provide context for where these error messages are coming from by
+ // printing the corresponding Step subtree.
+ var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
+ defer step_stack.deinit(gpa);
+ try step_stack.append(gpa, failing_step);
+ while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
+ try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]);
}
- try stderr.writeAll(s.name);
+ // Now, `step_stack` has the subtree that we want to print, in reverse order.
+ try ttyconf.setColor(stderr, .dim);
+ var indent: usize = 0;
+ while (step_stack.pop()) |s| : (indent += 1) {
+ if (indent > 0) {
+ try stderr.splatByteAll(' ', (indent - 1) * 3);
+ try printChildNodePrefix(stderr, ttyconf);
+ }
- if (s == failing_step) {
- try printStepFailure(s, stderr, ttyconf);
- } else {
- try stderr.writeAll("\n");
+ try stderr.writeAll(s.name);
+
+ if (s == failing_step) {
+ try printStepFailure(s, stderr, ttyconf, true);
+ } else {
+ try stderr.writeAll("\n");
+ }
}
+ try ttyconf.setColor(stderr, .reset);
+ } else {
+ // Just print the failing step itself.
+ try ttyconf.setColor(stderr, .dim);
+ try stderr.writeAll(failing_step.name);
+ try printStepFailure(failing_step, stderr, ttyconf, true);
+ try ttyconf.setColor(stderr, .reset);
}
- try ttyconf.setColor(stderr, .reset);
if (failing_step.result_stderr.len > 0) {
try stderr.writeAll(failing_step.result_stderr);
@@ -1286,17 +1401,38 @@ pub fn printErrorMessages(
}
}
- if (!prominent_compile_errors and failing_step.result_error_bundle.errorMessageCount() > 0) {
- try failing_step.result_error_bundle.renderToWriter(options, stderr);
- }
+ try failing_step.result_error_bundle.renderToWriter(options, stderr);
for (failing_step.result_error_msgs.items) |msg| {
try ttyconf.setColor(stderr, .red);
- try stderr.writeAll("error: ");
+ try stderr.writeAll("error:");
try ttyconf.setColor(stderr, .reset);
- try stderr.writeAll(msg);
- try stderr.writeAll("\n");
+ if (std.mem.indexOfScalar(u8, msg, '\n') == null) {
+ try stderr.print(" {s}\n", .{msg});
+ } else switch (multiline_errors) {
+ .indent => {
+ var it = std.mem.splitScalar(u8, msg, '\n');
+ try stderr.print(" {s}\n", .{it.first()});
+ while (it.next()) |line| {
+ try stderr.print(" {s}\n", .{line});
+ }
+ },
+ .newline => try stderr.print("\n{s}\n", .{msg}),
+ .none => try stderr.print(" {s}\n", .{msg}),
+ }
}
+
+ if (error_style.verboseContext()) {
+ if (failing_step.result_failed_command) |cmd_str| {
+ try ttyconf.setColor(stderr, .red);
+ try stderr.writeAll("failed command: ");
+ try ttyconf.setColor(stderr, .reset);
+ try stderr.writeAll(cmd_str);
+ try stderr.writeByte('\n');
+ }
+ }
+
+ try stderr.writeByte('\n');
}
fn printSteps(builder: *std.Build, w: *Writer) !void {
@@ -1351,15 +1487,25 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\ -l, --list-steps Print available steps
\\ --verbose Print commands before executing them
\\ --color [auto|off|on] Enable or disable colored error messages
- \\ --prominent-compile-errors Buffer compile errors and display at end
+ \\ --error-style [style] Control how build errors are printed
+ \\ verbose (Default) Report errors with full context
+ \\ minimal Report errors after summary, excluding context like command lines
+ \\ verbose_clear Like 'verbose', but clear the terminal at the start of each update
+ \\ minimal_clear Like 'minimal', but clear the terminal at the start of each update
+ \\ --multiline-errors [style] Control how multi-line error messages are printed
+ \\ indent (Default) Indent non-initial lines to align with initial line
+ \\ newline Include a leading newline so that the error message is on its own lines
+ \\ none Print as usual so the first line is misaligned
\\ --summary [mode] Control the printing of the build summary
\\ all Print the build summary in its entirety
\\ new Omit cached steps
- \\ failures (Default) Only print failed steps
+ \\ failures (Default if short-lived) Only print failed steps
+ \\ line (Default if long-lived) Only print the single-line summary
\\ none Do not print the build summary
\\ -j Limit concurrent jobs (default is to use all CPU cores)
\\ --maxrss Limit memory usage (default is to use available memory)
\\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss
+ \\ --test-timeout-ms Limit execution time of unit tests, terminating if exceeded
\\ --fetch[=mode] Fetch dependency tree (optionally choose laziness) and exit
\\ needed (Default) Lazy dependencies are fetched as needed
\\ all Lazy dependencies are always fetched
@@ -1475,24 +1621,27 @@ fn argsRest(args: []const [:0]const u8, idx: usize) ?[]const [:0]const u8 {
return args[idx..];
}
-/// Perhaps in the future there could be an Advanced Options flag such as
-/// --debug-build-runner-leaks which would make this function return instead of
-/// calling exit.
-fn cleanExit() void {
- std.debug.lockStdErr();
- process.exit(0);
-}
-
-/// Perhaps in the future there could be an Advanced Options flag such as
-/// --debug-build-runner-leaks which would make this function return instead of
-/// calling exit.
-fn uncleanExit() error{UncleanExit} {
- std.debug.lockStdErr();
- process.exit(1);
-}
-
const Color = std.zig.Color;
-const Summary = enum { all, new, failures, none };
+const ErrorStyle = enum {
+ verbose,
+ minimal,
+ verbose_clear,
+ minimal_clear,
+ fn verboseContext(s: ErrorStyle) bool {
+ return switch (s) {
+ .verbose, .verbose_clear => true,
+ .minimal, .minimal_clear => false,
+ };
+ }
+ fn clearOnUpdate(s: ErrorStyle) bool {
+ return switch (s) {
+ .verbose, .minimal => false,
+ .verbose_clear, .minimal_clear => true,
+ };
+ }
+};
+const MultilineErrors = enum { indent, newline, none };
+const Summary = enum { all, new, failures, line, none };
fn get_tty_conf(color: Color, stderr: File) std.io.tty.Config {
return switch (color) {
diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index e618f72d2ff6..902757835a8b 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -124,30 +124,39 @@ fn mainServer() !void {
log_err_count = 0;
const index = try server.receiveBody_u32();
const test_fn = builtin.test_functions[index];
- var fail = false;
- var skip = false;
is_fuzz_test = false;
- test_fn.func() catch |err| switch (err) {
- error.SkipZigTest => skip = true,
- else => {
- fail = true;
+
+ // Let the build server know we're starting the test now.
+ try server.serveStringMessage(.test_started, &.{});
+
+ const TestResults = std.zig.Server.Message.TestResults;
+ const status: TestResults.Status = if (test_fn.func()) |v| s: {
+ v;
+ break :s .pass;
+ } else |err| switch (err) {
+ error.SkipZigTest => .skip,
+ else => s: {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
}
+ break :s .fail;
},
};
- const leak = testing.allocator_instance.deinit() == .leak;
+ const leak_count = testing.allocator_instance.detectLeaks();
+ testing.allocator_instance.deinitWithoutLeakChecks();
try server.serveTestResults(.{
.index = index,
.flags = .{
- .fail = fail,
- .skip = skip,
- .leak = leak,
+ .status = status,
.fuzz = is_fuzz_test,
.log_err_count = std.math.lossyCast(
- @FieldType(std.zig.Server.Message.TestResults.Flags, "log_err_count"),
+ @FieldType(TestResults.Flags, "log_err_count"),
log_err_count,
),
+ .leak_count = std.math.lossyCast(
+ @FieldType(TestResults.Flags, "leak_count"),
+ leak_count,
+ ),
},
});
},
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 548f95199192..b933611b0b7f 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1595,20 +1595,6 @@ pub fn validateUserInputDidItFail(b: *Build) bool {
return b.invalid_user_input;
}
-fn allocPrintCmd(gpa: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) error{OutOfMemory}![]u8 {
- var buf: ArrayList(u8) = .empty;
- if (opt_cwd) |cwd| try buf.print(gpa, "cd {s} && ", .{cwd});
- for (argv) |arg| {
- try buf.print(gpa, "{s} ", .{arg});
- }
- return buf.toOwnedSlice(gpa);
-}
-
-fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void {
- const text = allocPrintCmd(ally, cwd, argv) catch @panic("OOM");
- std.debug.print("{s}\n", .{text});
-}
-
/// This creates the install step and adds it to the dependencies of the
/// top-level install step, using all the default options.
/// See `addInstallArtifact` for a more flexible function.
@@ -1857,14 +1843,14 @@ pub fn runAllowFail(
pub fn run(b: *Build, argv: []const []const u8) []u8 {
if (!process.can_spawn) {
std.debug.print("unable to spawn the following command: cannot spawn child process\n{s}\n", .{
- try allocPrintCmd(b.allocator, null, argv),
+ try Step.allocPrintCmd(b.allocator, null, argv),
});
process.exit(1);
}
var code: u8 = undefined;
return b.runAllowFail(argv, &code, .Inherit) catch |err| {
- const printed_cmd = allocPrintCmd(b.allocator, null, argv) catch @panic("OOM");
+ const printed_cmd = Step.allocPrintCmd(b.allocator, null, argv) catch @panic("OOM");
std.debug.print("unable to spawn the following command: {s}\n{s}\n", .{
@errorName(err), printed_cmd,
});
diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig
index bc10f7907a2e..51883bee1fce 100644
--- a/lib/std/Build/Fuzz.zig
+++ b/lib/std/Build/Fuzz.zig
@@ -151,7 +151,7 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.io
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
- build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, w, false) catch {};
+ build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, w, .verbose, .indent) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@@ -177,7 +177,7 @@ fn fuzzWorkerRun(
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
- build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ws.ttyconf }, w, false) catch {};
+ build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ws.ttyconf }, w, .verbose, .indent) catch {};
return;
},
else => {
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 3002db628f4b..98710acdd4dc 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -56,6 +56,9 @@ result_cached: bool,
result_duration_ns: ?u64,
/// 0 means unavailable or not reported.
result_peak_rss: usize,
+/// If the step failed and this field is populated, this is the command which failed.
+/// This field may be populated even if the step succeeded.
+result_failed_command: ?[]const u8,
test_results: TestResults,
/// The return address associated with creation of this step that can be useful
@@ -63,18 +66,43 @@ test_results: TestResults,
debug_stack_trace: []usize,
pub const TestResults = struct {
- fail_count: u32 = 0,
+ /// The total number of tests in the step. Every test has a "status" from the following:
+ /// * passed
+ /// * skipped
+ /// * failed cleanly
+ /// * crashed
+ /// * timed out
+ test_count: u32 = 0,
+
+ /// The number of tests which were skipped (`error.SkipZigTest`).
skip_count: u32 = 0,
+ /// The number of tests which failed cleanly.
+ fail_count: u32 = 0,
+ /// The number of tests which terminated unexpectedly, i.e. crashed.
+ crash_count: u32 = 0,
+ /// The number of tests which timed out.
+ timeout_count: u32 = 0,
+
+ /// The number of detected memory leaks. The associated test may still have passed; indeed, *all*
+ /// individual tests may have passed. However, the step as a whole fails if any test has leaks.
leak_count: u32 = 0,
+ /// The number of detected error logs. The associated test may still have passed; indeed, *all*
+ /// individual tests may have passed. However, the step as a whole fails if any test logs errors.
log_err_count: u32 = 0,
- test_count: u32 = 0,
pub fn isSuccess(tr: TestResults) bool {
- return tr.fail_count == 0 and tr.leak_count == 0 and tr.log_err_count == 0;
+ // every test either passed or was skipped
+ return tr.fail_count == 0 and
+ tr.crash_count == 0 and
+ tr.timeout_count == 0 and
+ // no (otherwise passing) test leaked memory or logged errors
+ tr.leak_count == 0 and
+ tr.log_err_count == 0;
}
+ /// Computes the number of tests which passed from the other values.
pub fn passCount(tr: TestResults) u32 {
- return tr.test_count - tr.fail_count - tr.skip_count;
+ return tr.test_count - tr.skip_count - tr.fail_count - tr.crash_count - tr.timeout_count;
}
};
@@ -88,6 +116,8 @@ pub const MakeOptions = struct {
// it currently breaks because `std.net.Address` doesn't work there. Work around for now.
.wasm32 => void,
},
+ /// If set, this is a timeout to enforce on all individual unit tests, in nanoseconds.
+ unit_test_timeout_ns: ?u64,
/// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`.
gpa: Allocator,
};
@@ -237,6 +267,7 @@ pub fn init(options: StepOptions) Step {
.result_cached = false,
.result_duration_ns = null,
.result_peak_rss = 0,
+ .result_failed_command = null,
.test_results = .{},
};
}
@@ -250,6 +281,7 @@ pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!voi
var timer: ?std.time.Timer = t: {
if (!s.owner.graph.time_report) break :t null;
if (s.id == .compile) break :t null;
+ if (s.id == .run and s.cast(Run).?.stdio == .zig_test) break :t null;
break :t std.time.Timer.start() catch @panic("--time-report not supported on this host");
};
const make_result = s.makeFn(s, options);
@@ -336,20 +368,20 @@ pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void
}
}
-pub fn evalChildProcess(s: *Step, argv: []const []const u8) ![]u8 {
- const run_result = try captureChildProcess(s, std.Progress.Node.none, argv);
- try handleChildProcessTerm(s, run_result.term, null, argv);
- return run_result.stdout;
-}
-
+/// Populates `s.result_failed_command`.
pub fn captureChildProcess(
s: *Step,
+ gpa: Allocator,
progress_node: std.Progress.Node,
argv: []const []const u8,
) !std.process.Child.RunResult {
const arena = s.owner.allocator;
- try handleChildProcUnsupported(s, null, argv);
+ // If an error occurs, it happened in this command:
+ assert(s.result_failed_command == null);
+ s.result_failed_command = try allocPrintCmd(gpa, null, argv);
+
+ try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
const result = std.process.Child.run(.{
@@ -386,6 +418,7 @@ pub const ZigProcess = struct {
/// Assumes that argv contains `--listen=-` and that the process being spawned
/// is the zig compiler - the same version that compiled the build runner.
+/// Populates `s.result_failed_command`.
pub fn evalZigProcess(
s: *Step,
argv: []const []const u8,
@@ -394,6 +427,10 @@ pub fn evalZigProcess(
web_server: ?*Build.WebServer,
gpa: Allocator,
) !?Path {
+ // If an error occurs, it happened in this command:
+ assert(s.result_failed_command == null);
+ s.result_failed_command = try allocPrintCmd(gpa, null, argv);
+
if (s.getZigProcess()) |zp| update: {
assert(watch);
if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd);
@@ -410,8 +447,9 @@ pub fn evalZigProcess(
else => |e| return e,
};
- if (s.result_error_bundle.errorMessageCount() > 0)
+ if (s.result_error_bundle.errorMessageCount() > 0) {
return s.fail("{d} compilation errors", .{s.result_error_bundle.errorMessageCount()});
+ }
if (s.result_error_msgs.items.len > 0 and result == null) {
// Crash detected.
@@ -420,7 +458,7 @@ pub fn evalZigProcess(
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
s.clearZigProcess(gpa);
- try handleChildProcessTerm(s, term, null, argv);
+ try handleChildProcessTerm(s, term);
return error.MakeFailed;
}
@@ -430,7 +468,7 @@ pub fn evalZigProcess(
const b = s.owner;
const arena = b.allocator;
- try handleChildProcUnsupported(s, null, argv);
+ try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
var child = std.process.Child.init(argv, arena);
@@ -484,16 +522,11 @@ pub fn evalZigProcess(
else => {},
};
- try handleChildProcessTerm(s, term, null, argv);
+ try handleChildProcessTerm(s, term);
}
- // This is intentionally printed for failure on the first build but not for
- // subsequent rebuilds.
if (s.result_error_bundle.errorMessageCount() > 0) {
- return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
- s.result_error_bundle.errorMessageCount(),
- try allocPrintCmd(arena, null, argv),
- });
+ return s.fail("{d} compilation errors", .{s.result_error_bundle.errorMessageCount()});
}
return result;
@@ -541,7 +574,6 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
-
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@@ -712,54 +744,38 @@ pub fn handleVerbose2(
}
}
-pub inline fn handleChildProcUnsupported(
- s: *Step,
- opt_cwd: ?[]const u8,
- argv: []const []const u8,
-) error{ OutOfMemory, MakeFailed }!void {
+/// Asserts that the caller has already populated `s.result_failed_command`.
+pub inline fn handleChildProcUnsupported(s: *Step) error{ OutOfMemory, MakeFailed }!void {
if (!std.process.can_spawn) {
- return s.fail(
- "unable to execute the following command: host cannot spawn child processes\n{s}",
- .{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
- );
+ return s.fail("unable to spawn process: host cannot spawn child processes", .{});
}
}
-pub fn handleChildProcessTerm(
- s: *Step,
- term: std.process.Child.Term,
- opt_cwd: ?[]const u8,
- argv: []const []const u8,
-) error{ MakeFailed, OutOfMemory }!void {
- const arena = s.owner.allocator;
+/// Asserts that the caller has already populated `s.result_failed_command`.
+pub fn handleChildProcessTerm(s: *Step, term: std.process.Child.Term) error{ MakeFailed, OutOfMemory }!void {
+ assert(s.result_failed_command != null);
switch (term) {
.Exited => |code| {
if (code != 0) {
- return s.fail(
- "the following command exited with error code {d}:\n{s}",
- .{ code, try allocPrintCmd(arena, opt_cwd, argv) },
- );
+ return s.fail("process exited with error code {d}", .{code});
}
},
.Signal, .Stopped, .Unknown => {
- return s.fail(
- "the following command terminated unexpectedly:\n{s}",
- .{try allocPrintCmd(arena, opt_cwd, argv)},
- );
+ return s.fail("process terminated unexpectedly", .{});
},
}
}
pub fn allocPrintCmd(
- arena: Allocator,
+ gpa: Allocator,
opt_cwd: ?[]const u8,
argv: []const []const u8,
) Allocator.Error![]u8 {
- return allocPrintCmd2(arena, opt_cwd, null, argv);
+ return allocPrintCmd2(gpa, opt_cwd, null, argv);
}
pub fn allocPrintCmd2(
- arena: Allocator,
+ gpa: Allocator,
opt_cwd: ?[]const u8,
opt_env: ?*const std.process.EnvMap,
argv: []const []const u8,
@@ -799,11 +815,13 @@ pub fn allocPrintCmd2(
}
};
- var aw: std.Io.Writer.Allocating = .init(arena);
+ var aw: std.Io.Writer.Allocating = .init(gpa);
+ defer aw.deinit();
const writer = &aw.writer;
if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
if (opt_env) |env| {
- const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
+ var process_env_map = std.process.getEnvMap(gpa) catch std.process.EnvMap.init(gpa);
+ defer process_env_map.deinit();
var it = env.iterator();
while (it.next()) |entry| {
const key = entry.key_ptr.*;
@@ -989,11 +1007,14 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const
pub fn reset(step: *Step, gpa: Allocator) void {
assert(step.state == .precheck_done);
+ if (step.result_failed_command) |cmd| gpa.free(cmd);
+
step.result_error_msgs.clearRetainingCapacity();
step.result_stderr = "";
step.result_cached = false;
step.result_duration_ns = null;
step.result_peak_rss = 0;
+ step.result_failed_command = null;
step.test_results = .{};
step.result_error_bundle.deinit(gpa);
diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig
index a364dfa6f478..5adebdc454ff 100644
--- a/lib/std/Build/Step/Fmt.zig
+++ b/lib/std/Build/Step/Fmt.zig
@@ -67,7 +67,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
argv.appendAssumeCapacity(b.pathFromRoot(p));
}
- const run_result = try step.captureChildProcess(prog_node, argv.items);
+ const run_result = try step.captureChildProcess(options.gpa, prog_node, argv.items);
if (fmt.check) switch (run_result.term) {
.Exited => |code| if (code != 0 and run_result.stdout.len != 0) {
var it = std.mem.tokenizeScalar(u8, run_result.stdout, '\n');
@@ -77,5 +77,5 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
},
else => {},
};
- try step.handleChildProcessTerm(run_result.term, null, argv.items);
+ try step.handleChildProcessTerm(run_result.term);
}
diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index 78cf08dd4390..19fd4a166a6a 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -555,6 +555,7 @@ pub fn addCheck(run: *Run, new_check: StdIo.Check) void {
pub fn captureStdErr(run: *Run) std.Build.LazyPath {
assert(run.stdio != .inherit);
+ assert(run.stdio != .zig_test);
if (run.captured_stderr) |output| return .{ .generated = .{ .file = &output.generated_file } };
@@ -570,6 +571,7 @@ pub fn captureStdErr(run: *Run) std.Build.LazyPath {
pub fn captureStdOut(run: *Run) std.Build.LazyPath {
assert(run.stdio != .inherit);
+ assert(run.stdio != .zig_test);
if (run.captured_stdout) |output| return .{ .generated = .{ .file = &output.generated_file } };
@@ -673,7 +675,6 @@ const IndexedOutput = struct {
output: *Output,
};
fn make(step: *Step, options: Step.MakeOptions) !void {
- const prog_node = options.progress_node;
const b = step.owner;
const arena = b.allocator;
const run: *Run = @fieldParentPtr("step", step);
@@ -850,7 +851,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
b.fmt("{s}{s}", .{ placeholder.output.prefix, arg_output_path });
}
- try runCommand(run, argv_list.items, has_side_effects, output_dir_path, prog_node, null);
+ try runCommand(run, argv_list.items, has_side_effects, output_dir_path, options, null);
if (!has_side_effects) try step.writeManifestAndWatch(&man);
return;
};
@@ -883,7 +884,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
});
}
- try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, null);
+ try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null);
const dep_file_dir = std.fs.cwd();
const dep_file_basename = dep_output_file.generated_file.getPath2(b, step);
@@ -982,7 +983,14 @@ pub fn rerunInFuzzMode(
const has_side_effects = false;
const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
- try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, .{
+ try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{
+ .progress_node = prog_node,
+ .thread_pool = undefined, // not used by `runCommand`
+ .watch = undefined, // not used by `runCommand`
+ .web_server = null, // only needed for time reports
+ .unit_test_timeout_ns = null, // don't time out fuzz tests for now
+ .gpa = undefined, // not used by `runCommand`
+ }, .{
.unit_test_index = unit_test_index,
.fuzz = fuzz,
});
@@ -1063,16 +1071,17 @@ fn runCommand(
argv: []const []const u8,
has_side_effects: bool,
output_dir_path: []const u8,
- prog_node: std.Progress.Node,
+ options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !void {
const step = &run.step;
const b = step.owner;
const arena = b.allocator;
+ const gpa = options.gpa;
const cwd: ?[]const u8 = if (run.cwd) |lazy_cwd| lazy_cwd.getPath2(b, step) else null;
- try step.handleChildProcUnsupported(cwd, argv);
+ try step.handleChildProcUnsupported();
try Step.handleVerbose2(step.owner, cwd, run.env_map, argv);
const allow_skip = switch (run.stdio) {
@@ -1085,7 +1094,7 @@ fn runCommand(
var env_map = run.env_map orelse &b.graph.env_map;
- const result = spawnChildAndCollect(run, argv, env_map, has_side_effects, prog_node, fuzz_context) catch |err| term: {
+ const opt_generic_result = spawnChildAndCollect(run, argv, env_map, has_side_effects, options, fuzz_context) catch |err| term: {
// InvalidExe: cpu arch mismatch
// FileNotFound: can happen with a wrong dynamic linker path
if (err == error.InvalidExe or err == error.FileNotFound) interpret: {
@@ -1222,32 +1231,33 @@ fn runCommand(
run.addPathForDynLibs(exe);
}
+ gpa.free(step.result_failed_command.?);
+ step.result_failed_command = null;
try Step.handleVerbose2(step.owner, cwd, run.env_map, interp_argv.items);
- break :term spawnChildAndCollect(run, interp_argv.items, env_map, has_side_effects, prog_node, fuzz_context) catch |e| {
+ break :term spawnChildAndCollect(run, interp_argv.items, env_map, has_side_effects, options, fuzz_context) catch |e| {
if (!run.failing_to_execute_foreign_is_an_error) return error.MakeSkipped;
-
+ if (e == error.MakeFailed) return error.MakeFailed; // error already reported
return step.fail("unable to spawn interpreter {s}: {s}", .{
interp_argv.items[0], @errorName(e),
});
};
}
+ if (err == error.MakeFailed) return error.MakeFailed; // error already reported
return step.fail("failed to spawn and capture stdio from {s}: {s}", .{ argv[0], @errorName(err) });
};
- step.result_duration_ns = result.elapsed_ns;
- step.result_peak_rss = result.peak_rss;
- step.test_results = result.stdio.test_results;
- if (result.stdio.test_metadata) |tm|
- run.cached_test_metadata = tm.toCachedTestMetadata();
-
- const final_argv = if (interp_argv.items.len == 0) argv else interp_argv.items;
-
- if (fuzz_context != null) {
- try step.handleChildProcessTerm(result.term, cwd, final_argv);
+ const generic_result = opt_generic_result orelse {
+ assert(run.stdio == .zig_test);
+ // Specific errors have already been reported, and test results are populated. All we need
+ // to do is report step failure if any test failed.
+ if (!step.test_results.isSuccess()) return error.MakeFailed;
return;
- }
+ };
+
+ assert(fuzz_context == null);
+ assert(run.stdio != .zig_test);
// Capture stdout and stderr to GeneratedFile objects.
const Stream = struct {
@@ -1257,11 +1267,11 @@ fn runCommand(
for ([_]Stream{
.{
.captured = run.captured_stdout,
- .bytes = result.stdio.stdout,
+ .bytes = generic_result.stdout,
},
.{
.captured = run.captured_stderr,
- .bytes = result.stdio.stderr,
+ .bytes = generic_result.stderr,
},
}) |stream| {
if (stream.captured) |output| {
@@ -1285,133 +1295,94 @@ fn runCommand(
}
switch (run.stdio) {
+ .zig_test => unreachable,
.check => |checks| for (checks.items) |check| switch (check) {
.expect_stderr_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, result.stdio.stderr.?)) {
+ if (!mem.eql(u8, expected_bytes, generic_result.stderr.?)) {
return step.fail(
- \\
\\========= expected this stderr: =========
\\{s}
\\========= but found: ====================
\\{s}
- \\========= from the following command: ===
- \\{s}
, .{
expected_bytes,
- result.stdio.stderr.?,
- try Step.allocPrintCmd(arena, cwd, final_argv),
+ generic_result.stderr.?,
});
}
},
.expect_stderr_match => |match| {
- if (mem.indexOf(u8, result.stdio.stderr.?, match) == null) {
+ if (mem.indexOf(u8, generic_result.stderr.?, match) == null) {
return step.fail(
- \\
\\========= expected to find in stderr: =========
\\{s}
\\========= but stderr does not contain it: =====
\\{s}
- \\========= from the following command: =========
- \\{s}
, .{
match,
- result.stdio.stderr.?,
- try Step.allocPrintCmd(arena, cwd, final_argv),
+ generic_result.stderr.?,
});
}
},
.expect_stdout_exact => |expected_bytes| {
- if (!mem.eql(u8, expected_bytes, result.stdio.stdout.?)) {
+ if (!mem.eql(u8, expected_bytes, generic_result.stdout.?)) {
return step.fail(
- \\
\\========= expected this stdout: =========
\\{s}
\\========= but found: ====================
\\{s}
- \\========= from the following command: ===
- \\{s}
, .{
expected_bytes,
- result.stdio.stdout.?,
- try Step.allocPrintCmd(arena, cwd, final_argv),
+ generic_result.stdout.?,
});
}
},
.expect_stdout_match => |match| {
- if (mem.indexOf(u8, result.stdio.stdout.?, match) == null) {
+ if (mem.indexOf(u8, generic_result.stdout.?, match) == null) {
return step.fail(
- \\
\\========= expected to find in stdout: =========
\\{s}
\\========= but stdout does not contain it: =====
\\{s}
- \\========= from the following command: =========
- \\{s}
, .{
match,
- result.stdio.stdout.?,
- try Step.allocPrintCmd(arena, cwd, final_argv),
+ generic_result.stdout.?,
});
}
},
.expect_term => |expected_term| {
- if (!termMatches(expected_term, result.term)) {
- return step.fail("the following command {f} (expected {f}):\n{s}", .{
- fmtTerm(result.term),
+ if (!termMatches(expected_term, generic_result.term)) {
+ return step.fail("process {f} (expected {f})", .{
+ fmtTerm(generic_result.term),
fmtTerm(expected_term),
- try Step.allocPrintCmd(arena, cwd, final_argv),
});
}
},
},
- .zig_test => {
- const prefix: []const u8 = p: {
- if (result.stdio.test_metadata) |tm| {
- if (tm.next_index > 0 and tm.next_index <= tm.names.len) {
- const name = tm.testName(tm.next_index - 1);
- break :p b.fmt("while executing test '{s}', ", .{name});
- }
- }
- break :p "";
- };
- const expected_term: std.process.Child.Term = .{ .Exited = 0 };
- if (!termMatches(expected_term, result.term)) {
- return step.fail("{s}the following command {f} (expected {f}):\n{s}", .{
- prefix,
- fmtTerm(result.term),
- fmtTerm(expected_term),
- try Step.allocPrintCmd(arena, cwd, final_argv),
- });
- }
- if (!result.stdio.test_results.isSuccess()) {
- return step.fail(
- "{s}the following test command failed:\n{s}",
- .{ prefix, try Step.allocPrintCmd(arena, cwd, final_argv) },
- );
- }
- },
else => {
- // On failure, print stderr if captured.
- const bad_exit = switch (result.term) {
+ // On failure, report captured stderr like normal standard error output.
+ const bad_exit = switch (generic_result.term) {
.Exited => |code| code != 0,
.Signal, .Stopped, .Unknown => true,
};
+ if (bad_exit) {
+ if (generic_result.stderr) |bytes| {
+ run.step.result_stderr = bytes;
+ }
+ }
- if (bad_exit) if (result.stdio.stderr) |err| {
- try step.addError("stderr:\n{s}", .{err});
- };
-
- try step.handleChildProcessTerm(result.term, cwd, final_argv);
+ try step.handleChildProcessTerm(generic_result.term);
},
}
}
-const ChildProcResult = struct {
+const EvalZigTestResult = struct {
+ test_results: Step.TestResults,
+ test_metadata: ?TestMetadata,
+};
+const EvalGenericResult = struct {
term: std.process.Child.Term,
- elapsed_ns: u64,
- peak_rss: usize,
-
- stdio: StdIoResult,
+ stdout: ?[]const u8,
+ stderr: ?[]const u8,
};
fn spawnChildAndCollect(
@@ -1419,9 +1390,9 @@ fn spawnChildAndCollect(
argv: []const []const u8,
env_map: *EnvMap,
has_side_effects: bool,
- prog_node: std.Progress.Node,
+ options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
-) !ChildProcResult {
+) !?EvalGenericResult {
const b = run.step.owner;
const arena = b.allocator;
@@ -1462,180 +1433,383 @@ fn spawnChildAndCollect(
child.stdin_behavior = .Pipe;
}
- const inherit = child.stdout_behavior == .Inherit or child.stderr_behavior == .Inherit;
-
- if (run.stdio != .zig_test and !run.disable_zig_progress and !inherit) {
- child.progress_node = prog_node;
- }
+ // If an error occurs, it's caused by this command:
+ assert(run.step.result_failed_command == null);
+ run.step.result_failed_command = try Step.allocPrintCmd(options.gpa, child.cwd, argv);
- const term, const result, const elapsed_ns = t: {
+ if (run.stdio == .zig_test) {
+ var timer = try std.time.Timer.start();
+ const res = try evalZigTest(run, &child, options, fuzz_context);
+ run.step.result_duration_ns = timer.read();
+ run.step.test_results = res.test_results;
+ if (res.test_metadata) |tm| {
+ run.cached_test_metadata = tm.toCachedTestMetadata();
+ if (options.web_server) |ws| ws.updateTimeReportRunTest(
+ run,
+ &run.cached_test_metadata.?,
+ tm.ns_per_test,
+ );
+ }
+ return null;
+ } else {
+ const inherit = child.stdout_behavior == .Inherit or child.stderr_behavior == .Inherit;
+ if (!run.disable_zig_progress and !inherit) {
+ child.progress_node = options.progress_node;
+ }
if (inherit) std.debug.lockStdErr();
defer if (inherit) std.debug.unlockStdErr();
+ var timer = try std.time.Timer.start();
+ const res = try evalGeneric(run, &child);
+ run.step.result_duration_ns = timer.read();
+ return .{ .term = res.term, .stdout = res.stdout, .stderr = res.stderr };
+ }
+}
+
+const StdioPollEnum = enum { stdout, stderr };
+
+fn evalZigTest(
+ run: *Run,
+ child: *std.process.Child,
+ options: Step.MakeOptions,
+ fuzz_context: ?FuzzContext,
+) !EvalZigTestResult {
+ const gpa = run.step.owner.allocator;
+ const arena = run.step.owner.allocator;
+
+ // We will update this every time a child runs.
+ run.step.result_peak_rss = 0;
+
+ var result: EvalZigTestResult = .{
+ .test_results = .{
+ .test_count = 0,
+ .skip_count = 0,
+ .fail_count = 0,
+ .crash_count = 0,
+ .timeout_count = 0,
+ .leak_count = 0,
+ .log_err_count = 0,
+ },
+ .test_metadata = null,
+ };
+ while (true) {
try child.spawn();
- errdefer {
+ var poller = std.Io.poll(gpa, StdioPollEnum, .{
+ .stdout = child.stdout.?,
+ .stderr = child.stderr.?,
+ });
+ var child_killed = false;
+ defer if (!child_killed) {
_ = child.kill() catch {};
- }
+ poller.deinit();
+ run.step.result_peak_rss = @max(
+ run.step.result_peak_rss,
+ child.resource_usage_statistics.getMaxRss() orelse 0,
+ );
+ };
- // We need to report `error.InvalidExe` *now* if applicable.
try child.waitForSpawn();
- var timer = try std.time.Timer.start();
+ switch (try pollZigTest(
+ run,
+ child,
+ options,
+ fuzz_context,
+ &poller,
+ &result.test_metadata,
+ &result.test_results,
+ )) {
+ .write_failed => |err| {
+ // The runner unexpectedly closed a stdio pipe, which means a crash. Make sure we've captured
+ // all available stderr to make our error output as useful as possible.
+ while (try poller.poll()) {}
+ run.step.result_stderr = try arena.dupe(u8, poller.reader(.stderr).buffered());
+
+ // Clean up everything and wait for the child to exit.
+ child.stdin.?.close();
+ child.stdin = null;
+ poller.deinit();
+ child_killed = true;
+ const term = try child.wait();
+ run.step.result_peak_rss = @max(
+ run.step.result_peak_rss,
+ child.resource_usage_statistics.getMaxRss() orelse 0,
+ );
- const result = if (run.stdio == .zig_test)
- try evalZigTest(run, &child, prog_node, fuzz_context)
- else
- try evalGeneric(run, &child);
+ try run.step.addError("unable to write stdin ({t}); test process unexpectedly {f}", .{ err, fmtTerm(term) });
+ return result;
+ },
+ .no_poll => |no_poll| {
+ // This might be a success (we requested exit and the child dutifully closed stdout) or
+ // a crash of some kind. Either way, the child will terminate by itself -- wait for it.
+ const stderr_owned = try arena.dupe(u8, poller.reader(.stderr).buffered());
+ poller.reader(.stderr).tossBuffered();
+
+ // Clean up everything and wait for the child to exit.
+ child.stdin.?.close();
+ child.stdin = null;
+ poller.deinit();
+ child_killed = true;
+ const term = try child.wait();
+ run.step.result_peak_rss = @max(
+ run.step.result_peak_rss,
+ child.resource_usage_statistics.getMaxRss() orelse 0,
+ );
- break :t .{ try child.wait(), result, timer.read() };
- };
+ if (no_poll.active_test_index) |test_index| {
+ // A test was running, so this is definitely a crash. Report it against that
+ // test, and continue to the next test.
+ result.test_metadata.?.ns_per_test[test_index] = no_poll.ns_elapsed;
+ result.test_results.crash_count += 1;
+ try run.step.addError("'{s}' {f}{s}{s}", .{
+ result.test_metadata.?.testName(test_index),
+ fmtTerm(term),
+ if (stderr_owned.len != 0) " with stderr:\n" else "",
+ std.mem.trim(u8, stderr_owned, "\n"),
+ });
+ continue;
+ }
- return .{
- .stdio = result,
- .term = term,
- .elapsed_ns = elapsed_ns,
- .peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0,
- };
+ // Report an error if the child terminated uncleanly or if we were still trying to run more tests.
+ run.step.result_stderr = stderr_owned;
+ const tests_done = result.test_metadata != null and result.test_metadata.?.next_index == std.math.maxInt(u32);
+ if (!tests_done or !termMatches(.{ .Exited = 0 }, term)) {
+ try run.step.addError("test process unexpectedly {f}", .{fmtTerm(term)});
+ }
+ return result;
+ },
+ .timeout => |timeout| {
+ const stderr = poller.reader(.stderr).buffered();
+ poller.reader(.stderr).tossBuffered();
+ if (timeout.active_test_index) |test_index| {
+ // A test was running. Report the timeout against that test, and continue on to
+ // the next test.
+ result.test_metadata.?.ns_per_test[test_index] = timeout.ns_elapsed;
+ result.test_results.timeout_count += 1;
+ try run.step.addError("'{s}' timed out after {D}{s}{s}", .{
+ result.test_metadata.?.testName(test_index),
+ timeout.ns_elapsed,
+ if (stderr.len != 0) " with stderr:\n" else "",
+ std.mem.trim(u8, stderr, "\n"),
+ });
+ continue;
+ }
+ // Just log an error and let the child be killed.
+ run.step.result_stderr = try arena.dupe(u8, stderr);
+ return run.step.fail("test runner failed to respond for {D}", .{timeout.ns_elapsed});
+ },
+ }
+ comptime unreachable;
+ }
}
-const StdIoResult = struct {
- stdout: ?[]const u8,
- stderr: ?[]const u8,
- test_results: Step.TestResults,
- test_metadata: ?TestMetadata,
-};
-
-fn evalZigTest(
+/// Polls stdout of a Zig test process until a termination condition is reached:
+/// * A write fails, indicating the child unexpectedly closed stdin
+/// * A test (or a response from the test runner) times out
+/// * `poll` fails, indicating the child closed stdout and stderr
+fn pollZigTest(
run: *Run,
child: *std.process.Child,
- prog_node: std.Progress.Node,
+ options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
-) !StdIoResult {
+ poller: *std.Io.Poller(StdioPollEnum),
+ opt_metadata: *?TestMetadata,
+ results: *Step.TestResults,
+) !union(enum) {
+ write_failed: anyerror,
+ no_poll: struct {
+ active_test_index: ?u32,
+ ns_elapsed: u64,
+ },
+ timeout: struct {
+ active_test_index: ?u32,
+ ns_elapsed: u64,
+ },
+} {
const gpa = run.step.owner.allocator;
const arena = run.step.owner.allocator;
- var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
- .stdout = child.stdout.?,
- .stderr = child.stderr.?,
- });
- defer poller.deinit();
-
- // If this is `true`, we avoid ever entering the polling loop below, because the stdin pipe has
- // somehow already closed; instead, we go straight to capturing stderr in case it has anything
- // useful.
- const first_write_failed = if (fuzz_context) |fuzz| failed: {
- sendRunTestMessage(child.stdin.?, .start_fuzzing, fuzz.unit_test_index) catch |err| {
- try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
- break :failed true;
- };
- break :failed false;
- } else failed: {
+ var sub_prog_node: ?std.Progress.Node = null;
+ defer if (sub_prog_node) |n| n.end();
+
+ if (fuzz_context) |fuzz| {
+ assert(opt_metadata.* == null); // fuzz processes are never restarted
+ sendRunTestMessage(child.stdin.?, .start_fuzzing, fuzz.unit_test_index) catch |err| return .{ .write_failed = err };
+ } else if (opt_metadata.*) |*md| {
+ // Previous unit test process died or was killed; we're continuing where it left off
+ requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
+ } else {
+ // Running unit tests normally
run.fuzz_tests.clearRetainingCapacity();
- sendMessage(child.stdin.?, .query_test_metadata) catch |err| {
- try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
- break :failed true;
- };
- break :failed false;
- };
+ sendMessage(child.stdin.?, .query_test_metadata) catch |err| return .{ .write_failed = err };
+ }
- var fail_count: u32 = 0;
- var skip_count: u32 = 0;
- var leak_count: u32 = 0;
- var test_count: u32 = 0;
- var log_err_count: u32 = 0;
+ var active_test_index: ?u32 = null;
+
+ // `null` means this host does not support `std.time.Timer`. This timer is `reset()` whenever we
+ // change `active_test_index`, i.e. whenever a test starts or finishes.
+ var timer: ?std.time.Timer = std.time.Timer.start() catch null;
- var metadata: ?TestMetadata = null;
var coverage_id: ?u64 = null;
- var sub_prog_node: ?std.Progress.Node = null;
- defer if (sub_prog_node) |n| n.end();
+ // This timeout is used when we're waiting on the test runner itself rather than a user-specified
+ // test. For instance, if the test runner leaves this much time between us requesting a test to
+ // start and it acknowledging the test starting, we terminate the child and raise an error. This
+ // *should* never happen, but could in theory be caused by some very unlucky IB in a test.
+ const response_timeout_ns = 30 * std.time.ns_per_s;
const stdout = poller.reader(.stdout);
const stderr = poller.reader(.stderr);
- const any_write_failed = first_write_failed or poll: while (true) {
+
+ while (true) {
const Header = std.zig.Server.Message.Header;
- while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
+
+ // This block is exited when `stdout` contains enough bytes for a `Header`.
+ header_ready: {
+ if (stdout.buffered().len >= @sizeOf(Header)) {
+ // We already have one, no need to poll!
+ break :header_ready;
+ }
+
+ // Always `null` if `timer` is `null`.
+ const opt_timeout_ns: ?u64 = ns: {
+ if (timer == null) break :ns null;
+ if (active_test_index == null) break :ns response_timeout_ns;
+ break :ns options.unit_test_timeout_ns;
+ };
+
+ if (opt_timeout_ns) |timeout_ns| {
+ const remaining_ns = timeout_ns -| timer.?.read();
+ if (!try poller.pollTimeout(remaining_ns)) return .{ .no_poll = .{
+ .active_test_index = active_test_index,
+ .ns_elapsed = if (timer) |*t| t.read() else 0,
+ } };
+ } else {
+ if (!try poller.poll()) return .{ .no_poll = .{
+ .active_test_index = active_test_index,
+ .ns_elapsed = if (timer) |*t| t.read() else 0,
+ } };
+ }
+
+ if (stdout.buffered().len >= @sizeOf(Header)) {
+ // There wasn't a header before, but there is one after the `poll`.
+ break :header_ready;
+ }
+
+ if (opt_timeout_ns) |timeout_ns| {
+ const cur_ns = timer.?.read();
+ if (cur_ns >= timeout_ns) return .{ .timeout = .{
+ .active_test_index = active_test_index,
+ .ns_elapsed = cur_ns,
+ } };
+ }
+ continue;
+ }
+ // There is definitely a header available now -- read it.
const header = stdout.takeStruct(Header, .little) catch unreachable;
- while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll false;
+
+ while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) return .{ .no_poll = .{
+ .active_test_index = active_test_index,
+ .ns_elapsed = if (timer) |*t| t.read() else 0,
+ } };
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
- if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
- return run.step.fail(
- "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
- .{ builtin.zig_version_string, body },
- );
- }
+ if (!std.mem.eql(u8, builtin.zig_version_string, body)) return run.step.fail(
+ "zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
+ .{ builtin.zig_version_string, body },
+ );
},
.test_metadata => {
assert(fuzz_context == null);
+
+ // `metadata` would only be populated if we'd already seen a `test_metadata`, but we
+ // only request it once (and importantly, we don't re-request it if we kill and
+ // restart the test runner).
+ assert(opt_metadata.* == null);
+
const TmHdr = std.zig.Server.Message.TestMetadata;
- const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body));
- test_count = tm_hdr.tests_len;
+ const tm_hdr: *align(1) const TmHdr = @ptrCast(body);
+ results.test_count = tm_hdr.tests_len;
- const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
- const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
+ const names_bytes = body[@sizeOf(TmHdr)..][0 .. results.test_count * @sizeOf(u32)];
+ const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. results.test_count * @sizeOf(u32)];
const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
const names = std.mem.bytesAsSlice(u32, names_bytes);
const expected_panic_msgs = std.mem.bytesAsSlice(u32, expected_panic_msgs_bytes);
+
const names_aligned = try arena.alloc(u32, names.len);
for (names_aligned, names) |*dest, src| dest.* = src;
const expected_panic_msgs_aligned = try arena.alloc(u32, expected_panic_msgs.len);
for (expected_panic_msgs_aligned, expected_panic_msgs) |*dest, src| dest.* = src;
- prog_node.setEstimatedTotalItems(names.len);
- metadata = .{
+ options.progress_node.setEstimatedTotalItems(names.len);
+ opt_metadata.* = .{
.string_bytes = try arena.dupe(u8, string_bytes),
+ .ns_per_test = try arena.alloc(u64, results.test_count),
.names = names_aligned,
.expected_panic_msgs = expected_panic_msgs_aligned,
.next_index = 0,
- .prog_node = prog_node,
+ .prog_node = options.progress_node,
};
+ @memset(opt_metadata.*.?.ns_per_test, std.math.maxInt(u64));
- requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node) catch |err| {
- try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
- break :poll true;
- };
+ active_test_index = null;
+ if (timer) |*t| t.reset();
+
+ requestNextTest(child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err| return .{ .write_failed = err };
+ },
+ .test_started => {
+ active_test_index = opt_metadata.*.?.next_index - 1;
+ if (timer) |*t| t.reset();
},
.test_results => {
assert(fuzz_context == null);
- const md = metadata.?;
+ const md = &opt_metadata.*.?;
const TrHdr = std.zig.Server.Message.TestResults;
- const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body));
- fail_count +|= @intFromBool(tr_hdr.flags.fail);
- skip_count +|= @intFromBool(tr_hdr.flags.skip);
- leak_count +|= @intFromBool(tr_hdr.flags.leak);
- log_err_count +|= tr_hdr.flags.log_err_count;
+ const tr_hdr: *align(1) const TrHdr = @ptrCast(body);
+ assert(tr_hdr.index == active_test_index);
+
+ switch (tr_hdr.flags.status) {
+ .pass => {},
+ .skip => results.skip_count +|= 1,
+ .fail => results.fail_count +|= 1,
+ }
+ const leak_count = tr_hdr.flags.leak_count;
+ const log_err_count = tr_hdr.flags.log_err_count;
+ results.leak_count +|= leak_count;
+ results.log_err_count +|= log_err_count;
if (tr_hdr.flags.fuzz) try run.fuzz_tests.append(gpa, tr_hdr.index);
- if (tr_hdr.flags.fail or tr_hdr.flags.leak or tr_hdr.flags.log_err_count > 0) {
- const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
- const stderr_contents = stderr.buffered();
- stderr.toss(stderr_contents.len);
- const msg = std.mem.trim(u8, stderr_contents, "\n");
- const label = if (tr_hdr.flags.fail)
- "failed"
- else if (tr_hdr.flags.leak)
- "leaked"
- else if (tr_hdr.flags.log_err_count > 0)
- "logged errors"
- else
- unreachable;
- if (msg.len > 0) {
- try run.step.addError("'{s}' {s}: {s}", .{ name, label, msg });
+ if (tr_hdr.flags.status == .fail) {
+ const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
+ const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
+ stderr.tossBuffered();
+ if (stderr_bytes.len == 0) {
+ try run.step.addError("'{s}' failed without output", .{name});
} else {
- try run.step.addError("'{s}' {s}", .{ name, label });
+ try run.step.addError("'{s}' failed:\n{s}", .{ name, stderr_bytes });
}
+ } else if (leak_count > 0) {
+ const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
+ const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
+ stderr.tossBuffered();
+ try run.step.addError("'{s}' leaked {d} allocations:\n{s}", .{ name, leak_count, stderr_bytes });
+ } else if (log_err_count > 0) {
+ const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
+ const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
+ stderr.tossBuffered();
+ try run.step.addError("'{s}' logged {d} errors:\n{s}", .{ name, log_err_count, stderr_bytes });
}
- requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node) catch |err| {
- try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
- break :poll true;
- };
+ active_test_index = null;
+ if (timer) |*t| md.ns_per_test[tr_hdr.index] = t.lap();
+
+ requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.coverage_id => {
const fuzz = fuzz_context.?.fuzz;
@@ -1667,39 +1841,12 @@ fn evalZigTest(
},
else => {}, // ignore other messages
}
- };
-
- if (any_write_failed) {
- // The compiler unexpectedly closed stdin; something is very wrong and has probably crashed.
- // We want to make sure we've captured all of stderr so that it's logged below.
- while (try poller.poll()) {}
}
-
- const stderr_contents = std.mem.trim(u8, stderr.buffered(), "\n");
- if (stderr_contents.len > 0) {
- run.step.result_stderr = try arena.dupe(u8, stderr_contents);
- }
-
- // Send EOF to stdin.
- child.stdin.?.close();
- child.stdin = null;
-
- return .{
- .stdout = null,
- .stderr = null,
- .test_results = .{
- .test_count = test_count,
- .fail_count = fail_count,
- .skip_count = skip_count,
- .leak_count = leak_count,
- .log_err_count = log_err_count,
- },
- .test_metadata = metadata,
- };
}
const TestMetadata = struct {
names: []const u32,
+ ns_per_test: []u64,
expected_panic_msgs: []const u32,
string_bytes: []const u8,
next_index: u32,
@@ -1740,6 +1887,7 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr
try sendRunTestMessage(in, .run_test, i);
return;
} else {
+ metadata.next_index = std.math.maxInt(u32); // indicate that all tests are done
try sendMessage(in, .exit);
}
}
@@ -1761,10 +1909,15 @@ fn sendRunTestMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag, index:
try file.writeAll(full_msg);
}
-fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
+fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
const b = run.step.owner;
const arena = b.allocator;
+ try child.spawn();
+ errdefer _ = child.kill() catch {};
+
+ try child.waitForSpawn();
+
switch (run.stdin) {
.bytes => |bytes| {
child.stdin.?.writeAll(bytes) catch |err| {
@@ -1854,11 +2007,12 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
}
};
+ run.step.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0;
+
return .{
+ .term = try child.wait(),
.stdout = stdout_bytes,
.stderr = stderr_bytes,
- .test_results = .{},
- .test_metadata = null,
};
}
diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig
index 451f4b9d34d4..905c2410862c 100644
--- a/lib/std/Build/WebServer.zig
+++ b/lib/std/Build/WebServer.zig
@@ -755,6 +755,64 @@ pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64)
ws.notifyUpdate();
}
+pub fn updateTimeReportRunTest(
+ ws: *WebServer,
+ run: *Build.Step.Run,
+ tests: *const Build.Step.Run.CachedTestMetadata,
+ ns_per_test: []const u64,
+) void {
+ const gpa = ws.gpa;
+
+ const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
+ if (s == &run.step) break @intCast(i);
+ } else unreachable;
+
+ assert(tests.names.len == ns_per_test.len);
+ const tests_len: u32 = @intCast(tests.names.len);
+
+ const new_len: u64 = len: {
+ var names_len: u64 = 0;
+ for (0..tests_len) |i| {
+ names_len += tests.testName(@intCast(i)).len + 1;
+ }
+ break :len @sizeOf(abi.time_report.RunTestResult) + names_len + 8 * tests_len;
+ };
+ const old_buf = old: {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ const old = ws.time_report_msgs[step_idx];
+ ws.time_report_msgs[step_idx] = &.{};
+ break :old old;
+ };
+ const buf = gpa.realloc(old_buf, new_len) catch @panic("out of memory");
+
+ const out_header: *align(1) abi.time_report.RunTestResult = @ptrCast(buf[0..@sizeOf(abi.time_report.RunTestResult)]);
+ out_header.* = .{
+ .step_idx = step_idx,
+ .tests_len = tests_len,
+ };
+ var offset: usize = @sizeOf(abi.time_report.RunTestResult);
+ const ns_per_test_out: []align(1) u64 = @ptrCast(buf[offset..][0 .. tests_len * 8]);
+ @memcpy(ns_per_test_out, ns_per_test);
+ offset += tests_len * 8;
+ for (0..tests_len) |i| {
+ const name = tests.testName(@intCast(i));
+ @memcpy(buf[offset..][0..name.len], name);
+ buf[offset..][name.len] = 0;
+ offset += name.len + 1;
+ }
+ assert(offset == buf.len);
+
+ {
+ ws.time_report_mutex.lock();
+ defer ws.time_report_mutex.unlock();
+ assert(ws.time_report_msgs[step_idx].len == 0);
+ ws.time_report_msgs[step_idx] = buf;
+ ws.time_report_update_times[step_idx] = ws.now();
+ }
+ ws.notifyUpdate();
+}
+
const RunnerRequest = union(enum) {
rebuild,
};
diff --git a/lib/std/Build/abi.zig b/lib/std/Build/abi.zig
index d5b02d951abf..5e8e259f3e4c 100644
--- a/lib/std/Build/abi.zig
+++ b/lib/std/Build/abi.zig
@@ -56,6 +56,7 @@ pub const ToClientTag = enum(u8) {
// `--time-report`
time_report_generic_result,
time_report_compile_result,
+ time_report_run_test_result,
_,
};
@@ -310,4 +311,19 @@ pub const time_report = struct {
};
};
};
+
+ /// WebSocket server->client.
+ ///
+ /// Sent after a `Step.Run` for a Zig test executable finishes, providing the test's time report.
+ ///
+ /// Trailing:
+ /// * for each `tests_len`:
+ /// * `test_ns: u64` (nanoseconds spent running this test)
+ /// * for each `tests_len`:
+ /// * `name` (null-terminated UTF-8 string)
+ pub const RunTestResult = extern struct {
+ tag: ToClientTag = .time_report_run_test_result,
+ step_idx: u32 align(1),
+ tests_len: u32 align(1),
+ };
};
diff --git a/lib/std/heap/debug_allocator.zig b/lib/std/heap/debug_allocator.zig
index a4b1de5b47b5..7288588bec6a 100644
--- a/lib/std/heap/debug_allocator.zig
+++ b/lib/std/heap/debug_allocator.zig
@@ -421,10 +421,10 @@ pub fn DebugAllocator(comptime config: Config) type {
return usedBitsCount(slot_count) * @sizeOf(usize);
}
- fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
+ fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) usize {
const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
const slot_count = slot_counts[size_class_index];
- var leaks = false;
+ var leaks: usize = 0;
for (0..used_bits_count) |used_bits_byte| {
const used_int = bucket.usedBits(used_bits_byte).*;
if (used_int != 0) {
@@ -437,7 +437,7 @@ pub fn DebugAllocator(comptime config: Config) type {
const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
const addr = page_addr + slot_index * size_class;
log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
- leaks = true;
+ leaks += 1;
}
}
}
@@ -445,16 +445,16 @@ pub fn DebugAllocator(comptime config: Config) type {
return leaks;
}
- /// Emits log messages for leaks and then returns whether there were any leaks.
- pub fn detectLeaks(self: *Self) bool {
- var leaks = false;
+ /// Emits log messages for leaks and then returns the number of detected leaks (0 if no leaks were detected).
+ pub fn detectLeaks(self: *Self) usize {
+ var leaks: usize = 0;
for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
var optional_bucket = init_optional_bucket;
const slot_count = slot_counts[size_class_index];
const used_bits_count = usedBitsCount(slot_count);
while (optional_bucket) |bucket| {
- leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
+ leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count);
optional_bucket = bucket.prev;
}
}
@@ -466,7 +466,7 @@ pub fn DebugAllocator(comptime config: Config) type {
log.err("memory address 0x{x} leaked: {f}", .{
@intFromPtr(large_alloc.bytes.ptr), stack_trace,
});
- leaks = true;
+ leaks += 1;
}
return leaks;
}
@@ -498,11 +498,17 @@ pub fn DebugAllocator(comptime config: Config) type {
/// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
pub fn deinit(self: *Self) std.heap.Check {
- const leaks = if (config.safety) self.detectLeaks() else false;
+ const leaks: usize = if (config.safety) self.detectLeaks() else 0;
+ self.deinitWithoutLeakChecks();
+ return if (leaks == 0) .ok else .leak;
+ }
+
+ /// Like `deinit`, but does not check for memory leaks. This is useful if leaks have already
+ /// been detected manually with `detectLeaks` to avoid reporting them for a second time.
+ pub fn deinitWithoutLeakChecks(self: *Self) void {
if (config.retain_metadata) self.freeRetainedMetadata();
self.large_allocations.deinit(self.backing_allocator);
self.* = undefined;
- return if (leaks) .leak else .ok;
}
fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index cbb01b2b3828..0d143ba9dd0c 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -696,6 +696,8 @@ pub const EnvVar = enum {
ZIG_LIB_DIR,
ZIG_LIBC,
ZIG_BUILD_RUNNER,
+ ZIG_BUILD_ERROR_STYLE,
+ ZIG_BUILD_MULTILINE_ERRORS,
ZIG_VERBOSE_LINK,
ZIG_VERBOSE_CC,
ZIG_BTRFS_WORKAROUND,
diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig
index ea6035474111..7e7f2b73ea60 100644
--- a/lib/std/zig/Server.zig
+++ b/lib/std/zig/Server.zig
@@ -34,6 +34,12 @@ pub const Message = struct {
test_metadata,
/// Body is a TestResults
test_results,
+ /// Does not have a body.
+ /// Notifies the build runner that the next test (requested by `Client.Message.Tag.run_test`)
+ /// is starting execution. This message helps to ensure that the timestamp used by the build
+ /// runner to enforce unit test time limits is relatively accurate under extreme system load
+ /// (where there may be a non-trivial delay before the test process is scheduled).
+ test_started,
/// Body is a series of strings, delimited by null bytes.
/// Each string is a prefixed file path.
/// The first byte indicates the file prefix path (see prefixes fields
@@ -86,15 +92,16 @@ pub const Message = struct {
pub const TestResults = extern struct {
index: u32,
- flags: Flags,
+ flags: Flags align(4),
- pub const Flags = packed struct(u32) {
- fail: bool,
- skip: bool,
- leak: bool,
+ pub const Flags = packed struct(u64) {
+ status: Status,
fuzz: bool,
- log_err_count: u28 = 0,
+ log_err_count: u30,
+ leak_count: u31,
};
+
+ pub const Status = enum(u2) { pass, fail, skip };
};
/// Trailing is the same as in `std.Build.abi.time_report.CompileResult`, excluding `step_name`.