diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8697ffe..e9f2f86 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- **Review automation baseline** — added `.github/CODEOWNERS` with repo-wide ownership for `@git-stunts`.
- **Release runbook** — added `docs/RELEASE.md` and linked it from `CONTRIBUTING.md` as the canonical patch-release workflow.
- **`pnpm release:verify`** — new maintainer-facing release helper runs the full release checklist, captures observed test counts, and prints a Markdown summary that can be pasted into release notes or changelog prep.
+- **`git cas vault stats`** — new vault summary command reports logical size, chunk references, dedupe ratio, encryption coverage, compression usage, and chunking strategy breakdowns.
+- **`git cas doctor`** — new diagnostics command scans `refs/cas/vault`, validates every referenced manifest, and exits non-zero with structured issue output when it finds broken entries or a missing vault ref.
- **Deterministic property-based envelope coverage** — added a `fast-check`-backed property suite for envelope-encrypted store/restore round-trips and tamper rejection across empty, boundary-adjacent, and multi-chunk payload sizes.
### Changed
@@ -21,6 +23,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
- **Bun blob writes in Git persistence** — `GitPersistenceAdapter.writeBlob()` now hashes temp files instead of piping large buffers through `git hash-object --stdin` under Bun, avoiding unhandled `EPIPE` failures during real Git-backed stores.
- **Release verification runner failures** — `runReleaseVerify()` now converts thrown step-runner errors into structured step failures with a `ReleaseVerifyError` summary instead of letting raw exceptions escape.
+- **Machine-readable release verification** — `pnpm release:verify --json` now emits structured JSON on both success and failure paths, making CI automation and release-note tooling consume the same verification source of truth.
+- **Dashboard launch context normalization** — `launchDashboard()` now treats injected Bijou contexts without an explicit `mode` as interactive, avoiding an incorrect static fallback, and the CLI mode tests now lock the `BIJOU_ACCESSIBLE` and `TERM=dumb` branches.
## [5.3.2] — 2026-03-15
diff --git a/README.md b/README.md
index 20b5aea..e496dfc 100644
--- a/README.md
+++ b/README.md
@@ -31,8 +31,9 @@ We use the object database.
- **Full round-trip** store, tree, and restore — get your bytes back, verified.
- **Lifecycle management** `readManifest`, `inspectAsset`, `collectReferencedChunks` — inspect trees, plan deletions, audit storage.
- **Vault** GC-safe ref-based storage. One ref (`refs/cas/vault`) indexes all assets by slug. No more silent data loss from `git gc`.
+- **Vault diagnostics** `git cas vault stats` summarizes size/dedupe/encryption coverage, and `git cas doctor` scans the vault for broken manifests before they surprise you.
- **Interactive dashboard** `git cas inspect` with chunk heatmap, animated progress bars, and rich manifest views.
-- **Verify & JSON output** `git cas verify` checks integrity; `--json` on all current human-facing commands provides convenient structured output for CI/scripting.
+- **Verify & JSON output** `git cas verify` checks integrity; `--json` on all current human-facing commands provides convenient structured output for CI/scripting, including `pnpm release:verify --json` for release automation.
**Use it for:** binary assets, build artifacts, model weights, data packs, secret bundles, weird experiments, etc.
@@ -180,6 +181,8 @@ See [CHANGELOG.md](./CHANGELOG.md) for the full list of changes.
**`--json` everywhere** — all commands now support `--json` for structured output. Pipe `git cas vault list --json | jq` in CI.
+**Vault diagnostics** — `git cas vault stats` surfaces logical size, dedupe, chunking, and encryption coverage; `git cas doctor` scans the current vault and exits non-zero when it finds trouble.
+
**CryptoPort base class** — shared key validation, metadata building, and KDF normalization. All three adapters (Node/Bun/Web) inherit from a single source of truth.
**Centralized error handling** — `runAction` wrapper with CasError codes and actionable hints (e.g., "Provide --key-file or --vault-passphrase").
@@ -296,9 +299,12 @@ git cas vault init
git cas vault list # TTY table
git cas vault list --json # structured JSON
git cas vault list --filter "photos/*" # glob filter
+git cas vault stats # size / dedupe / coverage summary
git cas vault info my-image
git cas vault remove my-image
git cas vault history
+git cas doctor # vault health scan
+pnpm release:verify --json # machine-readable release report
# Multi-recipient encryption
git cas store ./secret.bin --slug shared \
diff --git a/bin/git-cas.js b/bin/git-cas.js
index 28882ba..333e838 100755
--- a/bin/git-cas.js
+++ b/bin/git-cas.js
@@ -12,6 +12,7 @@ import { renderEncryptionCard } from './ui/encryption-card.js';
import { renderHistoryTimeline } from './ui/history-timeline.js';
import { renderManifestView } from './ui/manifest-view.js';
import { renderHeatmap } from './ui/heatmap.js';
+import { buildVaultStats, inspectVaultHealth, renderDoctorReport, renderVaultStats } from './ui/vault-report.js';
import { runAction } from './actions.js';
import { flushStdioAndExit, installBrokenPipeHandlers } from './io.js';
import { filterEntries, formatTable, formatTabSeparated } from './ui/vault-list.js';
@@ -415,6 +416,29 @@ program
}
}, getJson));
+// ---------------------------------------------------------------------------
+// doctor
+// ---------------------------------------------------------------------------
+program
+ .command('doctor')
+ .description('Inspect vault health and surface integrity issues')
+  .option('--cwd <dir>', 'Git working directory', '.')
+  .action(runAction(async (/** @type {Record<string, string>} */ opts) => {
+ const cas = createCas(opts.cwd);
+ const report = await inspectVaultHealth(cas);
+ const json = program.opts().json;
+
+ if (json) {
+ process.stdout.write(`${JSON.stringify(report)}\n`);
+ } else {
+ process.stdout.write(renderDoctorReport(report));
+ }
+
+ if (report.status !== 'ok') {
+ process.exitCode = 1;
+ }
+ }, getJson));
+
// ---------------------------------------------------------------------------
// vault init
// ---------------------------------------------------------------------------
@@ -469,6 +493,32 @@ vault
}
}, getJson));
+// ---------------------------------------------------------------------------
+// vault stats
+// ---------------------------------------------------------------------------
+vault
+ .command('stats')
+ .description('Summarize vault size, dedupe, and encryption coverage')
+  .option('--filter <pattern>', 'Filter entries by glob pattern')
+  .option('--cwd <dir>', 'Git working directory', '.')
+  .action(runAction(async (/** @type {Record<string, string>} */ opts) => {
+ const cas = createCas(opts.cwd);
+ const all = await cas.listVault();
+ const entries = filterEntries(all, opts.filter);
+ const records = [];
+ for (const entry of entries) {
+ const manifest = await cas.readManifest({ treeOid: entry.treeOid });
+ records.push({ ...entry, manifest });
+ }
+ const stats = buildVaultStats(records);
+ const json = program.opts().json;
+ if (json) {
+ process.stdout.write(`${JSON.stringify(stats)}\n`);
+ } else {
+ process.stdout.write(renderVaultStats(stats));
+ }
+ }, getJson));
+
// ---------------------------------------------------------------------------
// vault remove
// ---------------------------------------------------------------------------
diff --git a/bin/ui/context.js b/bin/ui/context.js
index 96a7438..594db2d 100644
--- a/bin/ui/context.js
+++ b/bin/ui/context.js
@@ -3,7 +3,7 @@
*/
import { createBijou } from '@flyingrobots/bijou';
-import { nodeRuntime, chalkStyle } from '@flyingrobots/bijou-node';
+import { nodeRuntime, nodeIO, chalkStyle } from '@flyingrobots/bijou-node';
/** @type {import('@flyingrobots/bijou').BijouContext | null} */
let ctx = null;
@@ -28,6 +28,58 @@ export function getCliContext() {
return ctx;
}
+/**
+ * Detect the display mode for full-screen CLI TUI flows.
+ *
+ * Unlike Bijou's default detection, NO_COLOR only disables styling here.
+ * It must not downgrade a real TTY session out of interactive mode.
+ *
+ * @param {import('@flyingrobots/bijou').RuntimePort} runtime
+ * @returns {'interactive' | 'pipe' | 'static' | 'accessible'}
+ */
+export function detectCliTuiMode(runtime) {
+ if (runtime.env('BIJOU_ACCESSIBLE') === '1') {
+ return 'accessible';
+ }
+ if (runtime.env('TERM') === 'dumb') {
+ return 'pipe';
+ }
+ if (!runtime.stdoutIsTTY || !runtime.stdinIsTTY) {
+ return 'pipe';
+ }
+ if (runtime.env('CI') !== undefined) {
+ return 'static';
+ }
+ return 'interactive';
+}
+
+/**
+ * Returns a bijou context for interactive CLI TUI flows.
+ *
+ * This keeps NO_COLOR behavior for styling while preserving interactive mode
+ * on real TTYs.
+ *
+ * @param {{
+ * runtime?: import('@flyingrobots/bijou').RuntimePort,
+ * io?: import('@flyingrobots/bijou').IOPort,
+ * style?: import('@flyingrobots/bijou').StylePort,
+ * }} [options]
+ * @returns {import('@flyingrobots/bijou').BijouContext}
+ */
+export function createCliTuiContext(options = {}) {
+ const runtime = options.runtime || nodeRuntime();
+ const noColor = runtime.env('NO_COLOR') !== undefined;
+ const base = createBijou({
+ runtime,
+ io: options.io || nodeIO(),
+ style: options.style || chalkStyle(noColor),
+ });
+ return {
+ ...base,
+ mode: detectCliTuiMode(runtime),
+ };
+}
+
/**
* @returns {import('@flyingrobots/bijou').IOPort}
*/
diff --git a/bin/ui/dashboard.js b/bin/ui/dashboard.js
index 5810059..6b5cf8d 100644
--- a/bin/ui/dashboard.js
+++ b/bin/ui/dashboard.js
@@ -3,8 +3,8 @@
*/
import { run, quit, createKeyMap } from '@flyingrobots/bijou-tui';
-import { createNodeContext } from '@flyingrobots/bijou-node';
import { loadEntriesCmd, loadManifestCmd } from './dashboard-cmds.js';
+import { createCliTuiContext, detectCliTuiMode } from './context.js';
import { renderDashboard } from './dashboard-view.js';
/**
@@ -79,13 +79,14 @@ export function createKeyBindings() {
/**
* Create the initial model.
*
+ * @param {BijouContext} ctx
* @returns {DashModel}
*/
-function createInitModel() {
+function createInitModel(ctx) {
return {
status: 'loading',
- columns: process.stdout.columns ?? 80,
- rows: process.stdout.rows ?? 24,
+ columns: ctx.runtime.columns ?? 80,
+ rows: ctx.runtime.rows ?? 24,
entries: [],
filtered: [],
cursor: 0,
@@ -272,7 +273,7 @@ function handleUpdate(msg, model, deps) {
*/
export function createDashboardApp(deps) {
return {
- init: () => /** @type {[DashModel, DashCmd[]]} */ ([createInitModel(), [/** @type {DashCmd} */ (loadEntriesCmd(deps.cas))]]),
+ init: () => /** @type {[DashModel, DashCmd[]]} */ ([createInitModel(deps.ctx), [/** @type {DashCmd} */ (loadEntriesCmd(deps.cas))]]),
update: (/** @type {KeyMsg | ResizeMsg | DashMsg} */ msg, /** @type {DashModel} */ model) => handleUpdate(msg, model, deps),
view: (/** @type {DashModel} */ model) => renderDashboard(model, deps),
};
@@ -281,26 +282,53 @@ export function createDashboardApp(deps) {
/**
* Print static list for non-TTY environments.
*
- * @param {ContentAddressableStore} cas
+ * @param {ContentAddressableStore} cas Content-addressable store read by printStaticList.
+ * @param {Pick<NodeJS.WriteStream, 'write'> | NodeJS.WriteStream} [output=process.stdout] Output stream used by printStaticList to write each entry.
*/
-async function printStaticList(cas) {
+async function printStaticList(cas, output = process.stdout) {
const entries = await cas.listVault();
for (const { slug, treeOid } of entries) {
- process.stdout.write(`${slug}\t${treeOid}\n`);
+ output.write(`${slug}\t${treeOid}\n`);
+ }
+}
+
+/**
+ * Ensure launchDashboard has a mode before branching on interactive behavior.
+ *
+ * @param {BijouContext} ctx
+ * @returns {BijouContext}
+ */
+function normalizeLaunchContext(ctx) {
+ const candidate = /** @type {BijouContext & { mode?: import('@flyingrobots/bijou').OutputMode }} */ (ctx);
+ if (candidate.mode) {
+ return candidate;
+ }
+ if (!candidate.runtime) {
+ throw new TypeError('launchDashboard requires ctx.runtime when ctx.mode is absent');
}
+ return {
+ ...candidate,
+ mode: detectCliTuiMode(candidate.runtime),
+ };
}
/**
* Launch the interactive vault dashboard.
*
* @param {ContentAddressableStore} cas
+ * @param {{
+ * ctx?: BijouContext,
+ * runApp?: typeof run,
+ *   output?: Pick<NodeJS.WriteStream, 'write'>,
+ * }} [options]
*/
-export async function launchDashboard(cas) {
- if (!process.stdout.isTTY) {
- return printStaticList(cas);
+export async function launchDashboard(cas, options = {}) {
+ const ctx = options.ctx ? normalizeLaunchContext(options.ctx) : createCliTuiContext();
+ if (ctx.mode !== 'interactive') {
+ return printStaticList(cas, options.output);
}
- const ctx = createNodeContext();
const keyMap = createKeyBindings();
const deps = { keyMap, cas, ctx };
- return run(createDashboardApp(deps), { ctx });
+ const runApp = options.runApp || run;
+ return runApp(createDashboardApp(deps), { ctx });
}
diff --git a/bin/ui/vault-report.js b/bin/ui/vault-report.js
new file mode 100644
index 0000000..4491cf3
--- /dev/null
+++ b/bin/ui/vault-report.js
@@ -0,0 +1,418 @@
+/**
+ * Shared reporting helpers for vault diagnostics commands.
+ */
+
+/**
+ * @typedef {{ slug: string, treeOid: string, manifest: { toJSON?: () => any } | Record<string, unknown> }} VaultRecord
+ * @typedef {{
+ * entries: number,
+ * totalLogicalSize: number,
+ * totalChunkRefs: number,
+ * uniqueChunks: number,
+ * duplicateChunkRefs: number,
+ * dedupRatio: number,
+ * encryptedEntries: number,
+ * envelopeEntries: number,
+ * compressedEntries: number,
+ *   chunkingStrategies: Record<string, number>,
+ * largestEntry: { slug: string, size: number } | null,
+ * }} VaultStats
+ * @typedef {{
+ * scope: 'vault' | 'entry',
+ * code: string,
+ * message: string,
+ * slug?: string,
+ * treeOid?: string,
+ * }} DoctorIssue
+ * @typedef {{
+ * status: 'ok' | 'warn' | 'fail',
+ * hasVault: boolean,
+ * commitOid: string | null,
+ * entryCount: number,
+ * checkedEntries: number,
+ * validEntries: number,
+ * invalidEntries: number,
+ * metadataEncrypted: boolean,
+ * stats: VaultStats,
+ * issues: DoctorIssue[],
+ * }} DoctorReport
+ */
+
+/**
+ * Normalize a manifest-like value to plain JSON data.
+ *
+ * @param {{ toJSON?: () => any } | Record<string, unknown>} manifest
+ * @returns {Record<string, unknown>}
+ */
+function toManifestData(manifest) {
+ return typeof manifest?.toJSON === 'function' ? manifest.toJSON() : manifest;
+}
+
+/**
+ * Format a byte count using binary units.
+ *
+ * @param {number} bytes
+ * @returns {string}
+ */
+function formatBytes(bytes) {
+ if (!Number.isFinite(bytes) || bytes < 0) {
+ return '0 bytes';
+ }
+ if (bytes < 1024) {
+ return `${bytes} bytes`;
+ }
+
+ const units = ['KiB', 'MiB', 'GiB', 'TiB'];
+ let value = bytes;
+ let unitIndex = -1;
+ while (value >= 1024 && unitIndex < units.length - 1) {
+ value /= 1024;
+ unitIndex += 1;
+ }
+ return `${value.toFixed(1)} ${units[unitIndex]}`;
+}
+
+/**
+ * Create an empty stats payload.
+ *
+ * @returns {VaultStats}
+ */
+function emptyVaultStats() {
+ return {
+ entries: 0,
+ totalLogicalSize: 0,
+ totalChunkRefs: 0,
+ uniqueChunks: 0,
+ duplicateChunkRefs: 0,
+ dedupRatio: 1,
+ encryptedEntries: 0,
+ envelopeEntries: 0,
+ compressedEntries: 0,
+ chunkingStrategies: {},
+ largestEntry: null,
+ };
+}
+
+/**
+ * Return true when manifest uses envelope recipients.
+ *
+ * @param {Record<string, unknown>} manifest
+ * @returns {boolean}
+ */
+function hasEnvelopeRecipients(manifest) {
+ return Array.isArray(manifest.encryption?.recipients) && manifest.encryption.recipients.length > 0;
+}
+
+/**
+ * Return true when manifest is encrypted.
+ *
+ * @param {Record<string, unknown>} manifest
+ * @returns {boolean}
+ */
+function isEncryptedManifest(manifest) {
+ return Boolean(manifest.encryption?.encrypted || hasEnvelopeRecipients(manifest));
+}
+
+/**
+ * Extract valid chunk blob OIDs from a manifest.
+ *
+ * @param {Record<string, unknown>} manifest
+ * @returns {string[]}
+ */
+function listChunkBlobs(manifest) {
+ const chunks = Array.isArray(manifest.chunks) ? manifest.chunks : [];
+ return chunks
+ .map((chunk) => (typeof chunk?.blob === 'string' ? chunk.blob : ''))
+ .filter(Boolean);
+}
+
+/**
+ * Summarize a single vault record for aggregation.
+ *
+ * @param {VaultRecord} record
+ * @returns {{
+ * slug: string,
+ * size: number,
+ * strategy: string,
+ * chunkBlobs: string[],
+ * chunkRefs: number,
+ * encrypted: boolean,
+ * envelope: boolean,
+ * compressed: boolean,
+ * }}
+ */
+function summarizeRecord(record) {
+ const manifest = toManifestData(record.manifest);
+ const chunks = Array.isArray(manifest.chunks) ? manifest.chunks : [];
+ return {
+ slug: record.slug,
+ size: Number.isFinite(manifest.size) ? manifest.size : 0,
+ strategy: manifest.chunking?.strategy ?? 'fixed',
+ chunkBlobs: listChunkBlobs(manifest),
+ chunkRefs: chunks.length,
+ encrypted: isEncryptedManifest(manifest),
+ envelope: hasEnvelopeRecipients(manifest),
+ compressed: Boolean(manifest.compression),
+ };
+}
+
+/**
+ * Merge a summarized record into aggregate stats.
+ *
+ * @param {VaultStats} stats
+ * @param {ReturnType<typeof summarizeRecord>} summary
+ * @param {Set<string>} uniqueChunks
+ * @returns {void}
+ */
+function applyRecordSummary(stats, summary, uniqueChunks) {
+ stats.entries += 1;
+ stats.totalLogicalSize += summary.size;
+ stats.totalChunkRefs += summary.chunkRefs;
+ if (summary.encrypted) { stats.encryptedEntries += 1; }
+ if (summary.envelope) { stats.envelopeEntries += 1; }
+ if (summary.compressed) { stats.compressedEntries += 1; }
+ stats.chunkingStrategies[summary.strategy] = (stats.chunkingStrategies[summary.strategy] ?? 0) + 1;
+
+ if (!stats.largestEntry || summary.size > stats.largestEntry.size) {
+ stats.largestEntry = { slug: summary.slug, size: summary.size };
+ }
+
+ for (const blob of summary.chunkBlobs) {
+ uniqueChunks.add(blob);
+ }
+}
+
+/**
+ * Build aggregate vault stats from loaded manifests.
+ *
+ * Fixed chunking is implicit in current manifests, so missing chunking metadata
+ * is treated as `fixed`.
+ *
+ * @param {VaultRecord[]} records
+ * @returns {VaultStats}
+ */
+export function buildVaultStats(records) {
+ /** @type {VaultStats} */
+ const stats = emptyVaultStats();
+ const uniqueChunks = new Set();
+
+ for (const record of records) {
+ applyRecordSummary(stats, summarizeRecord(record), uniqueChunks);
+ }
+
+ stats.uniqueChunks = uniqueChunks.size;
+ stats.duplicateChunkRefs = Math.max(0, stats.totalChunkRefs - stats.uniqueChunks);
+ stats.dedupRatio = stats.uniqueChunks > 0
+ ? stats.totalChunkRefs / stats.uniqueChunks
+ : 1;
+
+ return stats;
+}
+
+/**
+ * Render a human-readable vault stats report.
+ *
+ * @param {VaultStats} stats
+ * @returns {string}
+ */
+export function renderVaultStats(stats) {
+ const chunking = Object.entries(stats.chunkingStrategies)
+ .sort(([left], [right]) => left.localeCompare(right))
+ .map(([strategy, count]) => `${strategy}:${count}`)
+ .join(', ') || '-';
+
+ const largest = stats.largestEntry
+ ? `${stats.largestEntry.slug} (${stats.largestEntry.size} bytes)`
+ : '-';
+
+ return [
+ `entries\t${stats.entries}`,
+ `logical-size\t${formatBytes(stats.totalLogicalSize)} (${stats.totalLogicalSize} bytes)`,
+ `chunk-refs\t${stats.totalChunkRefs}`,
+ `unique-chunks\t${stats.uniqueChunks}`,
+ `duplicate-refs\t${stats.duplicateChunkRefs}`,
+ `dedup-ratio\t${stats.dedupRatio.toFixed(2)}x`,
+ `encrypted\t${stats.encryptedEntries}`,
+ `envelope\t${stats.envelopeEntries}`,
+ `compressed\t${stats.compressedEntries}`,
+ `chunking\t${chunking}`,
+ `largest\t${largest}`,
+ '',
+ ].join('\n');
+}
+
+/**
+ * Normalize thrown errors into doctor issue entries.
+ *
+ * @param {DoctorIssue['scope']} scope
+ * @param {unknown} error
+ * @param {{ slug?: string, treeOid?: string }} [meta]
+ * @returns {DoctorIssue}
+ */
+function toDoctorIssue(scope, error, meta = {}) {
+ const code = typeof error === 'object' && error && 'code' in error && typeof error.code === 'string'
+ ? error.code
+ : 'UNKNOWN_ERROR';
+ const message = error instanceof Error ? error.message : String(error);
+ return { scope, code, message, ...meta };
+}
+
+/**
+ * Build the failure report for vault-level errors.
+ *
+ * @param {unknown} error
+ * @returns {DoctorReport}
+ */
+function buildDoctorFailureReport(error) {
+ return {
+ status: 'fail',
+ hasVault: true,
+ commitOid: null,
+ entryCount: 0,
+ checkedEntries: 0,
+ validEntries: 0,
+ invalidEntries: 1,
+ metadataEncrypted: false,
+ stats: emptyVaultStats(),
+ issues: [toDoctorIssue('vault', error)],
+ };
+}
+
+/**
+ * Build the report for a missing vault ref.
+ *
+ * @returns {DoctorReport}
+ */
+function buildMissingVaultReport() {
+ return {
+ status: 'warn',
+ hasVault: false,
+ commitOid: null,
+ entryCount: 0,
+ checkedEntries: 0,
+ validEntries: 0,
+ invalidEntries: 0,
+ metadataEncrypted: false,
+ stats: emptyVaultStats(),
+ issues: [{
+ scope: 'vault',
+ code: 'VAULT_REF_MISSING',
+ message: 'refs/cas/vault not found',
+ }],
+ };
+}
+
+/**
+ * Read the current vault state.
+ *
+ * @param {{ getVaultService: () => Promise<{ readState: () => Promise<{ entries: Map<string, string>, parentCommitOid: string | null, metadata: Record<string, unknown> | null }> }> }} cas
+ * @returns {Promise<{ entries: Map<string, string>, parentCommitOid: string | null, metadata: Record<string, unknown> | null }>}
+ */
+async function readVaultState(cas) {
+ const vault = await cas.getVaultService();
+ return await vault.readState();
+}
+
+/**
+ * Load doctor entry records while keeping per-entry failures as issues.
+ *
+ * @param {{ readManifest: ({ treeOid }: { treeOid: string }) => Promise<unknown> }} cas
+ * @param {Array<{ slug: string, treeOid: string }>} entries
+ * @returns {Promise<{ records: VaultRecord[], issues: DoctorIssue[] }>}
+ */
+async function readDoctorEntries(cas, entries) {
+ /** @type {VaultRecord[]} */
+ const records = [];
+ /** @type {DoctorIssue[]} */
+ const issues = [];
+
+ for (const entry of entries) {
+ try {
+ const manifest = await cas.readManifest({ treeOid: entry.treeOid });
+ records.push({ ...entry, manifest });
+ } catch (error) {
+ issues.push(toDoctorIssue('entry', error, entry));
+ }
+ }
+
+ return { records, issues };
+}
+
+/**
+ * Inspect vault health without aborting on per-entry failures.
+ *
+ * @param {{
+ *   getVaultService: () => Promise<{ readState: () => Promise<{ entries: Map<string, string>, parentCommitOid: string | null, metadata: Record<string, unknown> | null }> }>,
+ *   readManifest: ({ treeOid }: { treeOid: string }) => Promise<unknown>,
+ * }} cas
+ * @returns {Promise<DoctorReport>}
+ */
+export async function inspectVaultHealth(cas) {
+ let state;
+
+ try {
+ state = await readVaultState(cas);
+ } catch (error) {
+ return buildDoctorFailureReport(error);
+ }
+
+ if (!state.parentCommitOid) {
+ return buildMissingVaultReport();
+ }
+
+ const entries = [...state.entries.entries()]
+ .map(([slug, treeOid]) => ({ slug, treeOid }))
+ .sort((left, right) => left.slug.localeCompare(right.slug));
+ const { records, issues } = await readDoctorEntries(cas, entries);
+
+ return {
+ status: issues.length > 0 ? 'fail' : 'ok',
+ hasVault: true,
+ commitOid: state.parentCommitOid,
+ entryCount: entries.length,
+ checkedEntries: entries.length,
+ validEntries: records.length,
+ invalidEntries: issues.length,
+ metadataEncrypted: Boolean(state.metadata?.encryption),
+ stats: buildVaultStats(records),
+ issues,
+ };
+}
+
+/**
+ * Render a human-readable doctor report.
+ *
+ * @param {DoctorReport} report
+ * @returns {string}
+ */
+export function renderDoctorReport(report) {
+ const lines = [
+ `status\t${report.status}`,
+ `vault\t${report.hasVault ? 'present' : 'missing'}`,
+ `commit\t${report.commitOid ?? '-'}`,
+ `entries\t${report.entryCount}`,
+ `checked\t${report.checkedEntries}`,
+ `valid\t${report.validEntries}`,
+ `invalid\t${report.invalidEntries}`,
+ `metadata\t${report.metadataEncrypted ? 'encrypted' : 'plain'}`,
+ `issues\t${report.issues.length}`,
+ `logical-size\t${formatBytes(report.stats.totalLogicalSize)} (${report.stats.totalLogicalSize} bytes)`,
+ `chunk-refs\t${report.stats.totalChunkRefs}`,
+ `unique-chunks\t${report.stats.uniqueChunks}`,
+ '',
+ ];
+
+ if (report.issues.length > 0) {
+ lines.push('issue-details');
+ for (const issue of report.issues) {
+ if (issue.scope === 'entry') {
+ lines.push(`[entry] ${issue.slug} (${issue.treeOid}) ${issue.code}: ${issue.message}`);
+ } else {
+ lines.push(`[vault] ${issue.code}: ${issue.message}`);
+ }
+ }
+ lines.push('');
+ }
+
+ return lines.join('\n');
+}
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
index e76c152..d6990d4 100644
--- a/docs/RELEASE.md
+++ b/docs/RELEASE.md
@@ -22,7 +22,9 @@ This document defines the canonical patch-release flow for `git-cas`.
`pnpm release:verify` is the maintainer-facing verification entrypoint for
release prep. It runs the repository release gates in order and prints a
-Markdown summary that can be pasted into release notes or changelog prep.
+Markdown summary that can be pasted into release notes or changelog prep. Pass
+`--json` when you need the same report in machine-readable form for CI or
+release automation.
Current release verification includes:
diff --git a/scripts/release/verify.js b/scripts/release/verify.js
index 3440ffd..f2448a4 100644
--- a/scripts/release/verify.js
+++ b/scripts/release/verify.js
@@ -91,12 +91,14 @@ export const RELEASE_STEPS = [
];
export class ReleaseVerifyError extends Error {
- constructor(message, { step, results, summary } = {}) {
+ constructor(message, { step, results, summary, version, totalTests } = {}) {
super(message);
this.name = 'ReleaseVerifyError';
this.step = step;
this.results = results ?? [];
this.summary = summary ?? '';
+ this.version = version ?? '';
+ this.totalTests = totalTests ?? 0;
}
}
@@ -129,6 +131,23 @@ export function renderMarkdownSummary({ version, results, totalTests }) {
return `${lines.join('\n')}\n`;
}
+/**
+ * Render the report as machine-readable JSON.
+ *
+ * @param {{ version: string, results: Array<Record<string, unknown>>, totalTests: number, step?: { id: string, label: string } }} report
+ * @returns {string}
+ */
+export function renderJsonReport({ version, results, totalTests, step }) {
+ return `${JSON.stringify({
+ version,
+ stepsPassed: results.filter((result) => result.passed).length,
+ totalSteps: results.length,
+ totalTests,
+ failedStep: step ? { id: step.id, label: step.label } : null,
+ results,
+ }, null, 2)}\n`;
+}
+
/**
* Sum every observed test count across all executed steps.
*
@@ -263,6 +282,8 @@ export async function runReleaseVerify({
step: result,
results,
summary,
+ version,
+ totalTests,
});
}
}
@@ -276,18 +297,37 @@ export async function runReleaseVerify({
};
}
+/**
+ * Resolve the CLI output format from argv.
+ *
+ * @param {string[]} [argv]
+ * @returns {'markdown' | 'json'}
+ */
+export function resolveOutputFormat(argv = process.argv.slice(2)) {
+ return argv.includes('--json') ? 'json' : 'markdown';
+}
+
/**
* CLI entry point for `pnpm release:verify`.
*
+ * @returns {Promise<void>}
*/
async function main() {
+ const format = resolveOutputFormat();
try {
const report = await runReleaseVerify();
- process.stdout.write(`\n${report.summary}`);
+ if (format === 'json') {
+ process.stdout.write(renderJsonReport(report));
+ } else {
+ process.stdout.write(`\n${report.summary}`);
+ }
} catch (error) {
if (error instanceof ReleaseVerifyError) {
- process.stderr.write(`\n${error.summary}`);
+ if (format === 'json') {
+ process.stderr.write(renderJsonReport(error));
+ } else {
+ process.stderr.write(`\n${error.summary}`);
+ }
}
process.stderr.write(`${error instanceof Error ? error.message : String(error)}\n`);
process.exitCode = 1;
diff --git a/test/integration/round-trip.test.js b/test/integration/round-trip.test.js
index 5836a71..41fcb14 100644
--- a/test/integration/round-trip.test.js
+++ b/test/integration/round-trip.test.js
@@ -7,7 +7,7 @@
* MUST run inside Docker (GIT_STUNTS_DOCKER=1). Refuses to run on the host.
*/
-import { describe, it, expect, beforeAll, afterAll } from 'vitest';
+import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { mkdtempSync, rmSync, writeFileSync, readFileSync } from 'node:fs';
import { randomBytes } from 'node:crypto';
import { spawnSync } from 'node:child_process';
@@ -27,6 +27,11 @@ if (process.env.GIT_STUNTS_DOCKER !== '1') {
);
}
+vi.setConfig({
+ testTimeout: 15000,
+ hookTimeout: 30000,
+});
+
let repoDir;
let cas;
let casCbor;
diff --git a/test/integration/vault-cli.test.js b/test/integration/vault-cli.test.js
index 7c58ab5..d068fae 100644
--- a/test/integration/vault-cli.test.js
+++ b/test/integration/vault-cli.test.js
@@ -47,7 +47,7 @@ const RUNTIME_CMD = globalThis.Bun
function runCli(args, cwd) {
return spawnSync(RUNTIME_CMD[0], [...RUNTIME_CMD.slice(1), ...args, '--cwd', cwd], {
encoding: 'utf8',
- timeout: 30_000,
+ timeout: 90_000,
});
}
@@ -151,6 +151,44 @@ describe('vault CLI — init, store, query', () => {
});
});
+describe('vault CLI — diagnostics', () => {
+ it('vault stats --json summarizes the current vault', () => {
+ const out = cli(['vault', 'stats', '--json'], repoDir);
+ const report = JSON.parse(out);
+
+ expect(report).toMatchObject({
+ entries: 1,
+ totalLogicalSize: original.length,
+ totalChunkRefs: 1,
+ uniqueChunks: 1,
+ duplicateChunkRefs: 0,
+ encryptedEntries: 0,
+ envelopeEntries: 0,
+ compressedEntries: 0,
+ chunkingStrategies: { fixed: 1 },
+ largestEntry: { slug: 'demo/hello', size: original.length },
+ });
+ expect(report.dedupRatio).toBe(1);
+ });
+
+ it('doctor --json reports a healthy vault', () => {
+ const out = cli(['doctor', '--json'], repoDir);
+ const report = JSON.parse(out);
+
+ expect(report.status).toBe('ok');
+ expect(report.hasVault).toBe(true);
+ expect(report.entryCount).toBe(1);
+ expect(report.validEntries).toBe(1);
+ expect(report.invalidEntries).toBe(0);
+ expect(report.issues).toEqual([]);
+ expect(report.stats).toMatchObject({
+ entries: 1,
+ totalChunkRefs: 1,
+ uniqueChunks: 1,
+ });
+ });
+});
+
// ---------------------------------------------------------------------------
// vault restore + remove + re-add
// ---------------------------------------------------------------------------
diff --git a/test/unit/cli/_testContext.js b/test/unit/cli/_testContext.js
index 1087310..7a63e34 100644
--- a/test/unit/cli/_testContext.js
+++ b/test/unit/cli/_testContext.js
@@ -3,6 +3,28 @@
*/
import { createTestContext } from '@flyingrobots/bijou/adapters/test';
-export function makeCtx(mode = 'interactive') {
- return createTestContext({ mode, noColor: true });
+export function makeRuntime(runtime = {}) {
+ return { columns: 80, rows: 24, ...runtime };
+}
+
+export function makeInteractiveRuntime(runtime = {}) {
+ return makeRuntime({
+ env: { TERM: 'xterm-256color' },
+ stdoutIsTTY: true,
+ stdinIsTTY: true,
+ ...runtime,
+ });
+}
+
+export function makePipeRuntime(runtime = {}) {
+ return makeRuntime({
+ env: { TERM: 'xterm-256color' },
+ stdoutIsTTY: false,
+ stdinIsTTY: false,
+ ...runtime,
+ });
+}
+
+export function makeCtx(mode = 'interactive', runtime = {}) {
+ return createTestContext({ mode, noColor: true, runtime });
}
diff --git a/test/unit/cli/context.test.js b/test/unit/cli/context.test.js
new file mode 100644
index 0000000..0b0a936
--- /dev/null
+++ b/test/unit/cli/context.test.js
@@ -0,0 +1,67 @@
+import { describe, it, expect } from 'vitest';
+
+import { detectCliTuiMode } from '../../../bin/ui/context.js';
+
+function makeRuntime(overrides = {}) {
+ return {
+ env: (key) => overrides.env?.[key],
+ stdoutIsTTY: overrides.stdoutIsTTY ?? true,
+ stdinIsTTY: overrides.stdinIsTTY ?? true,
+ columns: overrides.columns ?? 80,
+ rows: overrides.rows ?? 24,
+ };
+}
+
+describe('detectCliTuiMode interactive modes', () => {
+ it('uses accessible mode when BIJOU_ACCESSIBLE=1', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { BIJOU_ACCESSIBLE: '1', TERM: 'xterm-256color' },
+ }));
+
+ expect(mode).toBe('accessible');
+ });
+
+ it('falls back to pipe when TERM is dumb', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { TERM: 'dumb' },
+ }));
+
+ expect(mode).toBe('pipe');
+ });
+
+ it('stays interactive on a TTY when NO_COLOR is set', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { NO_COLOR: '1', TERM: 'xterm-256color' },
+ }));
+
+ expect(mode).toBe('interactive');
+ });
+});
+
+describe('detectCliTuiMode non-interactive fallbacks', () => {
+ it('falls back to pipe when stdout is not a TTY', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { TERM: 'xterm-256color' },
+ stdoutIsTTY: false,
+ }));
+
+ expect(mode).toBe('pipe');
+ });
+
+ it('falls back to pipe when stdin is not a TTY', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { TERM: 'xterm-256color' },
+ stdinIsTTY: false,
+ }));
+
+ expect(mode).toBe('pipe');
+ });
+
+ it('falls back to static in CI on a TTY', () => {
+ const mode = detectCliTuiMode(makeRuntime({
+ env: { CI: 'true', TERM: 'xterm-256color' },
+ }));
+
+ expect(mode).toBe('static');
+ });
+});
diff --git a/test/unit/cli/dashboard.launch.default.test.js b/test/unit/cli/dashboard.launch.default.test.js
new file mode 100644
index 0000000..e433051
--- /dev/null
+++ b/test/unit/cli/dashboard.launch.default.test.js
@@ -0,0 +1,55 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import { mockRuntime, mockIO, plainStyle } from '@flyingrobots/bijou/adapters/test';
+
+const runMock = vi.fn().mockResolvedValue(undefined);
+
+function mockCas(entries = []) {
+ return {
+ listVault: vi.fn().mockResolvedValue(entries),
+ getVaultMetadata: vi.fn().mockResolvedValue(null),
+ readManifest: vi.fn().mockResolvedValue(null),
+ };
+}
+
+beforeEach(() => {
+ vi.resetModules();
+ runMock.mockClear();
+});
+
+describe('launchDashboard default context path', () => {
+ it('stays interactive on a TTY when NO_COLOR is set', async () => {
+ vi.doMock('@flyingrobots/bijou-tui', async () => {
+ const actual = await vi.importActual('@flyingrobots/bijou-tui');
+ return { ...actual, run: runMock };
+ });
+
+ vi.doMock('@flyingrobots/bijou-node', async () => {
+ const actual = await vi.importActual('@flyingrobots/bijou-node');
+ return {
+ ...actual,
+ nodeRuntime: () => mockRuntime({
+ env: { NO_COLOR: '1', TERM: 'xterm-256color' },
+ stdoutIsTTY: true,
+ stdinIsTTY: true,
+ columns: 111,
+ rows: 42,
+ }),
+ nodeIO: () => mockIO(),
+ chalkStyle: () => plainStyle(),
+ };
+ });
+
+ const { launchDashboard } = await import('../../../bin/ui/dashboard.js');
+ const cas = mockCas();
+
+ await launchDashboard(cas);
+
+ expect(runMock).toHaveBeenCalledTimes(1);
+ expect(cas.listVault).not.toHaveBeenCalled();
+
+ const [app] = runMock.mock.calls[0];
+ const [model] = app.init();
+ expect(model.columns).toBe(111);
+ expect(model.rows).toBe(42);
+ });
+});
diff --git a/test/unit/cli/dashboard.launch.test.js b/test/unit/cli/dashboard.launch.test.js
new file mode 100644
index 0000000..1ad143a
--- /dev/null
+++ b/test/unit/cli/dashboard.launch.test.js
@@ -0,0 +1,87 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import { makeCtx, makeInteractiveRuntime } from './_testContext.js';
+
+const runMock = vi.fn().mockResolvedValue(undefined);
+
+vi.mock('@flyingrobots/bijou-tui', async () => {
+ const actual = await vi.importActual('@flyingrobots/bijou-tui');
+ return { ...actual, run: runMock };
+});
+
+const { launchDashboard } = await import('../../../bin/ui/dashboard.js');
+
+function mockCas(entries = []) {
+ return {
+ listVault: vi.fn().mockResolvedValue(entries),
+ getVaultMetadata: vi.fn().mockResolvedValue(null),
+ readManifest: vi.fn().mockResolvedValue(null),
+ };
+}
+
+beforeEach(() => {
+ runMock.mockClear();
+});
+
+describe('launchDashboard runtime wiring', () => {
+ it('uses injected runtime dimensions for the first frame', async () => {
+ const cas = mockCas();
+ const ctx = makeCtx('interactive', { columns: 123, rows: 55 });
+
+ await launchDashboard(cas, { ctx, runApp: runMock });
+
+ const [app] = runMock.mock.calls[0];
+ const [model] = app.init();
+ expect(model.columns).toBe(123);
+ expect(model.rows).toBe(55);
+ });
+
+ it('treats an injected context without mode as interactive', async () => {
+ const cas = mockCas();
+ const ctx = {
+ ...makeCtx('interactive', makeInteractiveRuntime()),
+ };
+ delete ctx.mode;
+
+ await launchDashboard(cas, { ctx, runApp: runMock });
+
+ expect(runMock).toHaveBeenCalledTimes(1);
+ expect(cas.listVault).not.toHaveBeenCalled();
+ });
+});
+
+describe('launchDashboard context normalization', () => {
+ it('throws a clear error when mode-less context is missing runtime', async () => {
+ const cas = mockCas();
+ const ctx = { ...makeCtx('interactive') };
+ delete ctx.mode;
+ delete ctx.runtime;
+
+ await expect(
+ launchDashboard(cas, { ctx, runApp: runMock }),
+ ).rejects.toThrow('launchDashboard requires ctx.runtime when ctx.mode is absent');
+ });
+});
+
+describe('launchDashboard mode branching', () => {
+ it('uses the interactive runtime when the context is interactive', async () => {
+ const cas = mockCas();
+ const ctx = makeCtx('interactive');
+
+ await launchDashboard(cas, { ctx, runApp: runMock });
+
+ expect(runMock).toHaveBeenCalledTimes(1);
+ expect(cas.listVault).not.toHaveBeenCalled();
+ });
+
+ it('falls back to a static list when the context is non-interactive', async () => {
+ const cas = mockCas([{ slug: 'alpha', treeOid: 'deadbeef' }]);
+ const ctx = makeCtx('pipe');
+ const output = { write: vi.fn() };
+
+ await launchDashboard(cas, { ctx, runApp: runMock, output });
+
+ expect(runMock).not.toHaveBeenCalled();
+ expect(cas.listVault).toHaveBeenCalledTimes(1);
+ expect(output.write).toHaveBeenCalledWith('alpha\tdeadbeef\n');
+ });
+});
diff --git a/test/unit/cli/dashboard.test.js b/test/unit/cli/dashboard.test.js
index e2c484b..c4c60a3 100644
--- a/test/unit/cli/dashboard.test.js
+++ b/test/unit/cli/dashboard.test.js
@@ -3,6 +3,7 @@ import { makeCtx } from './_testContext.js';
vi.mock('../../../bin/ui/context.js', () => ({
getCliContext: () => makeCtx(),
+ createCliTuiContext: () => makeCtx(),
}));
const { createDashboardApp, createKeyBindings } = await import('../../../bin/ui/dashboard.js');
diff --git a/test/unit/cli/vault-report.test.js b/test/unit/cli/vault-report.test.js
new file mode 100644
index 0000000..d5fba03
--- /dev/null
+++ b/test/unit/cli/vault-report.test.js
@@ -0,0 +1,219 @@
+import { describe, it, expect, vi } from 'vitest';
+import {
+ buildVaultStats,
+ inspectVaultHealth,
+ renderDoctorReport,
+ renderVaultStats,
+} from '../../../bin/ui/vault-report.js';
+
+function makeManifest(data) {
+ return {
+ ...data,
+ toJSON() {
+ return data;
+ },
+ };
+}
+
+function makeSampleRecords() {
+ return [
+ {
+ slug: 'photos/hero.jpg',
+ treeOid: 'tree-1',
+ manifest: makeManifest({
+ slug: 'photos/hero.jpg',
+ size: 1000,
+ chunks: [
+ { blob: 'blob-1', size: 600 },
+ { blob: 'blob-2', size: 400 },
+ ],
+ encryption: { encrypted: true },
+ compression: { algorithm: 'gzip' },
+ chunking: { strategy: 'fixed', params: {} },
+ }),
+ },
+ {
+ slug: 'photos/thumb.jpg',
+ treeOid: 'tree-2',
+ manifest: makeManifest({
+ slug: 'photos/thumb.jpg',
+ size: 600,
+ chunks: [
+ { blob: 'blob-2', size: 400 },
+ { blob: 'blob-3', size: 200 },
+ ],
+ encryption: {
+ encrypted: true,
+ recipients: [{ label: 'alice', wrappedDek: 'x', nonce: 'y', tag: 'z' }],
+ },
+ chunking: { strategy: 'cdc', params: {} },
+ }),
+ },
+ ];
+}
+
+function makePartialFailureCas() {
+ return {
+ getVaultService: vi.fn().mockResolvedValue({
+ readState: vi.fn().mockResolvedValue({
+ entries: new Map([
+ ['ok/asset', 'tree-1'],
+ ['bad/asset', 'tree-2'],
+ ]),
+ parentCommitOid: 'commit-1',
+ metadata: { version: 1 },
+ }),
+ }),
+ readManifest: vi.fn(async ({ treeOid }) => {
+ if (treeOid === 'tree-2') {
+ const error = new Error('manifest missing');
+ error.code = 'MANIFEST_NOT_FOUND';
+ throw error;
+ }
+
+ return makeManifest({
+ slug: 'ok/asset',
+ size: 512,
+ chunks: [{ blob: 'blob-1', size: 512 }],
+ chunking: { strategy: 'fixed', params: {} },
+ });
+ }),
+ };
+}
+
+describe('buildVaultStats', () => {
+ it('aggregates logical size, dedupe, encryption, and chunking data', () => {
+ const stats = buildVaultStats(makeSampleRecords());
+
+ expect(stats).toMatchObject({
+ entries: 2,
+ totalLogicalSize: 1600,
+ totalChunkRefs: 4,
+ uniqueChunks: 3,
+ duplicateChunkRefs: 1,
+ encryptedEntries: 2,
+ envelopeEntries: 1,
+ compressedEntries: 1,
+ chunkingStrategies: { fixed: 1, cdc: 1 },
+ largestEntry: { slug: 'photos/hero.jpg', size: 1000 },
+ });
+ expect(stats.dedupRatio).toBeCloseTo(4 / 3, 6);
+ });
+});
+
+describe('renderVaultStats', () => {
+ it('renders a concise operator-facing report', () => {
+ const output = renderVaultStats({
+ entries: 2,
+ totalLogicalSize: 1600,
+ totalChunkRefs: 4,
+ uniqueChunks: 3,
+ duplicateChunkRefs: 1,
+ dedupRatio: 4 / 3,
+ encryptedEntries: 2,
+ envelopeEntries: 1,
+ compressedEntries: 1,
+ chunkingStrategies: { fixed: 1, cdc: 1 },
+ largestEntry: { slug: 'photos/hero.jpg', size: 1000 },
+ });
+
+ expect(output).toContain('entries\t2');
+ expect(output).toContain('logical-size\t1.6 KiB (1600 bytes)');
+ expect(output).toContain('dedup-ratio\t1.33x');
+ expect(output).toContain('chunking\tcdc:1, fixed:1');
+ expect(output).toContain('largest\tphotos/hero.jpg (1000 bytes)');
+ });
+});
+
+describe('inspectVaultHealth', () => {
+ it('returns a warning when refs/cas/vault is missing', async () => {
+ const cas = {
+ getVaultService: vi.fn().mockResolvedValue({
+ readState: vi.fn().mockResolvedValue({
+ entries: new Map(),
+ parentCommitOid: null,
+ metadata: null,
+ }),
+ }),
+ };
+
+ const report = await inspectVaultHealth(cas);
+
+ expect(report.status).toBe('warn');
+ expect(report.hasVault).toBe(false);
+ expect(report.issues).toEqual([
+ expect.objectContaining({
+ code: 'VAULT_REF_MISSING',
+ scope: 'vault',
+ }),
+ ]);
+ });
+
+ it('records per-entry manifest failures without aborting the scan', async () => {
+ const cas = makePartialFailureCas();
+
+ const report = await inspectVaultHealth(cas);
+
+ expect(report.status).toBe('fail');
+ expect(report.hasVault).toBe(true);
+ expect(report.entryCount).toBe(2);
+ expect(report.validEntries).toBe(1);
+ expect(report.invalidEntries).toBe(1);
+ expect(report.stats).toMatchObject({
+ entries: 1,
+ totalChunkRefs: 1,
+ uniqueChunks: 1,
+ });
+ expect(report.issues).toEqual([
+ expect.objectContaining({
+ scope: 'entry',
+ slug: 'bad/asset',
+ treeOid: 'tree-2',
+ code: 'MANIFEST_NOT_FOUND',
+ message: 'manifest missing',
+ }),
+ ]);
+ });
+});
+
+describe('renderDoctorReport', () => {
+ it('renders health summary and issues', () => {
+ const output = renderDoctorReport({
+ status: 'fail',
+ hasVault: true,
+ commitOid: 'commit-1',
+ entryCount: 2,
+ checkedEntries: 2,
+ validEntries: 1,
+ invalidEntries: 1,
+ metadataEncrypted: false,
+ stats: {
+ entries: 1,
+ totalLogicalSize: 512,
+ totalChunkRefs: 1,
+ uniqueChunks: 1,
+ duplicateChunkRefs: 0,
+ dedupRatio: 1,
+ encryptedEntries: 0,
+ envelopeEntries: 0,
+ compressedEntries: 0,
+ chunkingStrategies: { fixed: 1 },
+ largestEntry: { slug: 'ok/asset', size: 512 },
+ },
+ issues: [
+ {
+ scope: 'entry',
+ slug: 'bad/asset',
+ treeOid: 'tree-2',
+ code: 'MANIFEST_NOT_FOUND',
+ message: 'manifest missing',
+ },
+ ],
+ });
+
+ expect(output).toContain('status\tfail');
+ expect(output).toContain('vault\tpresent');
+ expect(output).toContain('issues\t1');
+ expect(output).toContain('[entry] bad/asset (tree-2) MANIFEST_NOT_FOUND: manifest missing');
+ });
+});
diff --git a/test/unit/domain/services/CasService.compression.test.js b/test/unit/domain/services/CasService.compression.test.js
index 507ad5e..0ac5d0b 100644
--- a/test/unit/domain/services/CasService.compression.test.js
+++ b/test/unit/domain/services/CasService.compression.test.js
@@ -6,6 +6,7 @@ import JsonCodec from '../../../../src/infrastructure/codecs/JsonCodec.js';
import SilentObserver from '../../../../src/infrastructure/adapters/SilentObserver.js';
const testCrypto = await getTestCryptoAdapter();
+const SLOW_COMPRESSION_TEST_TIMEOUT_MS = 15000;
// ---------------------------------------------------------------------------
// Helpers
@@ -331,6 +332,6 @@ describe('CasService compression – fuzz round-trip across sizes', () => {
const { buffer } = await service.restore({ manifest, encryptionKey: key });
expect(buffer.equals(original)).toBe(true);
- });
+ }, size >= 5000 ? SLOW_COMPRESSION_TEST_TIMEOUT_MS : undefined);
}
});
diff --git a/test/unit/domain/services/CasService.empty-file.test.js b/test/unit/domain/services/CasService.empty-file.test.js
index 40356ad..4f3581a 100644
--- a/test/unit/domain/services/CasService.empty-file.test.js
+++ b/test/unit/domain/services/CasService.empty-file.test.js
@@ -9,6 +9,7 @@ import JsonCodec from '../../../../src/infrastructure/codecs/JsonCodec.js';
import SilentObserver from '../../../../src/infrastructure/adapters/SilentObserver.js';
const testCrypto = await getTestCryptoAdapter();
+const SLOW_EMPTY_FILE_TEST_TIMEOUT_MS = 15000;
/**
* Helper: writes a 0-byte file and returns its path.
@@ -217,5 +218,5 @@ describe('CasService – empty file repeated stores', () => {
// writeBlob should never have been called across all 100 iterations.
expect(mockPersistence.writeBlob).not.toHaveBeenCalled();
- });
+ }, SLOW_EMPTY_FILE_TEST_TIMEOUT_MS);
});
diff --git a/test/unit/domain/services/CasService.envelope.test.js b/test/unit/domain/services/CasService.envelope.test.js
index a49b203..a51e64d 100644
--- a/test/unit/domain/services/CasService.envelope.test.js
+++ b/test/unit/domain/services/CasService.envelope.test.js
@@ -7,6 +7,7 @@ import SilentObserver from '../../../../src/infrastructure/adapters/SilentObserv
import CasError from '../../../../src/domain/errors/CasError.js';
const testCrypto = await getTestCryptoAdapter();
+const SLOW_ENVELOPE_TEST_TIMEOUT_MS = 15000;
// ---------------------------------------------------------------------------
// Deterministic PRNG (xorshift32) — keeps fuzz tests reproducible
@@ -312,7 +313,7 @@ describe('CasService – envelope encryption (edge cases)', () => { // eslint-di
const { buffer } = await service.restore({ manifest, encryptionKey: kek });
expect(buffer.equals(original)).toBe(true);
- });
+ }, SLOW_ENVELOPE_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
diff --git a/test/unit/domain/services/CasService.kdf.test.js b/test/unit/domain/services/CasService.kdf.test.js
index 12a22aa..74deee1 100644
--- a/test/unit/domain/services/CasService.kdf.test.js
+++ b/test/unit/domain/services/CasService.kdf.test.js
@@ -7,6 +7,7 @@ import CasError from '../../../../src/domain/errors/CasError.js';
import SilentObserver from '../../../../src/infrastructure/adapters/SilentObserver.js';
const testCrypto = await getTestCryptoAdapter();
+const SLOW_KDF_TEST_TIMEOUT_MS = 20000;
// ---------------------------------------------------------------------------
// Helpers
@@ -100,7 +101,7 @@ describe('CasService.deriveKey() – scrypt', () => {
expect(typeof result.params.parallelization).toBe('number');
// scrypt params should NOT have iterations
expect(result.params.iterations).toBeUndefined();
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -131,7 +132,7 @@ describe('CasService.deriveKey() – determinism', () => {
const result2 = await service.deriveKey({ passphrase, salt, algorithm: 'scrypt' });
expect(result1.key.equals(result2.key)).toBe(true);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -164,7 +165,7 @@ describe('CasService.deriveKey() – different salts', () => {
const result2 = await service.deriveKey({ passphrase, salt: salt2, algorithm: 'scrypt' });
expect(result1.key.equals(result2.key)).toBe(false);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -268,7 +269,7 @@ describe('CasService – wrong passphrase fails restore', () => {
} catch (err) {
expect(err.code).toBe('INTEGRITY_ERROR');
}
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -326,7 +327,7 @@ describe('CasService – manifest KDF metadata (scrypt)', () => {
expect(typeof kdf.cost).toBe('number');
expect(typeof kdf.blockSize).toBe('number');
expect(kdf.iterations).toBeUndefined();
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -352,7 +353,7 @@ describe('CasService – scrypt passphrase round-trip', () => {
expect(manifest.encryption.kdf.algorithm).toBe('scrypt');
const { buffer } = await service.restore({ manifest, passphrase: 'scrypt-passphrase' });
expect(buffer.equals(original)).toBe(true);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
it('scrypt round-trip with multi-chunk data', async () => {
const original = randomBytes(3 * 1024);
@@ -367,7 +368,7 @@ describe('CasService – scrypt passphrase round-trip', () => {
expect(manifest.chunks.length).toBe(3);
const { buffer } = await service.restore({ manifest, passphrase: 'scrypt-multi-chunk' });
expect(buffer.equals(original)).toBe(true);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
describe('CasService – wrong scrypt passphrase', () => {
@@ -389,7 +390,7 @@ describe('CasService – wrong scrypt passphrase', () => {
await expect(
service.restore({ manifest, passphrase: 'wrong-scrypt-pass' }),
).rejects.toThrow(CasError);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
// ---------------------------------------------------------------------------
@@ -433,7 +434,7 @@ describe('CasService – passphrase + compression round-trip', () => {
expect(manifest.encryption.kdf.algorithm).toBe('scrypt');
const { buffer } = await service.restore({ manifest, passphrase: 'scrypt-compress' });
expect(buffer.equals(original)).toBe(true);
- });
+ }, SLOW_KDF_TEST_TIMEOUT_MS);
});
describe('CasService – passphrase + compression edge cases', () => {
diff --git a/test/unit/domain/services/rotateVaultPassphrase.test.js b/test/unit/domain/services/rotateVaultPassphrase.test.js
index 41c4073..f899ee7 100644
--- a/test/unit/domain/services/rotateVaultPassphrase.test.js
+++ b/test/unit/domain/services/rotateVaultPassphrase.test.js
@@ -15,7 +15,7 @@ import { getTestCryptoAdapter } from '../../../helpers/crypto-adapter.js';
import rotateVaultPassphrase from '../../../../src/domain/services/rotateVaultPassphrase.js';
import CasError from '../../../../src/domain/errors/CasError.js';
-const LONG_TEST_TIMEOUT_MS = 15000;
+const LONG_TEST_TIMEOUT_MS = 60000;
// ---------------------------------------------------------------------------
// Helpers
diff --git a/test/unit/facade/ContentAddressableStore.rotation.test.js b/test/unit/facade/ContentAddressableStore.rotation.test.js
index 80af59f..ee0c095 100644
--- a/test/unit/facade/ContentAddressableStore.rotation.test.js
+++ b/test/unit/facade/ContentAddressableStore.rotation.test.js
@@ -7,7 +7,7 @@ import { execSync } from 'node:child_process';
import ContentAddressableStore from '../../../index.js';
import { createGitPlumbing } from '../../../src/infrastructure/createGitPlumbing.js';
-const LONG_TEST_TIMEOUT_MS = 15000;
+const LONG_TEST_TIMEOUT_MS = 45000;
// ---------------------------------------------------------------------------
// Helpers
diff --git a/test/unit/scripts/release-verify.cli.test.js b/test/unit/scripts/release-verify.cli.test.js
new file mode 100644
index 0000000..b16ff8c
--- /dev/null
+++ b/test/unit/scripts/release-verify.cli.test.js
@@ -0,0 +1,35 @@
+import { describe, it, expect } from 'vitest';
+import {
+ renderJsonReport,
+ resolveOutputFormat,
+} from '../../../scripts/release/verify.js';
+
+describe('release verify CLI helpers', () => {
+ it('uses json output when --json is present', () => {
+ expect(resolveOutputFormat(['--json'])).toBe('json');
+ expect(resolveOutputFormat([])).toBe('markdown');
+ });
+
+ it('renders machine-readable release output', () => {
+ const output = renderJsonReport({
+ version: '5.3.3',
+ totalTests: 12,
+ results: [
+ { id: 'lint', label: 'Lint', passed: true, tests: null, code: 0, signal: null, errorMessage: null },
+ { id: 'unit-node', label: 'Unit Tests (Node)', passed: true, tests: 12, code: 0, signal: null, errorMessage: null },
+ ],
+ });
+
+ const report = JSON.parse(output);
+ expect(report.version).toBe('5.3.3');
+ expect(report.stepsPassed).toBe(2);
+ expect(report.totalSteps).toBe(2);
+ expect(report.totalTests).toBe(12);
+ expect(report.results[1]).toMatchObject({
+ id: 'unit-node',
+ label: 'Unit Tests (Node)',
+ tests: 12,
+ passed: true,
+ });
+ });
+});
diff --git a/test/unit/vault/VaultService.test.js b/test/unit/vault/VaultService.test.js
index 865777c..ba81b87 100644
--- a/test/unit/vault/VaultService.test.js
+++ b/test/unit/vault/VaultService.test.js
@@ -2,7 +2,7 @@ import { describe, it, expect, vi, beforeEach } from 'vitest';
import VaultService from '../../../src/domain/services/VaultService.js';
import CasError from '../../../src/domain/errors/CasError.js';
-const LONG_TEST_TIMEOUT_MS = 15000;
+const LONG_TEST_TIMEOUT_MS = 60000;
// ---------------------------------------------------------------------------
// Helpers