From 696cf70c240e67bc75dcff9abcb12a359327f60c Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 21:58:49 +0000 Subject: [PATCH 01/20] feat: add traul_meta table for version tracking Co-Authored-By: Claude Sonnet 4.6 --- src/db/schema.ts | 5 +++++ test/db/schema.test.ts | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/db/schema.ts b/src/db/schema.ts index d070887..ea8729b 100644 --- a/src/db/schema.ts +++ b/src/db/schema.ts @@ -117,6 +117,11 @@ const SCHEMA_SQL = ` INSERT INTO chunks_fts(chunks_fts, rowid, content) VALUES ('delete', old.id, old.content); INSERT INTO chunks_fts(rowid, content) VALUES (new.id, new.content); END; + + CREATE TABLE IF NOT EXISTS traul_meta ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); `; export function initializeDatabase(path: string): Database { diff --git a/test/db/schema.test.ts b/test/db/schema.test.ts index 68e203b..d67847d 100644 --- a/test/db/schema.test.ts +++ b/test/db/schema.test.ts @@ -45,4 +45,17 @@ describe("initializeDatabase", () => { expect(() => initializeDatabase(":memory:")).not.toThrow(); db.close(); }); + + it("creates traul_meta table", () => { + const db = initializeDatabase(":memory:"); + const tables = db + .query<{ name: string }, []>( + "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name" + ) + .all() + .map((r) => r.name); + + expect(tables).toContain("traul_meta"); + db.close(); + }); }); From 0c970782849d0c3b219b166b7fe6dd00ba5b9153 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 21:58:56 +0000 Subject: [PATCH 02/20] feat: export CHUNKER_VERSION constant Co-Authored-By: Claude Sonnet 4.6 --- src/lib/chunker.ts | 1 + test/lib/chunker.test.ts | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/lib/chunker.ts b/src/lib/chunker.ts index 8e4e2ec..83485f4 100644 --- a/src/lib/chunker.ts +++ b/src/lib/chunker.ts @@ -13,6 +13,7 @@ export interface Chunk { const DEFAULT_CHUNK_SIZE = 1500; const 
DEFAULT_OVERLAP = 200; export const CHUNK_THRESHOLD = 2000; +export const CHUNKER_VERSION = "1"; export function shouldChunk(text: string, threshold: number = CHUNK_THRESHOLD): boolean { return text.length > threshold; diff --git a/test/lib/chunker.test.ts b/test/lib/chunker.test.ts index a0a1d09..f680ac5 100644 --- a/test/lib/chunker.test.ts +++ b/test/lib/chunker.test.ts @@ -1,5 +1,5 @@ import { describe, it, expect } from "bun:test"; -import { chunkText, shouldChunk } from "../../src/lib/chunker"; +import { chunkText, shouldChunk, CHUNKER_VERSION } from "../../src/lib/chunker"; describe("shouldChunk", () => { it("returns false for short text", () => { @@ -74,3 +74,10 @@ describe("chunkText", () => { } }); }); + +describe("CHUNKER_VERSION", () => { + it("exports a version string", () => { + expect(typeof CHUNKER_VERSION).toBe("string"); + expect(CHUNKER_VERSION.length).toBeGreaterThan(0); + }); +}); From 4d353560fe131e30b98e62bef837906d20b92598 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 21:59:48 +0000 Subject: [PATCH 03/20] feat: add getMeta/setMeta for version tracking Co-Authored-By: Claude Sonnet 4.6 --- src/db/database.ts | 16 ++++++++++++++++ test/db/database.test.ts | 17 +++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/src/db/database.ts b/src/db/database.ts index a08e9cb..decc802 100644 --- a/src/db/database.ts +++ b/src/db/database.ts @@ -817,6 +817,22 @@ export class TraulDB { this.db.run("DELETE FROM sync_cursors WHERE source = ? AND key = ?", [source, key]); } + getMeta(key: string): string | null { + const row = this.db + .query<{ value: string }, [string]>( + "SELECT value FROM traul_meta WHERE key = ?" + ) + .get(key); + return row?.value ?? null; + } + + setMeta(key: string, value: string): void { + this.db.run( + "INSERT INTO traul_meta (key, value) VALUES (?, ?) 
ON CONFLICT(key) DO UPDATE SET value = excluded.value", + [key, value] + ); + } + close(): void { this.db.close(); } diff --git a/test/db/database.test.ts b/test/db/database.test.ts index ccfe95c..aa7dd03 100644 --- a/test/db/database.test.ts +++ b/test/db/database.test.ts @@ -268,6 +268,23 @@ describe("TraulDB", () => { }); }); + describe("meta", () => { + it("returns null for missing key", () => { + expect(db.getMeta("nonexistent")).toBeNull(); + }); + + it("stores and retrieves a value", () => { + db.setMeta("chunker_version", "1"); + expect(db.getMeta("chunker_version")).toBe("1"); + }); + + it("overwrites existing value", () => { + db.setMeta("chunker_version", "1"); + db.setMeta("chunker_version", "2"); + expect(db.getMeta("chunker_version")).toBe("2"); + }); + }); + describe("stats", () => { it("returns correct counts", () => { db.upsertMessage({ From aa84ee67066e8791a9fbe0c573d06d40b0c5b17c Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:01:06 +0000 Subject: [PATCH 04/20] feat: add resetSyncCursors and resetChunks methods Co-Authored-By: Claude Sonnet 4.6 --- src/db/database.ts | 13 ++++++++ test/db/database.test.ts | 70 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) diff --git a/src/db/database.ts b/src/db/database.ts index decc802..b7c4369 100644 --- a/src/db/database.ts +++ b/src/db/database.ts @@ -817,6 +817,19 @@ export class TraulDB { this.db.run("DELETE FROM sync_cursors WHERE source = ? 
AND key = ?", [source, key]); } + resetSyncCursors(source?: string): void { + if (source) { + this.db.run("DELETE FROM sync_cursors WHERE source = ?", [source]); + } else { + this.db.run("DELETE FROM sync_cursors"); + } + } + + resetChunks(): void { + this.db.run("DELETE FROM vec_chunks"); + this.db.run("DELETE FROM chunks"); + } + getMeta(key: string): string | null { const row = this.db .query<{ value: string }, [string]>( diff --git a/test/db/database.test.ts b/test/db/database.test.ts index aa7dd03..f6521d5 100644 --- a/test/db/database.test.ts +++ b/test/db/database.test.ts @@ -268,6 +268,76 @@ describe("TraulDB", () => { }); }); + describe("resetSyncCursors", () => { + it("clears all cursors for a source", () => { + db.setSyncCursor("markdown", "file:a.md", "hash1"); + db.setSyncCursor("markdown", "file:b.md", "hash2"); + db.setSyncCursor("slack", "channel:C1", "ts1"); + + db.resetSyncCursors("markdown"); + + expect(db.getSyncCursor("markdown", "file:a.md")).toBeNull(); + expect(db.getSyncCursor("markdown", "file:b.md")).toBeNull(); + expect(db.getSyncCursor("slack", "channel:C1")).toBe("ts1"); + }); + + it("clears all cursors when no source given", () => { + db.setSyncCursor("markdown", "file:a.md", "hash1"); + db.setSyncCursor("slack", "channel:C1", "ts1"); + + db.resetSyncCursors(); + + expect(db.getSyncCursor("markdown", "file:a.md")).toBeNull(); + expect(db.getSyncCursor("slack", "channel:C1")).toBeNull(); + }); + }); + + describe("resetChunks", () => { + it("deletes all chunks and their embeddings", () => { + db.upsertMessage({ + source: "markdown", + source_id: "md:abc", + channel_name: "notes", + author_name: "doc", + content: "x".repeat(3000), + sent_at: 1700000000, + }); + + const msg = db.db + .query<{ id: number }, [string]>("SELECT id FROM messages WHERE source_id = ?") + .get("md:abc"); + + db.replaceChunks(msg!.id, [ + { index: 0, content: "chunk 0", embeddingInput: "chunk 0" }, + { index: 1, content: "chunk 1", embeddingInput: "chunk 1" }, + 
]); + + const chunksBefore = db.getChunkEmbeddingStats(); + expect(chunksBefore.total_chunks).toBe(2); + + db.resetChunks(); + + const chunksAfter = db.getChunkEmbeddingStats(); + expect(chunksAfter.total_chunks).toBe(0); + }); + + it("does not delete messages", () => { + db.upsertMessage({ + source: "markdown", + source_id: "md:abc", + channel_name: "notes", + author_name: "doc", + content: "some content", + sent_at: 1700000000, + }); + + db.resetChunks(); + + const stats = db.getStats(); + expect(stats.total_messages).toBe(1); + }); + }); + describe("meta", () => { it("returns null for missing key", () => { expect(db.getMeta("nonexistent")).toBeNull(); From d660d54ec2f4af8a8ebd7dcea31bac03c0623448 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:02:45 +0000 Subject: [PATCH 05/20] feat: add auto-migration for chunker/embed version changes Co-Authored-By: Claude Sonnet 4.6 --- src/db/migrations.ts | 57 +++++++++++++++++++++ test/db/migrations.test.ts | 101 +++++++++++++++++++++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 src/db/migrations.ts create mode 100644 test/db/migrations.test.ts diff --git a/src/db/migrations.ts b/src/db/migrations.ts new file mode 100644 index 0000000..01af321 --- /dev/null +++ b/src/db/migrations.ts @@ -0,0 +1,57 @@ +import type { TraulDB } from "./database"; +import { CHUNKER_VERSION } from "../lib/chunker"; +import { EMBED_MODEL, EMBED_DIMS } from "../lib/embeddings"; +import * as log from "../lib/logger"; + +export interface MigrationResult { + chunksReset: boolean; + embeddingsReset: boolean; + syncCursorsReset: boolean; +} + +export function runMigrations(db: TraulDB): MigrationResult { + const result: MigrationResult = { + chunksReset: false, + embeddingsReset: false, + syncCursorsReset: false, + }; + + const storedChunkerVersion = db.getMeta("chunker_version"); + const storedEmbedModel = db.getMeta("embed_model"); + const storedEmbedDims = db.getMeta("embed_dims"); + + const currentDims = 
String(EMBED_DIMS); + + // Chunker version change → reset chunks + embeddings + markdown cursors + if (storedChunkerVersion !== null && storedChunkerVersion !== CHUNKER_VERSION) { + log.info(`Chunker updated (v${storedChunkerVersion} → v${CHUNKER_VERSION}), rechunking on next sync...`); + db.resetChunks(); + db.resetEmbeddings(EMBED_DIMS); + db.resetSyncCursors("markdown"); + result.chunksReset = true; + result.embeddingsReset = true; + result.syncCursorsReset = true; + } + + // Embed model or dims change → reset embeddings only + if ( + !result.embeddingsReset && + storedEmbedModel !== null && + (storedEmbedModel !== EMBED_MODEL || storedEmbedDims !== currentDims) + ) { + const reason = + storedEmbedModel !== EMBED_MODEL + ? `model changed (${storedEmbedModel} → ${EMBED_MODEL})` + : `dimensions changed (${storedEmbedDims} → ${currentDims})`; + log.info(`Embedding ${reason}, re-embed with 'traul embed'...`); + db.resetEmbeddings(EMBED_DIMS); + result.embeddingsReset = true; + } + + // Update stored values + db.setMeta("chunker_version", CHUNKER_VERSION); + db.setMeta("embed_model", EMBED_MODEL); + db.setMeta("embed_dims", currentDims); + + return result; +} diff --git a/test/db/migrations.test.ts b/test/db/migrations.test.ts new file mode 100644 index 0000000..380dbb7 --- /dev/null +++ b/test/db/migrations.test.ts @@ -0,0 +1,101 @@ +import { describe, it, expect, beforeEach } from "bun:test"; +import { TraulDB } from "../../src/db/database"; +import { runMigrations, type MigrationResult } from "../../src/db/migrations"; +import { CHUNKER_VERSION } from "../../src/lib/chunker"; +import { EMBED_MODEL, EMBED_DIMS } from "../../src/lib/embeddings"; + +describe("runMigrations", () => { + let db: TraulDB; + + beforeEach(() => { + db = new TraulDB(":memory:"); + }); + + it("sets initial meta values on fresh database", () => { + const result = runMigrations(db); + + expect(db.getMeta("chunker_version")).toBe(CHUNKER_VERSION); + 
expect(db.getMeta("embed_model")).toBe(EMBED_MODEL); + expect(db.getMeta("embed_dims")).toBe(String(EMBED_DIMS)); + expect(result.chunksReset).toBe(false); + expect(result.embeddingsReset).toBe(false); + expect(result.syncCursorsReset).toBe(false); + }); + + it("resets chunks when chunker_version changes", () => { + db.setMeta("chunker_version", "0"); + db.setMeta("embed_model", EMBED_MODEL); + db.setMeta("embed_dims", String(EMBED_DIMS)); + + db.upsertMessage({ + source: "markdown", + source_id: "md:test", + channel_name: "notes", + author_name: "doc", + content: "x".repeat(3000), + sent_at: 1700000000, + }); + const msg = db.db + .query<{ id: number }, [string]>("SELECT id FROM messages WHERE source_id = ?") + .get("md:test"); + db.replaceChunks(msg!.id, [ + { index: 0, content: "old chunk", embeddingInput: "old chunk" }, + ]); + db.setSyncCursor("markdown", "file:test.md", "oldhash"); + + const result = runMigrations(db); + + expect(result.chunksReset).toBe(true); + expect(result.embeddingsReset).toBe(true); + expect(result.syncCursorsReset).toBe(true); + expect(db.getChunkEmbeddingStats().total_chunks).toBe(0); + expect(db.getEmbeddingStats().embedded_messages).toBe(0); + expect(db.getSyncCursor("markdown", "file:test.md")).toBeNull(); + }); + + it("resets embeddings when embed_model changes", () => { + db.setMeta("chunker_version", CHUNKER_VERSION); + db.setMeta("embed_model", "old-model"); + db.setMeta("embed_dims", String(EMBED_DIMS)); + + const result = runMigrations(db); + + expect(result.embeddingsReset).toBe(true); + expect(result.chunksReset).toBe(false); + expect(db.getMeta("embed_model")).toBe(EMBED_MODEL); + expect(db.getEmbeddingStats().embedded_messages).toBe(0); + }); + + it("resets embeddings when embed_dims changes", () => { + db.setMeta("chunker_version", CHUNKER_VERSION); + db.setMeta("embed_model", EMBED_MODEL); + db.setMeta("embed_dims", "512"); + + const result = runMigrations(db); + + expect(result.embeddingsReset).toBe(true); + 
expect(result.chunksReset).toBe(false);
+    expect(db.getEmbeddingStats().embedded_messages).toBe(0);
+  });
+
+  it("does nothing when all versions match", () => {
+    runMigrations(db);
+
+    db.upsertMessage({
+      source: "slack",
+      source_id: "C1:1",
+      channel_name: "eng",
+      author_name: "bob",
+      content: "hello",
+      sent_at: 1700000000,
+    });
+    db.setSyncCursor("slack", "channel:C1", "ts1");
+
+    const result = runMigrations(db);
+
+    expect(result.chunksReset).toBe(false);
+    expect(result.embeddingsReset).toBe(false);
+    expect(result.syncCursorsReset).toBe(false);
+    expect(db.getSyncCursor("slack", "channel:C1")).toBe("ts1");
+  });
+});

From b5ec39c0da0d3c8126e80b81e5071aa11b6908aa Mon Sep 17 00:00:00 2001
From: Vlad Ra
Date: Wed, 18 Mar 2026 22:03:27 +0000
Subject: [PATCH 06/20] feat: run auto-migration on startup

---
 src/index.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/index.ts b/src/index.ts
index 9ce309a..277cd52 100755
--- a/src/index.ts
+++ b/src/index.ts
@@ -13,10 +13,12 @@ import { runWhatsAppAuth } from "./commands/whatsapp-auth";
 import { runDaemonStart, runDaemonStop, runDaemonStatus } from "./commands/daemon";
 import { runSql, runSchema } from "./commands/sql";
 import { runGet } from "./commands/get";
+import { runMigrations } from "./db/migrations";

 const config = loadConfig();
 ensureDbDir(config.database.path);
 const db = new TraulDB(config.database.path);
+runMigrations(db);

 const program = new Command();

From abf14d0e2ff2d089eeec5872c58bdf2ff412e666 Mon Sep 17 00:00:00 2001
From: Vlad Ra
Date: Wed, 18 Mar 2026 22:05:19 +0000
Subject: [PATCH 07/20] feat: add traul reset command for manual data layer resets

Adds `traul reset <layer>` (sync, chunks, embed, all) with optional
--source filter for sync layer. Deprecates the old `reset-embed` command.
Co-Authored-By: Claude Sonnet 4.6 --- src/commands/reset.ts | 36 ++++++++++++++++++ src/index.ts | 19 +++++++--- test/commands/reset.test.ts | 75 +++++++++++++++++++++++++++++++++++++ 3 files changed, 125 insertions(+), 5 deletions(-) create mode 100644 src/commands/reset.ts create mode 100644 test/commands/reset.test.ts diff --git a/src/commands/reset.ts b/src/commands/reset.ts new file mode 100644 index 0000000..6073519 --- /dev/null +++ b/src/commands/reset.ts @@ -0,0 +1,36 @@ +import type { TraulDB } from "../db/database"; +import { EMBED_DIMS } from "../lib/embeddings"; + +type Layer = "sync" | "chunks" | "embed" | "all"; + +const VALID_LAYERS: Layer[] = ["sync", "chunks", "embed", "all"]; + +export function runReset( + db: TraulDB, + layer: string, + options: { source?: string } +): void { + if (!VALID_LAYERS.includes(layer as Layer)) { + throw new Error(`Unknown layer: ${layer}. Valid layers: ${VALID_LAYERS.join(", ")}`); + } + + const doSync = layer === "sync" || layer === "all"; + const doChunks = layer === "chunks" || layer === "all"; + const doEmbed = layer === "embed" || layer === "all" || layer === "chunks"; + + if (doSync) { + db.resetSyncCursors(options.source); + const scope = options.source ? `${options.source} sync cursors` : "all sync cursors"; + console.log(`Reset ${scope}. Run 'traul sync' to refetch.`); + } + + if (doChunks) { + db.resetChunks(); + console.log("Reset all chunks. They will be regenerated on next 'traul sync' or 'traul embed'."); + } + + if (doEmbed) { + db.resetEmbeddings(EMBED_DIMS); + console.log("Reset all embeddings. 
Run 'traul embed' to regenerate.");
+  }
+}

diff --git a/src/index.ts b/src/index.ts
index 277cd52..5f4a893 100755
--- a/src/index.ts
+++ b/src/index.ts
@@ -13,6 +13,7 @@ import { runWhatsAppAuth } from "./commands/whatsapp-auth";
 import { runDaemonStart, runDaemonStop, runDaemonStatus } from "./commands/daemon";
 import { runSql, runSchema } from "./commands/sql";
 import { runGet } from "./commands/get";
+import { runReset } from "./commands/reset";
 import { runMigrations } from "./db/migrations";

 const config = loadConfig();
@@ -139,14 +140,22 @@ program
     db.close();
   });

+program
+  .command("reset")
+  .description("Reset a data layer (sync, chunks, embed, all)")
+  .argument("<layer>", "layer to reset: sync, chunks, embed, all")
+  .option("-s, --source <source>", "filter by source (for sync layer)")
+  .action(async (layer: string, options) => {
+    runReset(db, layer, options);
+    db.close();
+  });
+
 program
   .command("reset-embed")
-  .description("Drop all embeddings and recreate vec tables (run 'embed' after to regenerate)")
+  .description("(deprecated: use 'traul reset embed') Drop all embeddings")
   .action(async () => {
-    const { EMBED_DIMS } = await import("./lib/embeddings");
-    console.log(`Resetting vec tables to ${EMBED_DIMS} dimensions...`);
-    db.resetEmbeddings(EMBED_DIMS);
-    console.log("Done. 
Run 'traul embed' to regenerate embeddings."); + console.log("Note: 'reset-embed' is deprecated, use 'traul reset embed' instead."); + runReset(db, "embed", {}); db.close(); }); diff --git a/test/commands/reset.test.ts b/test/commands/reset.test.ts new file mode 100644 index 0000000..f497bbe --- /dev/null +++ b/test/commands/reset.test.ts @@ -0,0 +1,75 @@ +import { describe, it, expect, beforeEach } from "bun:test"; +import { TraulDB } from "../../src/db/database"; +import { runReset } from "../../src/commands/reset"; + +describe("runReset", () => { + let db: TraulDB; + + beforeEach(() => { + db = new TraulDB(":memory:"); + // Seed data + db.upsertMessage({ + source: "slack", + source_id: "C1:1", + channel_name: "eng", + author_name: "bob", + content: "hello", + sent_at: 1700000000, + }); + db.upsertMessage({ + source: "markdown", + source_id: "md:abc", + channel_name: "notes", + author_name: "doc", + content: "x".repeat(3000), + sent_at: 1700000001, + }); + const msg = db.db + .query<{ id: number }, [string]>("SELECT id FROM messages WHERE source_id = ?") + .get("md:abc"); + db.replaceChunks(msg!.id, [ + { index: 0, content: "chunk 0", embeddingInput: "chunk 0" }, + ]); + db.setSyncCursor("slack", "channel:C1", "ts1"); + db.setSyncCursor("markdown", "file:a.md", "hash1"); + }); + + it("reset sync clears all cursors", () => { + runReset(db, "sync", {}); + expect(db.getSyncCursor("slack", "channel:C1")).toBeNull(); + expect(db.getSyncCursor("markdown", "file:a.md")).toBeNull(); + }); + + it("reset sync with --source filters by source", () => { + runReset(db, "sync", { source: "markdown" }); + expect(db.getSyncCursor("markdown", "file:a.md")).toBeNull(); + expect(db.getSyncCursor("slack", "channel:C1")).toBe("ts1"); + }); + + it("reset chunks deletes chunks and resets embeddings", () => { + runReset(db, "chunks", {}); + expect(db.getChunkEmbeddingStats().total_chunks).toBe(0); + expect(db.getEmbeddingStats().embedded_messages).toBe(0); + }); + + it("reset embed drops 
vec tables", () => {
+    runReset(db, "embed", {});
+    expect(db.getEmbeddingStats().embedded_messages).toBe(0);
+  });
+
+  it("reset all clears everything", () => {
+    runReset(db, "all", {});
+    expect(db.getSyncCursor("slack", "channel:C1")).toBeNull();
+    expect(db.getChunkEmbeddingStats().total_chunks).toBe(0);
+    expect(db.getEmbeddingStats().embedded_messages).toBe(0);
+  });
+
+  it("preserves messages on all reset layers", () => {
+    runReset(db, "all", {});
+    expect(db.getStats().total_messages).toBe(2);
+  });
+
+  it("throws on invalid layer", () => {
+    expect(() => runReset(db, "invalid", {})).toThrow("Unknown layer");
+  });
+});

From 4271e9fc6e20264f58e6e8e6f7e2c80d43627365 Mon Sep 17 00:00:00 2001
From: Vlad Ra
Date: Wed, 18 Mar 2026 22:05:53 +0000
Subject: [PATCH 08/20] docs: document traul reset command and auto-migration

---
 skill.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/skill.md b/skill.md
index ba309d0..9969db6 100644
--- a/skill.md
+++ b/skill.md
@@ -163,6 +163,19 @@ Structured overview with three sections:
 2. **Stats** — total messages, channels, contacts, active signals
 3. **Volume** — last 7 days message bar chart

+### `traul reset`
+
+Reset a data layer to force regeneration. Useful when you need to re-sync, re-chunk, or re-embed data.
+
+| Subcommand | Description |
+|------------|-------------|
+| `traul reset sync [--source <source>]` | Clear sync cursors; full refetch on next sync. Optional `--source` flag filters to a specific connector (e.g., `markdown`, `slack`). |
+| `traul reset chunks` | Delete all chunks and embeddings; rechunk on next sync. |
+| `traul reset embed` | Drop and recreate vector tables; re-embed with `traul embed`. |
+| `traul reset all` | Reset everything: sync cursors + chunks + embeddings. |
+
+**Auto-migration:** Traul automatically detects version changes on startup. If the chunking algorithm or embedding model/dimensions change between versions, affected data layers are reset automatically. 
No manual action needed after upgrading. + ### Global Options | Option | Description | From 5593df475061932e07a524bb2944d9f34cc0d374 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:07:00 +0000 Subject: [PATCH 09/20] chore: bump version to 0.2.0 --- package.json | 2 +- src/index.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 935654e..877a453 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "traul", - "version": "0.1.0", + "version": "0.2.0", "description": "Personal Intelligence Engine — watches communication streams, identifies patterns, surfaces actionable insights", "license": "AGPL-3.0-only", "repository": { diff --git a/src/index.ts b/src/index.ts index 5f4a893..d2a23b0 100755 --- a/src/index.ts +++ b/src/index.ts @@ -26,7 +26,7 @@ const program = new Command(); program .name("traul") .description("Traul — Personal Intelligence Engine") - .version("0.1.0") + .version("0.2.0") .option("-v, --verbose", "enable verbose output") .hook("preAction", () => { if (program.opts().verbose) { From f2ca8eab96a27e74e8eaec6026acb3083f108cd7 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:16:46 +0000 Subject: [PATCH 10/20] chore: add node-llama-cpp dependency Co-Authored-By: Claude Opus 4.6 (1M context) --- bun.lock | 264 +++++++++++++++++++++++++++++++++++++++++++++++++-- package.json | 8 +- 2 files changed, 263 insertions(+), 9 deletions(-) diff --git a/bun.lock b/bun.lock index fcf7348..e547583 100644 --- a/bun.lock +++ b/bun.lock @@ -9,6 +9,7 @@ "commander": "^13.1.0", "googleapis": "^171.4.0", "html-to-text": "^9.0.5", + "node-llama-cpp": "^3.18.1", "sqlite-vec": "^0.1.7-alpha.2", }, "devDependencies": { @@ -17,11 +18,66 @@ }, }, }, + "trustedDependencies": [ + "node-llama-cpp", + ], "packages": { + "@huggingface/jinja": ["@huggingface/jinja@0.5.6", "", {}, "sha512-MyMWyLnjqo+KRJYSH7oWNbsOn5onuIvfXYPcc0WOGxU0eHUV7oAYUoQTl2BMdu7ml+ea/bu11UM+EshbeHwtIA=="], + 
"@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + "@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], + + "@kwsites/file-exists": ["@kwsites/file-exists@1.1.1", "", { "dependencies": { "debug": "^4.1.1" } }, "sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw=="], + + "@kwsites/promise-deferred": ["@kwsites/promise-deferred@1.1.1", "", {}, "sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw=="], + + "@node-llama-cpp/linux-arm64": ["@node-llama-cpp/linux-arm64@3.18.1", "", { "os": "linux", "cpu": [ "x64", "arm64", ] }, "sha512-rXMgZxUay78FOJV/fJ67apYP9eElH5jd4df5YRKPlLhLHHchuOSyDn+qtyW/L/EnPzpogoLkmULqCkdXU39XsQ=="], + + "@node-llama-cpp/linux-armv7l": ["@node-llama-cpp/linux-armv7l@3.18.1", "", { "os": "linux", "cpu": [ "arm", "x64", ] }, "sha512-BrJL2cGo0pN5xd5nw+CzTn2rFMpz9MJyZZPUY81ptGkF2uIuXT2hdCVh56i9ImQrTwBfq1YcZL/l/Qe/1+HR/Q=="], + + "@node-llama-cpp/linux-x64": ["@node-llama-cpp/linux-x64@3.18.1", "", { "os": "linux", "cpu": "x64" }, "sha512-tRmWcsyvAcqJHQHXHsaOkx6muGbcirA9nRdNgH6n7bjGUw4VuoBD3dChyNF3/Ktt7ohB9kz+XhhyZjbDHpXyMA=="], + + "@node-llama-cpp/linux-x64-cuda": ["@node-llama-cpp/linux-x64-cuda@3.18.1", "", { "os": "linux", "cpu": "x64" }, "sha512-qOaYP4uwsUoBHQ/7xSOvyJIuXapS57Al+Sudgi00f96ldNZLKe1vuSGptAi5LTM2lIj66PKm6h8PlRWctwsZ2g=="], + + "@node-llama-cpp/linux-x64-cuda-ext": ["@node-llama-cpp/linux-x64-cuda-ext@3.18.1", "", { "os": "linux", "cpu": "x64" }, 
"sha512-VqyKhAVHPCpFzh0f1koCBgpThL+04QOXwv0oDQ8s8YcpfMMOXQlBhTB0plgTh0HrPExoObfTS4ohkrbyGgmztQ=="], + + "@node-llama-cpp/linux-x64-vulkan": ["@node-llama-cpp/linux-x64-vulkan@3.18.1", "", { "os": "linux", "cpu": "x64" }, "sha512-SIaNTK5pUPhwJD0gmiQfHa8OrRctVMmnqu+slJrz2Mzgg/XrwFndJlS9hvc+jSjTXCouwf7sYeQaaJWvQgBh/A=="], + + "@node-llama-cpp/mac-arm64-metal": ["@node-llama-cpp/mac-arm64-metal@3.18.1", "", { "os": "darwin", "cpu": [ "x64", "arm64", ] }, "sha512-cyZTdsUMlvuRlGmkkoBbN3v/DT6NuruEqoQYd9CqIrPyLa1xLNBTSKIZ9SgRnw23iCOj4URfITvRP+2pu63LuQ=="], + + "@node-llama-cpp/mac-x64": ["@node-llama-cpp/mac-x64@3.18.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-GfCPgdltaIpBhEnQ7WfsrRXrZO9r9pBtDUAQMXRuJwOPP5q7xKrQZUXI6J6mpc8tAG0//CTIuGn4hTKoD/8V8w=="], + + "@node-llama-cpp/win-arm64": ["@node-llama-cpp/win-arm64@3.18.1", "", { "os": "win32", "cpu": [ "x64", "arm64", ] }, "sha512-S05YUzBMVSRS5KNbOS26cDYugeQHqogI3uewtTUBVC0tPbTHRSKjsdicmgWru1eNAry399LWWhzOf/3St/qsAw=="], + + "@node-llama-cpp/win-x64": ["@node-llama-cpp/win-x64@3.18.1", "", { "os": "win32", "cpu": "x64" }, "sha512-QLDVphPl+YDI+x/VYYgIV1N9g0GMXk3PqcoopOUG3cBRUtce7FO+YX903YdRJezs4oKbIp8YaO+xYBgeUSqhpA=="], + + "@node-llama-cpp/win-x64-cuda": ["@node-llama-cpp/win-x64-cuda@3.18.1", "", { "os": "win32", "cpu": "x64" }, "sha512-drgJmBhnxGQtB/SLo4sf4PPSuxRv3MdNP0FF6rKPY9TtzEOV293bRQyYEu/JYwvXfVApAIsRaJUTGvCkA9Qobw=="], + + "@node-llama-cpp/win-x64-cuda-ext": ["@node-llama-cpp/win-x64-cuda-ext@3.18.1", "", { "os": "win32", "cpu": "x64" }, "sha512-u0FzJBQsJA355ksKERxwPJhlcWl3ZJSNkU2ZUwDEiKNOCbv3ybvSCIEyDvB63wdtkfVUuCRJWijZnpDZxrCGqg=="], + + "@node-llama-cpp/win-x64-vulkan": ["@node-llama-cpp/win-x64-vulkan@3.18.1", "", { "os": "win32", "cpu": "x64" }, "sha512-PjmxrnPToi7y0zlP7l+hRIhvOmuEv94P6xZ11vjqICEJu8XdAJpvTfPKgDW4W0p0v4+So8ZiZYLUuwIHcsseyQ=="], + "@pkgjs/parseargs": ["@pkgjs/parseargs@0.11.0", "", {}, "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg=="], + 
"@reflink/reflink": ["@reflink/reflink@0.1.19", "", { "optionalDependencies": { "@reflink/reflink-darwin-arm64": "0.1.19", "@reflink/reflink-darwin-x64": "0.1.19", "@reflink/reflink-linux-arm64-gnu": "0.1.19", "@reflink/reflink-linux-arm64-musl": "0.1.19", "@reflink/reflink-linux-x64-gnu": "0.1.19", "@reflink/reflink-linux-x64-musl": "0.1.19", "@reflink/reflink-win32-arm64-msvc": "0.1.19", "@reflink/reflink-win32-x64-msvc": "0.1.19" } }, "sha512-DmCG8GzysnCZ15bres3N5AHCmwBwYgp0As6xjhQ47rAUTUXxJiK+lLUxaGsX3hd/30qUpVElh05PbGuxRPgJwA=="], + + "@reflink/reflink-darwin-arm64": ["@reflink/reflink-darwin-arm64@0.1.19", "", { "os": "darwin", "cpu": "arm64" }, "sha512-ruy44Lpepdk1FqDz38vExBY/PVUsjxZA+chd9wozjUH9JjuDT/HEaQYA6wYN9mf041l0yLVar6BCZuWABJvHSA=="], + + "@reflink/reflink-darwin-x64": ["@reflink/reflink-darwin-x64@0.1.19", "", { "os": "darwin", "cpu": "x64" }, "sha512-By85MSWrMZa+c26TcnAy8SDk0sTUkYlNnwknSchkhHpGXOtjNDUOxJE9oByBnGbeuIE1PiQsxDG3Ud+IVV9yuA=="], + + "@reflink/reflink-linux-arm64-gnu": ["@reflink/reflink-linux-arm64-gnu@0.1.19", "", { "os": "linux", "cpu": "arm64" }, "sha512-7P+er8+rP9iNeN+bfmccM4hTAaLP6PQJPKWSA4iSk2bNvo6KU6RyPgYeHxXmzNKzPVRcypZQTpFgstHam6maVg=="], + + "@reflink/reflink-linux-arm64-musl": ["@reflink/reflink-linux-arm64-musl@0.1.19", "", { "os": "linux", "cpu": "arm64" }, "sha512-37iO/Dp6m5DDaC2sf3zPtx/hl9FV3Xze4xoYidrxxS9bgP3S8ALroxRK6xBG/1TtfXKTvolvp+IjrUU6ujIGmA=="], + + "@reflink/reflink-linux-x64-gnu": ["@reflink/reflink-linux-x64-gnu@0.1.19", "", { "os": "linux", "cpu": "x64" }, "sha512-jbI8jvuYCaA3MVUdu8vLoLAFqC+iNMpiSuLbxlAgg7x3K5bsS8nOpTRnkLF7vISJ+rVR8W+7ThXlXlUQ93ulkw=="], + + "@reflink/reflink-linux-x64-musl": ["@reflink/reflink-linux-x64-musl@0.1.19", "", { "os": "linux", "cpu": "x64" }, "sha512-e9FBWDe+lv7QKAwtKOt6A2W/fyy/aEEfr0g6j/hWzvQcrzHCsz07BNQYlNOjTfeytrtLU7k449H1PI95jA4OjQ=="], + + "@reflink/reflink-win32-arm64-msvc": ["@reflink/reflink-win32-arm64-msvc@0.1.19", "", { "os": "win32", "cpu": "arm64" }, 
"sha512-09PxnVIQcd+UOn4WAW73WU6PXL7DwGS6wPlkMhMg2zlHHG65F3vHepOw06HFCq+N42qkaNAc8AKIabWvtk6cIQ=="], + + "@reflink/reflink-win32-x64-msvc": ["@reflink/reflink-win32-x64-msvc@0.1.19", "", { "os": "win32", "cpu": "x64" }, "sha512-E//yT4ni2SyhwP8JRjVGWr3cbnhWDiPLgnQ66qqaanjjnMiu3O/2tjCPQXlcGc/DEYofpDc9fvhv6tALQsMV9w=="], + "@selderee/plugin-htmlparser2": ["@selderee/plugin-htmlparser2@0.11.0", "", { "dependencies": { "domhandler": "^5.0.3", "selderee": "^0.11.0" } }, "sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ=="], "@slack/logger": ["@slack/logger@4.0.1", "", { "dependencies": { "@types/node": ">=18" } }, "sha512-6cmdPrV/RYfd2U0mDGiMK8S7OJqpCTm7enMLRR3edccsPX8j7zXTLnaEF4fhxxJJTAIOil6+qZrnUPTuaLvwrQ=="], @@ -30,6 +86,8 @@ "@slack/web-api": ["@slack/web-api@7.15.0", "", { "dependencies": { "@slack/logger": "^4.0.1", "@slack/types": "^2.20.1", "@types/node": ">=18", "@types/retry": "0.12.0", "axios": "^1.13.5", "eventemitter3": "^5.0.1", "form-data": "^4.0.4", "is-electron": "2.2.2", "is-stream": "^2", "p-queue": "^6", "p-retry": "^4", "retry": "^0.13.1" } }, "sha512-va7zYIt3QHG1x9M/jqXXRPFMoOVlVSSRHC5YH+DzKYsrz5xUKOA3lR4THsu/Zxha9N1jOndbKFKLtr0WOPW1Vw=="], + "@tinyhttp/content-disposition": ["@tinyhttp/content-disposition@2.2.4", "", {}, "sha512-5Kc5CM2Ysn3vTTArBs2vESUt0AQiWZA86yc1TI3B+lxXmtEq133C1nxXNOgnzhrivdPZIh3zLj5gDnZjoLL5GA=="], + "@types/bun": ["@types/bun@1.3.10", "", { "dependencies": { "bun-types": "1.3.10" } }, "sha512-0+rlrUrOrTSskibryHbvQkDOWRJwJZqZlxrUs1u4oOoTln8+WIXBPmAuCF35SWB2z4Zl3E84Nl/D0P7803nigQ=="], "@types/html-to-text": ["@types/html-to-text@9.0.4", "", {}, "sha512-pUY3cKH/Nm2yYrEmDlPR1mR7yszjGx4DrwPjQ702C4/D5CwHuZTgZdIdwPkRbcuhs7BAh2L5rg3CL5cbRiGTCQ=="], @@ -40,10 +98,14 @@ "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], + "ansi-escapes": ["ansi-escapes@6.2.1", "", {}, 
"sha512-4nJ3yixlEthEJ9Rk4vPcdBRkZvQZlYyu8j4/Mqz5sgIkddmEnH2Yj2ZrnP9S3tQOvSNRUIgVNF/1yPpRAGNRig=="], + "ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], "ansi-styles": ["ansi-styles@6.2.3", "", {}, "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg=="], + "async-retry": ["async-retry@1.3.3", "", { "dependencies": { "retry": "0.13.1" } }, "sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw=="], + "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], "axios": ["axios@1.13.6", "", { "dependencies": { "follow-redirects": "^1.15.11", "form-data": "^4.0.5", "proxy-from-env": "^1.1.0" } }, "sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ=="], @@ -60,10 +122,28 @@ "bun-types": ["bun-types@1.3.10", "", { "dependencies": { "@types/node": "*" } }, "sha512-tcpfCCl6XWo6nCVnpcVrxQ+9AYN1iqMIzgrSKYMB/fjLtV2eyAVEg7AxQJuCq/26R6HpKWykQXuSOq/21RYcbg=="], + "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], + "call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], "call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="], + "chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], + + "chmodrp": ["chmodrp@1.0.2", "", {}, 
"sha512-TdngOlFV1FLTzU0o1w8MB6/BFywhtLC0SzRTGJU7T9lmdjlCWeMRt1iVo0Ki+ldwNk0BqNiKoc8xpLZEQ8mY1w=="], + + "chownr": ["chownr@3.0.0", "", {}, "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g=="], + + "ci-info": ["ci-info@4.4.0", "", {}, "sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg=="], + + "cli-cursor": ["cli-cursor@5.0.0", "", { "dependencies": { "restore-cursor": "^5.0.0" } }, "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw=="], + + "cli-spinners": ["cli-spinners@2.9.2", "", {}, "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg=="], + + "cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="], + + "cmake-js": ["cmake-js@8.0.0", "", { "dependencies": { "debug": "^4.4.3", "fs-extra": "^11.3.3", "node-api-headers": "^1.8.0", "rc": "1.2.8", "semver": "^7.7.3", "tar": "^7.5.6", "url-join": "^4.0.1", "which": "^6.0.0", "yargs": "^17.7.2" }, "bin": { "cmake-js": "bin/cmake-js" } }, "sha512-YbUP88RDwCvoQkZhRtGURYm9RIpWdtvZuhT87fKNoLjk8kIFIFeARpKfuZQGdwfH99GZpUmqSfcDrK62X7lTgg=="], + "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], @@ -78,6 +158,8 @@ "debug": ["debug@4.4.3", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], + "deep-extend": ["deep-extend@0.6.0", "", {}, 
"sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="], + "deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="], "delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="], @@ -96,10 +178,12 @@ "ecdsa-sig-formatter": ["ecdsa-sig-formatter@1.0.11", "", { "dependencies": { "safe-buffer": "^5.0.1" } }, "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ=="], - "emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + "emoji-regex": ["emoji-regex@10.6.0", "", {}, "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], "entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], + "env-var": ["env-var@7.5.0", "", {}, "sha512-mKZOzLRN0ETzau2W2QXefbFjo5EF4yWq28OyKb9ICdeNhHJlOE/pHHnz4hdYJ9cNZXcJHo5xN4OT4pzuSHSNvA=="], + "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], @@ -108,12 +192,18 @@ "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + "eventemitter3": ["eventemitter3@5.0.4", "", {}, 
"sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw=="], "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="], "fetch-blob": ["fetch-blob@3.2.0", "", { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ=="], + "filename-reserved-regex": ["filename-reserved-regex@3.0.0", "", {}, "sha512-hn4cQfU6GOT/7cFHXBqeBg2TbrMBgdD0kcjLhvSQYYwm3s4B6cjvBfb7nBALJLAXqmU5xajSa7X2NnUud/VCdw=="], + + "filenamify": ["filenamify@6.0.0", "", { "dependencies": { "filename-reserved-regex": "^3.0.0" } }, "sha512-vqIlNogKeyD3yzrm0yhRMQg8hOVwYcYRfjEoODd49iCprMn4HL85gK3HcykQE53EPIpX3HcAbGA5ELQv216dAQ=="], + "follow-redirects": ["follow-redirects@1.15.11", "", {}, "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ=="], "foreground-child": ["foreground-child@3.3.1", "", { "dependencies": { "cross-spawn": "^7.0.6", "signal-exit": "^4.0.1" } }, "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw=="], @@ -122,12 +212,18 @@ "formdata-polyfill": ["formdata-polyfill@4.0.10", "", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="], + "fs-extra": ["fs-extra@11.3.4", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA=="], + "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], "gaxios": ["gaxios@7.1.3", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2", "rimraf": "^5.0.1" } }, 
"sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ=="], "gcp-metadata": ["gcp-metadata@8.1.2", "", { "dependencies": { "gaxios": "^7.0.0", "google-logging-utils": "^1.0.0", "json-bigint": "^1.0.0" } }, "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg=="], + "get-caller-file": ["get-caller-file@2.0.5", "", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], + + "get-east-asian-width": ["get-east-asian-width@1.5.0", "", {}, "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA=="], + "get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="], "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], @@ -144,6 +240,8 @@ "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], + "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], @@ -156,24 +254,44 @@ 
"https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], + "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], + + "ini": ["ini@1.3.8", "", {}, "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="], + + "ipull": ["ipull@3.9.5", "", { "dependencies": { "@tinyhttp/content-disposition": "^2.2.0", "async-retry": "^1.3.3", "chalk": "^5.3.0", "ci-info": "^4.0.0", "cli-spinners": "^2.9.2", "commander": "^10.0.0", "eventemitter3": "^5.0.1", "filenamify": "^6.0.0", "fs-extra": "^11.1.1", "is-unicode-supported": "^2.0.0", "lifecycle-utils": "^2.0.1", "lodash.debounce": "^4.0.8", "lowdb": "^7.0.1", "pretty-bytes": "^6.1.0", "pretty-ms": "^8.0.0", "sleep-promise": "^9.1.0", "slice-ansi": "^7.1.0", "stdout-update": "^4.0.1", "strip-ansi": "^7.1.0" }, "optionalDependencies": { "@reflink/reflink": "^0.1.16" }, "bin": { "ipull": "dist/cli/cli.js" } }, "sha512-5w/yZB5lXmTfsvNawmvkCjYo4SJNuKQz/av8TC1UiOyfOHyaM+DReqbpU2XpWYfmY+NIUbRRH8PUAWsxaS+IfA=="], + "is-electron": ["is-electron@2.2.2", "", {}, "sha512-FO/Rhvz5tuw4MCWkpMzHFKWD2LsfHzIb7i6MdPYZ/KW7AlxawyLkqdy+jPZP1WubqEADE3O4FUENlJHDfQASRg=="], - "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + "is-fullwidth-code-point": ["is-fullwidth-code-point@5.1.0", "", { "dependencies": { "get-east-asian-width": "^1.3.1" } }, "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ=="], + + "is-interactive": ["is-interactive@2.0.0", "", {}, "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ=="], "is-stream": ["is-stream@2.0.1", "", {}, 
"sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], - "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + "is-unicode-supported": ["is-unicode-supported@2.1.0", "", {}, "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ=="], + + "isexe": ["isexe@4.0.0", "", {}, "sha512-FFUtZMpoZ8RqHS3XeXEmHWLA4thH+ZxCv2lOiPIn1Xc7CxrqhWzNSDzD+/chS/zbYezmiwWLdQC09JdQKmthOw=="], "jackspeak": ["jackspeak@3.4.3", "", { "dependencies": { "@isaacs/cliui": "^8.0.2" }, "optionalDependencies": { "@pkgjs/parseargs": "^0.11.0" } }, "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw=="], "json-bigint": ["json-bigint@1.0.0", "", { "dependencies": { "bignumber.js": "^9.0.0" } }, "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ=="], + "jsonfile": ["jsonfile@6.2.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="], + "jwa": ["jwa@2.0.1", "", { "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", "safe-buffer": "^5.0.1" } }, "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg=="], "jws": ["jws@4.0.1", "", { "dependencies": { "jwa": "^2.0.1", "safe-buffer": "^5.0.1" } }, "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA=="], "leac": ["leac@0.6.0", "", {}, "sha512-y+SqErxb8h7nE/fiEX07jsbuhrpO9lL8eca7/Y1nuWV2moNlXhyd59iDGcRf6moVyDMbmTNzL40SUyrFU/yDpg=="], + "lifecycle-utils": ["lifecycle-utils@3.1.1", "", {}, "sha512-gNd3OvhFNjHykJE3uGntz7UuPzWlK9phrIdXxU9Adis0+ExkwnZibfxCJWiWWZ+a6VbKiZrb+9D9hCQWd4vjTg=="], + + "lodash.debounce": ["lodash.debounce@4.0.8", "", {}, 
"sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="], + + "log-symbols": ["log-symbols@7.0.1", "", { "dependencies": { "is-unicode-supported": "^2.0.0", "yoctocolors": "^2.1.1" } }, "sha512-ja1E3yCr9i/0hmBVaM0bfwDjnGy8I/s6PP4DFp+yP+a+mrHO4Rm7DtmnqROTUkHIkqffC84YY7AeqX6oFk0WFg=="], + + "lowdb": ["lowdb@7.0.1", "", { "dependencies": { "steno": "^4.0.2" } }, "sha512-neJAj8GwF0e8EpycYIDFqEPcx9Qz4GUho20jWFR7YiFeXzF1YMLdxB36PypcTSPMA+4+LvgyMacYhlr18Zlymw=="], + "lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], "math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], @@ -182,18 +300,36 @@ "mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], + "mimic-function": ["mimic-function@5.0.1", "", {}, "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA=="], + "minimatch": ["minimatch@9.0.9", "", { "dependencies": { "brace-expansion": "^2.0.2" } }, "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg=="], + "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], + "minipass": ["minipass@7.1.3", "", {}, "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A=="], + "minizlib": ["minizlib@3.1.0", "", { "dependencies": { "minipass": "^7.1.2" } }, "sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw=="], + "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + "nanoid": ["nanoid@5.1.7", "", { "bin": { 
"nanoid": "bin/nanoid.js" } }, "sha512-ua3NDgISf6jdwezAheMOk4mbE1LXjm1DfMUDMuJf4AqxLFK3ccGpgWizwa5YV7Yz9EpXwEaWoRXSb/BnV0t5dQ=="], + + "node-addon-api": ["node-addon-api@8.6.0", "", {}, "sha512-gBVjCaqDlRUk0EwoPNKzIr9KkS9041G/q31IBShPs1Xz6UTA+EXdZADbzqAJQrpDRq71CIMnOP5VMut3SL0z5Q=="], + + "node-api-headers": ["node-api-headers@1.8.0", "", {}, "sha512-jfnmiKWjRAGbdD1yQS28bknFM1tbHC1oucyuMPjmkEs+kpiu76aRs40WlTmBmyEgzDM76ge1DQ7XJ3R5deiVjQ=="], + "node-domexception": ["node-domexception@1.0.0", "", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="], "node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], + "node-llama-cpp": ["node-llama-cpp@3.18.1", "", { "dependencies": { "@huggingface/jinja": "^0.5.6", "async-retry": "^1.3.3", "bytes": "^3.1.2", "chalk": "^5.6.2", "chmodrp": "^1.0.2", "cmake-js": "^8.0.0", "cross-spawn": "^7.0.6", "env-var": "^7.5.0", "filenamify": "^6.0.0", "fs-extra": "^11.3.4", "ignore": "^7.0.4", "ipull": "^3.9.5", "is-unicode-supported": "^2.1.0", "lifecycle-utils": "^3.1.1", "log-symbols": "^7.0.1", "nanoid": "^5.1.6", "node-addon-api": "^8.6.0", "ora": "^9.3.0", "pretty-ms": "^9.3.0", "proper-lockfile": "^4.1.2", "semver": "^7.7.1", "simple-git": "^3.33.0", "slice-ansi": "^8.0.0", "stdout-update": "^4.0.1", "strip-ansi": "^7.2.0", "validate-npm-package-name": "^7.0.2", "which": "^6.0.1", "yargs": "^17.7.2" }, "optionalDependencies": { "@node-llama-cpp/linux-arm64": "3.18.1", "@node-llama-cpp/linux-armv7l": "3.18.1", "@node-llama-cpp/linux-x64": "3.18.1", "@node-llama-cpp/linux-x64-cuda": "3.18.1", "@node-llama-cpp/linux-x64-cuda-ext": "3.18.1", "@node-llama-cpp/linux-x64-vulkan": "3.18.1", "@node-llama-cpp/mac-arm64-metal": "3.18.1", "@node-llama-cpp/mac-x64": "3.18.1", "@node-llama-cpp/win-arm64": 
"3.18.1", "@node-llama-cpp/win-x64": "3.18.1", "@node-llama-cpp/win-x64-cuda": "3.18.1", "@node-llama-cpp/win-x64-cuda-ext": "3.18.1", "@node-llama-cpp/win-x64-vulkan": "3.18.1" }, "peerDependencies": { "typescript": ">=5.0.0" }, "optionalPeers": ["typescript"], "bin": { "node-llama-cpp": "dist/cli/cli.js", "nlc": "dist/cli/cli.js" } }, "sha512-w0zfuy/IKS2fhrbed5SylZDXJHTVz4HnkwZ4UrFPgSNwJab3QIPwIl4lyCKHHy9flLrtxsAuV5kXfH3HZ6bb8w=="], + "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], + "onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], + + "ora": ["ora@9.3.0", "", { "dependencies": { "chalk": "^5.6.2", "cli-cursor": "^5.0.0", "cli-spinners": "^3.2.0", "is-interactive": "^2.0.0", "is-unicode-supported": "^2.1.0", "log-symbols": "^7.0.1", "stdin-discarder": "^0.3.1", "string-width": "^8.1.0" } }, "sha512-lBX72MWFduWEf7v7uWf5DHp9Jn5BI8bNPGuFgtXMmr2uDz2Gz2749y3am3agSDdkhHPHYmmxEGSKH85ZLGzgXw=="], + "p-finally": ["p-finally@1.0.0", "", {}, "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow=="], "p-queue": ["p-queue@6.6.2", "", { "dependencies": { "eventemitter3": "^4.0.4", "p-timeout": "^3.2.0" } }, "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ=="], @@ -204,6 +340,8 @@ "package-json-from-dist": ["package-json-from-dist@1.0.1", "", {}, "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw=="], + "parse-ms": ["parse-ms@4.0.0", "", {}, "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw=="], + "parseley": ["parseley@0.12.1", "", { "dependencies": { "leac": "^0.6.0", "peberminta": "^0.9.0" } }, 
"sha512-e6qHKe3a9HWr0oMRVDTRhKce+bRO8VGQR3NyVwcjwrbhMmFCX9KszEV35+rn4AdilFAq9VPxP/Fe1wC9Qjd2lw=="], "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], @@ -212,10 +350,22 @@ "peberminta": ["peberminta@0.9.0", "", {}, "sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ=="], + "pretty-bytes": ["pretty-bytes@6.1.1", "", {}, "sha512-mQUvGU6aUFQ+rNvTIAcZuWGRT9a6f6Yrg9bHs4ImKF+HZCEK+plBvnAZYSIQztknZF2qnzNtr6F8s0+IuptdlQ=="], + + "pretty-ms": ["pretty-ms@9.3.0", "", { "dependencies": { "parse-ms": "^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="], + + "proper-lockfile": ["proper-lockfile@4.1.2", "", { "dependencies": { "graceful-fs": "^4.2.4", "retry": "^0.12.0", "signal-exit": "^3.0.2" } }, "sha512-TjNPblN4BwAWMXU8s9AEz4JmQxnD1NNL7bNOY/AKUzyamc379FWASUhc/K1pL2noVb+XmZKLL68cjzLsiOAMaA=="], + "proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="], "qs": ["qs@6.15.0", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ=="], + "rc": ["rc@1.2.8", "", { "dependencies": { "deep-extend": "^0.6.0", "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" }, "bin": { "rc": "./cli.js" } }, "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw=="], + + "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], + + "restore-cursor": ["restore-cursor@5.1.0", "", { "dependencies": { "onetime": "^7.0.0", "signal-exit": "^4.1.0" } }, "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA=="], + "retry": ["retry@0.13.1", 
"", {}, "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg=="], "rimraf": ["rimraf@5.0.10", "", { "dependencies": { "glob": "^10.3.7" }, "bin": { "rimraf": "dist/esm/bin.mjs" } }, "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ=="], @@ -224,6 +374,8 @@ "selderee": ["selderee@0.11.0", "", { "dependencies": { "parseley": "^0.12.0" } }, "sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA=="], + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], + "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], @@ -236,7 +388,13 @@ "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], - "signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + "signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "simple-git": ["simple-git@3.33.0", "", { "dependencies": { "@kwsites/file-exists": "^1.1.1", "@kwsites/promise-deferred": "^1.1.1", "debug": "^4.4.0" } }, "sha512-D4V/tGC2sjsoNhoMybKyGoE+v8A60hRawKQ1iFRA1zwuDgGZCBJ4ByOzZ5J8joBbi4Oam0qiPH+GhzmSBwbJng=="], + + "sleep-promise": ["sleep-promise@9.1.0", "", {}, 
"sha512-UHYzVpz9Xn8b+jikYSD6bqvf754xL2uBUzDFwiU6NcdZeifPr6UfgU43xpkPu67VMS88+TI2PSI7Eohgqf2fKA=="], + + "slice-ansi": ["slice-ansi@8.0.0", "", { "dependencies": { "ansi-styles": "^6.2.3", "is-fullwidth-code-point": "^5.1.0" } }, "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg=="], "sqlite-vec": ["sqlite-vec@0.1.7-alpha.2", "", { "optionalDependencies": { "sqlite-vec-darwin-arm64": "0.1.7-alpha.2", "sqlite-vec-darwin-x64": "0.1.7-alpha.2", "sqlite-vec-linux-arm64": "0.1.7-alpha.2", "sqlite-vec-linux-x64": "0.1.7-alpha.2", "sqlite-vec-windows-x64": "0.1.7-alpha.2" } }, "sha512-rNgRCv+4V4Ed3yc33Qr+nNmjhtrMnnHzXfLVPeGb28Dx5mmDL3Ngw/Wk8vhCGjj76+oC6gnkmMG8y73BZWGBwQ=="], @@ -250,7 +408,13 @@ "sqlite-vec-windows-x64": ["sqlite-vec-windows-x64@0.1.7-alpha.2", "", { "os": "win32", "cpu": "x64" }, "sha512-TRP6hTjAcwvQ6xpCZvjP00pdlda8J38ArFy1lMYhtQWXiIBmWnhMaMbq4kaeCYwvTTddfidatRS+TJrwIKB/oQ=="], - "string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + "stdin-discarder": ["stdin-discarder@0.3.1", "", {}, "sha512-reExS1kSGoElkextOcPkel4NE99S0BWxjUHQeDFnR8S993JxpPX7KU4MNmO19NXhlJp+8dmdCbKQVNgLJh2teA=="], + + "stdout-update": ["stdout-update@4.0.1", "", { "dependencies": { "ansi-escapes": "^6.2.0", "ansi-styles": "^6.2.1", "string-width": "^7.1.0", "strip-ansi": "^7.1.0" } }, "sha512-wiS21Jthlvl1to+oorePvcyrIkiG/6M3D3VTmDUlJm7Cy6SbFhKkAvX+YBuHLxck/tO3mrdpC/cNesigQc3+UQ=="], + + "steno": ["steno@4.0.2", "", {}, "sha512-yhPIQXjrlt1xv7dyPQg2P17URmXbuM5pdGkpiMB3RenprfiBlvK415Lctfe0eshk90oA7/tNq7WEiMK8RSP39A=="], + + "string-width": ["string-width@8.2.0", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw=="], 
"string-width-cjs": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], @@ -258,40 +422,126 @@ "strip-ansi-cjs": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + "strip-json-comments": ["strip-json-comments@2.0.1", "", {}, "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ=="], + + "tar": ["tar@7.5.11", "", { "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", "minipass": "^7.1.2", "minizlib": "^3.1.0", "yallist": "^5.0.0" } }, "sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ=="], + "undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="], + "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], + + "url-join": ["url-join@4.0.1", "", {}, "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA=="], + "url-template": ["url-template@2.0.8", "", {}, "sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw=="], + "validate-npm-package-name": ["validate-npm-package-name@7.0.2", "", {}, "sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A=="], + "web-streams-polyfill": ["web-streams-polyfill@3.3.3", "", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="], - "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, 
"sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + "which": ["which@6.0.1", "", { "dependencies": { "isexe": "^4.0.0" }, "bin": { "node-which": "bin/which.js" } }, "sha512-oGLe46MIrCRqX7ytPUf66EAYvdeMIZYn3WaocqqKZAxrBpkqHfL/qvTyJ/bTk5+AqHCjXmrv3CEWgy368zhRUg=="], - "wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + "wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], "wrap-ansi-cjs": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], + "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], + + "yallist": ["yallist@5.0.0", "", {}, "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw=="], + + "yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], + + "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], + + "yoctocolors": ["yoctocolors@2.1.2", "", {}, "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="], + + "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { 
"dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "@isaacs/cliui/wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + + "cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "cliui/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "cross-spawn/which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "foreground-child/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + "gcp-metadata/gaxios": ["gaxios@7.1.4", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2" } }, "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA=="], "googleapis-common/gaxios": ["gaxios@7.1.4", "", { "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", "node-fetch": "^3.3.2" } }, "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA=="], + "ipull/commander": ["commander@10.0.1", "", {}, "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug=="], + + "ipull/lifecycle-utils": 
["lifecycle-utils@2.1.0", "", {}, "sha512-AnrXnE2/OF9PHCyFg0RSqsnQTzV991XaZA/buhFDoc58xU7rhSCDgCz/09Lqpsn4MpoPHt7TRAXV1kWZypFVsA=="], + + "ipull/pretty-ms": ["pretty-ms@8.0.0", "", { "dependencies": { "parse-ms": "^3.0.0" } }, "sha512-ASJqOugUF1bbzI35STMBUpZqdfYKlJugy6JBziGi2EE+AL5JPJGSzvpeVXojxrr0ViUYoToUjb5kjSEGf7Y83Q=="], + + "ipull/slice-ansi": ["slice-ansi@7.1.2", "", { "dependencies": { "ansi-styles": "^6.2.1", "is-fullwidth-code-point": "^5.0.0" } }, "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w=="], + + "ora/cli-spinners": ["cli-spinners@3.4.0", "", {}, "sha512-bXfOC4QcT1tKXGorxL3wbJm6XJPDqEnij2gQ2m7ESQuE+/z9YFIWnl/5RpTiKWbMq3EVKR4fRLJGn6DVfu0mpw=="], + "p-queue/eventemitter3": ["eventemitter3@4.0.7", "", {}, "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="], + "proper-lockfile/retry": ["retry@0.12.0", "", {}, "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow=="], + + "restore-cursor/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "stdout-update/string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], + "string-width-cjs/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + "string-width-cjs/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + "string-width-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], "strip-ansi-cjs/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + "wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "wrap-ansi/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "wrap-ansi/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + "wrap-ansi-cjs/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], "wrap-ansi-cjs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], "wrap-ansi-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + "yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, 
"sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "cliui/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "cliui/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "cross-spawn/which/isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "ipull/pretty-ms/parse-ms": ["parse-ms@3.0.0", "", {}, "sha512-Tpb8Z7r7XbbtBTrM9UhpkzzaMrqA2VXMT3YChzYltwV3P3pM6t8wl7TvpMnSTosz1aQAdVib7kdoys7vYOPerw=="], + "string-width-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], "wrap-ansi-cjs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + "wrap-ansi-cjs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + "wrap-ansi-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "wrap-ansi/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "wrap-ansi/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, 
"sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "wrap-ansi/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "yargs/string-width/is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "yargs/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "yargs/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], } } diff --git a/package.json b/package.json index 877a453..7a019a3 100644 --- a/package.json +++ b/package.json @@ -18,13 +18,17 @@ }, "dependencies": { "@slack/web-api": "^7.9.1", -"commander": "^13.1.0", + "commander": "^13.1.0", "googleapis": "^171.4.0", "html-to-text": "^9.0.5", + "node-llama-cpp": "^3.18.1", "sqlite-vec": "^0.1.7-alpha.2" }, "devDependencies": { "@types/bun": "latest", "@types/html-to-text": "^9.0.4" - } + }, + "trustedDependencies": [ + "node-llama-cpp" + ] } From fbe2cfb4b1def078b87c48774d9740d8836cc236 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:18:44 +0000 Subject: [PATCH 11/20] feat: add llama.ts formatting helpers with tests Implements isQwenEmbeddingModel, formatQuery, and formatDoc pure functions with full test coverage. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/lib/llama.ts | 139 ++++++++++++++++++++++++++++++++++++++ test/lib/llama.test.ts | 148 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 287 insertions(+) create mode 100644 src/lib/llama.ts create mode 100644 test/lib/llama.test.ts diff --git a/src/lib/llama.ts b/src/lib/llama.ts new file mode 100644 index 0000000..4765cde --- /dev/null +++ b/src/lib/llama.ts @@ -0,0 +1,139 @@ +import { getLlama, resolveModelFile, type Llama, type LlamaModel, type LlamaEmbeddingContext } from "node-llama-cpp"; + +// --- Formatting helpers (pure, no model dependency) --- + +export function isQwenEmbeddingModel(uri: string): boolean { + return /qwen.*embed/i.test(uri); +} + +export function formatQuery(text: string, modelUri: string): string { + if (isQwenEmbeddingModel(modelUri)) { + return `Instruct: Retrieve relevant documents for the given query\nQuery: ${text}`; + } + return text; +} + +export function formatDoc(text: string): string { + return text; +} + +// --- Singleton model wrapper --- + +// Duplicated from embeddings.ts to avoid circular import +const MAX_TEXT_LENGTH = 4000; + +const DEFAULT_MODEL = "hf:Qwen/Qwen3-Embedding-0.6B-GGUF/Qwen3-Embedding-0.6B-Q8_0.gguf"; +const IDLE_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes + +export const LLAMA_EMBED_MODEL = process.env.TRAUL_EMBED_MODEL ?? DEFAULT_MODEL; + +// --- Singleton state --- +let llama: Llama | null = null; +let model: LlamaModel | null = null; +let ctx: LlamaEmbeddingContext | null = null; +let idleTimer: ReturnType | null = null; + +/** Reset singleton for testing. 
*/ +export function _resetForTesting(): void { + llama = null; + model = null; + ctx = null; + if (idleTimer) clearTimeout(idleTimer); + idleTimer = null; +} + +function resetIdleTimer(): void { + if (idleTimer) clearTimeout(idleTimer); + idleTimer = setTimeout(async () => { + if (ctx) { ctx.dispose(); ctx = null; } + if (model) { await model.dispose(); model = null; } + }, IDLE_TIMEOUT_MS); + if (idleTimer && typeof idleTimer === "object" && "unref" in idleTimer) { + (idleTimer as NodeJS.Timeout).unref(); + } +} + +async function getContext(): Promise { + if (ctx) { + resetIdleTimer(); + return ctx; + } + + if (!llama) { + llama = await getLlama(); + } + + if (!model) { + const modelPath = await resolveModelFile(LLAMA_EMBED_MODEL, { + directory: `${process.env.HOME}/.cache/traul/models`, + onProgress: ({ percent }) => { + process.stderr.write(`\rDownloading model: ${Math.round(percent)}%`); + }, + }); + model = await llama.loadModel({ modelPath }); + } + + ctx = await model.createEmbeddingContext(); + resetIdleTimer(); + return ctx; +} + +function truncate(text: string): string { + return text.length > MAX_TEXT_LENGTH ? 
text.slice(0, MAX_TEXT_LENGTH) : text; +} + +const TRUNCATE_LIMITS = [2000, 1000]; + +async function embedSingle(embCtx: LlamaEmbeddingContext, text: string): Promise { + try { + const { vector } = await embCtx.getEmbeddingFor(truncate(text)); + return new Float32Array(vector); + } catch { + // Retry with progressive truncation + for (const limit of TRUNCATE_LIMITS) { + try { + const { vector } = await embCtx.getEmbeddingFor(text.slice(0, limit)); + return new Float32Array(vector); + } catch { + continue; + } + } + throw new Error(`Text too long to embed even at ${TRUNCATE_LIMITS.at(-1)} chars`); + } +} + +export async function embedDoc(text: string): Promise { + const embCtx = await getContext(); + return embedSingle(embCtx, formatDoc(text)); +} + +export async function embedQuery(text: string): Promise { + const embCtx = await getContext(); + return embedSingle(embCtx, formatQuery(text, LLAMA_EMBED_MODEL)); +} + +export async function embedDocBatch( + texts: string[], + onSkip?: (index: number, error: string) => void, +): Promise<(Float32Array | null)[]> { + if (texts.length === 0) return []; + const embCtx = await getContext(); + const results: (Float32Array | null)[] = []; + + for (let i = 0; i < texts.length; i++) { + try { + results.push(await embedSingle(embCtx, formatDoc(texts[i]))); + } catch (err) { + onSkip?.(i, err instanceof Error ? 
err.message : String(err)); + results.push(null); + } + } + + return results; +} + +export async function dispose(): Promise { + if (idleTimer) { clearTimeout(idleTimer); idleTimer = null; } + if (ctx) { ctx.dispose(); ctx = null; } + if (model) { await model.dispose(); model = null; } +} diff --git a/test/lib/llama.test.ts b/test/lib/llama.test.ts new file mode 100644 index 0000000..461c45c --- /dev/null +++ b/test/lib/llama.test.ts @@ -0,0 +1,148 @@ +import { describe, it, expect, mock, beforeEach } from "bun:test"; +import { isQwenEmbeddingModel, formatQuery, formatDoc } from "../../src/lib/llama"; + +const QWEN_URI = "hf:Qwen/Qwen3-Embedding-0.6B-GGUF/Qwen3-Embedding-0.6B-Q8_0.gguf"; +const OTHER_URI = "hf:BAAI/bge-small-en-v1.5-GGUF/bge-small-en-v1.5-q8_0.gguf"; + +describe("isQwenEmbeddingModel", () => { + it("returns true for Qwen GGUF URIs", () => { + expect(isQwenEmbeddingModel(QWEN_URI)).toBe(true); + }); + + it("returns true case-insensitively", () => { + expect(isQwenEmbeddingModel("hf:qwen/QWEN3-EMBEDDING-0.6B")).toBe(true); + }); + + it("returns false for non-Qwen URIs", () => { + expect(isQwenEmbeddingModel(OTHER_URI)).toBe(false); + }); +}); + +describe("formatQuery", () => { + it("adds instruction prefix for Qwen model", () => { + expect(formatQuery("test query", QWEN_URI)).toBe( + "Instruct: Retrieve relevant documents for the given query\nQuery: test query" + ); + }); + + it("returns raw text for non-Qwen model", () => { + expect(formatQuery("test query", OTHER_URI)).toBe("test query"); + }); +}); + +describe("formatDoc", () => { + it("returns raw text without prefix", () => { + expect(formatDoc("some document")).toBe("some document"); + }); +}); + +describe("LlamaCpp wrapper", () => { + let llamaMod: typeof import("../../src/lib/llama"); + + const fakeVector = new Float32Array(1024).fill(0.1); + const fakeVector2 = new Float32Array(1024).fill(0.2); + + const mockEmbeddingContext = { + getEmbeddingFor: mock(() => ({ vector: fakeVector })), + 
dispose: mock(() => {}), + }; + + const mockModel = { + createEmbeddingContext: mock(() => Promise.resolve(mockEmbeddingContext)), + dispose: mock(() => Promise.resolve()), + }; + + const mockLlama = { + loadModel: mock(() => Promise.resolve(mockModel)), + }; + + beforeEach(async () => { + mock.module("node-llama-cpp", () => ({ + getLlama: mock(() => Promise.resolve(mockLlama)), + resolveModelFile: mock(() => Promise.resolve("/fake/model.gguf")), + })); + + llamaMod = await import("../../src/lib/llama"); + llamaMod._resetForTesting?.(); + + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => ({ vector: fakeVector })); + mockModel.createEmbeddingContext.mockClear(); + mockModel.dispose.mockClear(); + mockLlama.loadModel.mockClear(); + }); + + it("embedDoc returns Float32Array of length 1024", async () => { + const result = await llamaMod.embedDoc("hello world"); + expect(result).toBeInstanceOf(Float32Array); + expect(result.length).toBe(1024); + }); + + it("embedDoc produces different vectors for different inputs", async () => { + let callCount = 0; + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { + callCount++; + return { vector: callCount === 1 ? 
fakeVector : fakeVector2 }; + }); + + const v1 = await llamaMod.embedDoc("hello"); + const v2 = await llamaMod.embedDoc("goodbye"); + expect(v1).not.toEqual(v2); + }); + + it("embedQuery adds instruction prefix for Qwen model", async () => { + let capturedText = ""; + mockEmbeddingContext.getEmbeddingFor.mockImplementation((text: string) => { + capturedText = text; + return { vector: fakeVector }; + }); + + await llamaMod.embedQuery("search term"); + expect(capturedText).toContain("Instruct: Retrieve relevant documents"); + expect(capturedText).toContain("Query: search term"); + }); + + it("embedDocBatch returns array of Float32Array", async () => { + const results = await llamaMod.embedDocBatch(["a", "b", "c"]); + expect(results).toHaveLength(3); + for (const r of results) { + expect(r).toBeInstanceOf(Float32Array); + } + }); + + it("embedDocBatch returns empty array for empty input", async () => { + const results = await llamaMod.embedDocBatch([]); + expect(results).toHaveLength(0); + }); + + it("embedDocBatch returns null for individual failures and calls onSkip", async () => { + let callIdx = 0; + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { + callIdx++; + if (callIdx === 2) throw new Error("bad text"); + return { vector: fakeVector }; + }); + + const skipped: number[] = []; + const results = await llamaMod.embedDocBatch( + ["ok", "bad", "ok"], + (idx) => skipped.push(idx), + ); + expect(results).toHaveLength(3); + expect(results[0]).toBeInstanceOf(Float32Array); + expect(results[1]).toBeNull(); + expect(results[2]).toBeInstanceOf(Float32Array); + expect(skipped).toEqual([1]); + }); + + it("lazy loads model on first call, not on import", async () => { + expect(mockLlama.loadModel).not.toHaveBeenCalled(); + await llamaMod.embedDoc("trigger load"); + expect(mockLlama.loadModel).toHaveBeenCalledTimes(1); + }); + + it("reuses singleton — second call does not reload model", async () => { + await llamaMod.embedDoc("first"); + await 
llamaMod.embedDoc("second"); + expect(mockLlama.loadModel).toHaveBeenCalledTimes(1); + }); +}); From ad8d2e15d371453d0f23f001ef3decf1989d34b2 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:19:18 +0000 Subject: [PATCH 12/20] feat: add llama.ts model wrapper with singleton, embed methods Implements lazy-loaded singleton LlamaCpp wrapper with embedDoc, embedQuery, embedDocBatch, and idle-timeout disposal. Fixes retry logic in embedSingle to only truncate-retry when text exceeds the retry limit, preventing false success on mock failures in tests. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/lib/llama.ts | 13 +++++++++---- test/lib/llama.test.ts | 6 +++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/lib/llama.ts b/src/lib/llama.ts index 4765cde..f900d62 100644 --- a/src/lib/llama.ts +++ b/src/lib/llama.ts @@ -66,8 +66,9 @@ async function getContext(): Promise { if (!model) { const modelPath = await resolveModelFile(LLAMA_EMBED_MODEL, { directory: `${process.env.HOME}/.cache/traul/models`, - onProgress: ({ percent }) => { - process.stderr.write(`\rDownloading model: ${Math.round(percent)}%`); + onProgress: ({ totalSize, downloadedSize }) => { + const pct = totalSize > 0 ? 
Math.round((downloadedSize / totalSize) * 100) : 0; + process.stderr.write(`\rDownloading model: ${pct}%`); }, }); model = await llama.loadModel({ modelPath }); @@ -88,9 +89,13 @@ async function embedSingle(embCtx: LlamaEmbeddingContext, text: string): Promise try { const { vector } = await embCtx.getEmbeddingFor(truncate(text)); return new Float32Array(vector); - } catch { - // Retry with progressive truncation + } catch (err) { + // Only retry with progressive truncation if the text is long enough to benefit for (const limit of TRUNCATE_LIMITS) { + if (text.length <= limit) { + // Text is already short — retrying won't help, rethrow original error + throw err; + } try { const { vector } = await embCtx.getEmbeddingFor(text.slice(0, limit)); return new Float32Array(vector); diff --git a/test/lib/llama.test.ts b/test/lib/llama.test.ts index 461c45c..51f7f9a 100644 --- a/test/lib/llama.test.ts +++ b/test/lib/llama.test.ts @@ -91,8 +91,8 @@ describe("LlamaCpp wrapper", () => { it("embedQuery adds instruction prefix for Qwen model", async () => { let capturedText = ""; - mockEmbeddingContext.getEmbeddingFor.mockImplementation((text: string) => { - capturedText = text; + mockEmbeddingContext.getEmbeddingFor.mockImplementation((...args: unknown[]) => { + capturedText = args[0] as string; return { vector: fakeVector }; }); @@ -125,7 +125,7 @@ describe("LlamaCpp wrapper", () => { const skipped: number[] = []; const results = await llamaMod.embedDocBatch( ["ok", "bad", "ok"], - (idx) => skipped.push(idx), + (idx: number) => skipped.push(idx), ); expect(results).toHaveLength(3); expect(results[0]).toBeInstanceOf(Float32Array); From 7f49611d8a4d7d1bda47655eeaee7b840bc03a08 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:22:08 +0000 Subject: [PATCH 13/20] feat: route embeddings through llama.ts with Ollama fallback - embeddings.ts now routes embed/embedQuery/embedBatch through llama.ts by default - Ollama HTTP remains as automatic fallback when llama throws - 
Added embedQuery() export for asymmetric embedding (query vs doc) - Added _resetFallbackForTesting() export for test isolation - Ollama fallback uses OLLAMA_MODEL constant (snowflake-arctic-embed2), not the HF URI - EMBED_MODEL export now reflects llama.LLAMA_EMBED_MODEL - Tests rewritten to mock llama.ts instead of fetch Co-Authored-By: Claude Opus 4.6 (1M context) --- src/lib/embeddings.ts | 83 ++++++++++++++--- test/lib/embeddings.test.ts | 173 ++++++++++++++++-------------------- 2 files changed, 148 insertions(+), 108 deletions(-) diff --git a/src/lib/embeddings.ts b/src/lib/embeddings.ts index afff61a..eb5badf 100644 --- a/src/lib/embeddings.ts +++ b/src/lib/embeddings.ts @@ -1,17 +1,32 @@ +import * as llama from "./llama"; + +// --- Constants --- const OLLAMA_URL = process.env.OLLAMA_URL ?? "http://localhost:11434"; -const EMBED_MODEL = process.env.TRAUL_EMBED_MODEL ?? "snowflake-arctic-embed2"; +const OLLAMA_MODEL = "snowflake-arctic-embed2"; // Used only for Ollama fallback path const EMBED_DIMS = 1024; const BATCH_SIZE = 50; - -export { EMBED_DIMS, EMBED_MODEL, BATCH_SIZE, MAX_TEXT_LENGTH }; - -// snowflake-arctic-embed2 context is 8192 tokens. -// Non-Latin (Cyrillic, CJK) text uses ~2-4 tokens per char. -// Pre-truncate texts before sending to Ollama to avoid massive payloads. const MAX_TEXT_LENGTH = 4000; -// Further truncation steps for retry on context overflow. const TRUNCATE_LIMITS = [2000, 1000]; +export { EMBED_DIMS, BATCH_SIZE, MAX_TEXT_LENGTH }; +export const EMBED_MODEL = llama.LLAMA_EMBED_MODEL; + +// --- Backend selection --- +let useLlama = true; +try { + if (!llama.embedDoc) useLlama = false; +} catch { + useLlama = false; + console.warn("llama: node-llama-cpp unavailable, falling back to Ollama"); +} + +/** Reset fallback state for testing. 
*/ +export function _resetFallbackForTesting(): void { + useLlama = true; +} + +// --- Ollama HTTP backend (fallback) --- + async function tryEmbedBatch( texts: string[] ): Promise<{ ok: true; embeddings: number[][] } | { ok: false; error: string }> { @@ -21,7 +36,7 @@ async function tryEmbedBatch( const res = await fetch(`${OLLAMA_URL}/api/embed`, { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ model: EMBED_MODEL, input: texts, truncate: true }), + body: JSON.stringify({ model: OLLAMA_MODEL, input: texts, truncate: true }), signal: controller.signal, }); @@ -42,7 +57,7 @@ function isContextOverflow(error: string): boolean { return error.includes("input length exceeds") || error.includes("context length"); } -export async function embed(text: string): Promise { +async function ollamaEmbed(text: string): Promise { const input = text.length > MAX_TEXT_LENGTH ? text.slice(0, MAX_TEXT_LENGTH) : text; const result = await tryEmbedBatch([input]); if (result.ok) return new Float32Array(result.embeddings[0]); @@ -59,7 +74,7 @@ export async function embed(text: string): Promise { throw new Error(`Message too long to embed even at ${TRUNCATE_LIMITS.at(-1)} chars`); } -export async function embedBatch( +async function ollamaEmbedBatch( texts: string[], onSkip?: (index: number, error: string) => void ): Promise<(Float32Array | null)[]> { @@ -93,6 +108,52 @@ export async function embedBatch( return results; } +// --- Public API --- + +function truncate(text: string): string { + return text.length > MAX_TEXT_LENGTH ? 
text.slice(0, MAX_TEXT_LENGTH) : text; +} + +export async function embed(text: string): Promise { + if (useLlama) { + try { + return await llama.embedDoc(truncate(text)); + } catch (err) { + console.warn(`llama: embedding failed, falling back to Ollama: ${err}`); + useLlama = false; + } + } + return ollamaEmbed(text); +} + +export async function embedQuery(text: string): Promise { + if (useLlama) { + try { + return await llama.embedQuery(truncate(text)); + } catch (err) { + console.warn(`llama: embedding failed, falling back to Ollama: ${err}`); + useLlama = false; + } + } + return ollamaEmbed(text); +} + +export async function embedBatch( + texts: string[], + onSkip?: (index: number, error: string) => void +): Promise<(Float32Array | null)[]> { + if (useLlama) { + try { + const truncated = texts.map(truncate); + return await llama.embedDocBatch(truncated, onSkip); + } catch (err) { + console.warn(`llama: batch embedding failed, falling back to Ollama: ${err}`); + useLlama = false; + } + } + return ollamaEmbedBatch(texts, onSkip); +} + export function vecToBytes(vec: Float32Array): Uint8Array { return new Uint8Array(vec.buffer); } diff --git a/test/lib/embeddings.test.ts b/test/lib/embeddings.test.ts index abbcdfa..ffb0b89 100644 --- a/test/lib/embeddings.test.ts +++ b/test/lib/embeddings.test.ts @@ -1,33 +1,81 @@ import { describe, it, expect, mock, beforeEach, afterEach } from "bun:test"; -import { embedBatch, BATCH_SIZE } from "../../src/lib/embeddings"; -// We mock fetch to avoid hitting Ollama in tests -const originalFetch = globalThis.fetch; +const fakeVector = new Float32Array(1024).fill(0.42); +const fakeVector2 = new Float32Array(1024).fill(0.84); + +const mockLlama = { + embedDoc: mock(() => Promise.resolve(fakeVector)), + embedQuery: mock(() => Promise.resolve(fakeVector)), + embedDocBatch: mock(() => Promise.resolve([fakeVector, fakeVector2])), + LLAMA_EMBED_MODEL: "hf:Qwen/Qwen3-Embedding-0.6B-GGUF/Qwen3-Embedding-0.6B-Q8_0.gguf", +}; + 
+mock.module("../../src/lib/llama", () => mockLlama); +const { embed, embedQuery, embedBatch, vecToBytes, MAX_TEXT_LENGTH, _resetFallbackForTesting } = await import("../../src/lib/embeddings"); + +const originalFetch = globalThis.fetch; function mockFetch(handler: (url: string, opts: RequestInit) => Response | Promise) { globalThis.fetch = mock(handler as typeof fetch) as unknown as typeof fetch; } - function restoreFetch() { globalThis.fetch = originalFetch; } -function fakeEmbedding(dims: number = 1024): number[] { - return Array.from({ length: dims }, () => Math.random()); -} - function ollamaResponse(count: number) { return Response.json({ - embeddings: Array.from({ length: count }, () => fakeEmbedding()), + embeddings: Array.from({ length: count }, () => Array.from({ length: 1024 }, () => Math.random())), }); } -function ollamaError(error: string) { - return Response.json({ error }); -} +describe("embeddings — llama primary path", () => { + beforeEach(() => { + _resetFallbackForTesting(); + mockLlama.embedDoc.mockClear(); + mockLlama.embedQuery.mockClear(); + mockLlama.embedDocBatch.mockClear(); + mockLlama.embedDoc.mockImplementation(() => Promise.resolve(fakeVector)); + mockLlama.embedQuery.mockImplementation(() => Promise.resolve(fakeVector)); + mockLlama.embedDocBatch.mockImplementation(() => Promise.resolve([fakeVector, fakeVector2])); + }); + + it("embed() calls llama.embedDoc and returns Float32Array", async () => { + const result = await embed("hello"); + expect(result).toBeInstanceOf(Float32Array); + expect(result.length).toBe(1024); + expect(mockLlama.embedDoc).toHaveBeenCalledWith("hello"); + }); + + it("embedQuery() calls llama.embedQuery and returns Float32Array", async () => { + const result = await embedQuery("search term"); + expect(result).toBeInstanceOf(Float32Array); + expect(mockLlama.embedQuery).toHaveBeenCalledWith("search term"); + }); + + it("embedBatch() calls llama.embedDocBatch", async () => { + const results = await embedBatch(["a", 
"b"]); + expect(results).toHaveLength(2); + expect(mockLlama.embedDocBatch).toHaveBeenCalled(); + }); -describe("embedBatch", () => { + it("embed() pre-truncates text > MAX_TEXT_LENGTH", async () => { + const longText = "x".repeat(5000); + await embed(longText); + const calledWith = (mockLlama.embedDoc.mock.calls as unknown[][])[0][0] as string; + expect(calledWith.length).toBeLessThanOrEqual(MAX_TEXT_LENGTH); + }); + + it("vecToBytes produces correct Uint8Array", () => { + const vec = new Float32Array([1.0, 2.0]); + const bytes = vecToBytes(vec); + expect(bytes).toBeInstanceOf(Uint8Array); + expect(bytes.byteLength).toBe(8); + }); +}); + +describe("embeddings — Ollama fallback", () => { beforeEach(() => { + _resetFallbackForTesting(); restoreFetch(); }); @@ -35,46 +83,30 @@ describe("embedBatch", () => { restoreFetch(); }); - it("should not send texts longer than 4000 chars to Ollama", async () => { - const sentTexts: string[][] = []; + it("embed() falls back to Ollama when llama throws", async () => { + mockLlama.embedDoc.mockImplementation(() => { throw new Error("llama unavailable"); }); mockFetch(async (_url, opts) => { const body = JSON.parse(opts.body as string); - sentTexts.push(body.input); return ollamaResponse(body.input.length); }); - const shortText = "hello world"; - const longText = "x".repeat(5000); - - const results = await embedBatch([shortText, longText]); - - // Both should get embeddings - expect(results).toHaveLength(2); - - // All texts sent to Ollama should be <= 4000 chars - for (const batch of sentTexts) { - for (const text of batch) { - expect(text.length).toBeLessThanOrEqual(4000); - } - } + const result = await embed("hello"); + expect(result).toBeInstanceOf(Float32Array); + expect(result.length).toBe(1024); }); - it("should not send texts longer than CHUNK_THRESHOLD to Ollama", async () => { - const sentTexts: string[][] = []; + it("embedBatch() falls back to Ollama and truncates", async () => { + 
mockLlama.embedDocBatch.mockImplementation(() => { throw new Error("llama unavailable"); }); + const sentTexts: string[][] = []; mockFetch(async (_url, opts) => { const body = JSON.parse(opts.body as string); sentTexts.push(body.input); return ollamaResponse(body.input.length); }); - // 50K char message — should be truncated before sending - const hugeText = "word ".repeat(10000); - const results = await embedBatch([hugeText]); - - expect(results).toHaveLength(1); - + await embedBatch(["short", "x".repeat(5000)]); for (const batch of sentTexts) { for (const text of batch) { expect(text.length).toBeLessThanOrEqual(4000); @@ -82,70 +114,17 @@ describe("embedBatch", () => { } }); - it("should not send multi-megabyte messages to Ollama", async () => { - let totalPayloadSize = 0; - - mockFetch(async (_url, opts) => { - const bodyStr = opts.body as string; - totalPayloadSize += bodyStr.length; - const body = JSON.parse(bodyStr); - return ollamaResponse(body.input.length); - }); - - // Simulate the real-world case: messages up to 7MB - const texts = [ - "short message", - "a".repeat(100_000), // 100KB - "b".repeat(1_000_000), // 1MB - ]; - - await embedBatch(texts); - - // Total payload to Ollama should be reasonable (not megabytes) - expect(totalPayloadSize).toBeLessThan(100_000); - }); - - it("should truncate long texts but still return embeddings for them", async () => { - mockFetch(async (_url, opts) => { - const body = JSON.parse(opts.body as string); - return ollamaResponse(body.input.length); - }); - - const texts = [ - "short", - "x".repeat(50_000), - ]; - - const results = await embedBatch(texts); - - expect(results).toHaveLength(2); - expect(results[0]).not.toBeNull(); - expect(results[1]).not.toBeNull(); - }); - - it("should handle a batch where all texts are long", async () => { - const sentTexts: string[][] = []; + it("Ollama fallback uses snowflake-arctic-embed2 model name, not HF URI", async () => { + mockLlama.embedDoc.mockImplementation(() => { throw new 
Error("llama unavailable"); }); + let sentModel = ""; mockFetch(async (_url, opts) => { const body = JSON.parse(opts.body as string); - sentTexts.push(body.input); + sentModel = body.model; return ollamaResponse(body.input.length); }); - const texts = Array.from({ length: 5 }, (_, i) => - `message ${i} `.repeat(2000) - ); - - const results = await embedBatch(texts); - - expect(results).toHaveLength(5); - results.forEach((r) => expect(r).not.toBeNull()); - - // No text sent to Ollama should exceed 4000 chars - for (const batch of sentTexts) { - for (const text of batch) { - expect(text.length).toBeLessThanOrEqual(4000); - } - } + await embed("hello"); + expect(sentModel).toBe("snowflake-arctic-embed2"); }); }); From b38fdeff51e2fc16f924e74344a5b1ef306534ea Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:23:02 +0000 Subject: [PATCH 14/20] feat: use embedQuery() for search queries Co-Authored-By: Claude Opus 4.6 (1M context) --- src/commands/search.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/commands/search.ts b/src/commands/search.ts index 72e27f4..190fa56 100644 --- a/src/commands/search.ts +++ b/src/commands/search.ts @@ -1,6 +1,6 @@ import type { TraulDB } from "../db/database"; import { formatMessage, writeJSON } from "../lib/formatter"; -import { embed, vecToBytes } from "../lib/embeddings"; +import { embedQuery, vecToBytes } from "../lib/embeddings"; export async function runSearch( db: TraulDB, @@ -45,7 +45,7 @@ export async function runSearch( results = db.ftsSearchAll(ftsQuery, searchOpts); } else { try { - const vec = await embed(query); + const vec = await embedQuery(query); results = db.hybridSearchAll(vecToBytes(vec), ftsQuery, searchOpts); const { total_messages, embedded_messages } = db.getEmbeddingStats(); const pct = total_messages > 0 ? 
Math.round((embedded_messages / total_messages) * 100) : 0; @@ -53,7 +53,7 @@ export async function runSearch( console.warn(`search: hybrid mode — ${pct}% vector, ${100 - pct}% FTS`); } } catch { - console.warn("search: Ollama unavailable, falling back to FTS-only"); + console.warn("search: embedding unavailable, falling back to FTS-only"); results = db.ftsSearchAll(ftsQuery, searchOpts); } } From 22c064beb4f823c1fbaa2d7651bb7a3c7f843994 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:27:54 +0000 Subject: [PATCH 15/20] fix: resolve mock.module conflicts between test files Split llama wrapper tests into llama-wrapper.test.ts to avoid Bun's global mock.module conflicts with embeddings.test.ts. Co-Authored-By: Claude Opus 4.6 (1M context) --- test/lib/embeddings.test.ts | 4 ++ test/lib/llama-wrapper.test.ts | 113 +++++++++++++++++++++++++++++++++ test/lib/llama.test.ts | 113 +-------------------------------- 3 files changed, 118 insertions(+), 112 deletions(-) create mode 100644 test/lib/llama-wrapper.test.ts diff --git a/test/lib/embeddings.test.ts b/test/lib/embeddings.test.ts index ffb0b89..fb4f4ba 100644 --- a/test/lib/embeddings.test.ts +++ b/test/lib/embeddings.test.ts @@ -81,6 +81,10 @@ describe("embeddings — Ollama fallback", () => { afterEach(() => { restoreFetch(); + // Restore llama mocks so they don't leak into other test files + mockLlama.embedDoc.mockImplementation(() => Promise.resolve(fakeVector)); + mockLlama.embedQuery.mockImplementation(() => Promise.resolve(fakeVector)); + mockLlama.embedDocBatch.mockImplementation(() => Promise.resolve([fakeVector, fakeVector2])); }); it("embed() falls back to Ollama when llama throws", async () => { diff --git a/test/lib/llama-wrapper.test.ts b/test/lib/llama-wrapper.test.ts new file mode 100644 index 0000000..9f0ca21 --- /dev/null +++ b/test/lib/llama-wrapper.test.ts @@ -0,0 +1,113 @@ +import { describe, it, expect, mock, beforeEach } from "bun:test"; + +// --- Mock node-llama-cpp at top level 
before any imports --- + +const fakeVector = new Float32Array(1024).fill(0.1); +const fakeVector2 = new Float32Array(1024).fill(0.2); + +const mockEmbeddingContext = { + getEmbeddingFor: mock(() => ({ vector: fakeVector })), + dispose: mock(() => {}), +}; + +const mockModel = { + createEmbeddingContext: mock(() => Promise.resolve(mockEmbeddingContext)), + dispose: mock(() => Promise.resolve()), +}; + +const mockLlamaInstance = { + loadModel: mock(() => Promise.resolve(mockModel)), +}; + +mock.module("node-llama-cpp", () => ({ + getLlama: mock(() => Promise.resolve(mockLlamaInstance)), + resolveModelFile: mock(() => Promise.resolve("/fake/model.gguf")), +})); + +// Import after mock setup — gets real llama.ts with mocked node-llama-cpp +const { embedDoc, embedQuery, embedDocBatch, _resetForTesting } = await import("../../src/lib/llama"); + +describe("LlamaCpp wrapper", () => { + beforeEach(() => { + _resetForTesting?.(); + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => ({ vector: fakeVector })); + mockModel.createEmbeddingContext.mockClear(); + mockModel.dispose.mockClear(); + mockLlamaInstance.loadModel.mockClear(); + }); + + it("embedDoc returns Float32Array of length 1024", async () => { + const result = await embedDoc("hello world"); + expect(result).toBeInstanceOf(Float32Array); + expect(result.length).toBe(1024); + }); + + it("embedDoc produces different vectors for different inputs", async () => { + let callCount = 0; + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { + callCount++; + return { vector: callCount === 1 ? 
fakeVector : fakeVector2 }; + }); + + const v1 = await embedDoc("hello"); + const v2 = await embedDoc("goodbye"); + expect(v1).not.toEqual(v2); + }); + + it("embedQuery adds instruction prefix for Qwen model", async () => { + let capturedText = ""; + mockEmbeddingContext.getEmbeddingFor.mockImplementation((...args: unknown[]) => { + capturedText = args[0] as string; + return { vector: fakeVector }; + }); + + await embedQuery("search term"); + expect(capturedText).toContain("Instruct: Retrieve relevant documents"); + expect(capturedText).toContain("Query: search term"); + }); + + it("embedDocBatch returns array of Float32Array", async () => { + const results = await embedDocBatch(["a", "b", "c"]); + expect(results).toHaveLength(3); + for (const r of results) { + expect(r).toBeInstanceOf(Float32Array); + } + }); + + it("embedDocBatch returns empty array for empty input", async () => { + const results = await embedDocBatch([]); + expect(results).toHaveLength(0); + }); + + it("embedDocBatch returns null for individual failures and calls onSkip", async () => { + let callIdx = 0; + mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { + callIdx++; + if (callIdx === 2) throw new Error("bad text"); + return { vector: fakeVector }; + }); + + const skipped: number[] = []; + const results = await embedDocBatch( + ["ok", "bad", "ok"], + (idx: number) => skipped.push(idx), + ); + expect(results).toHaveLength(3); + expect(results[0]).toBeInstanceOf(Float32Array); + expect(results[1]).toBeNull(); + expect(results[2]).toBeInstanceOf(Float32Array); + expect(skipped).toEqual([1]); + }); + + it("lazy loads model on first call, not on import", async () => { + expect(mockLlamaInstance.loadModel).not.toHaveBeenCalled(); + await embedDoc("trigger load"); + expect(mockLlamaInstance.loadModel).toHaveBeenCalledTimes(1); + }); + + it("reuses singleton — second call does not reload model", async () => { + await embedDoc("first"); + await embedDoc("second"); + 
expect(mockLlamaInstance.loadModel).toHaveBeenCalledTimes(1); + }); +}); diff --git a/test/lib/llama.test.ts b/test/lib/llama.test.ts index 51f7f9a..dffa328 100644 --- a/test/lib/llama.test.ts +++ b/test/lib/llama.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, mock, beforeEach } from "bun:test"; +import { describe, it, expect } from "bun:test"; import { isQwenEmbeddingModel, formatQuery, formatDoc } from "../../src/lib/llama"; const QWEN_URI = "hf:Qwen/Qwen3-Embedding-0.6B-GGUF/Qwen3-Embedding-0.6B-Q8_0.gguf"; @@ -35,114 +35,3 @@ describe("formatDoc", () => { expect(formatDoc("some document")).toBe("some document"); }); }); - -describe("LlamaCpp wrapper", () => { - let llamaMod: typeof import("../../src/lib/llama"); - - const fakeVector = new Float32Array(1024).fill(0.1); - const fakeVector2 = new Float32Array(1024).fill(0.2); - - const mockEmbeddingContext = { - getEmbeddingFor: mock(() => ({ vector: fakeVector })), - dispose: mock(() => {}), - }; - - const mockModel = { - createEmbeddingContext: mock(() => Promise.resolve(mockEmbeddingContext)), - dispose: mock(() => Promise.resolve()), - }; - - const mockLlama = { - loadModel: mock(() => Promise.resolve(mockModel)), - }; - - beforeEach(async () => { - mock.module("node-llama-cpp", () => ({ - getLlama: mock(() => Promise.resolve(mockLlama)), - resolveModelFile: mock(() => Promise.resolve("/fake/model.gguf")), - })); - - llamaMod = await import("../../src/lib/llama"); - llamaMod._resetForTesting?.(); - - mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => ({ vector: fakeVector })); - mockModel.createEmbeddingContext.mockClear(); - mockModel.dispose.mockClear(); - mockLlama.loadModel.mockClear(); - }); - - it("embedDoc returns Float32Array of length 1024", async () => { - const result = await llamaMod.embedDoc("hello world"); - expect(result).toBeInstanceOf(Float32Array); - expect(result.length).toBe(1024); - }); - - it("embedDoc produces different vectors for different inputs", async () => { - 
let callCount = 0; - mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { - callCount++; - return { vector: callCount === 1 ? fakeVector : fakeVector2 }; - }); - - const v1 = await llamaMod.embedDoc("hello"); - const v2 = await llamaMod.embedDoc("goodbye"); - expect(v1).not.toEqual(v2); - }); - - it("embedQuery adds instruction prefix for Qwen model", async () => { - let capturedText = ""; - mockEmbeddingContext.getEmbeddingFor.mockImplementation((...args: unknown[]) => { - capturedText = args[0] as string; - return { vector: fakeVector }; - }); - - await llamaMod.embedQuery("search term"); - expect(capturedText).toContain("Instruct: Retrieve relevant documents"); - expect(capturedText).toContain("Query: search term"); - }); - - it("embedDocBatch returns array of Float32Array", async () => { - const results = await llamaMod.embedDocBatch(["a", "b", "c"]); - expect(results).toHaveLength(3); - for (const r of results) { - expect(r).toBeInstanceOf(Float32Array); - } - }); - - it("embedDocBatch returns empty array for empty input", async () => { - const results = await llamaMod.embedDocBatch([]); - expect(results).toHaveLength(0); - }); - - it("embedDocBatch returns null for individual failures and calls onSkip", async () => { - let callIdx = 0; - mockEmbeddingContext.getEmbeddingFor.mockImplementation(() => { - callIdx++; - if (callIdx === 2) throw new Error("bad text"); - return { vector: fakeVector }; - }); - - const skipped: number[] = []; - const results = await llamaMod.embedDocBatch( - ["ok", "bad", "ok"], - (idx: number) => skipped.push(idx), - ); - expect(results).toHaveLength(3); - expect(results[0]).toBeInstanceOf(Float32Array); - expect(results[1]).toBeNull(); - expect(results[2]).toBeInstanceOf(Float32Array); - expect(skipped).toEqual([1]); - }); - - it("lazy loads model on first call, not on import", async () => { - expect(mockLlama.loadModel).not.toHaveBeenCalled(); - await llamaMod.embedDoc("trigger load"); - 
expect(mockLlama.loadModel).toHaveBeenCalledTimes(1); - }); - - it("reuses singleton — second call does not reload model", async () => { - await llamaMod.embedDoc("first"); - await llamaMod.embedDoc("second"); - expect(mockLlama.loadModel).toHaveBeenCalledTimes(1); - }); -}); From 30eebcb3bafbf2ccd3d6a7703b3963b137ea6fee Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:28:35 +0000 Subject: [PATCH 16/20] docs: document node-llama-cpp embedding backend Co-Authored-By: Claude Opus 4.6 (1M context) --- skill.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/skill.md b/skill.md index 9969db6..859fdd4 100644 --- a/skill.md +++ b/skill.md @@ -14,7 +14,7 @@ allowed-tools: CLI tool that watches communication streams (Slack, Telegram, Discord, Linear, Gmail, Claude Code sessions, Markdown files, WhatsApp), indexes messages, detects patterns via signals, and surfaces actionable insights. -**Runtime:** Bun + TypeScript | **DB:** SQLite (WAL mode, FTS5, sqlite-vec) | **Embeddings:** Ollama + nomic-embed-text | **Version:** 0.1.0 +**Runtime:** Bun + TypeScript | **DB:** SQLite (WAL mode, FTS5, sqlite-vec) | **Embeddings:** node-llama-cpp (Qwen3-Embedding-0.6B), Ollama fallback | **Version:** 0.2.0 **Project:** `/Users/dandaka/projects/traul` @@ -39,10 +39,10 @@ Sync messages from communication sources incrementally. ### `traul search ` -Hybrid search combining vector similarity (semantic) and FTS5 keyword matching with Reciprocal Rank Fusion. Falls back to FTS-only if Ollama is unavailable. +Hybrid search combining vector similarity (semantic) and FTS5 keyword matching with Reciprocal Rank Fusion. Falls back to FTS-only if embedding is unavailable. **Search modes:** -- **Hybrid (default)** — best for multi-word and exploratory queries. Finds semantically related messages even when exact keywords don't appear. Requires Ollama running with `snowflake-arctic-embed2`. Prints coverage ratio to stderr (e.g. `88% vector, 12% FTS`). 
Falls back to FTS-only with a warning if Ollama is unavailable. +- **Hybrid (default)** — best for multi-word and exploratory queries. Finds semantically related messages even when exact keywords don't appear. Uses node-llama-cpp with Qwen3-Embedding-0.6B (auto-downloads ~639MB model on first use). Falls back to Ollama, then FTS-only. Prints coverage ratio to stderr (e.g. `88% vector, 12% FTS`). - **FTS-only (`--fts`)** — keyword matching with BM25 ranking. Faster, but requires ALL terms to match (implicit AND). Brittle with multi-word queries, especially combined with source/channel filters. - **OR mode (`--or`)** — joins search terms with OR instead of AND. Works with both `--fts` and hybrid. Use for broad exploratory queries where any term is relevant. - **Substring (`--like`)** — bypasses FTS entirely, uses SQL LIKE. Useful for exact phrases that FTS tokenization breaks (e.g. "how do I"). @@ -299,7 +299,7 @@ SQL-based pattern detection engine. |-------|---------| | `messages` | Primary message store (source, channel, author, content, sent_at, metadata JSON) | | `messages_fts` | FTS5 virtual table (content, author_name, channel_name) with porter tokenizer | -| `vec_messages` | sqlite-vec virtual table for vector embeddings (float[768]) | +| `vec_messages` | sqlite-vec virtual table for vector embeddings (float[1024]) | | `contacts` | Unified contact directory (display_name unique) | | `contact_identities` | Multi-source user mapping (source + source_user_id unique) | | `sync_cursors` | Incremental sync state per source+key | From ed31a55c94b47c6a296d0c7b99a714e81762858e Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Wed, 18 Mar 2026 22:40:54 +0000 Subject: [PATCH 17/20] chore: bump CHUNKER_VERSION to 2 to trigger rechunking Co-Authored-By: Claude Opus 4.6 (1M context) --- src/lib/chunker.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/chunker.ts b/src/lib/chunker.ts index 83485f4..c616c4c 100644 --- a/src/lib/chunker.ts +++ 
b/src/lib/chunker.ts @@ -13,7 +13,7 @@ export interface Chunk { const DEFAULT_CHUNK_SIZE = 1500; const DEFAULT_OVERLAP = 200; export const CHUNK_THRESHOLD = 2000; -export const CHUNKER_VERSION = "1"; +export const CHUNKER_VERSION = "2"; export function shouldChunk(text: string, threshold: number = CHUNK_THRESHOLD): boolean { return text.length > threshold; From f20262c10da79309ac1529b12364461e8949cf1d Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Thu, 19 Mar 2026 00:09:21 +0000 Subject: [PATCH 18/20] fix: avoid SQLITE_BUSY by skipping unchanged meta writes in migrations Migrations unconditionally wrote chunker_version, embed_model, and embed_dims on every startup. When multiple traul processes ran concurrently (e.g. parallel searches), they contended for the write lock causing SQLITE_BUSY crashes. Now only writes when values differ. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/db/migrations.ts | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/db/migrations.ts b/src/db/migrations.ts index 01af321..971cdd3 100644 --- a/src/db/migrations.ts +++ b/src/db/migrations.ts @@ -48,10 +48,16 @@ export function runMigrations(db: TraulDB): MigrationResult { result.embeddingsReset = true; } - // Update stored values - db.setMeta("chunker_version", CHUNKER_VERSION); - db.setMeta("embed_model", EMBED_MODEL); - db.setMeta("embed_dims", currentDims); + // Update stored values only if changed (avoid unnecessary writes that cause SQLITE_BUSY) + if (storedChunkerVersion !== CHUNKER_VERSION) { + db.setMeta("chunker_version", CHUNKER_VERSION); + } + if (storedEmbedModel !== EMBED_MODEL) { + db.setMeta("embed_model", EMBED_MODEL); + } + if (storedEmbedDims !== currentDims) { + db.setMeta("embed_dims", currentDims); + } return result; } From 8d88a883c55b61194a23678494c99bf7fd2e753d Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Thu, 19 Mar 2026 00:11:03 +0000 Subject: [PATCH 19/20] fix: suppress noisy node-llama-cpp token type warning Set logLevel 
to "error" when initializing llama to hide the "control-looking token was not control-type" warning that printed on every hybrid search query. Co-Authored-By: Claude Opus 4.6 (1M context) --- src/lib/llama.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/llama.ts b/src/lib/llama.ts index f900d62..545a4f6 100644 --- a/src/lib/llama.ts +++ b/src/lib/llama.ts @@ -1,4 +1,4 @@ -import { getLlama, resolveModelFile, type Llama, type LlamaModel, type LlamaEmbeddingContext } from "node-llama-cpp"; +import { getLlama, LlamaLogLevel, resolveModelFile, type Llama, type LlamaModel, type LlamaEmbeddingContext } from "node-llama-cpp"; // --- Formatting helpers (pure, no model dependency) --- @@ -60,7 +60,7 @@ async function getContext(): Promise { } if (!llama) { - llama = await getLlama(); + llama = await getLlama({ logLevel: LlamaLogLevel.error }); } if (!model) { From 15dd5c7462f7d089489a49328b306bacb0b442c5 Mon Sep 17 00:00:00 2001 From: Vlad Ra Date: Thu, 19 Mar 2026 08:18:25 +0000 Subject: [PATCH 20/20] fix: add LlamaLogLevel to node-llama-cpp mock in tests The mock module was missing the LlamaLogLevel export, causing CI to fail with "Export named 'formatDoc' not found" after we added the LlamaLogLevel import to llama.ts. Co-Authored-By: Claude Opus 4.6 (1M context) --- test/lib/llama-wrapper.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/test/lib/llama-wrapper.test.ts b/test/lib/llama-wrapper.test.ts index 9f0ca21..e37fe5e 100644 --- a/test/lib/llama-wrapper.test.ts +++ b/test/lib/llama-wrapper.test.ts @@ -22,6 +22,7 @@ const mockLlamaInstance = { mock.module("node-llama-cpp", () => ({ getLlama: mock(() => Promise.resolve(mockLlamaInstance)), resolveModelFile: mock(() => Promise.resolve("/fake/model.gguf")), + LlamaLogLevel: { disabled: "disabled", fatal: "fatal", error: "error", warn: "warn", info: "info", log: "log", debug: "debug" }, })); // Import after mock setup — gets real llama.ts with mocked node-llama-cpp