diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4963a87..e33498f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,8 @@ name: CI +env: + CGO_ENABLED: "1" + on: push: branches: [main, dev] diff --git a/.golangci.yml b/.golangci.yml index 4d4d877..71eb6d7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,34 +1,37 @@ +version: "2" run: - timeout: 5m go: "1.26" - linters: enable: - depguard - - govet - - errcheck - - staticcheck - - unused - - gosimple - - ineffassign - - typecheck - gocritic - - gofmt disable: - exhaustive - wrapcheck - -linters-settings: - depguard: - rules: - legacy-module-paths: - list-mode: lax - files: - - $all - deny: - - pkg: forge.lthn.ai/ - desc: use dappco.re/ module paths instead - + settings: + depguard: + rules: + legacy-module-paths: + list-mode: lax + files: + - $all + deny: + - pkg: forge.lthn.ai/ + desc: use dappco.re/ module paths instead + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ issues: - exclude-use-default: false max-same-issues: 0 +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/CLAUDE.md b/CLAUDE.md index 914e99c..ffc466a 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## What This Is -SQLite key-value store with TTL, namespace isolation, and reactive events. Pure Go (no CGO). Module: `dappco.re/go/core/store` +SQLite key-value store with TTL, namespace isolation, and reactive events. Pure Go (no CGO). 
Module: `dappco.re/go/store` ## AX Notes @@ -62,7 +62,7 @@ import ( "fmt" "time" - "dappco.re/go/core/store" + "dappco.re/go/store" ) func main() { diff --git a/DEPENDENCIES.md b/DEPENDENCIES.md new file mode 100644 index 0000000..675e698 --- /dev/null +++ b/DEPENDENCIES.md @@ -0,0 +1,21 @@ +# Dependency Exceptions + +This repository is pure Go by default and permits `modernc.org/sqlite` as the +normal runtime database dependency. The following exception is documented +because the current PR contains load-bearing analytical workspace code that +cannot be replaced by a pure-Go DuckDB-compatible driver. + +## `github.com/marcboeker/go-duckdb` + +`github.com/marcboeker/go-duckdb` is retained only for DuckDB-backed workspace +buffers and LEM analytical import helpers. DuckDB files are produced and +consumed by existing data pipelines, and no pure-Go DuckDB implementation with +compatible SQL semantics is currently available. Replacing it with +`modernc.org/sqlite` would remove DuckDB JSON import, analytical table, and +workspace recovery behaviour rather than preserving the feature. + +This is a CGO and MIT-licensed dependency exception. It must not be used for the +primary SQLite store path, and new runtime storage features should continue to +use pure-Go dependencies compatible with EUPL-1.2. Builds and CI that include +workspace, import, inventory, or scoring behaviour must run with +`CGO_ENABLED=1` and a C/C++ toolchain available. diff --git a/LICENCE.md b/LICENCE.md new file mode 100644 index 0000000..b36f732 --- /dev/null +++ b/LICENCE.md @@ -0,0 +1,6 @@ +# Licence + +This project is licensed under the European Union Public Licence, version 1.2 +(EUPL-1.2). 
+ +Full licence text: https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12 diff --git a/README.md b/README.md index 1749a84..f78416f 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@ -[![Go Reference](https://pkg.go.dev/badge/dappco.re/go/core/store.svg)](https://pkg.go.dev/dappco.re/go/core/store) -[![License: EUPL-1.2](https://img.shields.io/badge/License-EUPL--1.2-blue.svg)](LICENSE.md) +[![Go Reference](https://pkg.go.dev/badge/dappco.re/go/store.svg)](https://pkg.go.dev/dappco.re/go/store) +[![Licence: EUPL-1.2](https://img.shields.io/badge/Licence-EUPL--1.2-blue.svg)](LICENCE.md) [![Go Version](https://img.shields.io/badge/Go-1.26-00ADD8?style=flat&logo=go)](go.mod) # go-store Group-namespaced SQLite key-value store with TTL expiry, namespace isolation, quota enforcement, and a reactive event system. Backed by a pure-Go SQLite driver (no CGO), uses WAL mode for concurrent reads, and enforces a single connection to keep pragma settings consistent. Supports scoped stores for multi-tenant use, Watch/Unwatch subscriptions, and OnChange callbacks for downstream event consumers. -**Module**: `dappco.re/go/core/store` +**Module**: `dappco.re/go/store` **Licence**: EUPL-1.2 **Language**: Go 1.26 @@ -19,7 +19,7 @@ import ( "fmt" "time" - "dappco.re/go/core/store" + "dappco.re/go/store" ) func main() { @@ -32,6 +32,7 @@ func main() { BucketName: "events", }, PurgeInterval: 30 * time.Second, + WorkspaceStateDirectory: "/tmp/core-state", }) if err != nil { return @@ -80,6 +81,7 @@ func main() { - [Architecture](docs/architecture.md) — storage layer, group/key model, TTL expiry, event system, namespace isolation - [Development Guide](docs/development.md) — prerequisites, test patterns, benchmarks, adding methods - [Project History](docs/history.md) — completed phases, known limitations, future considerations +- [Dependency Exceptions](DEPENDENCIES.md) — documented runtime dependency exceptions ## Build & Test @@ -92,4 +94,4 @@ go build ./... 
## Licence -European Union Public Licence 1.2 — see [LICENCE](LICENCE) for details. +European Union Public Licence 1.2 — see [LICENCE.md](LICENCE.md) for details. diff --git a/bench_test.go b/bench_test.go index 8c7bc23..0df9cf7 100644 --- a/bench_test.go +++ b/bench_test.go @@ -20,7 +20,7 @@ func BenchmarkGetAll_VaryingSize(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() for i := range size { _ = storeInstance.Set("bench", core.Sprintf("key-%d", i), "value") @@ -41,7 +41,7 @@ func BenchmarkSetGet_Parallel(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() b.ReportAllocs() b.ResetTimer() @@ -62,7 +62,7 @@ func BenchmarkCount_10K(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() for i := range 10_000 { _ = storeInstance.Set("bench", core.Sprintf("key-%d", i), "value") @@ -81,7 +81,7 @@ func BenchmarkDelete(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() // Pre-populate keys that will be deleted. 
for i := range b.N { @@ -101,7 +101,7 @@ func BenchmarkSetWithTTL(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() b.ReportAllocs() b.ResetTimer() @@ -116,7 +116,7 @@ func BenchmarkRender(b *testing.B) { if err != nil { b.Fatal(err) } - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() for i := range 50 { _ = storeInstance.Set("bench", core.Sprintf("key%d", i), core.Sprintf("val%d", i)) diff --git a/compact.go b/compact.go index e53b169..ff4d026 100644 --- a/compact.go +++ b/compact.go @@ -1,22 +1,22 @@ package store import ( + "bytes" "compress/gzip" - "io" "time" + "unicode" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" "github.com/klauspost/compress/zstd" ) var defaultArchiveOutputDirectory = ".core/archive/" -// CompactOptions archives completed journal rows before a cutoff time to a -// compressed JSONL file. -// -// Usage example: `options := store.CompactOptions{Before: time.Now().Add(-90 * 24 * time.Hour), Output: "/tmp/archive", Format: "gzip"}` -// The default output directory is `.core/archive/`; the default format is -// `gzip`, and `zstd` is also supported. +// Usage example: `options := store.CompactOptions{Before: time.Date(2026, 3, 30, 0, 0, 0, 0, time.UTC), Output: "/tmp/archive", Format: "gzip"}` +// Usage example: `result := storeInstance.Compact(store.CompactOptions{Before: time.Now().Add(-90 * 24 * time.Hour)})` +// Leave `Output` empty to write gzip JSONL archives under `.core/archive/`, or +// set `Format` to `zstd` when downstream tooling expects `.jsonl.zst`. 
type CompactOptions struct { // Usage example: `options := store.CompactOptions{Before: time.Now().Add(-90 * 24 * time.Hour)}` Before time.Time @@ -24,6 +24,53 @@ type CompactOptions struct { Output string // Usage example: `options := store.CompactOptions{Format: "zstd"}` Format string + // Usage example: `medium, _ := s3.New(s3.Options{Bucket: "archive"}); options := store.CompactOptions{Before: time.Now().Add(-90 * 24 * time.Hour), Medium: medium}` + // Medium routes the archive write through a coreio.Medium instead of the raw + // filesystem. When set, Output is the path inside the medium; leave empty + // to use `.core/archive/`. When nil, Compact falls back to the store-level + // medium (if configured via WithMedium), then to the local filesystem. + Medium Medium +} + +// Usage example: `normalisedOptions := (store.CompactOptions{Before: time.Date(2026, 3, 30, 0, 0, 0, 0, time.UTC)}).Normalised()` +func (compactOptions CompactOptions) Normalised() CompactOptions { + if compactOptions.Output == "" { + compactOptions.Output = defaultArchiveOutputDirectory + } + compactOptions.Format = lowercaseText(core.Trim(compactOptions.Format)) + if compactOptions.Format == "" { + compactOptions.Format = "gzip" + } + return compactOptions +} + +// Usage example: `if err := (store.CompactOptions{Before: time.Date(2026, 3, 30, 0, 0, 0, 0, time.UTC), Format: "gzip"}).Validate(); err != nil { return }` +func (compactOptions CompactOptions) Validate() error { + if compactOptions.Before.IsZero() { + return core.E( + "store.CompactOptions.Validate", + "before cutoff time is empty; use a value like time.Now().Add(-24 * time.Hour)", + nil, + ) + } + switch lowercaseText(core.Trim(compactOptions.Format)) { + case "", "gzip", "zstd": + return nil + default: + return core.E( + "store.CompactOptions.Validate", + core.Concat(`format must be "gzip" or "zstd"; got `, compactOptions.Format), + nil, + ) + } +} + +func lowercaseText(text string) string { + builder := core.NewBuilder() + for 
_, r := range text { + builder.WriteRune(unicode.ToLower(r)) + } + return builder.String() } type compactArchiveEntry struct { @@ -44,31 +91,35 @@ func (storeInstance *Store) Compact(options CompactOptions) core.Result { return core.Result{Value: core.E("store.Compact", "ensure journal schema", err), OK: false} } - outputDirectory := options.Output - if outputDirectory == "" { - outputDirectory = defaultArchiveOutputDirectory + options = options.Normalised() + if err := options.Validate(); err != nil { + return core.Result{Value: core.E("store.Compact", "validate options", err), OK: false} } - format := options.Format - if format == "" { - format = "gzip" + + medium := options.Medium + if medium == nil { + medium = storeInstance.medium } - if format != "gzip" && format != "zstd" { - return core.Result{Value: core.E("store.Compact", core.Concat("unsupported archive format: ", format), nil), OK: false} + if medium == nil { + medium = coreio.Local } - - filesystem := (&core.Fs{}).NewUnrestricted() - if result := filesystem.EnsureDir(outputDirectory); !result.OK { - return core.Result{Value: core.E("store.Compact", "ensure archive directory", result.Value.(error)), OK: false} + if medium == nil { + return core.Result{Value: core.E("store.Compact", "local medium is unavailable", nil), OK: false} + } + if err := ensureMediumDir(medium, options.Output); err != nil { + return core.Result{Value: core.E("store.Compact", "ensure medium archive directory", err), OK: false} } - rows, err := storeInstance.sqliteDatabase.Query( + rows, queryErr := storeInstance.sqliteDatabase.Query( "SELECT entry_id, bucket_name, measurement, fields_json, tags_json, committed_at FROM "+journalEntriesTableName+" WHERE archived_at IS NULL AND committed_at < ? 
ORDER BY committed_at, entry_id", options.Before.UnixMilli(), ) - if err != nil { - return core.Result{Value: core.E("store.Compact", "query journal rows", err), OK: false} + if queryErr != nil { + return core.Result{Value: core.E("store.Compact", "query journal rows", queryErr), OK: false} } - defer rows.Close() + defer func() { + _ = rows.Close() + }() var archiveEntries []compactArchiveEntry for rows.Next() { @@ -92,30 +143,18 @@ func (storeInstance *Store) Compact(options CompactOptions) core.Result { return core.Result{Value: "", OK: true} } - outputPath := compactOutputPath(outputDirectory, format) - archiveFileResult := filesystem.Create(outputPath) - if !archiveFileResult.OK { - return core.Result{Value: core.E("store.Compact", "create archive file", archiveFileResult.Value.(error)), OK: false} - } - - file, ok := archiveFileResult.Value.(io.WriteCloser) - if !ok { - return core.Result{Value: core.E("store.Compact", "archive file is not writable", nil), OK: false} + outputPath := compactOutputPath(options.Output, options.Format) + archiveContent, err := newCompactArchiveBuffer() + if err != nil { + return core.Result{Value: core.E("store.Compact", "create archive buffer", err), OK: false} } - fileClosed := false - defer func() { - if !fileClosed { - _ = file.Close() - } - }() - - writer, err := archiveWriter(file, format) + writer, err := archiveWriter(archiveContent, options.Format) if err != nil { return core.Result{Value: err, OK: false} } - writeOK := false + archiveWriteFinished := false defer func() { - if !writeOK { + if !archiveWriteFinished { _ = writer.Close() } }() @@ -129,18 +168,28 @@ func (storeInstance *Store) Compact(options CompactOptions) core.Result { if err != nil { return core.Result{Value: err, OK: false} } - if _, err := io.WriteString(writer, lineJSON+"\n"); err != nil { + if _, err := writer.Write([]byte(lineJSON + "\n")); err != nil { return core.Result{Value: core.E("store.Compact", "write archive line", err), OK: false} } } if 
err := writer.Close(); err != nil { return core.Result{Value: core.E("store.Compact", "close archive writer", err), OK: false} } - writeOK = true - if err := file.Close(); err != nil { - return core.Result{Value: core.E("store.Compact", "close archive file", err), OK: false} + archiveWriteFinished = true + compressedArchive, err := archiveContent.content() + if err != nil { + return core.Result{Value: core.E("store.Compact", "read archive buffer", err), OK: false} + } + stagedOutputPath := core.Concat(outputPath, ".tmp") + stagedOutputPublished := false + if err := medium.Write(stagedOutputPath, compressedArchive); err != nil { + return core.Result{Value: core.E("store.Compact", "write staged archive via medium", err), OK: false} } - fileClosed = true + defer func() { + if !stagedOutputPublished && medium.Exists(stagedOutputPath) { + _ = medium.Delete(stagedOutputPath) + } + }() transaction, err := storeInstance.sqliteDatabase.Begin() if err != nil { @@ -168,6 +217,11 @@ func (storeInstance *Store) Compact(options CompactOptions) core.Result { return core.Result{Value: core.E("store.Compact", "commit archive transaction", err), OK: false} } committed = true + stagedOutputPublished = true + + if err := medium.Rename(stagedOutputPath, outputPath); err != nil { + return core.Result{Value: core.E("store.Compact", "publish staged archive", err), OK: false} + } return core.Result{Value: outputPath, OK: true} } @@ -194,7 +248,33 @@ func archiveEntryLine(entry compactArchiveEntry) (map[string]any, error) { }, nil } -func archiveWriter(writer io.Writer, format string) (io.WriteCloser, error) { +type compactArchiveWriter interface { + Write([]byte) (int, error) + Close() error +} + +type compactArchiveWriteTarget interface { + Write([]byte) (int, error) +} + +type compactArchiveBuffer struct { + buffer bytes.Buffer +} + +func newCompactArchiveBuffer() (*compactArchiveBuffer, error) { + return &compactArchiveBuffer{}, nil +} + +// Usage example: `buffer, _ := 
newCompactArchiveBuffer(); _, _ = buffer.Write([]byte("archive"))` +func (buffer *compactArchiveBuffer) Write(data []byte) (int, error) { + return buffer.buffer.Write(data) +} + +func (buffer *compactArchiveBuffer) content() (string, error) { + return buffer.buffer.String(), nil +} + +func archiveWriter(writer compactArchiveWriteTarget, format string) (compactArchiveWriter, error) { switch format { case "gzip": return gzip.NewWriter(writer), nil diff --git a/compact_test.go b/compact_test.go index d54f805..cbd42d8 100644 --- a/compact_test.go +++ b/compact_test.go @@ -9,177 +9,217 @@ import ( core "dappco.re/go/core" "github.com/klauspost/compress/zstd" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestCompact_Compact_Good_GzipArchive(t *testing.T) { outputDirectory := useArchiveOutputDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.True(t, - storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? 
WHERE measurement = ?", time.Now().Add(-48*time.Hour).UnixMilli(), "session-a", ) - require.NoError(t, err) + assertNoError(t, err) result := storeInstance.Compact(CompactOptions{ Before: time.Now().Add(-24 * time.Hour), Output: outputDirectory, Format: "gzip", }) - require.True(t, result.OK, "compact failed: %v", result.Value) + assertTruef(t, result.OK, "compact failed: %v", result.Value) archivePath, ok := result.Value.(string) - require.True(t, ok, "unexpected archive path type: %T", result.Value) - assert.True(t, testFilesystem().Exists(archivePath)) + assertTruef(t, ok, "unexpected archive path type: %T", result.Value) + assertTrue(t, testFilesystem().Exists(archivePath)) archiveData := requireCoreReadBytes(t, archivePath) reader, err := gzip.NewReader(bytes.NewReader(archiveData)) - require.NoError(t, err) - defer reader.Close() + assertNoError(t, err) + defer func() { + _ = reader.Close() + }() decompressedData, err := io.ReadAll(reader) - require.NoError(t, err) + assertNoError(t, err) lines := core.Split(core.Trim(string(decompressedData)), "\n") - require.Len(t, lines, 1) + assertLen(t, lines, 1) archivedRow := make(map[string]any) unmarshalResult := core.JSONUnmarshalString(lines[0], &archivedRow) - require.True(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) - assert.Equal(t, "session-a", archivedRow["measurement"]) + assertTruef(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) + assertEqual(t, "session-a", archivedRow["measurement"]) remainingRows := requireResultRows(t, storeInstance.QueryJournal("")) - require.Len(t, remainingRows, 1) - assert.Equal(t, "session-b", remainingRows[0]["measurement"]) + assertLen(t, remainingRows, 1) + assertEqual(t, "session-b", remainingRows[0]["measurement"]) } func TestCompact_Compact_Good_ZstdArchive(t *testing.T) { outputDirectory := useArchiveOutputDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", 
"events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? WHERE measurement = ?", time.Now().Add(-48*time.Hour).UnixMilli(), "session-a", ) - require.NoError(t, err) + assertNoError(t, err) result := storeInstance.Compact(CompactOptions{ Before: time.Now().Add(-24 * time.Hour), Output: outputDirectory, Format: "zstd", }) - require.True(t, result.OK, "compact failed: %v", result.Value) + assertTruef(t, result.OK, "compact failed: %v", result.Value) archivePath, ok := result.Value.(string) - require.True(t, ok, "unexpected archive path type: %T", result.Value) - assert.True(t, testFilesystem().Exists(archivePath)) - assert.Contains(t, archivePath, ".jsonl.zst") + assertTruef(t, ok, "unexpected archive path type: %T", result.Value) + assertTrue(t, testFilesystem().Exists(archivePath)) + assertContainsString(t, archivePath, ".jsonl.zst") archiveData := requireCoreReadBytes(t, archivePath) reader, err := zstd.NewReader(bytes.NewReader(archiveData)) - require.NoError(t, err) + assertNoError(t, err) defer reader.Close() decompressedData, err := io.ReadAll(reader) - require.NoError(t, err) + assertNoError(t, err) lines := core.Split(core.Trim(string(decompressedData)), "\n") - require.Len(t, lines, 1) + assertLen(t, lines, 1) archivedRow := make(map[string]any) unmarshalResult := core.JSONUnmarshalString(lines[0], &archivedRow) - require.True(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) - assert.Equal(t, "session-a", archivedRow["measurement"]) + assertTruef(t, unmarshalResult.OK, 
"archive line unmarshal failed: %v", unmarshalResult.Value) + assertEqual(t, "session-a", archivedRow["measurement"]) } func TestCompact_Compact_Good_NoRows(t *testing.T) { outputDirectory := useArchiveOutputDirectory(t) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() result := storeInstance.Compact(CompactOptions{ Before: time.Now(), Output: outputDirectory, Format: "gzip", }) - require.True(t, result.OK, "compact failed: %v", result.Value) - assert.Equal(t, "", result.Value) + assertTruef(t, result.OK, "compact failed: %v", result.Value) + assertEqual(t, "", result.Value) } func TestCompact_Compact_Good_DeterministicOrderingForSameTimestamp(t *testing.T) { outputDirectory := useArchiveOutputDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() - require.NoError(t, ensureJournalSchema(storeInstance.sqliteDatabase)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + assertNoError(t, ensureJournalSchema(storeInstance.sqliteDatabase)) committedAt := time.Now().Add(-48 * time.Hour).UnixMilli() - require.NoError(t, commitJournalEntry( - storeInstance.sqliteDatabase, - "events", - "session-b", - `{"like":2}`, - `{"workspace":"session-b"}`, - committedAt, - )) - require.NoError(t, commitJournalEntry( - storeInstance.sqliteDatabase, - "events", - "session-a", - `{"like":1}`, - `{"workspace":"session-a"}`, - committedAt, - )) + assertNoError(t, commitJournalEntry(storeInstance.sqliteDatabase, "events", "session-b", `{"like":2}`, `{"workspace":"session-b"}`, committedAt)) + assertNoError(t, commitJournalEntry(storeInstance.sqliteDatabase, "events", "session-a", `{"like":1}`, `{"workspace":"session-a"}`, committedAt)) result := storeInstance.Compact(CompactOptions{ Before: time.Now().Add(-24 * time.Hour), Output: outputDirectory, 
Format: "gzip", }) - require.True(t, result.OK, "compact failed: %v", result.Value) + assertTruef(t, result.OK, "compact failed: %v", result.Value) archivePath, ok := result.Value.(string) - require.True(t, ok, "unexpected archive path type: %T", result.Value) + assertTruef(t, ok, "unexpected archive path type: %T", result.Value) archiveData := requireCoreReadBytes(t, archivePath) reader, err := gzip.NewReader(bytes.NewReader(archiveData)) - require.NoError(t, err) - defer reader.Close() + assertNoError(t, err) + defer func() { + _ = reader.Close() + }() decompressedData, err := io.ReadAll(reader) - require.NoError(t, err) + assertNoError(t, err) lines := core.Split(core.Trim(string(decompressedData)), "\n") - require.Len(t, lines, 2) + assertLen(t, lines, 2) firstArchivedRow := make(map[string]any) unmarshalResult := core.JSONUnmarshalString(lines[0], &firstArchivedRow) - require.True(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) - assert.Equal(t, "session-b", firstArchivedRow["measurement"]) + assertTruef(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) + assertEqual(t, "session-b", firstArchivedRow["measurement"]) secondArchivedRow := make(map[string]any) unmarshalResult = core.JSONUnmarshalString(lines[1], &secondArchivedRow) - require.True(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) - assert.Equal(t, "session-a", secondArchivedRow["measurement"]) + assertTruef(t, unmarshalResult.OK, "archive line unmarshal failed: %v", unmarshalResult.Value) + assertEqual(t, "session-a", secondArchivedRow["measurement"]) +} + +func TestCompact_CompactOptions_Good_Normalised(t *testing.T) { + options := (CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + }).Normalised() + + assertEqual(t, defaultArchiveOutputDirectory, options.Output) + assertEqual(t, "gzip", options.Format) +} + +func TestCompact_CompactOptions_Good_Validate(t *testing.T) { + err := 
(CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + Format: "zstd", + }).Validate() + assertNoError(t, err) +} + +func TestCompact_CompactOptions_Bad_ValidateMissingCutoff(t *testing.T) { + err := (CompactOptions{ + Format: "gzip", + }).Validate() + assertError(t, err) + assertContainsString(t, err.Error(), "before cutoff time is empty") +} + +func TestCompact_CompactOptions_Good_ValidateNormalisesFormatCase(t *testing.T) { + err := (CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + Format: " GZIP ", + }).Validate() + assertNoError(t, err) + + options := (CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + Format: " ZsTd ", + }).Normalised() + assertEqual(t, "zstd", options.Format) +} + +func TestCompact_CompactOptions_Good_ValidateWhitespaceFormatDefaultsToGzip(t *testing.T) { + options := (CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + Format: " ", + }).Normalised() + + assertEqual(t, "gzip", options.Format) + assertNoError(t, options.Validate()) +} + +func TestCompact_CompactOptions_Bad_ValidateUnsupportedFormat(t *testing.T) { + err := (CompactOptions{ + Before: time.Now().Add(-24 * time.Hour), + Format: "zip", + }).Validate() + assertError(t, err) + assertContainsString(t, err.Error(), `format must be "gzip" or "zstd"`) } diff --git a/conventions_test.go b/conventions_test.go index fb5bccf..ca5c4fb 100644 --- a/conventions_test.go +++ b/conventions_test.go @@ -10,8 +10,6 @@ import ( "unicode" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestConventions_Imports_Good_Banned(t *testing.T) { @@ -41,7 +39,7 @@ func TestConventions_Imports_Good_Banned(t *testing.T) { } slices.Sort(banned) - assert.Empty(t, banned, "banned imports should not appear in repository Go files") + assertEmptyf(t, banned, "banned imports should not appear in repository Go files") } func TestConventions_TestNaming_Good_StrictPattern(t *testing.T) { @@ -75,7 +73,7 @@ func 
TestConventions_TestNaming_Good_StrictPattern(t *testing.T) { } slices.Sort(invalid) - assert.Empty(t, invalid, "top-level tests must follow Test__") + assertEmptyf(t, invalid, "top-level tests must follow Test__") } func TestConventions_Exports_Good_UsageExamples(t *testing.T) { @@ -121,7 +119,7 @@ func TestConventions_Exports_Good_UsageExamples(t *testing.T) { } slices.Sort(missing) - assert.Empty(t, missing, "exported declarations must include a usage example in their doc comment") + assertEmptyf(t, missing, "exported declarations must include a usage example in their doc comment") } func TestConventions_Exports_Good_FieldUsageExamples(t *testing.T) { @@ -161,7 +159,7 @@ func TestConventions_Exports_Good_FieldUsageExamples(t *testing.T) { } slices.Sort(missing) - assert.Empty(t, missing, "exported struct fields must include a usage example in their doc comment") + assertEmptyf(t, missing, "exported struct fields must include a usage example in their doc comment") } func TestConventions_Exports_Good_NoCompatibilityAliases(t *testing.T) { @@ -173,33 +171,34 @@ func TestConventions_Exports_Good_NoCompatibilityAliases(t *testing.T) { for _, path := range files { file := parseGoFile(t, path) for _, decl := range file.Decls { - switch node := decl.(type) { - case *ast.GenDecl: - for _, spec := range node.Specs { - switch item := spec.(type) { - case *ast.TypeSpec: - if item.Name.Name == "KV" { - invalid = append(invalid, core.Concat(path, ": ", item.Name.Name)) - } - if item.Name.Name != "Watcher" { - continue - } - structType, ok := item.Type.(*ast.StructType) - if !ok { - continue - } - for _, field := range structType.Fields.List { - for _, name := range field.Names { - if name.Name == "Ch" { - invalid = append(invalid, core.Concat(path, ": Watcher.Ch")) - } + node, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, spec := range node.Specs { + switch item := spec.(type) { + case *ast.TypeSpec: + if item.Name.Name == "KV" { + invalid = append(invalid, 
core.Concat(path, ": ", item.Name.Name)) + } + if item.Name.Name != "Watcher" { + continue + } + structType, ok := item.Type.(*ast.StructType) + if !ok { + continue + } + for _, field := range structType.Fields.List { + for _, name := range field.Names { + if name.Name == "Ch" { + invalid = append(invalid, core.Concat(path, ": Watcher.Ch")) } } - case *ast.ValueSpec: - for _, name := range item.Names { - if name.Name == "ErrNotFound" || name.Name == "ErrQuotaExceeded" { - invalid = append(invalid, core.Concat(path, ": ", name.Name)) - } + } + case *ast.ValueSpec: + for _, name := range item.Names { + if name.Name == "ErrNotFound" || name.Name == "ErrQuotaExceeded" { + invalid = append(invalid, core.Concat(path, ": ", name.Name)) } } } @@ -208,7 +207,7 @@ func TestConventions_Exports_Good_NoCompatibilityAliases(t *testing.T) { } slices.Sort(invalid) - assert.Empty(t, invalid, "legacy compatibility aliases should not appear in the public Go API") + assertEmptyf(t, invalid, "legacy compatibility aliases should not appear in the public Go API") } func repoGoFiles(t *testing.T, keep func(name string) bool) []string { @@ -218,7 +217,7 @@ func repoGoFiles(t *testing.T, keep func(name string) bool) []string { requireCoreOK(t, result) entries, ok := result.Value.([]fs.DirEntry) - require.True(t, ok, "unexpected directory entry type: %T", result.Value) + assertTruef(t, ok, "unexpected directory entry type: %T", result.Value) var files []string for _, entry := range entries { @@ -236,7 +235,7 @@ func parseGoFile(t *testing.T, path string) *ast.File { t.Helper() file, err := parser.ParseFile(token.NewFileSet(), path, nil, parser.ParseComments) - require.NoError(t, err) + assertNoError(t, err) return file } diff --git a/coverage_test.go b/coverage_test.go index 434a4bb..adaa954 100644 --- a/coverage_test.go +++ b/coverage_test.go @@ -9,8 +9,6 @@ import ( "testing" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // 
--------------------------------------------------------------------------- @@ -24,19 +22,19 @@ func TestCoverage_New_Bad_SchemaConflict(t *testing.T) { databasePath := testPath(t, "conflict.db") database, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) database.SetMaxOpenConns(1) _, err = database.Exec("PRAGMA journal_mode=WAL") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("CREATE TABLE dummy (id INTEGER)") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("CREATE INDEX entries ON dummy(id)") - require.NoError(t, err) - require.NoError(t, database.Close()) + assertNoError(t, err) + assertNoError(t, database.Close()) _, err = New(databasePath) - require.Error(t, err, "New should fail when an index named entries already exists") - assert.Contains(t, err.Error(), "store.New: ensure schema") + assertError(t, err) + assertContainsString(t, err.Error(), "store.New: ensure schema") } // --------------------------------------------------------------------------- @@ -47,32 +45,32 @@ func TestCoverage_GetAll_Bad_ScanError(t *testing.T) { // Trigger a scan error by inserting a row with a NULL key. The production // code scans into plain strings, which cannot represent NULL. storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() // Insert a normal row first so the query returns results. - require.NoError(t, storeInstance.Set("g", "good", "value")) + assertNoError(t, storeInstance.Set("g", "good", "value")) // Restructure the table to allow NULLs, then insert a NULL-key row. 
_, err = storeInstance.sqliteDatabase.Exec("ALTER TABLE entries RENAME TO entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec(`CREATE TABLE entries ( group_name TEXT, entry_key TEXT, entry_value TEXT, expires_at INTEGER )`) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries SELECT * FROM entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("DROP TABLE entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.GetAll("g") - require.Error(t, err, "GetAll should fail when a row contains a NULL key") - assert.Contains(t, err.Error(), "store.All: scan") + assertError(t, err) + assertContainsString(t, err.Error(), "store.All: scan") } // --------------------------------------------------------------------------- @@ -85,24 +83,21 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) { databasePath := testPath(t, "corrupt-getall.db") storeInstance, err := New(databasePath) - require.NoError(t, err) + assertNoError(t, err) // Insert enough rows to span multiple database pages. const rows = 5000 for i := range rows { - require.NoError(t, storeInstance.Set("g", - core.Sprintf("key-%06d", i), - core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i))) + assertNoError(t, storeInstance.Set("g", core.Sprintf("key-%06d", i), core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i))) } - storeInstance.Close() - + assertNoError(t, storeInstance.Close()) // Force a WAL checkpoint so all data is in the main database file. 
rawDatabase, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) rawDatabase.SetMaxOpenConns(1) _, err = rawDatabase.Exec("PRAGMA wal_checkpoint(TRUNCATE)") - require.NoError(t, err) - require.NoError(t, rawDatabase.Close()) + assertNoError(t, err) + assertNoError(t, rawDatabase.Close()) // Corrupt data pages in the latter portion of the file (skip the first // pages which hold the schema). @@ -111,7 +106,7 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) { for i := range garbage { garbage[i] = 0xFF } - require.Greater(t, len(data), len(garbage)*2, "database file should be large enough to corrupt") + assertGreaterf(t, len(data), len(garbage)*2, "database file should be large enough to corrupt") offset := len(data) * 3 / 4 maxOffset := len(data) - (len(garbage) * 2) if offset > maxOffset { @@ -126,12 +121,12 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) { _ = testFilesystem().Delete(databasePath + "-shm") reopenedStore, err := New(databasePath) - require.NoError(t, err) - defer reopenedStore.Close() + assertNoError(t, err) + defer func() { _ = reopenedStore.Close() }() _, err = reopenedStore.GetAll("g") - require.Error(t, err, "GetAll should fail on corrupted database pages") - assert.Contains(t, err.Error(), "store.All: rows") + assertError(t, err) + assertContainsString(t, err.Error(), "store.All: rows") } // --------------------------------------------------------------------------- @@ -141,30 +136,30 @@ func TestCoverage_GetAll_Bad_RowsError(t *testing.T) { func TestCoverage_Render_Bad_ScanError(t *testing.T) { // Same NULL-key technique as TestCoverage_GetAll_Bad_ScanError. 
storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "good", "value")) + assertNoError(t, storeInstance.Set("g", "good", "value")) _, err = storeInstance.sqliteDatabase.Exec("ALTER TABLE entries RENAME TO entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec(`CREATE TABLE entries ( group_name TEXT, entry_key TEXT, entry_value TEXT, expires_at INTEGER )`) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries SELECT * FROM entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', NULL, 'null-key-val')") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("DROP TABLE entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.Render("{{ .good }}", "g") - require.Error(t, err, "Render should fail when a row contains a NULL key") - assert.Contains(t, err.Error(), "store.All: scan") + assertError(t, err) + assertContainsString(t, err.Error(), "store.All: scan") } // --------------------------------------------------------------------------- @@ -176,29 +171,26 @@ func TestCoverage_Render_Bad_RowsError(t *testing.T) { databasePath := testPath(t, "corrupt-render.db") storeInstance, err := New(databasePath) - require.NoError(t, err) + assertNoError(t, err) const rows = 5000 for i := range rows { - require.NoError(t, storeInstance.Set("g", - core.Sprintf("key-%06d", i), - core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i))) + assertNoError(t, storeInstance.Set("g", core.Sprintf("key-%06d", i), core.Sprintf("value-with-padding-%06d-xxxxxxxxxxxxxxxxxxxxxxxx", i))) } - storeInstance.Close() - + 
assertNoError(t, storeInstance.Close()) rawDatabase, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) rawDatabase.SetMaxOpenConns(1) _, err = rawDatabase.Exec("PRAGMA wal_checkpoint(TRUNCATE)") - require.NoError(t, err) - require.NoError(t, rawDatabase.Close()) + assertNoError(t, err) + assertNoError(t, rawDatabase.Close()) data := requireCoreReadBytes(t, databasePath) garbage := make([]byte, 4096) for i := range garbage { garbage[i] = 0xFF } - require.Greater(t, len(data), len(garbage)*2, "database file should be large enough to corrupt") + assertGreaterf(t, len(data), len(garbage)*2, "database file should be large enough to corrupt") offset := len(data) * 3 / 4 maxOffset := len(data) - (len(garbage) * 2) if offset > maxOffset { @@ -212,12 +204,12 @@ func TestCoverage_Render_Bad_RowsError(t *testing.T) { _ = testFilesystem().Delete(databasePath + "-shm") reopenedStore, err := New(databasePath) - require.NoError(t, err) - defer reopenedStore.Close() + assertNoError(t, err) + defer func() { _ = reopenedStore.Close() }() _, err = reopenedStore.Render("{{ . }}", "g") - require.Error(t, err, "Render should fail on corrupted database pages") - assert.Contains(t, err.Error(), "store.All: rows") + assertError(t, err) + assertContainsString(t, err.Error(), "store.All: rows") } // --------------------------------------------------------------------------- @@ -228,28 +220,28 @@ func TestCoverage_GroupsSeq_Bad_ScanError(t *testing.T) { // Trigger a scan error by inserting a row with a NULL group name. The // production code scans into a plain string, which cannot represent NULL. 
storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() _, err = storeInstance.sqliteDatabase.Exec("ALTER TABLE entries RENAME TO entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec(`CREATE TABLE entries ( group_name TEXT, entry_key TEXT, entry_value TEXT, expires_at INTEGER )`) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries SELECT * FROM entries_backup") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES (NULL, 'k', 'v')") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec("DROP TABLE entries_backup") - require.NoError(t, err) + assertNoError(t, err) for groupName, iterationErr := range storeInstance.GroupsSeq("") { - require.Error(t, iterationErr) - assert.Empty(t, groupName) + assertError(t, iterationErr) + assertEmpty(t, groupName) break } } @@ -262,7 +254,7 @@ func TestCoverage_GroupsSeq_Bad_RowsError(t *testing.T) { groupRowsErr: core.E("stubSQLiteScenario", "rows iteration failed", nil), groupRowsErrIndex: 0, }) - defer database.Close() + defer func() { _ = database.Close() }() storeInstance := &Store{ sqliteDatabase: database, @@ -270,8 +262,8 @@ func TestCoverage_GroupsSeq_Bad_RowsError(t *testing.T) { } for groupName, iterationErr := range storeInstance.GroupsSeq("") { - require.Error(t, iterationErr, "GroupsSeq should fail on corrupted database pages") - assert.Empty(t, groupName) + assertError(t, iterationErr) + assertEmpty(t, groupName) break } } @@ -282,15 +274,14 @@ func TestCoverage_GroupsSeq_Bad_RowsError(t *testing.T) { func TestCoverage_ScopedStore_Bad_GroupsClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - require.NoError(t, storeInstance.Close()) + 
assertNoError(t, storeInstance.Close()) scopedStore := NewScoped(storeInstance, "tenant-a") - require.NotNil(t, scopedStore) + assertNotNil(t, scopedStore) - var err error - _, err = scopedStore.Groups("") - require.Error(t, err) - assert.Contains(t, err.Error(), "store.ScopedStore.Groups") + _, err := scopedStore.Groups("") + assertError(t, err) + assertContainsString(t, err.Error(), "store.ScopedStore.Groups") } func TestCoverage_ScopedStore_Bad_GroupsSeqRowsError(t *testing.T) { @@ -301,7 +292,7 @@ func TestCoverage_ScopedStore_Bad_GroupsSeqRowsError(t *testing.T) { groupRowsErr: core.E("stubSQLiteScenario", "rows iteration failed", nil), groupRowsErrIndex: 1, }) - defer database.Close() + defer func() { _ = database.Close() }() scopedStore := &ScopedStore{ store: &Store{ @@ -314,13 +305,13 @@ func TestCoverage_ScopedStore_Bad_GroupsSeqRowsError(t *testing.T) { var seen []string for groupName, iterationErr := range scopedStore.GroupsSeq("") { if iterationErr != nil { - require.Error(t, iterationErr) - assert.Empty(t, groupName) + assertError(t, iterationErr) + assertEmpty(t, groupName) break } seen = append(seen, groupName) } - assert.Equal(t, []string{"config"}, seen) + assertEqual(t, []string{"config"}, seen) } // --------------------------------------------------------------------------- @@ -331,11 +322,11 @@ func TestCoverage_EnsureSchema_Bad_TableExistsQueryError(t *testing.T) { database, _ := openStubSQLiteDatabase(t, stubSQLiteScenario{ tableExistsErr: core.E("stubSQLiteScenario", "sqlite master query failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := ensureSchema(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "sqlite master query failed") + assertError(t, err) + assertContainsString(t, err.Error(), "sqlite master query failed") } func TestCoverage_EnsureSchema_Good_ExistingEntriesAndLegacyMigration(t *testing.T) { @@ -345,9 +336,9 @@ func 
TestCoverage_EnsureSchema_Good_ExistingEntriesAndLegacyMigration(t *testing {0, "expires_at", "INTEGER", 0, nil, 0}, }, }) - defer database.Close() + defer func() { _ = database.Close() }() - require.NoError(t, ensureSchema(database)) + assertNoError(t, ensureSchema(database)) } func TestCoverage_EnsureSchema_Bad_ExpiryColumnQueryError(t *testing.T) { @@ -355,11 +346,11 @@ func TestCoverage_EnsureSchema_Bad_ExpiryColumnQueryError(t *testing.T) { tableExistsFound: true, tableInfoErr: core.E("stubSQLiteScenario", "table_info query failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := ensureSchema(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "table_info query failed") + assertError(t, err) + assertContainsString(t, err.Error(), "table_info query failed") } func TestCoverage_EnsureSchema_Bad_MigrationError(t *testing.T) { @@ -370,11 +361,11 @@ func TestCoverage_EnsureSchema_Bad_MigrationError(t *testing.T) { }, insertErr: core.E("stubSQLiteScenario", "insert failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := ensureSchema(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "insert failed") + assertError(t, err) + assertContainsString(t, err.Error(), "insert failed") } func TestCoverage_EnsureSchema_Bad_MigrationCommitError(t *testing.T) { @@ -385,22 +376,22 @@ func TestCoverage_EnsureSchema_Bad_MigrationCommitError(t *testing.T) { }, commitErr: core.E("stubSQLiteScenario", "commit failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := ensureSchema(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "commit failed") + assertError(t, err) + assertContainsString(t, err.Error(), "commit failed") } func TestCoverage_TableHasColumn_Bad_QueryError(t *testing.T) { database, _ := openStubSQLiteDatabase(t, stubSQLiteScenario{ tableInfoErr: core.E("stubSQLiteScenario", "table_info query failed", nil), }) - defer 
database.Close() + defer func() { _ = database.Close() }() _, err := tableHasColumn(database, "entries", "expires_at") - require.Error(t, err) - assert.Contains(t, err.Error(), "table_info query failed") + assertError(t, err) + assertContainsString(t, err.Error(), "table_info query failed") } func TestCoverage_EnsureExpiryColumn_Good_DuplicateColumn(t *testing.T) { @@ -410,9 +401,9 @@ func TestCoverage_EnsureExpiryColumn_Good_DuplicateColumn(t *testing.T) { }, alterTableErr: core.E("stubSQLiteScenario", "duplicate column name: expires_at", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() - require.NoError(t, ensureExpiryColumn(database)) + assertNoError(t, ensureExpiryColumn(database)) } func TestCoverage_EnsureExpiryColumn_Bad_AlterTableError(t *testing.T) { @@ -422,11 +413,11 @@ func TestCoverage_EnsureExpiryColumn_Bad_AlterTableError(t *testing.T) { }, alterTableErr: core.E("stubSQLiteScenario", "permission denied", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := ensureExpiryColumn(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "permission denied") + assertError(t, err) + assertContainsString(t, err.Error(), "permission denied") } func TestCoverage_MigrateLegacyEntriesTable_Bad_InsertError(t *testing.T) { @@ -436,22 +427,22 @@ func TestCoverage_MigrateLegacyEntriesTable_Bad_InsertError(t *testing.T) { }, insertErr: core.E("stubSQLiteScenario", "insert failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := migrateLegacyEntriesTable(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "insert failed") + assertError(t, err) + assertContainsString(t, err.Error(), "insert failed") } func TestCoverage_MigrateLegacyEntriesTable_Bad_BeginError(t *testing.T) { database, _ := openStubSQLiteDatabase(t, stubSQLiteScenario{ beginErr: core.E("stubSQLiteScenario", "begin failed", nil), }) - defer database.Close() + defer func() { _ = 
database.Close() }() err := migrateLegacyEntriesTable(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "begin failed") + assertError(t, err) + assertContainsString(t, err.Error(), "begin failed") } func TestCoverage_MigrateLegacyEntriesTable_Good_CreatesAndMigratesLegacyRows(t *testing.T) { @@ -460,20 +451,20 @@ func TestCoverage_MigrateLegacyEntriesTable_Good_CreatesAndMigratesLegacyRows(t {0, "grp", "TEXT", 1, nil, 0}, }, }) - defer database.Close() + defer func() { _ = database.Close() }() - require.NoError(t, migrateLegacyEntriesTable(database)) + assertNoError(t, migrateLegacyEntriesTable(database)) } func TestCoverage_MigrateLegacyEntriesTable_Bad_TableInfoError(t *testing.T) { database, _ := openStubSQLiteDatabase(t, stubSQLiteScenario{ tableInfoErr: core.E("stubSQLiteScenario", "table_info query failed", nil), }) - defer database.Close() + defer func() { _ = database.Close() }() err := migrateLegacyEntriesTable(database) - require.Error(t, err) - assert.Contains(t, err.Error(), "table_info query failed") + assertError(t, err) + assertContainsString(t, err.Error(), "table_info query failed") } type stubSQLiteScenario struct { @@ -534,7 +525,7 @@ func openStubSQLiteDatabase(t *testing.T, scenario stubSQLiteScenario) (*sql.DB, }) database, err := sql.Open(stubSQLiteDriverName, databasePath) - require.NoError(t, err) + assertNoError(t, err) return database, databasePath } @@ -558,7 +549,7 @@ func (conn *stubSQLiteConn) Begin() (driver.Tx, error) { return conn.BeginTx(context.Background(), driver.TxOptions{}) } -func (conn *stubSQLiteConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { +func (conn *stubSQLiteConn) BeginTx(ctx context.Context, options driver.TxOptions) (driver.Tx, error) { if conn.scenario.beginErr != nil { return nil, conn.scenario.beginErr } @@ -619,16 +610,16 @@ func (conn *stubSQLiteConn) QueryContext(ctx context.Context, query string, args return nil, core.E("stubSQLiteConn.QueryContext", 
"unexpected query", nil) } -func (tx *stubSQLiteTx) Commit() error { - if tx.scenario.commitErr != nil { - return tx.scenario.commitErr +func (transaction *stubSQLiteTx) Commit() error { + if transaction.scenario.commitErr != nil { + return transaction.scenario.commitErr } return nil } -func (tx *stubSQLiteTx) Rollback() error { - if tx.scenario.rollbackErr != nil { - return tx.scenario.rollbackErr +func (transaction *stubSQLiteTx) Rollback() error { + if transaction.scenario.rollbackErr != nil { + return transaction.scenario.rollbackErr } return nil } diff --git a/doc.go b/doc.go index 4280e57..81e2d1c 100644 --- a/doc.go +++ b/doc.go @@ -1,15 +1,27 @@ -// Package store provides SQLite-backed key-value storage for grouped entries, -// TTL expiry, namespace isolation, quota enforcement, reactive change -// notifications, SQLite journal writes, workspace journalling, and orphan -// recovery. -// -// Workspace files live under `.core/state/` and can be recovered with -// `RecoverOrphans(".core/state/")`. -// -// Use `store.NewConfigured(store.StoreConfig{...})` when the database path, -// journal, and purge interval are already known. Prefer the struct literal -// over `store.New(..., store.WithJournal(...))` when the full configuration is -// already available, because it reads as data rather than a chain of steps. +// Package store provides SQLite-backed grouped key-value storage with TTL, +// namespace isolation, quota enforcement, reactive events, journal writes, +// workspace buffering, cold archive compaction, and orphan recovery. +// +// Prefer `store.New(...)` and `store.NewScoped(...)` for the primary API. 
+// Use `store.NewConfigured(store.StoreConfig{...})` and +// `store.NewScopedConfigured(configuredStore, store.ScopedStoreConfig{...})` when the +// configuration is already known: +// +// configuredStore, err := store.NewConfigured(store.StoreConfig{ +// DatabasePath: ":memory:", +// Journal: store.JournalConfiguration{ +// EndpointURL: "http://127.0.0.1:8086", +// Organisation: "core", +// BucketName: "events", +// }, +// PurgeInterval: 20 * time.Millisecond, +// WorkspaceStateDirectory: "/tmp/core-state", +// }) +// +// Workspace files live under `.core/state/` by default and can be recovered +// with `configuredStore.RecoverOrphans(".core/state/")` after a crash. +// Use `StoreConfig.Normalised()` when you want the default purge interval and +// workspace state directory filled in before passing the config onward. // // Usage example: // @@ -22,11 +34,12 @@ // BucketName: "events", // }, // PurgeInterval: 20 * time.Millisecond, +// WorkspaceStateDirectory: "/tmp/core-state", // }) // if err != nil { // return // } -// defer configuredStore.Close() +// defer func() { _ = configuredStore.Close() }() // // if err := configuredStore.Set("config", "colour", "blue"); err != nil { // return diff --git a/docs/RFC-STORE.md b/docs/RFC-STORE.md index b33cd23..d8bec47 100644 --- a/docs/RFC-STORE.md +++ b/docs/RFC-STORE.md @@ -18,7 +18,7 @@ tags: **Module:** `dappco.re/go/store` **Repository:** `core/go-store` -**Files:** 8 +**Files:** 9 --- @@ -33,6 +33,7 @@ SQLite-backed key-value store with TTL, namespace isolation, reactive events, an | File | Purpose | |------|---------| | `store.go` | Core `Store`: CRUD on `(grp, key)` compound PK, TTL via `expires_at` (Unix ms), background purge (60s), `text/template` rendering, `iter.Seq2` iterators | +| `transaction.go` | `Store.Transaction`, transaction-scoped read/write helpers, staged event dispatch | | `events.go` | `Watch`/`Unwatch` (buffered chan, cap 16, non-blocking sends) + `OnChange` callbacks (synchronous) | | 
`scope.go` | `ScopedStore` wraps `*Store`, prefixes groups with `namespace:`. Quota enforcement (`MaxKeys`/`MaxGroups`) | | `workspace.go` | `Workspace` buffer: DuckDB-backed mutable accumulation, atomic commit to journal | @@ -54,29 +55,63 @@ SQLite-backed key-value store with TTL, namespace isolation, reactive events, an ## 4. Store Struct ```go -// Store is the SQLite KV store with optional InfluxDB journal backing. +// Store is the SQLite key-value store with TTL expiry, namespace isolation, +// reactive events, SQLite journal writes, and orphan recovery. type Store struct { - db *sql.DB // SQLite connection (single, WAL mode) - journal influxdb2.Client // InfluxDB client (nil if no journal configured) - bucket string // InfluxDB bucket name - org string // InfluxDB org - mu sync.RWMutex - watchers map[string][]chan Event + db *sql.DB + sqliteDatabase *sql.DB + databasePath string + workspaceStateDirectory string + purgeContext context.Context + cancelPurge context.CancelFunc + purgeWaitGroup sync.WaitGroup + purgeInterval time.Duration // interval between background purge cycles + sqliteStoragePath string + sqliteStorageDirectory string + mediumBacked bool + journal influxdb2.Client + bucket string + org string + mu sync.RWMutex + journalConfiguration JournalConfiguration + medium Medium + lifecycleLock sync.Mutex + isClosed bool + + // Event dispatch state. + watchers map[string][]chan Event + callbacks []changeCallbackRegistration + watcherLock sync.RWMutex // protects watcher registration and dispatch + callbackLock sync.RWMutex // protects callback registration and dispatch + nextCallbackID uint64 // monotonic ID for callback registrations + + orphanWorkspaceLock sync.Mutex + cachedOrphanWorkspaces []*Workspace +} + +type EventType int + +const ( + EventSet EventType = iota + EventDelete + EventDeleteGroup } // Event is emitted on Watch channels when a key changes. 
type Event struct { - Group string - Key string - Value string + Type EventType + Group string + Key string + Value string + Timestamp time.Time } ``` ```go // New creates a store. Journal is optional — pass WithJournal() to enable. // -// st, _ := store.New(":memory:") // SQLite only -// st, _ := store.New("/path/to/db", store.WithJournal( // SQLite + InfluxDB +// storeInstance, _ := store.New(":memory:") // SQLite only +// storeInstance, _ := store.New("/path/to/db", store.WithJournal( // "http://localhost:8086", "core-org", "core-bucket", // )) func New(path string, opts ...StoreOption) (*Store, error) { } @@ -91,20 +126,28 @@ func WithJournal(url, org, bucket string) StoreOption { } ## 5. API ```go -st, _ := store.New(":memory:") // or store.New("/path/to/db") -defer st.Close() +storeInstance, _ := store.New(":memory:") // or store.New("/path/to/db") +defer storeInstance.Close() + +storeInstance.Set("group", "key", "value") +storeInstance.SetWithTTL("group", "key", "value", 5*time.Minute) +value, _ := storeInstance.Get("group", "key") // lazy-deletes expired -st.Set("group", "key", "value") -st.SetWithTTL("group", "key", "value", 5*time.Minute) -val, _ := st.Get("group", "key") // lazy-deletes expired +// Atomic multi-key/multi-group update +storeInstance.Transaction(func(transaction *store.StoreTransaction) error { + if err := transaction.Set("group", "first", "1"); err != nil { + return err + } + return transaction.Set("group", "second", "2") +}) // Iteration -for key, val := range st.AllSeq("group") { ... } -for group := range st.GroupsSeq() { ... } +for key, value := range storeInstance.AllSeq("group") { ... } +for group := range storeInstance.GroupsSeq() { ... } // Events -ch := st.Watch("group") -st.OnChange("group", func(key, val string) { ... }) +events := storeInstance.Watch("group") +storeInstance.OnChange(func(event store.Event) { ... }) ``` --- @@ -114,9 +157,12 @@ st.OnChange("group", func(key, val string) { ... 
}) ```go // ScopedStore wraps a Store with a namespace prefix and optional quotas. // -// scoped := store.NewScoped(st, "mynamespace") -// scoped.Set("key", "value") // stored as group "mynamespace:default", key "key" -// scoped.SetIn("mygroup", "key", "v") // stored as group "mynamespace:mygroup", key "key" +// scopedStore, _ := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{ +// Namespace: "mynamespace", +// Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}, +// }) +// scopedStore.Set("key", "value") // stored as group "mynamespace:default", key "key" +// scopedStore.SetIn("mygroup", "key", "v") // stored as group "mynamespace:mygroup", key "key" type ScopedStore struct { store *Store namespace string // validated: ^[a-zA-Z0-9-]+$ @@ -124,19 +170,21 @@ type ScopedStore struct { MaxGroups int // 0 = unlimited } -func NewScoped(st *Store, namespace string) *ScopedStore { } +func NewScoped(storeInstance *Store, namespace string) (*ScopedStore, error) { } + +func NewScopedConfigured(storeInstance *Store, scopedConfig ScopedStoreConfig) (*ScopedStore, error) { } // Set stores a value in the default group ("namespace:default") -func (ss *ScopedStore) Set(key, value string) error { } +func (scopedStore *ScopedStore) Set(key, value string) error { } // SetIn stores a value in an explicit group ("namespace:group") -func (ss *ScopedStore) SetIn(group, key, value string) error { } +func (scopedStore *ScopedStore) SetIn(group, key, value string) error { } // Get retrieves a value from the default group -func (ss *ScopedStore) Get(key string) (string, error) { } +func (scopedStore *ScopedStore) Get(key string) (string, error) { } // GetFrom retrieves a value from an explicit group -func (ss *ScopedStore) GetFrom(group, key string) (string, error) { } +func (scopedStore *ScopedStore) GetFrom(group, key string) (string, error) { } ``` - Namespace regex: `^[a-zA-Z0-9-]+$` @@ -146,7 +194,139 @@ func (ss *ScopedStore) GetFrom(group, key string) (string, 
error) { } --- -## 7. Event System +## 7. Transaction API + +`Store.Transaction(fn)` is the supported atomic API for multi-key and multi-group work. It opens one SQLite transaction, passes a `StoreTransaction` helper to the callback, then commits only if the callback returns `nil`. + +```go +func (storeInstance *Store) Transaction(operation func(*StoreTransaction) error) error { } + +type StoreTransaction struct { } + +func (transaction *StoreTransaction) Exists(group, key string) (bool, error) { } +func (transaction *StoreTransaction) GroupExists(group string) (bool, error) { } +func (transaction *StoreTransaction) Get(group, key string) (string, error) { } +func (transaction *StoreTransaction) Set(group, key, value string) error { } +func (transaction *StoreTransaction) SetWithTTL(group, key, value string, ttl time.Duration) error { } +func (transaction *StoreTransaction) Delete(group, key string) error { } +func (transaction *StoreTransaction) DeleteGroup(group string) error { } +func (transaction *StoreTransaction) DeletePrefix(groupPrefix string) error { } +func (transaction *StoreTransaction) GetAll(group string) (map[string]string, error) { } +func (transaction *StoreTransaction) GetPage(group string, offset, limit int) ([]KeyValue, error) { } +func (transaction *StoreTransaction) All(group string) iter.Seq2[KeyValue, error] { } +func (transaction *StoreTransaction) AllSeq(group string) iter.Seq2[KeyValue, error] { } +func (transaction *StoreTransaction) Count(group string) (int, error) { } +func (transaction *StoreTransaction) CountAll(groupPrefix string) (int, error) { } +func (transaction *StoreTransaction) Groups(groupPrefix ...string) ([]string, error) { } +func (transaction *StoreTransaction) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { } +func (transaction *StoreTransaction) Render(templateSource, group string) (string, error) { } +func (transaction *StoreTransaction) GetSplit(group, key, separator string) (iter.Seq[string], error) { } 
+func (transaction *StoreTransaction) GetFields(group, key string) (iter.Seq[string], error) { } +func (transaction *StoreTransaction) PurgeExpired() (int64, error) { } +``` + +Contract: + +- `operation == nil` returns an error before opening a transaction. +- If `operation` returns an error, the transaction rolls back and `Store.Transaction` returns that error wrapped with transaction context. +- If `operation` returns `nil`, `Store.Transaction` commits. A commit failure is returned and the deferred rollback path is attempted. +- Panics are not recovered by this API; the deferred rollback path still runs while the panic unwinds. +- Reads through `StoreTransaction` see uncommitted writes made earlier in the same callback. +- Mutations stage events during the callback. Watchers and `OnChange` callbacks are notified only after a successful commit, so rolled-back work does not propagate events. +- Callers should return helper errors from the callback. Ignoring a helper error and returning `nil` can still commit any successful earlier operations. +- Callers should use the supplied transaction helper inside the callback. Calling parent `Store` methods from inside the callback is outside the contract and may block behind the single SQLite connection. + +Example: + +```go +err := storeInstance.Transaction(func(transaction *store.StoreTransaction) error { + if err := transaction.Set("accounts", "alice", "10"); err != nil { + return err + } + if err := transaction.Set("accounts", "bob", "12"); err != nil { + return err + } + total, err := transaction.Count("accounts") // sees alice and bob + if err != nil { + return err + } + if total > 100 { + return core.E("accounts", "too many accounts", nil) // rollback + } + return nil // commit +}) +``` + +### 7.1 ScopedStoreTransaction + +`ScopedStore.Transaction(fn)` delegates to `Store.Transaction` and passes a `ScopedStoreTransaction`. 
The scoped helper preserves the same commit, rollback, read-your-writes, and post-commit event semantics, while keeping every operation inside the scoped namespace. + +```go +func (scopedStore *ScopedStore) Transaction(operation func(*ScopedStoreTransaction) error) error { } + +type ScopedStoreTransaction struct { } + +func (transaction *ScopedStoreTransaction) Exists(key string) (bool, error) { } +func (transaction *ScopedStoreTransaction) ExistsIn(group, key string) (bool, error) { } +func (transaction *ScopedStoreTransaction) GroupExists(group string) (bool, error) { } +func (transaction *ScopedStoreTransaction) Get(key string) (string, error) { } +func (transaction *ScopedStoreTransaction) GetFrom(group, key string) (string, error) { } +func (transaction *ScopedStoreTransaction) Set(key, value string) error { } +func (transaction *ScopedStoreTransaction) SetIn(group, key, value string) error { } +func (transaction *ScopedStoreTransaction) SetWithTTL(group, key, value string, ttl time.Duration) error { } +func (transaction *ScopedStoreTransaction) Delete(group, key string) error { } +func (transaction *ScopedStoreTransaction) DeleteGroup(group string) error { } +func (transaction *ScopedStoreTransaction) DeletePrefix(groupPrefix string) error { } +func (transaction *ScopedStoreTransaction) GetAll(group string) (map[string]string, error) { } +func (transaction *ScopedStoreTransaction) GetPage(group string, offset, limit int) ([]KeyValue, error) { } +func (transaction *ScopedStoreTransaction) All(group string) iter.Seq2[KeyValue, error] { } +func (transaction *ScopedStoreTransaction) AllSeq(group string) iter.Seq2[KeyValue, error] { } +func (transaction *ScopedStoreTransaction) Count(group string) (int, error) { } +func (transaction *ScopedStoreTransaction) CountAll(groupPrefix ...string) (int, error) { } +func (transaction *ScopedStoreTransaction) Groups(groupPrefix ...string) ([]string, error) { } +func (transaction *ScopedStoreTransaction) GroupsSeq(groupPrefix 
...string) iter.Seq2[string, error] { } +func (transaction *ScopedStoreTransaction) Render(templateSource, group string) (string, error) { } +func (transaction *ScopedStoreTransaction) GetSplit(group, key, separator string) (iter.Seq[string], error) { } +func (transaction *ScopedStoreTransaction) GetFields(group, key string) (iter.Seq[string], error) { } +func (transaction *ScopedStoreTransaction) PurgeExpired() (int64, error) { } +``` + +Scope isolation rules: + +- `Set(key, value)`, `Get(key)`, and `Exists(key)` operate in the scoped default group, stored as `"namespace:default"`. +- Methods that accept `group` prefix the group before touching storage, so `SetIn("config", "theme", "dark")` writes `"namespace:config"`. +- `Groups` and `GroupsSeq` query only groups under `"namespace:"` and return namespace-local names such as `"config"`, not `"namespace:config"`. +- `CountAll`, `DeletePrefix`, and `PurgeExpired` are namespace-local. `DeletePrefix("")` deletes only groups in the scoped namespace, not the whole store. +- Quotas are evaluated through the same SQLite transaction, so pending writes count toward `MaxKeys` and `MaxGroups`. A returned `QuotaExceededError` rolls back the transaction when the callback returns it. +- Staged events use the full prefixed group internally. Scoped watchers and scoped `OnChange` callbacks localise committed events back to namespace-local group names. 
+ +Example: + +```go +scopedStore, _ := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}, +}) + +err := scopedStore.Transaction(func(transaction *store.ScopedStoreTransaction) error { + if err := transaction.Set("theme", "dark"); err != nil { + return err + } + if err := transaction.SetIn("preferences", "locale", "en-GB"); err != nil { + return err + } + groups, err := transaction.Groups() + if err != nil { + return err + } + // groups == []string{"default", "preferences"} + return nil +}) +``` + +--- + +## 8. Event System ```go // EventType identifies the kind of change. @@ -174,15 +354,15 @@ type Event struct { --- -## 8. Workspace Buffer +## 9. Workspace Buffer Stateful work accumulation over time. A workspace is a named DuckDB buffer for mutable work-in-progress. When a unit of work completes, the full state commits atomically to a time-series journal (InfluxDB). A summary updates the identity store (the existing SQLite store or an external database). -### 7.1 The Problem +### 9.1 The Problem Writing every micro-event directly to a time-series makes deltas meaningless — 4000 writes of "+1" produces noise. A mutable buffer accumulates the work, then commits once as a complete unit. The time-series only sees finished work, so deltas between entries represent real change. -### 7.2 Three Layers +### 9.2 Three Layers ``` Store (SQLite): "this thing exists" — identity, current summary @@ -197,15 +377,15 @@ Journal (InfluxDB): "this thing completed" — immutable, delta-ready | Journal | InfluxDB | Append-only | Retention policy | | Cold | Compressed JSONL | Immutable | Archive | -### 7.3 Workspace API +### 9.3 Workspace API ```go // Workspace is a named DuckDB buffer for mutable work-in-progress. // It holds a reference to the parent Store for identity updates and journal writes. 
// -// ws, _ := st.NewWorkspace("scroll-session-2026-03-30") -// ws.Put("like", map[string]any{"user": "@handle", "post": "video_123"}) -// ws.Commit() // atomic → journal + identity summary +// workspace, _ := storeInstance.NewWorkspace("scroll-session-2026-03-30") +// workspace.Put("like", map[string]any{"user": "@handle", "post": "video_123"}) +// workspace.Commit() // atomic → journal + identity summary type Workspace struct { name string store *Store // parent store for identity updates + journal config @@ -214,40 +394,40 @@ type Workspace struct { // NewWorkspace creates a workspace buffer. The DuckDB file is created at .core/state/{name}.duckdb. // -// ws, _ := st.NewWorkspace("scroll-session-2026-03-30") +// workspace, _ := storeInstance.NewWorkspace("scroll-session-2026-03-30") func (s *Store) NewWorkspace(name string) (*Workspace, error) { } ``` ```go // Put accumulates an entry in the workspace buffer. Returns error on write failure. // -// err := ws.Put("like", map[string]any{"user": "@handle"}) -func (ws *Workspace) Put(kind string, data map[string]any) error { } +// err := workspace.Put("like", map[string]any{"user": "@handle"}) +func (workspace *Workspace) Put(kind string, data map[string]any) error { } // Aggregate returns a summary of the current workspace state // -// summary := ws.Aggregate() // {"like": 4000, "profile_match": 12} -func (ws *Workspace) Aggregate() map[string]any { } +// summary := workspace.Aggregate() // {"like": 4000, "profile_match": 12} +func (workspace *Workspace) Aggregate() map[string]any { } // Commit writes the aggregated state to the journal and updates the identity store // -// result := ws.Commit() -func (ws *Workspace) Commit() core.Result { } +// result := workspace.Commit() +func (workspace *Workspace) Commit() core.Result { } // Discard drops the workspace without committing // -// ws.Discard() -func (ws *Workspace) Discard() { } +// workspace.Discard() +func (workspace *Workspace) Discard() { } // Query runs SQL 
against the buffer for ad-hoc analysis. // Returns core.Result where Value is []map[string]any (rows as maps). // -// result := ws.Query("SELECT kind, COUNT(*) as n FROM entries GROUP BY kind") +// result := workspace.Query("SELECT kind, COUNT(*) as n FROM entries GROUP BY kind") // rows := result.Value.([]map[string]any) // [{"kind": "like", "n": 4000}] -func (ws *Workspace) Query(sql string) core.Result { } +func (workspace *Workspace) Query(sql string) core.Result { } ``` -### 7.4 Journal +### 9.4 Journal Commit writes a single point per completed workspace. One point = one unit of work. @@ -255,7 +435,7 @@ Commit writes a single point per completed workspace. One point = one unit of wo // CommitToJournal writes aggregated state as a single InfluxDB point. // Called by Workspace.Commit() internally, but exported for testing. // -// s.CommitToJournal("scroll-session", fields, tags) +// storeInstance.CommitToJournal("scroll-session", fields, tags) func (s *Store) CommitToJournal(measurement string, fields map[string]any, tags map[string]string) core.Result { } // QueryJournal runs a Flux query against the time-series. @@ -268,7 +448,7 @@ func (s *Store) QueryJournal(flux string) core.Result { } Because each point is a complete unit, queries naturally produce meaningful results without complex aggregation. -### 7.5 Cold Archive +### 9.5 Cold Archive When journal entries age past retention, they compact to cold storage: @@ -282,13 +462,13 @@ type CompactOptions struct { // Compact archives journal entries to compressed JSONL // -// st.Compact(store.CompactOptions{Before: time.Now().Add(-90*24*time.Hour), Output: "/archive/"}) +// storeInstance.Compact(store.CompactOptions{Before: time.Now().Add(-90*24*time.Hour), Output: "/archive/"}) func (s *Store) Compact(opts CompactOptions) core.Result { } ``` Output: gzip JSONL files. Each line is a complete unit of work — ready for training data ingestion, CDN publishing, or long-term analytics. 
-### 7.6 File Lifecycle +### 9.6 File Lifecycle DuckDB files are ephemeral: @@ -307,19 +487,19 @@ Orphan recovery on `New()`: // Each orphan is opened, aggregated, and discarded (not committed). // The caller decides whether to commit orphan data via RecoverOrphans(). // -// orphans := st.RecoverOrphans(".core/state/") -// for _, ws := range orphans { -// // inspect ws.Aggregate(), decide whether to commit or discard -// ws.Discard() +// orphanWorkspaces := storeInstance.RecoverOrphans(".core/state/") +// for _, workspace := range orphanWorkspaces { +// // inspect workspace.Aggregate(), decide whether to commit or discard +// workspace.Discard() // } func (s *Store) RecoverOrphans(stateDir string) []*Workspace { } ``` --- -## 9. Reference Material +## 10. Reference Material | Resource | Location | |----------|----------| -| Core Go RFC | `code/core/go/RFC.md` | -| IO RFC | `code/core/go/io/RFC.md` | +| Architecture docs | `docs/architecture.md` | +| Development guide | `docs/development.md` | diff --git a/docs/architecture.md b/docs/architecture.md index 183c419..56c43cb 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -190,10 +190,12 @@ Watcher delivery is grouped by the registered group name. Wildcard `"*"` matches ## Namespace Isolation (ScopedStore) -`ScopedStore` wraps a `*Store` and automatically prefixes all group names with `namespace + ":"`. This prevents key collisions when multiple tenants share a single underlying database. +`ScopedStore` wraps a `*Store` and automatically prefixes all group names with `namespace + ":"`. This prevents key collisions when multiple tenants share a single underlying database. When the namespace and quota are already known, prefer `NewScopedConfigured(storeInstance, store.ScopedStoreConfig{...})` so the configuration is explicit at the call site. 
```go -scopedStore, err := store.NewScoped(storeInstance, "tenant-42") +scopedStore, err := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{ + Namespace: "tenant-42", +}) if err != nil { return } @@ -213,7 +215,7 @@ Namespace strings must match `^[a-zA-Z0-9-]+$`. Invalid namespaces are rejected ### Quota Enforcement -`NewScopedWithQuota(store, namespace, QuotaConfig)` adds per-namespace limits. For example, `store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}` caps a namespace at 100 keys and 10 groups: +`NewScopedConfigured(storeInstance, store.ScopedStoreConfig{...})` is the preferred way to set per-namespace limits because the quota values stay visible at the call site. For example, `store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}` caps a namespace at 100 keys and 10 groups: ```go type QuotaConfig struct { diff --git a/docs/index.md b/docs/index.md index a552e7f..115ddd6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -11,7 +11,7 @@ For declarative setup, `store.NewConfigured(store.StoreConfig{...})` takes a sin The package has a single runtime dependency -- a pure-Go SQLite driver (`modernc.org/sqlite`). No CGO is required. It compiles and runs on all platforms that Go supports. -**Module path:** `dappco.re/go/core/store` +**Module path:** `dappco.re/go/store` **Go version:** 1.26+ **Licence:** EUPL-1.2 @@ -24,7 +24,7 @@ import ( "fmt" "time" - "dappco.re/go/core/store" + "dappco.re/go/store" ) func main() { @@ -32,6 +32,7 @@ func main() { storeInstance, err := store.NewConfigured(store.StoreConfig{ DatabasePath: "/tmp/app.db", PurgeInterval: 30 * time.Second, + WorkspaceStateDirectory: "/tmp/core-state", }) if err != nil { return @@ -74,7 +75,9 @@ func main() { fmt.Println(renderedTemplate) // "smtp.example.com:587" // Store tenant-42 preferences under the tenant-42: namespace prefix. 
- scopedStore, err := store.NewScoped(storeInstance, "tenant-42") + scopedStore, err := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{ + Namespace: "tenant-42", + }) if err != nil { return } @@ -84,7 +87,10 @@ func main() { // Stored internally as group "tenant-42:preferences", key "locale" // Cap tenant-99 at 100 keys and 5 groups. - quotaScopedStore, err := store.NewScopedWithQuota(storeInstance, "tenant-99", store.QuotaConfig{MaxKeys: 100, MaxGroups: 5}) + quotaScopedStore, err := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{ + Namespace: "tenant-99", + Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 5}, + }) if err != nil { return } @@ -120,7 +126,7 @@ The entire package lives in a single Go package (`package store`) with the follo | `store.go` | Core `Store` type, CRUD operations (`Get`, `Set`, `SetWithTTL`, `Delete`, `DeleteGroup`, `DeletePrefix`), bulk queries (`GetAll`, `GetPage`, `All`, `Count`, `CountAll`, `Groups`, `GroupsSeq`), string splitting helpers (`GetSplit`, `GetFields`), template rendering (`Render`), TTL expiry, background purge goroutine, transaction support | | `transaction.go` | `Store.Transaction`, transaction-scoped write helpers, staged event dispatch | | `events.go` | `EventType` constants, `Event` struct, `Watch`/`Unwatch` channel subscriptions, `OnChange` callback registration, internal `notify` dispatch | -| `scope.go` | `ScopedStore` wrapper for namespace isolation, `QuotaConfig` struct, `NewScoped`/`NewScopedWithQuota` constructors, namespace-local helper delegation, quota enforcement logic | +| `scope.go` | `ScopedStore` wrapper for namespace isolation, `QuotaConfig` struct, `NewScoped`/`NewScopedConfigured` constructors, namespace-local helper delegation, quota enforcement logic | | `journal.go` | Journal persistence, Flux-like querying, JSON row inflation, journal schema helpers | | `workspace.go` | Workspace buffers, aggregation, query analysis, commit flow, and orphan recovery | | 
`compact.go` | Cold archive generation to JSONL gzip or zstd | diff --git a/duckdb.go b/duckdb.go new file mode 100644 index 0000000..536e7aa --- /dev/null +++ b/duckdb.go @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "database/sql" + + core "dappco.re/go/core" + _ "github.com/marcboeker/go-duckdb" +) + +// DuckDB table names for checkpoint scoring and probe results. +// +// Usage example: +// +// _ = db.EnsureScoringTables() +// db.Exec(core.Sprintf("SELECT * FROM %s", store.TableCheckpointScores)) +const ( + // TableCheckpointScores is the table name for checkpoint scoring data. + // + // Usage example: + // + // store.TableCheckpointScores // "checkpoint_scores" + TableCheckpointScores = "checkpoint_scores" + + // TableProbeResults is the table name for probe result data. + // + // Usage example: + // + // store.TableProbeResults // "probe_results" + TableProbeResults = "probe_results" +) + +// DuckDB wraps a DuckDB connection for analytical queries against training +// data, benchmark results, and scoring tables. +// +// Usage example: +// +// db, err := store.OpenDuckDB("/Volumes/Data/lem/lem.duckdb") +// if err != nil { return } +// defer func() { _ = db.Close() }() +// rows, _ := db.QueryGoldenSet(500) +type DuckDB struct { + conn *sql.DB + path string +} + +// OpenDuckDB opens a DuckDB database file in read-only mode to avoid locking +// issues with the Python pipeline. 
+// +// Usage example: +// +// db, err := store.OpenDuckDB("/Volumes/Data/lem/lem.duckdb") +func OpenDuckDB(path string) (*DuckDB, error) { + conn, err := sql.Open("duckdb", path+"?access_mode=READ_ONLY") + if err != nil { + return nil, core.E("store.OpenDuckDB", core.Sprintf("open duckdb %s", path), err) + } + if err := conn.Ping(); err != nil { + _ = conn.Close() + return nil, core.E("store.OpenDuckDB", core.Sprintf("ping duckdb %s", path), err) + } + return &DuckDB{conn: conn, path: path}, nil +} + +// OpenDuckDBReadWrite opens a DuckDB database in read-write mode. +// +// Usage example: +// +// db, err := store.OpenDuckDBReadWrite("/Volumes/Data/lem/lem.duckdb") +func OpenDuckDBReadWrite(path string) (*DuckDB, error) { + conn, err := sql.Open("duckdb", path) + if err != nil { + return nil, core.E("store.OpenDuckDBReadWrite", core.Sprintf("open duckdb %s", path), err) + } + if err := conn.Ping(); err != nil { + _ = conn.Close() + return nil, core.E("store.OpenDuckDBReadWrite", core.Sprintf("ping duckdb %s", path), err) + } + return &DuckDB{conn: conn, path: path}, nil +} + +// Close closes the database connection. +// +// Usage example: +// +// defer func() { _ = db.Close() }() +func (db *DuckDB) Close() error { + return db.conn.Close() +} + +// Path returns the database file path. +// +// Usage example: +// +// p := db.Path() // "/Volumes/Data/lem/lem.duckdb" +func (db *DuckDB) Path() string { + return db.path +} + +// Conn returns the underlying *sql.DB connection. Prefer the typed helpers +// (Exec, QueryRowScan, QueryRows) when possible; this accessor exists for +// callers that need streaming row iteration or transaction control. +// +// Usage example: +// +// rows, err := db.Conn().Query("SELECT id, name FROM models WHERE kind = ?", "lem") +func (db *DuckDB) Conn() *sql.DB { + return db.conn +} + +// Exec executes a query without returning rows. 
+// +// Usage example: +// +// err := db.Exec("INSERT INTO golden_set VALUES (?, ?)", idx, prompt) +func (db *DuckDB) Exec(query string, args ...any) error { + _, err := db.conn.Exec(query, args...) + if err != nil { + return core.E("store.DuckDB.Exec", "execute query", err) + } + return nil +} + +// QueryRowScan executes a query expected to return at most one row and scans +// the result into dest. It is a convenience wrapper around sql.DB.QueryRow. +// +// Usage example: +// +// var count int +// err := db.QueryRowScan("SELECT COUNT(*) FROM golden_set", &count) +func (db *DuckDB) QueryRowScan(query string, dest any, args ...any) error { + return db.conn.QueryRow(query, args...).Scan(dest) +} + +// GoldenSetRow represents one row from the golden_set table. +// +// Usage example: +// +// rows, err := db.QueryGoldenSet(500) +// for _, row := range rows { core.Println(row.Prompt) } +type GoldenSetRow struct { + // Idx is the row index. + // + // Usage example: + // + // row.Idx // 42 + Idx int + + // SeedID is the seed identifier that produced this row. + // + // Usage example: + // + // row.SeedID // "seed-001" + SeedID string + + // Domain is the content domain (e.g. "philosophy", "science"). + // + // Usage example: + // + // row.Domain // "philosophy" + Domain string + + // Voice is the writing voice/style used for generation. + // + // Usage example: + // + // row.Voice // "watts" + Voice string + + // Prompt is the input prompt text. + // + // Usage example: + // + // row.Prompt // "What is sovereignty?" + Prompt string + + // Response is the generated response text. + // + // Usage example: + // + // row.Response // "Sovereignty is..." + Response string + + // GenTime is the generation time in seconds. + // + // Usage example: + // + // row.GenTime // 2.5 + GenTime float64 + + // CharCount is the character count of the response. 
+ // + // Usage example: + // + // row.CharCount // 1500 + CharCount int +} + +// ExpansionPromptRow represents one row from the expansion_prompts table. +// +// Usage example: +// +// prompts, err := db.QueryExpansionPrompts("pending", 100) +// for _, p := range prompts { core.Println(p.Prompt) } +type ExpansionPromptRow struct { + // Idx is the row index. + // + // Usage example: + // + // p.Idx // 42 + Idx int64 + + // SeedID is the seed identifier that produced this prompt. + // + // Usage example: + // + // p.SeedID // "seed-001" + SeedID string + + // Region is the geographic/cultural region for the prompt. + // + // Usage example: + // + // p.Region // "western" + Region string + + // Domain is the content domain (e.g. "philosophy", "science"). + // + // Usage example: + // + // p.Domain // "philosophy" + Domain string + + // Language is the ISO language code for the prompt. + // + // Usage example: + // + // p.Language // "en" + Language string + + // Prompt is the prompt text in the original language. + // + // Usage example: + // + // p.Prompt // "What is sovereignty?" + Prompt string + + // PromptEn is the English translation of the prompt. + // + // Usage example: + // + // p.PromptEn // "What is sovereignty?" + PromptEn string + + // Priority is the generation priority (lower is higher priority). + // + // Usage example: + // + // p.Priority // 1 + Priority int + + // Status is the processing status (e.g. "pending", "done"). + // + // Usage example: + // + // p.Status // "pending" + Status string +} + +// QueryGoldenSet returns all golden set rows with responses >= minChars. +// +// Usage example: +// +// rows, err := db.QueryGoldenSet(500) +func (db *DuckDB) QueryGoldenSet(minChars int) ([]GoldenSetRow, error) { + rows, err := db.conn.Query( + "SELECT idx, seed_id, domain, voice, prompt, response, gen_time, char_count "+ + "FROM golden_set WHERE char_count >= ? 
ORDER BY idx", + minChars, + ) + if err != nil { + return nil, core.E("store.DuckDB.QueryGoldenSet", "query golden_set", err) + } + defer func() { + _ = rows.Close() + }() + + var result []GoldenSetRow + for rows.Next() { + var r GoldenSetRow + if err := rows.Scan(&r.Idx, &r.SeedID, &r.Domain, &r.Voice, + &r.Prompt, &r.Response, &r.GenTime, &r.CharCount); err != nil { + return nil, core.E("store.DuckDB.QueryGoldenSet", "scan golden_set row", err) + } + result = append(result, r) + } + return result, rows.Err() +} + +// CountGoldenSet returns the total count of golden set rows. +// +// Usage example: +// +// count, err := db.CountGoldenSet() +func (db *DuckDB) CountGoldenSet() (int, error) { + var count int + err := db.conn.QueryRow("SELECT COUNT(*) FROM golden_set").Scan(&count) + if err != nil { + return 0, core.E("store.DuckDB.CountGoldenSet", "count golden_set", err) + } + return count, nil +} + +// QueryExpansionPrompts returns expansion prompts filtered by status. +// +// Usage example: +// +// prompts, err := db.QueryExpansionPrompts("pending", 100) +func (db *DuckDB) QueryExpansionPrompts(status string, limit int) ([]ExpansionPromptRow, error) { + query := "SELECT idx, seed_id, region, domain, language, prompt, prompt_en, priority, status " + + "FROM expansion_prompts" + var args []any + + if status != "" { + query += " WHERE status = ?" + args = append(args, status) + } + query += " ORDER BY priority, idx" + + if limit > 0 { + query += core.Sprintf(" LIMIT %d", limit) + } + + rows, err := db.conn.Query(query, args...) 
+ if err != nil { + return nil, core.E("store.DuckDB.QueryExpansionPrompts", "query expansion_prompts", err) + } + defer func() { + _ = rows.Close() + }() + + var result []ExpansionPromptRow + for rows.Next() { + var r ExpansionPromptRow + if err := rows.Scan(&r.Idx, &r.SeedID, &r.Region, &r.Domain, + &r.Language, &r.Prompt, &r.PromptEn, &r.Priority, &r.Status); err != nil { + return nil, core.E("store.DuckDB.QueryExpansionPrompts", "scan expansion_prompt row", err) + } + result = append(result, r) + } + return result, rows.Err() +} + +// CountExpansionPrompts returns counts by status. +// +// Usage example: +// +// total, pending, err := db.CountExpansionPrompts() +func (db *DuckDB) CountExpansionPrompts() (total int, pending int, err error) { + err = db.conn.QueryRow("SELECT COUNT(*) FROM expansion_prompts").Scan(&total) + if err != nil { + return 0, 0, core.E("store.DuckDB.CountExpansionPrompts", "count expansion_prompts", err) + } + err = db.conn.QueryRow("SELECT COUNT(*) FROM expansion_prompts WHERE status = 'pending'").Scan(&pending) + if err != nil { + return total, 0, core.E("store.DuckDB.CountExpansionPrompts", "count pending expansion_prompts", err) + } + return total, pending, nil +} + +// UpdateExpansionStatus updates the status of an expansion prompt by idx. +// +// Usage example: +// +// err := db.UpdateExpansionStatus(42, "done") +func (db *DuckDB) UpdateExpansionStatus(idx int64, status string) error { + _, err := db.conn.Exec("UPDATE expansion_prompts SET status = ? WHERE idx = ?", status, idx) + if err != nil { + return core.E("store.DuckDB.UpdateExpansionStatus", core.Sprintf("update expansion_prompt %d", idx), err) + } + return nil +} + +// QueryRows executes an arbitrary SQL query and returns results as maps. +// +// Usage example: +// +// rows, err := db.QueryRows("SELECT COUNT(*) AS n FROM golden_set") +func (db *DuckDB) QueryRows(query string, args ...any) ([]map[string]any, error) { + rows, err := db.conn.Query(query, args...) 
+ if err != nil { + return nil, core.E("store.DuckDB.QueryRows", "query", err) + } + defer func() { + _ = rows.Close() + }() + + cols, err := rows.Columns() + if err != nil { + return nil, core.E("store.DuckDB.QueryRows", "columns", err) + } + + var result []map[string]any + for rows.Next() { + values := make([]any, len(cols)) + ptrs := make([]any, len(cols)) + for i := range values { + ptrs[i] = &values[i] + } + if err := rows.Scan(ptrs...); err != nil { + return nil, core.E("store.DuckDB.QueryRows", "scan", err) + } + row := make(map[string]any, len(cols)) + for i, col := range cols { + row[col] = values[i] + } + result = append(result, row) + } + return result, rows.Err() +} + +// EnsureScoringTables creates the scoring tables if they do not exist. +// +// Usage example: +// +// if err := db.EnsureScoringTables(); err != nil { return } +func (db *DuckDB) EnsureScoringTables() error { + if _, err := db.conn.Exec(core.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( + model TEXT, run_id TEXT, label TEXT, iteration INTEGER, + correct INTEGER, total INTEGER, accuracy DOUBLE, + scored_at TIMESTAMP DEFAULT current_timestamp, + PRIMARY KEY (run_id, label) + )`, TableCheckpointScores)); err != nil { + return core.E("store.DuckDB.EnsureScoringTables", "create checkpoint_scores", err) + } + if _, err := db.conn.Exec(core.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( + model TEXT, run_id TEXT, label TEXT, probe_id TEXT, + passed BOOLEAN, response TEXT, iteration INTEGER, + scored_at TIMESTAMP DEFAULT current_timestamp, + PRIMARY KEY (run_id, label, probe_id) + )`, TableProbeResults)); err != nil { + return core.E("store.DuckDB.EnsureScoringTables", "create probe_results", err) + } + if _, err := db.conn.Exec(`CREATE TABLE IF NOT EXISTS scoring_results ( + model TEXT, prompt_id TEXT, suite TEXT, + dimension TEXT, score DOUBLE, + scored_at TIMESTAMP DEFAULT current_timestamp + )`); err != nil { + return core.E("store.DuckDB.EnsureScoringTables", "create scoring_results", err) + } + 
return nil +} + +// WriteScoringResult writes a single scoring dimension result to DuckDB. +// +// Usage example: +// +// err := db.WriteScoringResult("lem-8b", "p-001", "ethics", "honesty", 0.95) +func (db *DuckDB) WriteScoringResult(model, promptID, suite, dimension string, score float64) error { + _, err := db.conn.Exec( + `INSERT INTO scoring_results (model, prompt_id, suite, dimension, score) VALUES (?, ?, ?, ?, ?)`, + model, promptID, suite, dimension, score, + ) + if err != nil { + return core.E("store.DuckDB.WriteScoringResult", "insert scoring result", err) + } + return nil +} + +// TableCounts returns row counts for all known tables. +// +// Usage example: +// +// counts, err := db.TableCounts() +// n := counts["golden_set"] +func (db *DuckDB) TableCounts() (map[string]int, error) { + tables := []string{"golden_set", "expansion_prompts", "seeds", "prompts", + "training_examples", "gemini_responses", "benchmark_questions", "benchmark_results", "validations", + TableCheckpointScores, TableProbeResults, "scoring_results"} + + counts := make(map[string]int) + for _, t := range tables { + var count int + err := db.conn.QueryRow(core.Sprintf("SELECT COUNT(*) FROM %s", t)).Scan(&count) + if err != nil { + continue + } + counts[t] = count + } + return counts, nil +} diff --git a/events.go b/events.go index 845626c..00bc135 100644 --- a/events.go +++ b/events.go @@ -2,8 +2,8 @@ package store import ( "reflect" - "sync" - "sync/atomic" + "sync" // Note: AX-6 — internal concurrency primitive; structural for store infrastructure (RFC §4 explicitly mandates). + "sync/atomic" // Note: AX-6 — internal concurrency primitive; structural for store infrastructure (RFC §4 explicitly mandates). 
"time" ) @@ -72,23 +72,16 @@ func (storeInstance *Store) Watch(group string) <-chan Event { return closedEventChannel() } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { - return closedEventChannel() - } - eventChannel := make(chan Event, watcherEventBufferCapacity) - storeInstance.watchersLock.Lock() - defer storeInstance.watchersLock.Unlock() - storeInstance.closeLock.Lock() - closed = storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { + storeInstance.lifecycleLock.Lock() + defer storeInstance.lifecycleLock.Unlock() + if storeInstance.isClosed || storeInstance.isClosing { return closedEventChannel() } + + storeInstance.watcherLock.Lock() + defer storeInstance.watcherLock.Unlock() if storeInstance.watchers == nil { storeInstance.watchers = make(map[string][]chan Event) } @@ -103,15 +96,15 @@ func (storeInstance *Store) Unwatch(group string, events <-chan Event) { return } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() + storeInstance.lifecycleLock.Lock() + closed := storeInstance.isClosed || storeInstance.isClosing + storeInstance.lifecycleLock.Unlock() if closed { return } - storeInstance.watchersLock.Lock() - defer storeInstance.watchersLock.Unlock() + storeInstance.watcherLock.Lock() + defer storeInstance.watcherLock.Unlock() registeredEvents := storeInstance.watchers[group] if len(registeredEvents) == 0 { @@ -151,32 +144,25 @@ func (storeInstance *Store) OnChange(callback func(Event)) func() { return func() {} } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { + storeInstance.lifecycleLock.Lock() + defer storeInstance.lifecycleLock.Unlock() + if storeInstance.isClosed || storeInstance.isClosing { return func() {} } - registrationID := atomic.AddUint64(&storeInstance.nextCallbackRegistrationID, 1) + registrationID := 
atomic.AddUint64(&storeInstance.nextCallbackID, 1) callbackRegistration := changeCallbackRegistration{registrationID: registrationID, callback: callback} - storeInstance.callbacksLock.Lock() - defer storeInstance.callbacksLock.Unlock() - storeInstance.closeLock.Lock() - closed = storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { - return func() {} - } + storeInstance.callbackLock.Lock() + defer storeInstance.callbackLock.Unlock() storeInstance.callbacks = append(storeInstance.callbacks, callbackRegistration) // Return an idempotent unregister function. var once sync.Once return func() { once.Do(func() { - storeInstance.callbacksLock.Lock() - defer storeInstance.callbacksLock.Unlock() + storeInstance.callbackLock.Lock() + defer storeInstance.callbackLock.Unlock() for i := range storeInstance.callbacks { if storeInstance.callbacks[i].registrationID == registrationID { storeInstance.callbacks = append(storeInstance.callbacks[:i], storeInstance.callbacks[i+1:]...) @@ -201,21 +187,14 @@ func (storeInstance *Store) notify(event Event) { event.Timestamp = time.Now() } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { + storeInstance.lifecycleLock.Lock() + if storeInstance.isClosed || storeInstance.isClosing { + storeInstance.lifecycleLock.Unlock() return } - storeInstance.watchersLock.RLock() - storeInstance.closeLock.Lock() - closed = storeInstance.closed - storeInstance.closeLock.Unlock() - if closed { - storeInstance.watchersLock.RUnlock() - return - } + storeInstance.watcherLock.RLock() + storeInstance.lifecycleLock.Unlock() for _, registeredChannel := range storeInstance.watchers["*"] { select { case registeredChannel <- event: @@ -228,11 +207,17 @@ func (storeInstance *Store) notify(event Event) { default: } } - storeInstance.watchersLock.RUnlock() + storeInstance.watcherLock.RUnlock() - storeInstance.callbacksLock.RLock() + storeInstance.lifecycleLock.Lock() + if 
storeInstance.isClosed || storeInstance.isClosing { + storeInstance.lifecycleLock.Unlock() + return + } + storeInstance.callbackLock.RLock() + storeInstance.lifecycleLock.Unlock() callbacks := append([]changeCallbackRegistration(nil), storeInstance.callbacks...) - storeInstance.callbacksLock.RUnlock() + storeInstance.callbackLock.RUnlock() for _, callback := range callbacks { callback.callback(event) diff --git a/events_test.go b/events_test.go index 82305d2..65cb304 100644 --- a/events_test.go +++ b/events_test.go @@ -6,64 +6,62 @@ import ( "time" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestEvents_Watch_Good_Group(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("config") defer storeInstance.Unwatch("config", events) - require.NoError(t, storeInstance.Set("config", "theme", "dark")) - require.NoError(t, storeInstance.Set("config", "colour", "blue")) + assertNoError(t, storeInstance.Set("config", "theme", "dark")) + assertNoError(t, storeInstance.Set("config", "colour", "blue")) received := drainEvents(events, 2, time.Second) - require.Len(t, received, 2) - assert.Equal(t, "theme", received[0].Key) - assert.Equal(t, "colour", received[1].Key) - assert.Equal(t, "config", received[0].Group) - assert.Equal(t, "config", received[1].Group) + assertLen(t, received, 2) + assertEqual(t, "theme", received[0].Key) + assertEqual(t, "colour", received[1].Key) + assertEqual(t, "config", received[0].Group) + assertEqual(t, "config", received[1].Group) } func TestEvents_Watch_Good_WildcardGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("*") defer storeInstance.Unwatch("*", events) - require.NoError(t, storeInstance.Set("g1", "k1", "v1")) - require.NoError(t, storeInstance.Set("g2", "k2", 
"v2")) - require.NoError(t, storeInstance.Delete("g1", "k1")) - require.NoError(t, storeInstance.DeleteGroup("g2")) + assertNoError(t, storeInstance.Set("g1", "k1", "v1")) + assertNoError(t, storeInstance.Set("g2", "k2", "v2")) + assertNoError(t, storeInstance.Delete("g1", "k1")) + assertNoError(t, storeInstance.DeleteGroup("g2")) received := drainEvents(events, 4, time.Second) - require.Len(t, received, 4) - assert.Equal(t, EventSet, received[0].Type) - assert.Equal(t, EventSet, received[1].Type) - assert.Equal(t, EventDelete, received[2].Type) - assert.Equal(t, EventDeleteGroup, received[3].Type) + assertLen(t, received, 4) + assertEqual(t, EventSet, received[0].Type) + assertEqual(t, EventSet, received[1].Type) + assertEqual(t, EventDelete, received[2].Type) + assertEqual(t, EventDeleteGroup, received[3].Type) } func TestEvents_Unwatch_Good_StopsDelivery(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") storeInstance.Unwatch("g", events) _, open := <-events - assert.False(t, open, "channel should be closed after Unwatch") + assertFalsef(t, open, "channel should be closed after Unwatch") - require.NoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Set("g", "k", "v")) } func TestEvents_Unwatch_Good_Idempotent(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") storeInstance.Unwatch("g", events) @@ -74,37 +72,37 @@ func TestEvents_Close_Good_ClosesWatcherChannels(t *testing.T) { storeInstance, _ := New(":memory:") events := storeInstance.Watch("g") - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) _, open := <-events - assert.False(t, open, "channel should be closed after Close") + assertFalsef(t, open, "channel should be closed after Close") } func 
TestEvents_Unwatch_Good_NilChannel(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() storeInstance.Unwatch("g", nil) } func TestEvents_Watch_Good_DeleteEvent(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") defer storeInstance.Unwatch("g", events) - require.NoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Set("g", "k", "v")) <-events - require.NoError(t, storeInstance.Delete("g", "k")) + assertNoError(t, storeInstance.Delete("g", "k")) select { case event := <-events: - assert.Equal(t, EventDelete, event.Type) - assert.Equal(t, "g", event.Group) - assert.Equal(t, "k", event.Key) - assert.Empty(t, event.Value) + assertEqual(t, EventDelete, event.Type) + assertEqual(t, "g", event.Group) + assertEqual(t, "k", event.Key) + assertEmpty(t, event.Value) case <-time.After(time.Second): t.Fatal("timed out waiting for delete event") } @@ -112,23 +110,23 @@ func TestEvents_Watch_Good_DeleteEvent(t *testing.T) { func TestEvents_Watch_Good_DeleteGroupEvent(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") defer storeInstance.Unwatch("g", events) - require.NoError(t, storeInstance.Set("g", "a", "1")) - require.NoError(t, storeInstance.Set("g", "b", "2")) + assertNoError(t, storeInstance.Set("g", "a", "1")) + assertNoError(t, storeInstance.Set("g", "b", "2")) <-events <-events - require.NoError(t, storeInstance.DeleteGroup("g")) + assertNoError(t, storeInstance.DeleteGroup("g")) select { case event := <-events: - assert.Equal(t, EventDeleteGroup, event.Type) - assert.Equal(t, "g", event.Group) - assert.Empty(t, event.Key) + assertEqual(t, EventDeleteGroup, event.Type) + assertEqual(t, "g", event.Group) + assertEmpty(t, event.Key) case 
<-time.After(time.Second): t.Fatal("timed out waiting for delete_group event") } @@ -136,7 +134,7 @@ func TestEvents_Watch_Good_DeleteGroupEvent(t *testing.T) { func TestEvents_OnChange_Good_Fires(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() var events []Event var eventsMutex sync.Mutex @@ -148,19 +146,19 @@ func TestEvents_OnChange_Good_Fires(t *testing.T) { }) defer unregister() - require.NoError(t, storeInstance.Set("g", "k", "v")) - require.NoError(t, storeInstance.Delete("g", "k")) + assertNoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Delete("g", "k")) eventsMutex.Lock() defer eventsMutex.Unlock() - require.Len(t, events, 2) - assert.Equal(t, EventSet, events[0].Type) - assert.Equal(t, EventDelete, events[1].Type) + assertLen(t, events, 2) + assertEqual(t, EventSet, events[0].Type) + assertEqual(t, EventDelete, events[1].Type) } func TestEvents_OnChange_Good_GroupFilteredCallback(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() var seen []string unregister := storeInstance.OnChange(func(event Event) { @@ -171,15 +169,15 @@ func TestEvents_OnChange_Good_GroupFilteredCallback(t *testing.T) { }) defer unregister() - require.NoError(t, storeInstance.Set("config", "theme", "dark")) - require.NoError(t, storeInstance.Set("other", "theme", "light")) + assertNoError(t, storeInstance.Set("config", "theme", "dark")) + assertNoError(t, storeInstance.Set("other", "theme", "light")) - assert.Equal(t, []string{"theme=dark"}, seen) + assertEqual(t, []string{"theme=dark"}, seen) } func TestEvents_OnChange_Good_ReentrantSubscriptionChanges(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() var ( seen []string @@ -214,29 +212,29 @@ func TestEvents_OnChange_Good_ReentrantSubscriptionChanges(t *testing.T) { }) 
defer unregisterPrimary() - require.NoError(t, storeInstance.Set("config", "first", "dark")) - require.NoError(t, storeInstance.Set("config", "second", "light")) - require.NoError(t, storeInstance.Set("config", "third", "blue")) + assertNoError(t, storeInstance.Set("config", "first", "dark")) + assertNoError(t, storeInstance.Set("config", "second", "light")) + assertNoError(t, storeInstance.Set("config", "third", "blue")) seenMutex.Lock() - assert.Equal(t, []string{"first", "second", "nested:second", "third"}, seen) + assertEqual(t, []string{"first", "second", "nested:second", "third"}, seen) seenMutex.Unlock() select { case event, open := <-nestedEvents: - require.True(t, open) - assert.Equal(t, "second", event.Key) + assertTrue(t, open) + assertEqual(t, "second", event.Key) case <-time.After(time.Second): t.Fatal("timed out waiting for nested watcher event") } _, open := <-nestedEvents - assert.False(t, open, "nested watcher should be closed after callback-driven unwatch") + assertFalsef(t, open, "nested watcher should be closed after callback-driven unwatch") } func TestEvents_Notify_Good_PopulatesTimestamp(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("config") defer storeInstance.Unwatch("config", events) @@ -245,9 +243,9 @@ func TestEvents_Notify_Good_PopulatesTimestamp(t *testing.T) { select { case event := <-events: - assert.False(t, event.Timestamp.IsZero()) - assert.Equal(t, "config", event.Group) - assert.Equal(t, "theme", event.Key) + assertFalse(t, event.Timestamp.IsZero()) + assertEqual(t, "config", event.Group) + assertEqual(t, "theme", event.Key) case <-time.After(time.Second): t.Fatal("timed out waiting for timestamped event") } @@ -255,22 +253,22 @@ func TestEvents_Notify_Good_PopulatesTimestamp(t *testing.T) { func TestEvents_Watch_Good_BufferDrops(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + 
defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") defer storeInstance.Unwatch("g", events) for i := 0; i < watcherEventBufferCapacity+8; i++ { - require.NoError(t, storeInstance.Set("g", core.Sprintf("k-%d", i), "v")) + assertNoError(t, storeInstance.Set("g", core.Sprintf("k-%d", i), "v")) } received := drainEvents(events, watcherEventBufferCapacity, time.Second) - assert.LessOrEqual(t, len(received), watcherEventBufferCapacity) + assertLessOrEqual(t, len(received), watcherEventBufferCapacity) } func TestEvents_Watch_Good_ConcurrentWatchUnwatch(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() const workers = 10 var wg sync.WaitGroup @@ -291,20 +289,20 @@ func TestEvents_Watch_Good_ConcurrentWatchUnwatch(t *testing.T) { func TestEvents_Watch_Good_ScopedStoreEventGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() scopedStore := NewScoped(storeInstance, "tenant-a") - require.NotNil(t, scopedStore) + assertNotNil(t, scopedStore) events := storeInstance.Watch("tenant-a:config") defer storeInstance.Unwatch("tenant-a:config", events) - require.NoError(t, scopedStore.SetIn("config", "theme", "dark")) + assertNoError(t, scopedStore.SetIn("config", "theme", "dark")) select { case event := <-events: - assert.Equal(t, "tenant-a:config", event.Group) - assert.Equal(t, "theme", event.Key) + assertEqual(t, "tenant-a:config", event.Group) + assertEqual(t, "theme", event.Key) case <-time.After(time.Second): t.Fatal("timed out waiting for scoped event") } @@ -312,27 +310,27 @@ func TestEvents_Watch_Good_ScopedStoreEventGroup(t *testing.T) { func TestEvents_Watch_Good_SetWithTTL(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") defer storeInstance.Unwatch("g", events) - 
require.NoError(t, storeInstance.SetWithTTL("g", "ephemeral", "v", time.Minute)) + assertNoError(t, storeInstance.SetWithTTL("g", "ephemeral", "v", time.Minute)) select { case event := <-events: - assert.Equal(t, EventSet, event.Type) - assert.Equal(t, "ephemeral", event.Key) + assertEqual(t, EventSet, event.Type) + assertEqual(t, "ephemeral", event.Key) case <-time.After(time.Second): t.Fatal("timed out waiting for TTL event") } } func TestEvents_EventType_Good_String(t *testing.T) { - assert.Equal(t, "set", EventSet.String()) - assert.Equal(t, "delete", EventDelete.String()) - assert.Equal(t, "delete_group", EventDeleteGroup.String()) - assert.Equal(t, "unknown", EventType(99).String()) + assertEqual(t, "set", EventSet.String()) + assertEqual(t, "delete", EventDelete.String()) + assertEqual(t, "delete_group", EventDeleteGroup.String()) + assertEqual(t, "unknown", EventType(99).String()) } func drainEvents(events <-chan Event, count int, timeout time.Duration) []Event { diff --git a/go.mod b/go.mod index befc5c4..7ced1bd 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,46 @@ -module dappco.re/go/core/store +module dappco.re/go/store go 1.26.0 require ( dappco.re/go/core v0.8.0-alpha.1 - github.com/klauspost/compress v1.18.5 - github.com/stretchr/testify v1.11.1 - modernc.org/sqlite v1.47.0 + dappco.re/go/core/io v0.4.2 + github.com/influxdata/influxdb-client-go/v2 v2.14.0 // Note: InfluxDB storage client; no core equivalent + github.com/klauspost/compress v1.18.5 // Note: compression codecs for storage payloads; no core equivalent + modernc.org/sqlite v1.47.0 // Note: pure-Go SQLite driver; no core equivalent +) + +require ( + github.com/andybalholm/brotli v1.2.0 // indirect + github.com/apache/arrow-go/v18 v18.1.0 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect + github.com/goccy/go-json v0.10.6 // indirect + github.com/google/flatbuffers v25.1.24+incompatible // indirect + 
github.com/google/go-cmp v0.7.0 // indirect + github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/oapi-codegen/runtime v1.0.0 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect + golang.org/x/mod v0.34.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect + gonum.org/v1/gonum v0.17.0 // indirect ) require ( - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/marcboeker/go-duckdb v1.8.5 // Note: DuckDB workspace buffer driver; no core equivalent github.com/mattn/go-isatty v0.0.20 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/sys v0.43.0 // indirect golang.org/x/tools v0.43.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect modernc.org/libc v1.70.0 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.11.0 // indirect diff --git a/go.sum b/go.sum index 731c6e5..03d158c 100644 --- a/go.sum +++ b/go.sum @@ -1,46 +1,96 @@ dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk= dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +dappco.re/go/core/io v0.4.2 h1:SHNF/xMPyFnKWWYoFW5Y56eiuGVL/mFa1lfIw/530ls= +dappco.re/go/core/io 
v0.4.2/go.mod h1:w71dukyunczLb8frT9JOd5B78PjwWQD3YAXiCt3AcPA= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/apache/arrow-go/v18 v18.1.0 h1:agLwJUiVuwXZdwPYVrlITfx7bndULJ/dggbnLFgDp/Y= +github.com/apache/arrow-go/v18 v18.1.0/go.mod h1:tigU/sIgKNXaesf5d7Y95jBBKS5KsxTqYBKXFsvKzo0= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU= +github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v25.1.24+incompatible h1:4wPqL3K7GzBd1CwyhSd3usxLKOaJN/AC6puCca6Jm7o= +github.com/google/flatbuffers v25.1.24+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4= +github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/marcboeker/go-duckdb v1.8.5 h1:tkYp+TANippy0DaIOP5OEfBEwbUINqiFqgwMQ44jME0= +github.com/marcboeker/go-duckdb v1.8.5/go.mod h1:6mK7+WQE4P4u5AFLvVBmhFxY5fvhymFptghgJX6B+/8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= +github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA= +golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod 
h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c h1:6a8FdnNk6bTXBjR4AGKFgUKuo+7GnR3FX5L7CbveeZc= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= +gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= diff --git a/import.go b/import.go new file mode 100644 index 0000000..9269f1e --- /dev/null +++ b/import.go @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "bufio" + "database/sql" + "io" + "io/fs" + + core 
"dappco.re/go/core" +) + +// localFs provides unrestricted filesystem access for import operations. +var localFs = (&core.Fs{}).New("/") + +type duckDBImportSession interface { + exec(query string, args ...any) error + queryRowScan(query string, dest any, args ...any) error +} + +type duckDBImportTransaction struct { + transaction *sql.Tx +} + +func (session duckDBImportTransaction) exec(query string, args ...any) error { + _, err := session.transaction.Exec(query, args...) + if err != nil { + return core.E("store.duckDBImportTransaction.Exec", "execute query", err) + } + return nil +} + +func (session duckDBImportTransaction) queryRowScan(query string, dest any, args ...any) error { + if err := session.transaction.QueryRow(query, args...).Scan(dest); err != nil { + return core.E("store.duckDBImportTransaction.QueryRowScan", "scan row", err) + } + return nil +} + +// ScpFunc is a callback for executing SCP file transfers. +// The function receives remote source and local destination paths. +// +// Usage example: +// +// scp := func(remote, local string) error { return exec.Command("scp", remote, local).Run() } +type ScpFunc func(remote, local string) error + +// ScpDirFunc is a callback for executing recursive SCP directory transfers. +// The function receives remote source and local destination directory paths. +// +// Usage example: +// +// scpDir := func(remote, localDir string) error { return exec.Command("scp", "-r", remote, localDir).Run() } +type ScpDirFunc func(remote, localDir string) error + +// ImportConfig holds options for the import-all operation. +// +// Usage example: +// +// cfg := store.ImportConfig{DataDir: "/Volumes/Data/lem", SkipM3: true} +type ImportConfig struct { + // SkipM3 disables pulling files from the M3 host. + // + // Usage example: + // + // cfg.SkipM3 // true + SkipM3 bool + + // DataDir is the local directory containing LEM data files. 
+ // + // Usage example: + // + // cfg.DataDir // "/Volumes/Data/lem" + DataDir string + + // M3Host is the SSH hostname for SCP operations. Defaults to "m3". + // + // Usage example: + // + // cfg.M3Host // "m3" + M3Host string + + // Scp copies a single file from the remote host. If nil, SCP is skipped. + // + // Usage example: + // + // cfg.Scp("m3:/path/file.jsonl", "/local/file.jsonl") + Scp ScpFunc + + // ScpDir copies a directory recursively from the remote host. If nil, SCP is skipped. + // + // Usage example: + // + // cfg.ScpDir("m3:/path/dir/", "/local/dir/") + ScpDir ScpDirFunc +} + +// ImportAll imports all LEM data into DuckDB from M3 and local files. +// +// Usage example: +// +// err := store.ImportAll(db, store.ImportConfig{DataDir: "/Volumes/Data/lem"}, os.Stdout) +func ImportAll(db *DuckDB, cfg ImportConfig, w io.Writer) error { + if db == nil || db.Conn() == nil { + return core.E("store.ImportAll", "database is nil", nil) + } + + m3Host := cfg.M3Host + if m3Host == "" { + m3Host = "m3" + } + + totals := make(map[string]int) + + // ── 1. 
Golden set ── + goldenPath := core.JoinPath(cfg.DataDir, "gold-15k.jsonl") + if !cfg.SkipM3 && cfg.Scp != nil { + core.Print(w, " Pulling golden set from M3...") + remote := core.Sprintf("%s:/Volumes/Data/lem/responses/gold-15k.jsonl", m3Host) + if err := cfg.Scp(remote, goldenPath); err != nil { + core.Print(w, " WARNING: could not pull golden set from M3: %v", err) + } + } + transaction, err := db.Conn().Begin() + if err != nil { + return core.E("store.ImportAll", "begin import transaction", err) + } + committed := false + defer func() { + if !committed { + _ = transaction.Rollback() + } + }() + importSession := duckDBImportTransaction{transaction: transaction} + + if isFile(goldenPath) { + if err := importSession.exec("DROP TABLE IF EXISTS golden_set"); err != nil { + return core.E("store.ImportAll", "drop golden_set", err) + } + err := importSession.exec(core.Sprintf(` + CREATE TABLE golden_set AS + SELECT + idx::INT AS idx, + seed_id::VARCHAR AS seed_id, + domain::VARCHAR AS domain, + voice::VARCHAR AS voice, + prompt::VARCHAR AS prompt, + response::VARCHAR AS response, + gen_time::DOUBLE AS gen_time, + length(response)::INT AS char_count, + length(response) - length(replace(response, ' ', '')) + 1 AS word_count + FROM read_json_auto('%s', maximum_object_size=1048576) + `, escapeSQLPath(goldenPath))) + if err != nil { + return core.E("store.ImportAll", "import golden_set", err) + } else { + var n int + if err := importSession.queryRowScan("SELECT count(*) FROM golden_set", &n); err != nil { + return core.E("store.ImportAll", "count golden_set", err) + } + totals["golden_set"] = n + core.Print(w, " golden_set: %d rows", n) + } + } + + // ── 2. 
Training examples ── + trainingDirs := []struct { + name string + files []string + }{ + {"training", []string{"training/train.jsonl", "training/valid.jsonl", "training/test.jsonl"}}, + {"training-2k", []string{"training-2k/train.jsonl", "training-2k/valid.jsonl", "training-2k/test.jsonl"}}, + {"training-expanded", []string{"training-expanded/train.jsonl", "training-expanded/valid.jsonl"}}, + {"training-book", []string{"training-book/train.jsonl", "training-book/valid.jsonl", "training-book/test.jsonl"}}, + {"training-conv", []string{"training-conv/train.jsonl", "training-conv/valid.jsonl", "training-conv/test.jsonl"}}, + {"gold-full", []string{"gold-full/train.jsonl", "gold-full/valid.jsonl"}}, + {"sovereignty-gold", []string{"sovereignty-gold/train.jsonl", "sovereignty-gold/valid.jsonl"}}, + {"composure-lessons", []string{"composure-lessons/train.jsonl", "composure-lessons/valid.jsonl"}}, + {"watts-full", []string{"watts-full/train.jsonl", "watts-full/valid.jsonl"}}, + {"watts-expanded", []string{"watts-expanded/train.jsonl", "watts-expanded/valid.jsonl"}}, + {"watts-composure", []string{"watts-composure-merged/train.jsonl", "watts-composure-merged/valid.jsonl"}}, + {"western-fresh", []string{"western-fresh/train.jsonl", "western-fresh/valid.jsonl"}}, + {"deepseek-soak", []string{"deepseek-western-soak/train.jsonl", "deepseek-western-soak/valid.jsonl"}}, + {"russian-bridge", []string{"russian-bridge/train.jsonl", "russian-bridge/valid.jsonl"}}, + } + + trainingRoot := cfg.DataDir + + if !cfg.SkipM3 && cfg.Scp != nil { + core.Print(w, " Pulling training sets from M3...") + for _, trainingDir := range trainingDirs { + for _, relativePath := range trainingDir.files { + localPath := core.JoinPath(trainingRoot, relativePath) + if result := localFs.EnsureDir(core.PathDir(localPath)); !result.OK { + return core.E("store.ImportAll", "ensure training directory", result.Value.(error)) + } + remote := core.Sprintf("%s:/Volumes/Data/lem/%s", m3Host, relativePath) + _ = 
cfg.Scp(remote, localPath) // ignore errors, file might not exist + } + } + } + + if err := importSession.exec("DROP TABLE IF EXISTS training_examples"); err != nil { + return core.E("store.ImportAll", "drop training_examples", err) + } + if err := importSession.exec(` + CREATE TABLE training_examples ( + source VARCHAR, + split VARCHAR, + prompt TEXT, + response TEXT, + num_turns INT, + full_messages TEXT, + char_count INT + ) + `); err != nil { + return core.E("store.ImportAll", "create training_examples", err) + } + + trainingTotal := 0 + for _, trainingDir := range trainingDirs { + for _, relativePath := range trainingDir.files { + localPath := core.JoinPath(trainingRoot, relativePath) + if !isFile(localPath) { + continue + } + + split := "train" + if core.Contains(relativePath, "valid") { + split = "valid" + } else if core.Contains(relativePath, "test") { + split = "test" + } + + n, err := importTrainingFile(importSession, localPath, trainingDir.name, split) + if err != nil { + return core.E("store.ImportAll", core.Sprintf("import training file %s", localPath), err) + } + trainingTotal += n + } + } + totals["training_examples"] = trainingTotal + core.Print(w, " training_examples: %d rows", trainingTotal) + + // ── 3. 
Benchmark results ── + benchLocal := core.JoinPath(cfg.DataDir, "benchmarks") + if result := localFs.EnsureDir(benchLocal); !result.OK { + return core.E("store.ImportAll", core.Sprintf("ensure benchmark directory %s", benchLocal), result.Value.(error)) + } + + if !cfg.SkipM3 { + core.Print(w, " Pulling benchmarks from M3...") + if cfg.Scp != nil { + for _, benchmarkName := range []string{"truthfulqa", "gsm8k", "do_not_answer", "toxigen"} { + remote := core.Sprintf("%s:/Volumes/Data/lem/benchmarks/%s.jsonl", m3Host, benchmarkName) + _ = cfg.Scp(remote, core.JoinPath(benchLocal, benchmarkName+".jsonl")) + } + } + if cfg.ScpDir != nil { + for _, benchmarkSubdirectory := range []string{"results", "scale_results", "cross_arch_results", "deepseek-r1-7b"} { + localSubdirectory := core.JoinPath(benchLocal, benchmarkSubdirectory) + if result := localFs.EnsureDir(localSubdirectory); !result.OK { + return core.E("store.ImportAll", core.Sprintf("ensure benchmark subdirectory %s", localSubdirectory), result.Value.(error)) + } + remote := core.Sprintf("%s:/Volumes/Data/lem/benchmarks/%s/", m3Host, benchmarkSubdirectory) + _ = cfg.ScpDir(remote, localSubdirectory+"/") + } + } + } + + if err := importSession.exec("DROP TABLE IF EXISTS benchmark_results"); err != nil { + return core.E("store.ImportAll", "drop benchmark_results", err) + } + if err := importSession.exec(` + CREATE TABLE benchmark_results ( + source VARCHAR, id VARCHAR, benchmark VARCHAR, model VARCHAR, + prompt TEXT, response TEXT, elapsed_seconds DOUBLE, domain VARCHAR + ) + `); err != nil { + return core.E("store.ImportAll", "create benchmark_results", err) + } + + benchTotal := 0 + for _, benchmarkSubdirectory := range []string{"results", "scale_results", "cross_arch_results", "deepseek-r1-7b"} { + resultDir := core.JoinPath(benchLocal, benchmarkSubdirectory) + matches := core.PathGlob(core.JoinPath(resultDir, "*.jsonl")) + for _, jsonFile := range matches { + n, err := importBenchmarkFile(importSession, jsonFile, 
benchmarkSubdirectory) + if err != nil { + return core.E("store.ImportAll", core.Sprintf("import benchmark file %s", jsonFile), err) + } + benchTotal += n + } + } + + // Also import standalone benchmark files. + for _, benchmarkFile := range []string{"lem_bench", "lem_ethics", "lem_ethics_allen", "instruction_tuned", "abliterated", "base_pt"} { + localPath := core.JoinPath(benchLocal, benchmarkFile+".jsonl") + if !isFile(localPath) { + if !cfg.SkipM3 && cfg.Scp != nil { + remote := core.Sprintf("%s:/Volumes/Data/lem/benchmarks/%s.jsonl", m3Host, benchmarkFile) + _ = cfg.Scp(remote, localPath) + } + } + if isFile(localPath) { + n, err := importBenchmarkFile(importSession, localPath, "benchmark") + if err != nil { + return core.E("store.ImportAll", core.Sprintf("import benchmark file %s", localPath), err) + } + benchTotal += n + } + } + totals["benchmark_results"] = benchTotal + core.Print(w, " benchmark_results: %d rows", benchTotal) + + // ── 4. Benchmark questions ── + if err := importSession.exec("DROP TABLE IF EXISTS benchmark_questions"); err != nil { + return core.E("store.ImportAll", "drop benchmark_questions", err) + } + if err := importSession.exec(` + CREATE TABLE benchmark_questions ( + benchmark VARCHAR, id VARCHAR, question TEXT, + best_answer TEXT, correct_answers TEXT, incorrect_answers TEXT, category VARCHAR + ) + `); err != nil { + return core.E("store.ImportAll", "create benchmark_questions", err) + } + + benchQTotal := 0 + for _, bname := range []string{"truthfulqa", "gsm8k", "do_not_answer", "toxigen"} { + local := core.JoinPath(benchLocal, bname+".jsonl") + if isFile(local) { + n, err := importBenchmarkQuestions(importSession, local, bname) + if err != nil { + return core.E("store.ImportAll", core.Sprintf("import benchmark questions %s", local), err) + } + benchQTotal += n + } + } + totals["benchmark_questions"] = benchQTotal + core.Print(w, " benchmark_questions: %d rows", benchQTotal) + + // ── 5. 
Seeds ── + if err := importSession.exec("DROP TABLE IF EXISTS seeds"); err != nil { + return core.E("store.ImportAll", "drop seeds", err) + } + if err := importSession.exec(` + CREATE TABLE seeds ( + source_file VARCHAR, region VARCHAR, seed_id VARCHAR, domain VARCHAR, prompt TEXT + ) + `); err != nil { + return core.E("store.ImportAll", "create seeds", err) + } + + seedTotal := 0 + seedDirs := []string{core.JoinPath(cfg.DataDir, "seeds")} + for _, seedDir := range seedDirs { + if !isDir(seedDir) { + continue + } + n, err := importSeeds(importSession, seedDir) + if err != nil { + return core.E("store.ImportAll", core.Sprintf("import seeds %s", seedDir), err) + } + seedTotal += n + } + totals["seeds"] = seedTotal + core.Print(w, " seeds: %d rows", seedTotal) + + if err := transaction.Commit(); err != nil { + return core.E("store.ImportAll", "commit import transaction", err) + } + committed = true + + // ── Summary ── + grandTotal := 0 + core.Print(w, "\n%s", repeat("=", 50)) + core.Print(w, "LEM Database Import Complete") + core.Print(w, "%s", repeat("=", 50)) + for table, count := range totals { + core.Print(w, " %-25s %8d", table, count) + grandTotal += count + } + core.Print(w, " %s", repeat("-", 35)) + core.Print(w, " %-25s %8d", "TOTAL", grandTotal) + core.Print(w, "\nDatabase: %s", db.Path()) + + return nil +} + +func importTrainingFile(db duckDBImportSession, path, source, split string) (int, error) { + r := localFs.Open(path) + if !r.OK { + return 0, core.E("store.importTrainingFile", core.Sprintf("open %s", path), r.Value.(error)) + } + f := r.Value.(io.ReadCloser) + defer func() { _ = f.Close() }() + + count := 0 + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 1024*1024), 1024*1024) + + lineNumber := 0 + for scanner.Scan() { + lineNumber++ + var rec struct { + Messages []ChatMessage `json:"messages"` + } + if r := core.JSONUnmarshal(scanner.Bytes(), &rec); !r.OK { + parseErr, _ := r.Value.(error) + return count, 
core.E("store.importTrainingFile", core.Sprintf("parse %s line %d", path, lineNumber), parseErr) + } + + prompt := "" + response := "" + assistantCount := 0 + for _, m := range rec.Messages { + if m.Role == "user" && prompt == "" { + prompt = m.Content + } + if m.Role == "assistant" { + if response == "" { + response = m.Content + } + assistantCount++ + } + } + + msgsJSON := core.JSONMarshalString(rec.Messages) + if err := db.exec(`INSERT INTO training_examples VALUES (?, ?, ?, ?, ?, ?, ?)`, + source, split, prompt, response, assistantCount, msgsJSON, len(response)); err != nil { + return count, core.E("store.importTrainingFile", "insert training example", err) + } + count++ + } + if err := scanner.Err(); err != nil { + return count, core.E("store.importTrainingFile", "scan training file", err) + } + return count, nil +} + +func importBenchmarkFile(db duckDBImportSession, path, source string) (int, error) { + r := localFs.Open(path) + if !r.OK { + return 0, core.E("store.importBenchmarkFile", core.Sprintf("open %s", path), r.Value.(error)) + } + f := r.Value.(io.ReadCloser) + defer func() { _ = f.Close() }() + + count := 0 + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 1024*1024), 1024*1024) + + lineNumber := 0 + for scanner.Scan() { + lineNumber++ + var rec map[string]any + if r := core.JSONUnmarshal(scanner.Bytes(), &rec); !r.OK { + parseErr, _ := r.Value.(error) + return count, core.E("store.importBenchmarkFile", core.Sprintf("parse %s line %d", path, lineNumber), parseErr) + } + + if err := db.exec(`INSERT INTO benchmark_results VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + source, + core.Sprint(rec["id"]), + strOrEmpty(rec, "benchmark"), + strOrEmpty(rec, "model"), + strOrEmpty(rec, "prompt"), + strOrEmpty(rec, "response"), + floatOrZero(rec, "elapsed_seconds"), + strOrEmpty(rec, "domain"), + ); err != nil { + return count, core.E("store.importBenchmarkFile", "insert benchmark result", err) + } + count++ + } + if err := scanner.Err(); err != nil { + 
return count, core.E("store.importBenchmarkFile", "scan benchmark file", err) + } + return count, nil +} + +func importBenchmarkQuestions(db duckDBImportSession, path, benchmark string) (int, error) { + r := localFs.Open(path) + if !r.OK { + return 0, core.E("store.importBenchmarkQuestions", core.Sprintf("open %s", path), r.Value.(error)) + } + f := r.Value.(io.ReadCloser) + defer func() { _ = f.Close() }() + + count := 0 + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 1024*1024), 1024*1024) + + lineNumber := 0 + for scanner.Scan() { + lineNumber++ + var rec map[string]any + if r := core.JSONUnmarshal(scanner.Bytes(), &rec); !r.OK { + parseErr, _ := r.Value.(error) + return count, core.E("store.importBenchmarkQuestions", core.Sprintf("parse %s line %d", path, lineNumber), parseErr) + } + + correctJSON := core.JSONMarshalString(rec["correct_answers"]) + incorrectJSON := core.JSONMarshalString(rec["incorrect_answers"]) + + if err := db.exec(`INSERT INTO benchmark_questions VALUES (?, ?, ?, ?, ?, ?, ?)`, + benchmark, + core.Sprint(rec["id"]), + strOrEmpty(rec, "question"), + strOrEmpty(rec, "best_answer"), + correctJSON, + incorrectJSON, + strOrEmpty(rec, "category"), + ); err != nil { + return count, core.E("store.importBenchmarkQuestions", "insert benchmark question", err) + } + count++ + } + if err := scanner.Err(); err != nil { + return count, core.E("store.importBenchmarkQuestions", "scan benchmark questions", err) + } + return count, nil +} + +func importSeeds(db duckDBImportSession, seedDir string) (int, error) { + count := 0 + if err := walkDir(seedDir, func(path string) error { + if !core.HasSuffix(path, ".json") { + return nil + } + + rel := core.TrimPrefix(path, seedDir+"/") + region := core.TrimSuffix(core.PathBase(path), ".json") + + readResult := localFs.Read(path) + if !readResult.OK { + return core.E("store.importSeeds", core.Sprintf("read seed file %s", rel), readResult.Value.(error)) + } + data := []byte(readResult.Value.(string)) + + 
// Try parsing as array or object with prompts/seeds field. + var seedsList []any + var raw any + if r := core.JSONUnmarshal(data, &raw); !r.OK { + err, _ := r.Value.(error) + return core.E("store.importSeeds", core.Sprintf("parse seed file %s", rel), err) + } + + switch v := raw.(type) { + case []any: + seedsList = v + case map[string]any: + if prompts, ok := v["prompts"].([]any); ok { + seedsList = prompts + } else if seeds, ok := v["seeds"].([]any); ok { + seedsList = seeds + } + } + + for _, s := range seedsList { + switch seed := s.(type) { + case map[string]any: + prompt := strOrEmpty(seed, "prompt") + if prompt == "" { + prompt = strOrEmpty(seed, "text") + } + if prompt == "" { + prompt = strOrEmpty(seed, "question") + } + if err := db.exec(`INSERT INTO seeds VALUES (?, ?, ?, ?, ?)`, + rel, region, + strOrEmpty(seed, "seed_id"), + strOrEmpty(seed, "domain"), + prompt, + ); err != nil { + return core.E("store.importSeeds", "insert seed prompt", err) + } + count++ + case string: + if err := db.exec(`INSERT INTO seeds VALUES (?, ?, ?, ?, ?)`, + rel, region, "", "", seed); err != nil { + return core.E("store.importSeeds", "insert seed string", err) + } + count++ + } + } + return nil + }); err != nil { + return count, err + } + return count, nil +} + +// walkDir recursively visits all regular files under root, calling fn for each. 
+func walkDir(root string, fn func(path string) error) error { + r := localFs.List(root) + if !r.OK { + return core.E("store.walkDir", core.Sprintf("list %s", root), r.Value.(error)) + } + entries, ok := r.Value.([]fs.DirEntry) + if !ok { + return core.E("store.walkDir", core.Sprintf("list %s returned invalid entries", root), nil) + } + for _, entry := range entries { + full := core.JoinPath(root, entry.Name()) + if entry.IsDir() { + if err := walkDir(full, fn); err != nil { + return err + } + } else { + if err := fn(full); err != nil { + return err + } + } + } + return nil +} + +// strOrEmpty extracts a string value from a map, returning an empty string if +// the key is absent. +func strOrEmpty(m map[string]any, key string) string { + if v, ok := m[key]; ok { + return core.Sprint(v) + } + return "" +} + +// floatOrZero extracts a float64 value from a map, returning zero if the key +// is absent or not a number. +func floatOrZero(m map[string]any, key string) float64 { + if v, ok := m[key]; ok { + if f, ok := v.(float64); ok { + return f + } + } + return 0 +} + +// repeat returns a string consisting of count copies of s. It avoids importing +// strings because repository conventions route string helpers through core. +func repeat(s string, count int) string { + if count <= 0 { + return "" + } + b := core.NewBuilder() + for range count { + b.WriteString(s) + } + return b.String() +} + +// escapeSQLPath escapes single quotes in a file path for use in DuckDB SQL +// string literals. +func escapeSQLPath(p string) string { + return core.Replace(p, "'", "''") +} + +// isFile returns true if the path exists and is a regular file. +func isFile(path string) bool { + return localFs.IsFile(path) +} + +// isDir returns true if the path exists and is a directory. 
+func isDir(path string) bool { + return localFs.IsDir(path) +} diff --git a/import_export_test.go b/import_export_test.go new file mode 100644 index 0000000..b510085 --- /dev/null +++ b/import_export_test.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import "testing" + +func TestImportExport_Import_Good_CSVAndJSONIngestion(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("import-export-good") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("findings.csv", "tool,severity\ngosec,high\ngolint,low\n")) + assertNoError(t, medium.Write("users.json", `{"entries":[{"name":"Alice"},{"name":"Bob"}]}`)) + + assertNoError(t, Import(workspace, medium, "findings.csv")) + assertNoError(t, Import(workspace, medium, "users.json")) + + assertEqual(t, map[string]any{"findings": 2, "users": 2}, workspace.Aggregate()) +} + +func TestImportExport_Import_Bad_MalformedPayload(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("import-export-bad") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("broken.json", `{"entries":[{"name":"Alice"}`)) + + assertError(t, Import(workspace, medium, "broken.json")) + + count, err := workspace.Count() + assertNoError(t, err) + assertEqual(t, 0, count) +} + +func TestImportExport_Import_Ugly_EmptyPayload(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("import-export-ugly") + assertNoError(t, err) + defer 
workspace.Discard() + + medium := newMemoryMedium() + for _, path := range []string{"empty.csv", "empty.json", "empty.jsonl"} { + assertNoError(t, medium.Write(path, "")) + assertNoError(t, Import(workspace, medium, path)) + } + + assertEqual(t, map[string]any{}, workspace.Aggregate()) +} diff --git a/import_test.go b/import_test.go new file mode 100644 index 0000000..1690604 --- /dev/null +++ b/import_test.go @@ -0,0 +1,70 @@ +package store + +import ( + "testing" + + core "dappco.re/go/core" +) + +type importSessionStub struct { + inserts int +} + +func (session *importSessionStub) exec(string, ...any) error { + session.inserts++ + return nil +} + +func (session *importSessionStub) queryRowScan(string, any, ...any) error { + return nil +} + +func TestImport_ImportTrainingFile_Bad_MalformedJSONL(t *testing.T) { + path := testPath(t, "training.jsonl") + requireCoreWriteBytes(t, path, []byte("{\"messages\":[]}\n{broken\n")) + session := &importSessionStub{} + + count, err := importTrainingFile(session, path, "training", "train") + + assertError(t, err) + assertContainsString(t, err.Error(), "line 2") + assertEqual(t, 1, count) + assertEqual(t, 1, session.inserts) +} + +func TestImport_ImportBenchmarkFile_Bad_MalformedJSONL(t *testing.T) { + path := testPath(t, "benchmark.jsonl") + requireCoreWriteBytes(t, path, []byte("{\"id\":\"row-1\"}\n{broken\n")) + session := &importSessionStub{} + + count, err := importBenchmarkFile(session, path, "benchmark") + + assertError(t, err) + assertContainsString(t, err.Error(), "line 2") + assertEqual(t, 1, count) + assertEqual(t, 1, session.inserts) +} + +func TestImport_ImportBenchmarkQuestions_Bad_MalformedJSONL(t *testing.T) { + path := testPath(t, "questions.jsonl") + requireCoreWriteBytes(t, path, []byte("{\"id\":\"q-1\"}\n{broken\n")) + session := &importSessionStub{} + + count, err := importBenchmarkQuestions(session, path, "truthfulqa") + + assertError(t, err) + assertContainsString(t, err.Error(), "line 2") + 
assertEqual(t, 1, count) + assertEqual(t, 1, session.inserts) +} + +func TestImport_ImportSeeds_Bad_WalkFailure(t *testing.T) { + session := &importSessionStub{} + + count, err := importSeeds(session, core.JoinPath(t.TempDir(), "missing-seeds")) + + assertError(t, err) + assertContainsString(t, err.Error(), "store.walkDir") + assertEqual(t, 0, count) + assertEqual(t, 0, session.inserts) +} diff --git a/inventory.go b/inventory.go new file mode 100644 index 0000000..bdbb437 --- /dev/null +++ b/inventory.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "io" + + core "dappco.re/go/core" +) + +// TargetTotal is the golden set target size used for progress reporting. +// +// Usage example: +// +// pct := float64(count) / float64(store.TargetTotal) * 100 +const TargetTotal = 15000 + +// duckDBTableOrder defines the canonical display order for DuckDB inventory +// tables. +var duckDBTableOrder = []string{ + "golden_set", "expansion_prompts", "seeds", "prompts", + "training_examples", "gemini_responses", "benchmark_questions", + "benchmark_results", "validations", TableCheckpointScores, + TableProbeResults, "scoring_results", +} + +// duckDBTableDetail holds extra context for a single table beyond its row count. +type duckDBTableDetail struct { + notes []string +} + +// PrintDuckDBInventory queries all known DuckDB tables and prints a formatted +// inventory with row counts, detail breakdowns, and a grand total. 
+// +// Usage example: +// +// err := store.PrintDuckDBInventory(db, os.Stdout) +func PrintDuckDBInventory(db *DuckDB, w io.Writer) error { + counts, err := db.TableCounts() + if err != nil { + return core.E("store.PrintDuckDBInventory", "table counts", err) + } + + details := gatherDuckDBDetails(db, counts) + + core.Print(w, "DuckDB Inventory") + core.Print(w, "%s", repeat("-", 52)) + + grand := 0 + for _, table := range duckDBTableOrder { + count, ok := counts[table] + if !ok { + continue + } + grand += count + line := core.Sprintf(" %-24s %8d rows", table, count) + + if d, has := details[table]; has && len(d.notes) > 0 { + line += core.Sprintf(" (%s)", core.Join(", ", d.notes...)) + } + core.Print(w, "%s", line) + } + + core.Print(w, "%s", repeat("-", 52)) + core.Print(w, " %-24s %8d rows", "TOTAL", grand) + + return nil +} + +// gatherDuckDBDetails runs per-table detail queries and returns annotations +// keyed by table name. Errors on individual queries are silently ignored so +// the inventory always prints. 
+func gatherDuckDBDetails(db *DuckDB, counts map[string]int) map[string]*duckDBTableDetail { + details := make(map[string]*duckDBTableDetail) + + // golden_set: progress towards target + if count, ok := counts["golden_set"]; ok { + pct := float64(count) / float64(TargetTotal) * 100 + details["golden_set"] = &duckDBTableDetail{ + notes: []string{core.Sprintf("%.1f%% of %d target", pct, TargetTotal)}, + } + } + + // training_examples: distinct sources + if _, ok := counts["training_examples"]; ok { + rows, err := db.QueryRows("SELECT COUNT(DISTINCT source) AS n FROM training_examples") + if err == nil && len(rows) > 0 { + n := duckDBToInt(rows[0]["n"]) + details["training_examples"] = &duckDBTableDetail{ + notes: []string{core.Sprintf("%d sources", n)}, + } + } + } + + // prompts: distinct domains and voices + if _, ok := counts["prompts"]; ok { + d := &duckDBTableDetail{} + rows, err := db.QueryRows("SELECT COUNT(DISTINCT domain) AS n FROM prompts") + if err == nil && len(rows) > 0 { + d.notes = append(d.notes, core.Sprintf("%d domains", duckDBToInt(rows[0]["n"]))) + } + rows, err = db.QueryRows("SELECT COUNT(DISTINCT voice) AS n FROM prompts") + if err == nil && len(rows) > 0 { + d.notes = append(d.notes, core.Sprintf("%d voices", duckDBToInt(rows[0]["n"]))) + } + if len(d.notes) > 0 { + details["prompts"] = d + } + } + + // gemini_responses: group by source_model + if _, ok := counts["gemini_responses"]; ok { + rows, err := db.QueryRows( + "SELECT source_model, COUNT(*) AS n FROM gemini_responses GROUP BY source_model ORDER BY n DESC", + ) + if err == nil && len(rows) > 0 { + var parts []string + for _, row := range rows { + model := duckDBStrVal(row, "source_model") + n := duckDBToInt(row["n"]) + if model != "" { + parts = append(parts, core.Sprintf("%s:%d", model, n)) + } + } + if len(parts) > 0 { + details["gemini_responses"] = &duckDBTableDetail{notes: parts} + } + } + } + + // benchmark_results: distinct source categories + if _, ok := 
counts["benchmark_results"]; ok { + rows, err := db.QueryRows("SELECT COUNT(DISTINCT source) AS n FROM benchmark_results") + if err == nil && len(rows) > 0 { + n := duckDBToInt(rows[0]["n"]) + details["benchmark_results"] = &duckDBTableDetail{ + notes: []string{core.Sprintf("%d categories", n)}, + } + } + } + + return details +} + +// duckDBToInt converts a DuckDB value to int. DuckDB returns integers as int64 +// (not float64 like InfluxDB), so we handle both types. +func duckDBToInt(v any) int { + switch n := v.(type) { + case int64: + return int(n) + case int32: + return int(n) + case float64: + return int(n) + default: + return 0 + } +} + +// duckDBStrVal extracts a string value from a row map. +func duckDBStrVal(row map[string]any, key string) string { + if v, ok := row[key]; ok { + return core.Sprint(v) + } + return "" +} diff --git a/journal.go b/journal.go index cb90dc7..8305a4d 100644 --- a/journal.go +++ b/journal.go @@ -3,7 +3,6 @@ package store import ( "database/sql" "regexp" - "strconv" "time" core "dappco.re/go/core" @@ -34,12 +33,22 @@ var ( regexp.MustCompile(`r\.(?:_bucket|bucket|bucket_name)\s*==\s*"([^"]+)"`), regexp.MustCompile(`r\[\s*"(?:_bucket|bucket|bucket_name)"\s*\]\s*==\s*"([^"]+)"`), } - journalEqualityPatterns = []*regexp.Regexp{ + journalStringEqualityPatterns = []*regexp.Regexp{ regexp.MustCompile(`r\.([a-zA-Z0-9_:-]+)\s*==\s*"([^"]+)"`), regexp.MustCompile(`r\[\s*"([a-zA-Z0-9_:-]+)"\s*\]\s*==\s*"([^"]+)"`), } + journalScalarEqualityPatterns = []*regexp.Regexp{ + regexp.MustCompile(`r\.([a-zA-Z0-9_:-]+)\s*==\s*(true|false|-?[0-9]+(?:\.[0-9]+)?)`), + regexp.MustCompile(`r\[\s*"([a-zA-Z0-9_:-]+)"\s*\]\s*==\s*(true|false|-?[0-9]+(?:\.[0-9]+)?)`), + } ) +type journalEqualityFilter struct { + columnName string + filterValue any + stringCompare bool +} + type journalExecutor interface { Exec(query string, args ...any) (sql.Result, error) } @@ -137,7 +146,7 @@ func (storeInstance *Store) queryJournalRows(query string, arguments ...any) cor 
if err != nil { return core.Result{Value: core.E("store.QueryJournal", "query rows", err), OK: false} } - defer rows.Close() + defer func() { _ = rows.Close() }() rowMaps, err := queryRowsAsMaps(rows) if err != nil { @@ -191,20 +200,15 @@ func (storeInstance *Store) queryJournalFromFlux(flux string) (string, []any, er } } - for _, pattern := range journalEqualityPatterns { - matches := pattern.FindAllStringSubmatch(flux, -1) - for _, match := range matches { - if len(match) < 3 { - continue - } - columnName := match[1] - filterValue := match[2] - if columnName == "_measurement" || columnName == "measurement" || columnName == "_bucket" || columnName == "bucket" || columnName == "bucket_name" { - continue - } + for _, filter := range journalEqualityFilters(flux) { + if filter.stringCompare { queryBuilder.WriteString(" AND (CAST(json_extract(tags_json, '$.\"' || ? || '\"') AS TEXT) = ? OR CAST(json_extract(fields_json, '$.\"' || ? || '\"') AS TEXT) = ?)") - queryArguments = append(queryArguments, columnName, filterValue, columnName, filterValue) + queryArguments = append(queryArguments, filter.columnName, filter.filterValue, filter.columnName, filter.filterValue) + continue } + + queryBuilder.WriteString(" AND json_extract(fields_json, '$.\"' || ? 
|| '\"') = ?") + queryArguments = append(queryArguments, filter.columnName, filter.filterValue) } queryBuilder.WriteString(" ORDER BY committed_at, entry_id") @@ -318,7 +322,7 @@ func parseFluxTime(value string) (time.Time, error) { if value == "" { return time.Time{}, core.E("store.parseFluxTime", "range value is empty", nil) } - value = firstOrEmptyString(core.Split(value, ",")) + value = firstStringOrEmpty(core.Split(value, ",")) value = core.Trim(value) if core.HasPrefix(value, "time(v:") && core.HasSuffix(value, ")") { value = core.Trim(core.TrimSuffix(core.TrimPrefix(value, "time(v:"), ")")) @@ -330,7 +334,7 @@ func parseFluxTime(value string) (time.Time, error) { return time.Now(), nil } if core.HasSuffix(value, "d") { - days, err := strconv.Atoi(core.TrimSuffix(value, "d")) + days, err := parseJournalInt64(core.TrimSuffix(value, "d")) if err != nil { return time.Time{}, err } @@ -364,14 +368,6 @@ func firstQuotedSubmatch(patterns []*regexp.Regexp, value string) string { return "" } -func regexpSubmatch(pattern *regexp.Regexp, value string, index int) string { - match := pattern.FindStringSubmatch(value) - if len(match) <= index { - return "" - } - return match[index] -} - func queryRowsAsMaps(rows *sql.Rows) ([]map[string]any, error) { columnNames, err := rows.Columns() if err != nil { @@ -430,6 +426,156 @@ func normaliseRowValue(value any) any { } } +func journalEqualityFilters(flux string) []journalEqualityFilter { + var filters []journalEqualityFilter + appendFilter := func(columnName string, filterValue any, stringCompare bool) { + if columnName == "_measurement" || columnName == "measurement" || columnName == "_bucket" || columnName == "bucket" || columnName == "bucket_name" { + return + } + filters = append(filters, journalEqualityFilter{ + columnName: columnName, + filterValue: filterValue, + stringCompare: stringCompare, + }) + } + + for _, pattern := range journalStringEqualityPatterns { + matches := pattern.FindAllStringSubmatch(flux, -1) + for _, 
match := range matches { + if len(match) < 3 { + continue + } + appendFilter(match[1], match[2], true) + } + } + + for _, pattern := range journalScalarEqualityPatterns { + matches := pattern.FindAllStringSubmatch(flux, -1) + for _, match := range matches { + if len(match) < 3 { + continue + } + filterValue, ok := parseJournalScalarValue(match[2]) + if !ok { + continue + } + appendFilter(match[1], filterValue, false) + } + } + + return filters +} + +func parseJournalScalarValue(value string) (any, bool) { + switch value { + case "true": + return true, true + case "false": + return false, true + } + + if integerValue, err := parseJournalInt64(value); err == nil { + return integerValue, true + } + if floatValue, err := parseJournalFloat64(value); err == nil { + return floatValue, true + } + return nil, false +} + +func parseJournalInt64(value string) (int64, error) { + if value == "" { + return 0, core.E("store.parseJournalInt64", "integer value is empty", nil) + } + + negative := false + index := 0 + if value[0] == '-' || value[0] == '+' { + negative = value[0] == '-' + index++ + if index == len(value) { + return 0, core.E("store.parseJournalInt64", "integer value has no digits", nil) + } + } + + limit := uint64(1<<63 - 1) + if negative { + limit = uint64(1 << 63) + } + + var parsed uint64 + for ; index < len(value); index++ { + character := value[index] + if character < '0' || character > '9' { + return 0, core.E("store.parseJournalInt64", "integer value contains non-digit characters", nil) + } + digit := uint64(character - '0') + if parsed > (limit-digit)/10 { + return 0, core.E("store.parseJournalInt64", "integer value is out of range", nil) + } + parsed = parsed*10 + digit + } + + if negative { + if parsed == uint64(1<<63) { + return -1 << 63, nil + } + return -int64(parsed), nil + } + return int64(parsed), nil +} + +func parseJournalFloat64(value string) (float64, error) { + if value == "" { + return 0, core.E("store.parseJournalFloat64", "float value is 
empty", nil) + } + + negative := false + index := 0 + if value[0] == '-' || value[0] == '+' { + negative = value[0] == '-' + index++ + if index == len(value) { + return 0, core.E("store.parseJournalFloat64", "float value has no digits", nil) + } + } + + var parsed float64 + digits := 0 + for index < len(value) && value[index] >= '0' && value[index] <= '9' { + parsed = parsed*10 + float64(value[index]-'0') + if parsed > maxJournalFloat64 { + return 0, core.E("store.parseJournalFloat64", "float value is out of range", nil) + } + digits++ + index++ + } + + if index < len(value) && value[index] == '.' { + index++ + scale := 0.1 + for index < len(value) && value[index] >= '0' && value[index] <= '9' { + parsed += float64(value[index]-'0') * scale + scale /= 10 + digits++ + index++ + } + } + + if digits == 0 { + return 0, core.E("store.parseJournalFloat64", "float value has no digits", nil) + } + if index != len(value) { + return 0, core.E("store.parseJournalFloat64", "float value contains invalid characters", nil) + } + if negative { + return -parsed, nil + } + return parsed, nil +} + +const maxJournalFloat64 = 1.79769313486231570814527423731704357e+308 + func cloneAnyMap(input map[string]any) map[string]any { if input == nil { return map[string]any{} diff --git a/journal_test.go b/journal_test.go index 1bdcb05..9a706a8 100644 --- a/journal_test.go +++ b/journal_test.go @@ -3,72 +3,67 @@ package store import ( "testing" "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestJournal_CommitToJournal_Good_WithQueryJournalSQL(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() first := storeInstance.CommitToJournal("session-a", map[string]any{"like": 4}, map[string]string{"workspace": "session-a"}) second := storeInstance.CommitToJournal("session-b", 
map[string]any{"profile_match": 2}, map[string]string{"workspace": "session-b"}) - require.True(t, first.OK, "first journal commit failed: %v", first.Value) - require.True(t, second.OK, "second journal commit failed: %v", second.Value) + assertTruef(t, first.OK, "first journal commit failed: %v", first.Value) + assertTruef(t, second.OK, "second journal commit failed: %v", second.Value) rows := requireResultRows( t, storeInstance.QueryJournal("SELECT bucket_name, measurement, fields_json, tags_json FROM journal_entries ORDER BY entry_id"), ) - require.Len(t, rows, 2) - assert.Equal(t, "events", rows[0]["bucket_name"]) - assert.Equal(t, "session-a", rows[0]["measurement"]) + assertLen(t, rows, 2) + assertEqual(t, "events", rows[0]["bucket_name"]) + assertEqual(t, "session-a", rows[0]["measurement"]) fields, ok := rows[0]["fields"].(map[string]any) - require.True(t, ok, "unexpected fields type: %T", rows[0]["fields"]) - assert.Equal(t, float64(4), fields["like"]) + assertTruef(t, ok, "unexpected fields type: %T", rows[0]["fields"]) + assertEqual(t, float64(4), fields["like"]) tags, ok := rows[1]["tags"].(map[string]string) - require.True(t, ok, "unexpected tags type: %T", rows[1]["tags"]) - assert.Equal(t, "session-b", tags["workspace"]) + assertTruef(t, ok, "unexpected tags type: %T", rows[1]["tags"]) + assertEqual(t, "session-b", tags["workspace"]) } func TestJournal_CommitToJournal_Good_ResultCopiesInputMaps(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() fields := map[string]any{"like": 4} tags := map[string]string{"workspace": "session-a"} result := storeInstance.CommitToJournal("session-a", fields, tags) - require.True(t, result.OK, "journal commit failed: %v", result.Value) + assertTruef(t, result.OK, "journal commit failed: %v", result.Value) fields["like"] = 99 
tags["workspace"] = "session-b" value, ok := result.Value.(map[string]any) - require.True(t, ok, "unexpected result type: %T", result.Value) + assertTruef(t, ok, "unexpected result type: %T", result.Value) resultFields, ok := value["fields"].(map[string]any) - require.True(t, ok, "unexpected fields type: %T", value["fields"]) - assert.Equal(t, 4, resultFields["like"]) + assertTruef(t, ok, "unexpected fields type: %T", value["fields"]) + assertEqual(t, 4, resultFields["like"]) resultTags, ok := value["tags"].(map[string]string) - require.True(t, ok, "unexpected tags type: %T", value["tags"]) - assert.Equal(t, "session-a", resultTags["workspace"]) + assertTruef(t, ok, "unexpected tags type: %T", value["tags"]) + assertEqual(t, "session-a", resultTags["workspace"]) } func TestJournal_QueryJournal_Good_RawSQLWithCTE(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 4}, map[string]string{"workspace": "session-a"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 4}, map[string]string{"workspace": "session-a"}).OK) rows := requireResultRows( t, @@ -82,208 +77,209 @@ func TestJournal_QueryJournal_Good_RawSQLWithCTE(t *testing.T) { ORDER BY committed_at `), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-a", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "session-a", rows[0]["measurement"]) } func TestJournal_QueryJournal_Good_PragmaSQL(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() rows := requireResultRows( t, storeInstance.QueryJournal("PRAGMA 
table_info(journal_entries)"), ) - require.NotEmpty(t, rows) + assertNotEmpty(t, rows) var columnNames []string for _, row := range rows { name, ok := row["name"].(string) - require.True(t, ok, "unexpected column name type: %T", row["name"]) + assertTruef(t, ok, "unexpected column name type: %T", row["name"]) columnNames = append(columnNames, name) } - assert.Contains(t, columnNames, "bucket_name") + assertContainsElement(t, columnNames, "bucket_name") } func TestJournal_QueryJournal_Good_FluxFilters(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.True(t, - storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r._measurement == "session-b")`), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-b", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) fields, ok := rows[0]["fields"].(map[string]any) - require.True(t, ok, "unexpected fields type: %T", rows[0]["fields"]) - assert.Equal(t, float64(2), fields["like"]) + assertTruef(t, ok, "unexpected fields type: %T", rows[0]["fields"]) + assertEqual(t, float64(2), fields["like"]) } func TestJournal_QueryJournal_Good_TagFilter(t *testing.T) { storeInstance, err := New(":memory:", 
WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.True(t, - storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r.workspace == "session-b")`), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-b", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) tags, ok := rows[0]["tags"].(map[string]string) - require.True(t, ok, "unexpected tags type: %T", rows[0]["tags"]) - assert.Equal(t, "session-b", tags["workspace"]) + assertTruef(t, ok, "unexpected tags type: %T", rows[0]["tags"]) + assertEqual(t, "session-b", tags["workspace"]) +} + +func TestJournal_QueryJournal_Good_NumericFieldFilter(t *testing.T) { + storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) + + rows := requireResultRows( + t, + storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r.like == 2)`), + 
) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) + + fields, ok := rows[0]["fields"].(map[string]any) + assertTruef(t, ok, "unexpected fields type: %T", rows[0]["fields"]) + assertEqual(t, float64(2), fields["like"]) +} + +func TestJournal_QueryJournal_Good_BooleanFieldFilter(t *testing.T) { + storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"complete": false}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"complete": true}, map[string]string{"workspace": "session-b"}).OK) + + rows := requireResultRows( + t, + storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r["complete"] == true)`), + ) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) + + fields, ok := rows[0]["fields"].(map[string]any) + assertTruef(t, ok, "unexpected fields type: %T", rows[0]["fields"]) + assertEqual(t, true, fields["complete"]) } func TestJournal_QueryJournal_Good_BucketFilter(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.NoError(t, commitJournalEntry( - storeInstance.sqliteDatabase, - "events", - "session-b", - `{"like":2}`, - `{"workspace":"session-b"}`, - time.Now().UnixMilli(), - )) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertNoError(t, commitJournalEntry(storeInstance.sqliteDatabase, "events", "session-b", `{"like":2}`, 
`{"workspace":"session-b"}`, time.Now().UnixMilli())) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r._bucket == "events")`), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-b", rows[0]["measurement"]) - assert.Equal(t, "events", rows[0]["bucket_name"]) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) + assertEqual(t, "events", rows[0]["bucket_name"]) } func TestJournal_QueryJournal_Good_DeterministicOrderingForSameTimestamp(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() - require.NoError(t, ensureJournalSchema(storeInstance.sqliteDatabase)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + assertNoError(t, ensureJournalSchema(storeInstance.sqliteDatabase)) committedAt := time.Date(2026, 3, 30, 12, 0, 0, 0, time.UTC).UnixMilli() - require.NoError(t, commitJournalEntry( - storeInstance.sqliteDatabase, - "events", - "session-b", - `{"like":2}`, - `{"workspace":"session-b"}`, - committedAt, - )) - require.NoError(t, commitJournalEntry( - storeInstance.sqliteDatabase, - "events", - "session-a", - `{"like":1}`, - `{"workspace":"session-a"}`, - committedAt, - )) + assertNoError(t, commitJournalEntry(storeInstance.sqliteDatabase, "events", "session-b", `{"like":2}`, `{"workspace":"session-b"}`, committedAt)) + assertNoError(t, commitJournalEntry(storeInstance.sqliteDatabase, "events", "session-a", `{"like":1}`, `{"workspace":"session-a"}`, committedAt)) rows := requireResultRows( t, storeInstance.QueryJournal(""), ) - require.Len(t, rows, 2) - assert.Equal(t, "session-b", rows[0]["measurement"]) - assert.Equal(t, "session-a", rows[1]["measurement"]) + assertLen(t, rows, 2) + assertEqual(t, "session-b", rows[0]["measurement"]) + assertEqual(t, "session-a", rows[1]["measurement"]) } func TestJournal_QueryJournal_Good_AbsoluteRangeWithStop(t *testing.T) { storeInstance, err := 
New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.True(t, - storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? WHERE measurement = ?", time.Date(2026, 3, 29, 12, 0, 0, 0, time.UTC).UnixMilli(), "session-a", ) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? 
WHERE measurement = ?", time.Date(2026, 3, 30, 12, 0, 0, 0, time.UTC).UnixMilli(), "session-b", ) - require.NoError(t, err) + assertNoError(t, err) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: "2026-03-30T00:00:00Z", stop: now())`), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-b", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "session-b", rows[0]["measurement"]) } func TestJournal_QueryJournal_Good_AbsoluteRangeHonoursStop(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.True(t, - storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK, - ) - require.True(t, - storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK, - ) + assertTrue(t, storeInstance.CommitToJournal("session-a", map[string]any{"like": 1}, map[string]string{"workspace": "session-a"}).OK) + assertTrue(t, storeInstance.CommitToJournal("session-b", map[string]any{"like": 2}, map[string]string{"workspace": "session-b"}).OK) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? WHERE measurement = ?", time.Date(2026, 3, 29, 12, 0, 0, 0, time.UTC).UnixMilli(), "session-a", ) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.sqliteDatabase.Exec( "UPDATE "+journalEntriesTableName+" SET committed_at = ? 
WHERE measurement = ?", time.Date(2026, 3, 30, 12, 0, 0, 0, time.UTC).UnixMilli(), "session-b", ) - require.NoError(t, err) + assertNoError(t, err) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: "2026-03-29T00:00:00Z", stop: "2026-03-30T00:00:00Z")`), ) - require.Len(t, rows, 1) - assert.Equal(t, "session-a", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "session-a", rows[0]["measurement"]) } func TestJournal_CommitToJournal_Bad_EmptyMeasurement(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() result := storeInstance.CommitToJournal("", map[string]any{"like": 1}, map[string]string{"workspace": "missing"}) - require.False(t, result.OK) - assert.Contains(t, result.Value.(error).Error(), "measurement is empty") + assertFalse(t, result.OK) + assertContainsString(t, result.Value.(error).Error(), "measurement is empty") } diff --git a/json.go b/json.go new file mode 100644 index 0000000..ae602a6 --- /dev/null +++ b/json.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// JSON helpers for storage consumers. +// Re-exports the minimum JSON surface needed by downstream users like +// go-cache and go-tenant so they don't need to import encoding/json directly. +// Internally uses core/go JSON primitives. +package store + +import core "dappco.re/go/core" + +// RawMessage is a raw encoded JSON value. +// Use in structs where the JSON should be stored as-is without re-encoding. +// +// Usage example: +// +// type CacheEntry struct { +// Data store.RawMessage `json:"data"` +// } +// cacheEntry := CacheEntry{Data: store.RawMessage([]byte("{\"name\":\"Alice\"}"))} +type RawMessage []byte + +// MarshalJSON returns the raw bytes as-is. If empty, returns `null`. 
+// +// Usage example: `bytes, err := store.RawMessage([]byte("{\"name\":\"Alice\"}")).MarshalJSON()` +func (raw RawMessage) MarshalJSON() ([]byte, error) { + if len(raw) == 0 { + return []byte("null"), nil + } + return raw, nil +} + +// UnmarshalJSON stores the raw JSON bytes without decoding them. +// +// Usage example: `var raw store.RawMessage; err := raw.UnmarshalJSON([]byte("{\"name\":\"Alice\"}"))` +func (raw *RawMessage) UnmarshalJSON(data []byte) error { + if raw == nil { + return core.E("store.RawMessage.UnmarshalJSON", "nil receiver", nil) + } + *raw = append((*raw)[:0], data...) + return nil +} + +// MarshalIndent serialises a value to pretty-printed JSON bytes. +// Uses core.JSONMarshal internally then applies prefix/indent formatting +// so consumers get readable output without importing encoding/json. +// +// Usage example: `data, err := store.MarshalIndent(map[string]string{"name": "Alice"}, "", " ")` +func MarshalIndent(value any, prefix, indent string) ([]byte, error) { + marshalled := core.JSONMarshal(value) + if !marshalled.OK { + if err, ok := marshalled.Value.(error); ok { + return nil, core.E("store.MarshalIndent", "marshal", err) + } + return nil, core.E("store.MarshalIndent", "marshal", nil) + } + raw, ok := marshalled.Value.([]byte) + if !ok { + return nil, core.E("store.MarshalIndent", "non-bytes result", nil) + } + if prefix == "" && indent == "" { + return raw, nil + } + + buf := core.NewBuilder() + if err := indentCompactJSON(buf, raw, prefix, indent); err != nil { + return nil, core.E("store.MarshalIndent", "indent", err) + } + return []byte(buf.String()), nil +} + +// indentCompactJSON formats compact JSON bytes with prefix+indent. +// Mirrors json.Indent's semantics without importing encoding/json. 
+// +// Usage example: `builder := core.NewBuilder(); _ = indentCompactJSON(builder, []byte("{\"name\":\"Alice\"}"), "", " ")` +func indentCompactJSON(buf interface { + WriteByte(byte) error + WriteString(string) (int, error) +}, src []byte, prefix, indent string) error { + depth := 0 + inString := false + escaped := false + + writeNewlineIndent := func(level int) error { + if err := buf.WriteByte('\n'); err != nil { + return err + } + if _, err := buf.WriteString(prefix); err != nil { + return err + } + for i := 0; i < level; i++ { + if _, err := buf.WriteString(indent); err != nil { + return err + } + } + return nil + } + + for i := 0; i < len(src); i++ { + c := src[i] + if inString { + if err := buf.WriteByte(c); err != nil { + return err + } + if escaped { + escaped = false + continue + } + if c == '\\' { + escaped = true + continue + } + if c == '"' { + inString = false + } + continue + } + switch c { + case '"': + inString = true + if err := buf.WriteByte(c); err != nil { + return err + } + case '{', '[': + if err := buf.WriteByte(c); err != nil { + return err + } + depth++ + // Look ahead for empty object/array. + if i+1 < len(src) && (src[i+1] == '}' || src[i+1] == ']') { + continue + } + if err := writeNewlineIndent(depth); err != nil { + return err + } + case '}', ']': + // Only indent if previous byte wasn't the matching opener. + if i > 0 && src[i-1] != '{' && src[i-1] != '[' { + depth-- + if err := writeNewlineIndent(depth); err != nil { + return err + } + } else { + depth-- + } + if err := buf.WriteByte(c); err != nil { + return err + } + case ',': + if err := buf.WriteByte(c); err != nil { + return err + } + if err := writeNewlineIndent(depth); err != nil { + return err + } + case ':': + if err := buf.WriteByte(c); err != nil { + return err + } + if err := buf.WriteByte(' '); err != nil { + return err + } + case ' ', '\t', '\n', '\r': + // Drop whitespace from compact source. 
+ default: + if err := buf.WriteByte(c); err != nil { + return err + } + } + } + return nil +} diff --git a/medium.go b/medium.go new file mode 100644 index 0000000..6f308dc --- /dev/null +++ b/medium.go @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "bytes" + "encoding/csv" + + core "dappco.re/go/core" + coreio "dappco.re/go/core/io" +) + +// Medium is the minimal storage transport used by the go-store workspace +// import and export helpers and by Compact when writing cold archives. +// +// This is an alias of `dappco.re/go/core/io.Medium`, so callers can pass any +// upstream medium implementation directly without an adapter. +// +// Usage example: `medium, _ := local.New("/tmp/exports"); storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: ":memory:", Medium: medium})` +type Medium = coreio.Medium + +// Usage example: `medium, _ := local.New("/srv/core"); storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: ":memory:", Medium: medium})` +// WithMedium installs an io.Medium-compatible transport on the Store so that +// Compact archives and Import/Export helpers route through the medium instead +// of the raw filesystem. +func WithMedium(medium Medium) StoreOption { + return func(storeInstance *Store) { + if storeInstance == nil { + return + } + storeInstance.medium = medium + } +} + +// Usage example: `medium := storeInstance.Medium(); if medium != nil { _ = medium.EnsureDir("exports") }` +func (storeInstance *Store) Medium() Medium { + if storeInstance == nil { + return nil + } + return storeInstance.medium +} + +// Usage example: `err := store.Import(workspace, medium, "dataset.jsonl")` +// Import reads a JSON, JSONL, or CSV payload from the provided medium and +// appends each record to the workspace buffer as a `Put` entry. 
Format is +// chosen from the file extension: `.json` expects either a top-level array or +// `{"entries":[...]}` shape, `.jsonl`/`.ndjson` parse line-by-line, and `.csv` +// uses the first row as the header. +func Import(workspace *Workspace, medium Medium, path string) error { + if workspace == nil { + return core.E("store.Import", "workspace is nil", nil) + } + if medium == nil { + return core.E("store.Import", "medium is nil", nil) + } + if path == "" { + return core.E("store.Import", "path is empty", nil) + } + + content, err := medium.Read(path) + if err != nil { + return core.E("store.Import", "read from medium", err) + } + + kind := importEntryKind(path) + switch lowercaseText(importExtension(path)) { + case ".jsonl", ".ndjson": + return importJSONLines(workspace, kind, content) + case ".csv": + return importCSV(workspace, kind, content) + case ".json": + return importJSON(workspace, kind, content) + default: + return importJSONLines(workspace, kind, content) + } +} + +// Usage example: `err := store.Export(workspace, medium, "report.json")` +// Export writes the workspace aggregate summary to the medium at the given +// path. Format is chosen from the extension: `.jsonl` writes one record per +// query row, `.csv` writes header + rows, everything else writes the +// aggregate as JSON. 
+func Export(workspace *Workspace, medium Medium, path string) error { + if workspace == nil { + return core.E("store.Export", "workspace is nil", nil) + } + if medium == nil { + return core.E("store.Export", "medium is nil", nil) + } + if path == "" { + return core.E("store.Export", "path is empty", nil) + } + + if err := ensureMediumDir(medium, core.PathDir(path)); err != nil { + return core.E("store.Export", "ensure directory", err) + } + + switch lowercaseText(importExtension(path)) { + case ".jsonl", ".ndjson": + return exportJSONLines(workspace, medium, path) + case ".csv": + return exportCSV(workspace, medium, path) + default: + return exportJSON(workspace, medium, path) + } +} + +func ensureMediumDir(medium Medium, directory string) error { + if directory == "" || directory == "." || directory == "/" { + return nil + } + if err := medium.EnsureDir(directory); err != nil { + return core.E("store.ensureMediumDir", "ensure directory", err) + } + return nil +} + +func importExtension(path string) string { + base := core.PathBase(path) + for i := len(base) - 1; i >= 0; i-- { + if base[i] == '.' { + return base[i:] + } + } + return "" +} + +func importEntryKind(path string) string { + base := core.PathBase(path) + for i := len(base) - 1; i >= 0; i-- { + if base[i] == '.' 
{ + base = base[:i] + break + } + } + if base == "" { + return "entry" + } + return base +} + +func importJSONLines(workspace *Workspace, kind, content string) error { + scanner := core.Split(content, "\n") + for _, rawLine := range scanner { + line := core.Trim(rawLine) + if line == "" { + continue + } + record := map[string]any{} + if result := core.JSONUnmarshalString(line, &record); !result.OK { + err, _ := result.Value.(error) + return core.E("store.Import", "parse jsonl line", err) + } + if err := workspace.Put(kind, record); err != nil { + return core.E("store.Import", "put jsonl record", err) + } + } + return nil +} + +func importJSON(workspace *Workspace, kind, content string) error { + trimmed := core.Trim(content) + if trimmed == "" { + return nil + } + + var topLevel any + if result := core.JSONUnmarshalString(trimmed, &topLevel); !result.OK { + err, _ := result.Value.(error) + return core.E("store.Import", "parse json", err) + } + + records, err := collectJSONRecords(topLevel) + if err != nil { + return core.E("store.Import", "normalise json records", err) + } + for _, record := range records { + if err := workspace.Put(kind, record); err != nil { + return core.E("store.Import", "put json record", err) + } + } + return nil +} + +func collectJSONRecords(value any) ([]map[string]any, error) { + switch shape := value.(type) { + case []any: + records := make([]map[string]any, 0, len(shape)) + for index, entry := range shape { + record, ok := entry.(map[string]any) + if !ok { + return nil, core.E("store.Import", core.Concat("json array element is not an object at index ", core.Sprint(index)), nil) + } + records = append(records, record) + } + return records, nil + case map[string]any: + if nested, ok := shape["entries"].([]any); ok { + return collectJSONRecords(nested) + } + if nested, ok := shape["records"].([]any); ok { + return collectJSONRecords(nested) + } + if nested, ok := shape["data"].([]any); ok { + return collectJSONRecords(nested) + } + return 
[]map[string]any{shape}, nil + } + return nil, core.E("store.Import", "unsupported json shape", nil) +} + +func importCSV(workspace *Workspace, kind, content string) error { + reader := csv.NewReader(bytes.NewBufferString(content)) + reader.FieldsPerRecord = -1 + rows, err := reader.ReadAll() + if err != nil { + return core.E("store.Import", "parse csv", err) + } + if len(rows) == 0 { + return nil + } + header := rows[0] + if len(header) == 0 { + return nil + } + for _, fields := range rows[1:] { + if len(fields) == 0 { + continue + } + record := make(map[string]any, len(header)) + for columnIndex, columnName := range header { + if columnIndex < len(fields) { + record[columnName] = fields[columnIndex] + } else { + record[columnName] = "" + } + } + if err := workspace.Put(kind, record); err != nil { + return core.E("store.Import", "put csv record", err) + } + } + return nil +} + +func exportJSON(workspace *Workspace, medium Medium, path string) error { + summary, err := workspace.aggregateFields() + if err != nil { + return core.E("store.Export", "aggregate workspace", err) + } + content := core.JSONMarshalString(summary) + if err := medium.Write(path, content); err != nil { + return core.E("store.Export", "write json", err) + } + return nil +} + +func exportJSONLines(workspace *Workspace, medium Medium, path string) error { + result := workspace.Query("SELECT entry_kind, entry_data, created_at FROM workspace_entries ORDER BY entry_id") + if !result.OK { + err, _ := result.Value.(error) + return core.E("store.Export", "query workspace", err) + } + rows, ok := result.Value.([]map[string]any) + if !ok { + rows = nil + } + + builder := core.NewBuilder() + for _, row := range rows { + line := core.JSONMarshalString(row) + builder.WriteString(line) + builder.WriteString("\n") + } + if err := medium.Write(path, builder.String()); err != nil { + return core.E("store.Export", "write jsonl", err) + } + return nil +} + +func exportCSV(workspace *Workspace, medium Medium, path 
string) error { + result := workspace.Query("SELECT entry_kind, entry_data, created_at FROM workspace_entries ORDER BY entry_id") + if !result.OK { + err, _ := result.Value.(error) + return core.E("store.Export", "query workspace", err) + } + rows, ok := result.Value.([]map[string]any) + if !ok { + rows = nil + } + + builder := core.NewBuilder() + builder.WriteString("entry_kind,entry_data,created_at\n") + for _, row := range rows { + builder.WriteString(csvField(core.Sprint(row["entry_kind"]))) + builder.WriteString(",") + builder.WriteString(csvField(core.Sprint(row["entry_data"]))) + builder.WriteString(",") + builder.WriteString(csvField(core.Sprint(row["created_at"]))) + builder.WriteString("\n") + } + if err := medium.Write(path, builder.String()); err != nil { + return core.E("store.Export", "write csv", err) + } + return nil +} + +func csvField(value string) string { + needsQuote := false + for index := 0; index < len(value); index++ { + switch value[index] { + case ',', '"', '\n', '\r': + needsQuote = true + } + if needsQuote { + break + } + } + if !needsQuote { + return value + } + escaped := core.Replace(value, `"`, `""`) + return core.Concat(`"`, escaped, `"`) +} diff --git a/medium_test.go b/medium_test.go new file mode 100644 index 0000000..500c372 --- /dev/null +++ b/medium_test.go @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "bytes" + goio "io" + "io/fs" + "sync" + "testing" + "time" + + core "dappco.re/go/core" +) + +// memoryMedium is an in-memory implementation of `store.Medium` used by the +// medium tests so assertions do not depend on the local filesystem. 
+type memoryMedium struct { + lock sync.Mutex + files map[string]string +} + +func newMemoryMedium() *memoryMedium { + return &memoryMedium{files: make(map[string]string)} +} + +func (medium *memoryMedium) Read(path string) (string, error) { + medium.lock.Lock() + defer medium.lock.Unlock() + content, ok := medium.files[path] + if !ok { + return "", core.E("memoryMedium.Read", "file not found: "+path, nil) + } + return content, nil +} + +func (medium *memoryMedium) Write(path, content string) error { + medium.lock.Lock() + defer medium.lock.Unlock() + medium.files[path] = content + return nil +} + +func (medium *memoryMedium) WriteMode(path, content string, _ fs.FileMode) error { + return medium.Write(path, content) +} + +func (medium *memoryMedium) EnsureDir(string) error { return nil } + +func (medium *memoryMedium) Create(path string) (goio.WriteCloser, error) { + return &memoryWriter{medium: medium, path: path}, nil +} + +func (medium *memoryMedium) Append(path string) (goio.WriteCloser, error) { + medium.lock.Lock() + defer medium.lock.Unlock() + return &memoryWriter{medium: medium, path: path, buffer: *bytes.NewBufferString(medium.files[path])}, nil +} + +func (medium *memoryMedium) ReadStream(path string) (goio.ReadCloser, error) { + medium.lock.Lock() + defer medium.lock.Unlock() + return goio.NopCloser(bytes.NewReader([]byte(medium.files[path]))), nil +} + +func (medium *memoryMedium) WriteStream(path string) (goio.WriteCloser, error) { + return medium.Create(path) +} + +func (medium *memoryMedium) Exists(path string) bool { + medium.lock.Lock() + defer medium.lock.Unlock() + _, ok := medium.files[path] + return ok +} + +func (medium *memoryMedium) IsFile(path string) bool { return medium.Exists(path) } + +func (medium *memoryMedium) Delete(path string) error { + medium.lock.Lock() + defer medium.lock.Unlock() + delete(medium.files, path) + return nil +} + +func (medium *memoryMedium) DeleteAll(path string) error { + medium.lock.Lock() + defer 
medium.lock.Unlock() + for key := range medium.files { + if key == path || core.HasPrefix(key, path+"/") { + delete(medium.files, key) + } + } + return nil +} + +func (medium *memoryMedium) Rename(oldPath, newPath string) error { + medium.lock.Lock() + defer medium.lock.Unlock() + content, ok := medium.files[oldPath] + if !ok { + return core.E("memoryMedium.Rename", "file not found: "+oldPath, nil) + } + medium.files[newPath] = content + delete(medium.files, oldPath) + return nil +} + +type renameFailMedium struct { + *memoryMedium +} + +func (medium *renameFailMedium) Rename(string, string) error { + return core.E("renameFailMedium.Rename", "forced rename failure", nil) +} + +type writeFailOnceMedium struct { + *memoryMedium + failures int +} + +func (medium *writeFailOnceMedium) Write(path, content string) error { + if medium.failures > 0 { + medium.failures-- + return core.E("writeFailOnceMedium.Write", "forced write failure", nil) + } + return medium.memoryMedium.Write(path, content) +} + +func (medium *memoryMedium) List(path string) ([]fs.DirEntry, error) { return nil, nil } + +func (medium *memoryMedium) Stat(path string) (fs.FileInfo, error) { + if !medium.Exists(path) { + return nil, core.E("memoryMedium.Stat", "file not found: "+path, nil) + } + return fileInfoStub{name: core.PathBase(path)}, nil +} + +func (medium *memoryMedium) Open(path string) (fs.File, error) { + if !medium.Exists(path) { + return nil, core.E("memoryMedium.Open", "file not found: "+path, nil) + } + return newMemoryFile(path, medium.files[path]), nil +} + +func (medium *memoryMedium) IsDir(string) bool { return false } + +type memoryWriter struct { + medium *memoryMedium + path string + buffer bytes.Buffer + closed bool +} + +func (writer *memoryWriter) Write(data []byte) (int, error) { + return writer.buffer.Write(data) +} + +func (writer *memoryWriter) Close() error { + if writer.closed { + return nil + } + writer.closed = true + return writer.medium.Write(writer.path, 
writer.buffer.String()) +} + +type fileInfoStub struct { + name string +} + +func (fileInfoStub) Size() int64 { return 0 } +func (fileInfoStub) Mode() fs.FileMode { return 0 } +func (fileInfoStub) ModTime() time.Time { return time.Time{} } +func (fileInfoStub) IsDir() bool { return false } +func (fileInfoStub) Sys() any { return nil } +func (info fileInfoStub) Name() string { return info.name } + +type memoryFile struct { + *bytes.Reader + name string +} + +func newMemoryFile(name, content string) *memoryFile { + return &memoryFile{Reader: bytes.NewReader([]byte(content)), name: name} +} + +func (file *memoryFile) Stat() (fs.FileInfo, error) { + return fileInfoStub{name: core.PathBase(file.name)}, nil +} +func (file *memoryFile) Close() error { return nil } + +// Ensure memoryMedium still satisfies the internal Medium contract. +var _ Medium = (*memoryMedium)(nil) + +// Compile-time check for fs.FileInfo usage in the tests. +var _ fs.FileInfo = (*FileInfoStub)(nil) + +type FileInfoStub struct{} + +func (FileInfoStub) Name() string { return "" } +func (FileInfoStub) Size() int64 { return 0 } +func (FileInfoStub) Mode() fs.FileMode { return 0 } +func (FileInfoStub) ModTime() time.Time { return time.Time{} } +func (FileInfoStub) IsDir() bool { return false } +func (FileInfoStub) Sys() any { return nil } + +func TestMedium_WithMedium_Good(t *testing.T) { + useWorkspaceStateDirectory(t) + + medium := newMemoryMedium() + storeInstance, err := New(":memory:", WithMedium(medium)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertSamef(t, medium, storeInstance.Medium(), "medium should round-trip via accessor") + assertSamef(t, medium, storeInstance.Config().Medium, "medium should appear in Config()") +} + +func TestMedium_WithMedium_Bad_NilKeepsFilesystemBackend(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNil(t, 
storeInstance.Medium()) +} + +func TestMedium_WithMedium_Good_PersistsDatabaseThroughMedium(t *testing.T) { + useWorkspaceStateDirectory(t) + + medium := newMemoryMedium() + + storeInstance, err := New("app.db", WithMedium(medium)) + assertNoError(t, err) + + assertNoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Close()) + + reopenedStore, err := New("app.db", WithMedium(medium)) + assertNoError(t, err) + defer func() { _ = reopenedStore.Close() }() + + value, err := reopenedStore.Get("g", "k") + assertNoError(t, err) + assertEqual(t, "v", value) + assertTrue(t, medium.Exists("app.db")) +} + +func TestMedium_Import_Good_JSONL(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-jsonl") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("data.jsonl", `{"user":"@alice"} +{"user":"@bob"} +`)) + + assertNoError(t, Import(workspace, medium, "data.jsonl")) + + rows := requireResultRows(t, workspace.Query("SELECT entry_kind, entry_data FROM workspace_entries ORDER BY entry_id")) + assertLen(t, rows, 2) + assertEqual(t, "data", rows[0]["entry_kind"]) + assertContainsElement(t, rows[0]["entry_data"], "@alice") + assertContainsElement(t, rows[1]["entry_data"], "@bob") +} + +func TestMedium_Import_Good_JSONArray(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-json-array") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("users.json", `[{"name":"Alice"},{"name":"Bob"},{"name":"Carol"}]`)) + + assertNoError(t, Import(workspace, medium, "users.json")) + + assertEqual(t, 
map[string]any{"users": 3}, workspace.Aggregate()) +} + +func TestMedium_Import_Good_CSV(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-csv") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("findings.csv", "tool,severity\ngosec,high\ngolint,low\n")) + + assertNoError(t, Import(workspace, medium, "findings.csv")) + + assertEqual(t, map[string]any{"findings": 2}, workspace.Aggregate()) +} + +func TestMedium_Import_Good_CSVQuotedMultiline(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-csv-multiline") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("notes.csv", "name,note\nAlice,\"hello\nworld\"\n")) + + assertNoError(t, Import(workspace, medium, "notes.csv")) + + assertEqual(t, map[string]any{"notes": 1}, workspace.Aggregate()) +} + +func TestMedium_Import_Bad_JSONArrayNonObject(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-json-non-object") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("users.json", `[{"name":"Alice"},"Bob"]`)) + + assertError(t, Import(workspace, medium, "users.json")) + + count, err := workspace.Count() + assertNoError(t, err) + assertEqual(t, 0, count) +} + +func TestMedium_Import_Bad_MalformedCSV(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer 
func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-csv-bad") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertNoError(t, medium.Write("findings.csv", "tool,severity\ngosec,\"high\n")) + + assertError(t, Import(workspace, medium, "findings.csv")) + + count, err := workspace.Count() + assertNoError(t, err) + assertEqual(t, 0, count) +} + +func TestMedium_Import_Bad_NilArguments(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-bad") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + + assertError(t, Import(nil, medium, "data.json")) + assertError(t, Import(workspace, nil, "data.json")) + assertError(t, Import(workspace, medium, "")) +} + +func TestMedium_Import_Ugly_MissingFileReturnsError(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-import-missing") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + assertError(t, Import(workspace, medium, "ghost.jsonl")) +} + +func TestMedium_Export_Good_JSON(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-export-json") + assertNoError(t, err) + defer workspace.Discard() + + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@carol"})) + + medium := newMemoryMedium() + assertNoError(t, 
Export(workspace, medium, "report.json")) + + assertTrue(t, medium.Exists("report.json")) + content, err := medium.Read("report.json") + assertNoError(t, err) + assertContainsString(t, content, `"like":2`) + assertContainsString(t, content, `"profile_match":1`) +} + +func TestMedium_Export_Good_JSONLines(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-export-jsonl") + assertNoError(t, err) + defer workspace.Discard() + + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + + medium := newMemoryMedium() + assertNoError(t, Export(workspace, medium, "report.jsonl")) + + content, err := medium.Read("report.jsonl") + assertNoError(t, err) + lines := 0 + for _, line := range splitNewlines(content) { + if line != "" { + lines++ + } + } + assertEqual(t, 2, lines) +} + +func TestMedium_Export_Bad_NilArguments(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-export-bad") + assertNoError(t, err) + defer workspace.Discard() + + medium := newMemoryMedium() + + assertError(t, Export(nil, medium, "report.json")) + assertError(t, Export(workspace, nil, "report.json")) + assertError(t, Export(workspace, medium, "")) +} + +func TestMedium_Export_Bad_JSONPropagatesWorkspaceFailure(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("medium-export-json-closed") + assertNoError(t, err) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Close()) + + 
medium := newMemoryMedium() + assertNoError(t, medium.Write("report.json", `{"previous":true}`)) + + err = Export(workspace, medium, "report.json") + + assertError(t, err) + assertContainsString(t, err.Error(), "aggregate workspace") + content, readErr := medium.Read("report.json") + assertNoError(t, readErr) + assertEqual(t, `{"previous":true}`, content) +} + +func TestMedium_Compact_Good_MediumRoutesArchive(t *testing.T) { + useWorkspaceStateDirectory(t) + useArchiveOutputDirectory(t) + + medium := newMemoryMedium() + storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events"), WithMedium(medium)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertTrue(t, storeInstance.CommitToJournal("jobs", map[string]any{"count": 3}, map[string]string{"workspace": "jobs-1"}).OK) + + result := storeInstance.Compact(CompactOptions{ + Before: time.Now().Add(time.Minute), + Output: "archive/", + Format: "gzip", + }) + assertTruef(t, result.OK, "compact result: %v", result.Value) + outputPath, ok := result.Value.(string) + assertTrue(t, ok) + assertNotEmpty(t, outputPath) + assertTruef(t, medium.Exists(outputPath), "compact should write through medium at %s", outputPath) +} + +func TestMedium_Compact_Bad_PreservesStagedArchiveWhenPublishFails(t *testing.T) { + useWorkspaceStateDirectory(t) + useArchiveOutputDirectory(t) + + medium := &renameFailMedium{memoryMedium: newMemoryMedium()} + storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events"), WithMedium(medium)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertTrue(t, storeInstance.CommitToJournal("jobs", map[string]any{"count": 3}, map[string]string{"workspace": "jobs-1"}).OK) + + result := storeInstance.Compact(CompactOptions{ + Before: time.Now().Add(time.Minute), + Output: "archive/", + Format: "gzip", + }) + assertFalse(t, result.OK) + + stagedArchiveFound := false + medium.lock.Lock() + for path 
:= range medium.files { + if core.HasSuffix(path, ".tmp") { + stagedArchiveFound = true + } + } + medium.lock.Unlock() + assertTrue(t, stagedArchiveFound) +} + +func splitNewlines(content string) []string { + var result []string + current := core.NewBuilder() + for index := 0; index < len(content); index++ { + character := content[index] + if character == '\n' { + result = append(result, current.String()) + current.Reset() + continue + } + current.WriteByte(character) + } + if current.Len() > 0 { + result = append(result, current.String()) + } + return result +} diff --git a/parquet.go b/parquet.go new file mode 100644 index 0000000..5d4dcdc --- /dev/null +++ b/parquet.go @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import core "dappco.re/go/core" + +// ChatMessage represents a single message in a chat conversation, used for +// reading JSONL training data during data import. +// +// Usage example: +// +// msg := store.ChatMessage{Role: "user", Content: "What is sovereignty?"} +type ChatMessage struct { + // Role is the message author role (e.g. "user", "assistant", "system"). + // + // Usage example: + // + // msg.Role // "user" + Role string `json:"role"` + + // Content is the message text. + // + // Usage example: + // + // msg.Content // "What is sovereignty?" + Content string `json:"content"` +} + +// ParquetRow describes the lightweight row shape used by external Parquet +// exporters. +// +// Usage example: +// +// row := store.ParquetRow{Prompt: "What is sovereignty?", Response: "Sovereignty is...", System: "You are LEM."} +type ParquetRow struct { + // Prompt is the user prompt text. + // + // Usage example: + // + // row.Prompt // "What is sovereignty?" + Prompt string `parquet:"prompt"` + + // Response is the assistant response text. + // + // Usage example: + // + // row.Response // "Sovereignty is..." + Response string `parquet:"response"` + + // System is the system prompt text. 
+ // + // Usage example: + // + // row.System // "You are LEM." + System string `parquet:"system"` + + // Messages is the JSON-encoded full conversation messages. + // + // Usage example: + // + // row.Messages // `[{"role":"user","content":"What is sovereignty?"}]` + Messages string `parquet:"messages"` +} + +// ExportParquet reports that Parquet export is intentionally kept outside the +// core package dependency graph. +// +// Usage example: +// +// _, err := store.ExportParquet("/Volumes/Data/lem/training", "/Volumes/Data/lem/parquet") +func ExportParquet(trainingDir, outputDir string) (int, error) { + return 0, core.E( + "store.ExportParquet", + "Parquet export requires an external tool so core does not ship a runtime Parquet dependency", + nil, + ) +} + +// ExportSplitParquet reports that split-level Parquet export is intentionally +// kept outside the core package dependency graph. +// +// Usage example: +// +// _, err := store.ExportSplitParquet("/data/train.jsonl", "/data/parquet", "train") +func ExportSplitParquet(jsonlPath, outputDir, split string) (int, error) { + return 0, core.E( + "store.ExportSplitParquet", + "Parquet export requires an external tool so core does not ship a runtime Parquet dependency", + nil, + ) +} diff --git a/path_test.go b/path_test.go index cfeba71..10bdc40 100644 --- a/path_test.go +++ b/path_test.go @@ -2,12 +2,10 @@ package store import ( "testing" - - "github.com/stretchr/testify/assert" ) func TestPath_Normalise_Good_TrailingSlashes(t *testing.T) { - assert.Equal(t, ".core/state/scroll-session.duckdb", workspaceFilePath(".core/state/", "scroll-session")) - assert.Equal(t, ".core/archive/journal-20260404-010203.jsonl.gz", joinPath(".core/archive/", "journal-20260404-010203.jsonl.gz")) - assert.Equal(t, ".core/archive", normaliseDirectoryPath(".core/archive///")) + assertEqual(t, ".core/state/scroll-session.duckdb", workspaceFilePath(".core/state/", "scroll-session")) + assertEqual(t, 
".core/archive/journal-20260404-010203.jsonl.gz", joinPath(".core/archive/", "journal-20260404-010203.jsonl.gz")) + assertEqual(t, ".core/archive", normaliseDirectoryPath(".core/archive///")) } diff --git a/publish.go b/publish.go new file mode 100644 index 0000000..799a4c4 --- /dev/null +++ b/publish.go @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package store + +import ( + "bytes" + "context" + "io" + "io/fs" + "net/http" + "time" + + core "dappco.re/go/core" +) + +// PublishConfig holds options for the publish operation. +// +// Usage example: +// +// cfg := store.PublishConfig{InputDir: "/data/parquet", Repo: "snider/lem-training", Public: true} +type PublishConfig struct { + // InputDir is the directory containing Parquet files to upload. + // + // Usage example: + // + // cfg.InputDir // "/data/parquet" + InputDir string + + // Repo is the HuggingFace dataset repository (e.g. "user/dataset"). + // + // Usage example: + // + // cfg.Repo // "snider/lem-training" + Repo string + + // Public sets the dataset visibility to public when true. + // + // Usage example: + // + // cfg.Public // true + Public bool + + // Token is the HuggingFace API token. Falls back to HF_TOKEN env or ~/.huggingface/token. + // + // Usage example: + // + // cfg.Token // "hf_..." + Token string + + // Context controls cancellation for HuggingFace API requests. When nil, + // Publish uses context.Background(). + // + // Usage example: + // + // cfg.Context = context.Background() + Context context.Context + + // DryRun lists files that would be uploaded without actually uploading. + // + // Usage example: + // + // cfg.DryRun // true + DryRun bool +} + +// uploadEntry pairs a local file path with its remote destination. +type uploadEntry struct { + local string + remote string +} + +// Publish uploads Parquet files to HuggingFace Hub. 
+// +// It looks for train.parquet, valid.parquet, and test.parquet in InputDir, +// plus an optional dataset_card.md in the parent directory (uploaded as README.md). +// The token is resolved from PublishConfig.Token, the HF_TOKEN environment variable, +// or ~/.huggingface/token, in that order. +// +// Usage example: +// +// err := store.Publish(store.PublishConfig{InputDir: "/data/parquet", Repo: "snider/lem-training"}, os.Stdout) +func Publish(cfg PublishConfig, w io.Writer) error { + if cfg.InputDir == "" { + return core.E("store.Publish", "input directory is required", nil) + } + if cfg.Repo == "" { + return core.E("store.Publish", "repository is required", nil) + } + + publishContext := cfg.Context + if publishContext == nil { + publishContext = context.Background() + } + + token := resolveHFToken(cfg.Token) + if token == "" && !cfg.DryRun { + return core.E("store.Publish", "HuggingFace token required (--token, HF_TOKEN env, or ~/.huggingface/token)", nil) + } + + files, hasSplit, err := collectUploadFiles(cfg.InputDir) + if err != nil { + return err + } + if !hasSplit { + return core.E("store.Publish", core.Sprintf("no Parquet files found in %s", cfg.InputDir), nil) + } + + if cfg.DryRun { + core.Print(w, "Dry run: would publish to %s", cfg.Repo) + if cfg.Public { + core.Print(w, " Visibility: public") + } else { + core.Print(w, " Visibility: private") + } + for _, f := range files { + statResult := localFs.Stat(f.local) + if !statResult.OK { + return core.E("store.Publish", core.Sprintf("stat %s", f.local), statResult.Value.(error)) + } + info := statResult.Value.(fs.FileInfo) + sizeMB := float64(info.Size()) / 1024 / 1024 + core.Print(w, " %s -> %s (%.1f MB)", core.PathBase(f.local), f.remote, sizeMB) + } + return nil + } + + core.Print(w, "Publishing to https://huggingface.co/datasets/%s", cfg.Repo) + + if err := ensureHFDatasetRepo(publishContext, token, cfg.Repo, cfg.Public); err != nil { + return core.E("store.Publish", "ensure HuggingFace dataset", 
err) + } + + for _, f := range files { + if err := uploadFileToHF(publishContext, token, cfg.Repo, f.local, f.remote); err != nil { + return core.E("store.Publish", core.Sprintf("upload %s", core.PathBase(f.local)), err) + } + core.Print(w, " Uploaded %s -> %s", core.PathBase(f.local), f.remote) + } + + core.Print(w, "\nPublished to https://huggingface.co/datasets/%s", cfg.Repo) + return nil +} + +// resolveHFToken returns a HuggingFace API token from the given value, +// HF_TOKEN env var, or ~/.huggingface/token file. +func resolveHFToken(explicit string) string { + if explicit != "" { + return explicit + } + if env := core.Env("HF_TOKEN"); env != "" { + return env + } + // Core populates DIR_HOME via os.UserHomeDir while this package keeps the + // repository-wide ban on direct os imports. + homes := []string{core.Env("DIR_HOME")} + if homeEnv := core.Env("HOME"); homeEnv != "" && homeEnv != homes[0] { + homes = append(homes, homeEnv) + } + for _, home := range homes { + if home == "" { + continue + } + r := localFs.Read(core.JoinPath(home, ".huggingface", "token")) + if !r.OK { + continue + } + token := core.Trim(r.Value.(string)) + if token != "" { + return token + } + } + return "" +} + +// collectUploadFiles finds Parquet split files and an optional dataset card. +func collectUploadFiles(inputDir string) ([]uploadEntry, bool, error) { + splits := []string{"train", "valid", "test"} + var files []uploadEntry + hasSplit := false + + for _, split := range splits { + path := core.JoinPath(inputDir, split+".parquet") + if !isFile(path) { + continue + } + files = append(files, uploadEntry{path, core.Sprintf("data/%s.parquet", split)}) + hasSplit = true + } + + // Check for dataset card in parent directory. 
+	cardPath := core.JoinPath(inputDir, "..", "dataset_card.md")
+	if isFile(cardPath) {
+		files = append(files, uploadEntry{cardPath, "README.md"})
+	}
+
+	return files, hasSplit, nil
+}
+
+// ensureHFDatasetRepo creates the HuggingFace dataset repository when it does
+// not already exist (HTTP 409 Conflict from the create endpoint is treated as
+// success) and then applies the requested visibility via the settings
+// endpoint so pre-existing repositories also converge on the caller's choice.
+func ensureHFDatasetRepo(ctx context.Context, token, repoID string, public bool) error {
+	if repoID == "" {
+		return core.E("store.ensureHFDatasetRepo", "repository is required", nil)
+	}
+
+	organisation, name := splitHFRepoID(repoID)
+	if name == "" {
+		return core.E("store.ensureHFDatasetRepo", "repository name is required", nil)
+	}
+
+	createPayload := map[string]any{
+		"name":    name,
+		"type":    "dataset",
+		"private": !public,
+	}
+	if organisation != "" {
+		createPayload["organization"] = organisation
+	}
+
+	createStatus, createBody, err := hfJSONRequest(ctx, token, http.MethodPost, "https://huggingface.co/api/repos/create", createPayload)
+	if err != nil {
+		return core.E("store.ensureHFDatasetRepo", "create dataset repository", err)
+	}
+	if createStatus >= 300 && createStatus != http.StatusConflict {
+		return core.E("store.ensureHFDatasetRepo", core.Sprintf("create dataset failed: HTTP %d: %s", createStatus, createBody), nil)
+	}
+
+	settingsURL := core.Sprintf("https://huggingface.co/api/repos/dataset/%s/settings", repoID)
+	settingsStatus, settingsBody, err := hfJSONRequest(ctx, token, http.MethodPut, settingsURL, map[string]any{
+		"private": !public,
+	})
+	if err != nil {
+		return core.E("store.ensureHFDatasetRepo", "update dataset visibility", err)
+	}
+	if settingsStatus >= 300 {
+		return core.E("store.ensureHFDatasetRepo", core.Sprintf("update dataset visibility failed: HTTP %d: %s", settingsStatus, settingsBody), nil)
+	}
+	return nil
+}
+
+// splitHFRepoID splits an "owner/name" HuggingFace repository id into its
+// organisation and name parts. A bare "name" denotes a user-owned repository
+// (empty organisation). Ids containing more than one "/" are not valid Hub
+// repository ids; they yield an empty name so callers reject them via their
+// existing name check instead of silently truncating "a/b/c" to "a/b" while
+// later requests (e.g. the settings URL) still use the full id.
+func splitHFRepoID(repoID string) (organisation string, name string) {
+	parts := core.Split(repoID, "/")
+	switch len(parts) {
+	case 1:
+		return "", repoID
+	case 2:
+		return parts[0], parts[1]
+	default:
+		return "", ""
+	}
+}
+
+// hfJSONRequest sends a JSON payload to the HuggingFace API with bearer-token
+// authentication and returns the response status code and body text.
+func hfJSONRequest(ctx context.Context, token, method, url string, payload map[string]any) (int, string, error) {
+	payloadJSON := 
core.JSONMarshalString(payload) + req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewBufferString(payloadJSON)) + if err != nil { + return 0, "", core.E("store.hfJSONRequest", "create request", err) + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: 120 * time.Second} + resp, err := client.Do(req) + if err != nil { + return 0, "", core.E("store.hfJSONRequest", "send request", err) + } + defer func() { + _ = resp.Body.Close() + }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return resp.StatusCode, "", core.E("store.hfJSONRequest", "read response body", err) + } + return resp.StatusCode, string(body), nil +} + +// uploadFileToHF uploads a single file to a HuggingFace dataset repo via the +// Hub API. +func uploadFileToHF(ctx context.Context, token, repoID, localPath, remotePath string) error { + openResult := localFs.Open(localPath) + if !openResult.OK { + return core.E("store.uploadFileToHF", core.Sprintf("open %s", localPath), openResult.Value.(error)) + } + file := openResult.Value.(fs.File) + defer func() { _ = file.Close() }() + + url := core.Sprintf("https://huggingface.co/api/datasets/%s/upload/main/%s", repoID, remotePath) + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, file) + if err != nil { + return core.E("store.uploadFileToHF", "create request", err) + } + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/octet-stream") + if stat, err := file.Stat(); err == nil { + req.ContentLength = stat.Size() + } + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return core.E("store.uploadFileToHF", "upload request", err) + } + defer func() { + _ = resp.Body.Close() + }() + + if resp.StatusCode >= 300 { + body, readErr := io.ReadAll(resp.Body) + if readErr != nil { + return core.E("store.uploadFileToHF", "read error response body", readErr) + } + 
return core.E("store.uploadFileToHF", core.Sprintf("upload failed: HTTP %d: %s", resp.StatusCode, string(body)), nil) + } + + return nil +} diff --git a/publish_test.go b/publish_test.go new file mode 100644 index 0000000..7cb2d2e --- /dev/null +++ b/publish_test.go @@ -0,0 +1,42 @@ +package store + +import ( + "bytes" + "testing" + + core "dappco.re/go/core" +) + +func TestPublish_Publish_Bad_EmptyRepository(t *testing.T) { + var output bytes.Buffer + + err := Publish(PublishConfig{InputDir: t.TempDir(), DryRun: true}, &output) + + assertError(t, err) + assertContainsString(t, err.Error(), "repository is required") +} + +func TestPublish_Publish_Bad_DatasetCardWithoutParquetSplit(t *testing.T) { + inputDir := core.JoinPath(t.TempDir(), "data") + requireCoreOK(t, testFilesystem().EnsureDir(inputDir)) + requireCoreWriteBytes(t, core.JoinPath(inputDir, "..", "dataset_card.md"), []byte("# Dataset\n")) + + var output bytes.Buffer + err := Publish(PublishConfig{InputDir: inputDir, Repo: "snider/lem-training", DryRun: true}, &output) + + assertError(t, err) + assertContainsString(t, err.Error(), "no Parquet files found") +} + +func TestPublish_ResolveHFToken_Good_UserHomeFallback(t *testing.T) { + homeDirectory := t.TempDir() + t.Setenv("HF_TOKEN", "") + t.Setenv("DIR_HOME", "") + t.Setenv("HOME", homeDirectory) + + tokenDirectory := core.JoinPath(homeDirectory, ".huggingface") + requireCoreOK(t, testFilesystem().EnsureDir(tokenDirectory)) + requireCoreWriteBytes(t, core.JoinPath(tokenDirectory, "token"), []byte(" hf_file_token \n")) + + assertEqual(t, "hf_file_token", resolveHFToken("")) +} diff --git a/recover_test.go b/recover_test.go new file mode 100644 index 0000000..ad6ae70 --- /dev/null +++ b/recover_test.go @@ -0,0 +1,58 @@ +package store + +import "testing" + +func TestRecover_Orphans_Good_RecoversOrphan(t *testing.T) { + stateDirectory := useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = 
storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("recover-good") + assertNoError(t, err) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Close()) + + orphans := storeInstance.RecoverOrphans(stateDirectory) + assertLen(t, orphans, 1) + assertEqual(t, "recover-good", orphans[0].Name()) + assertEqual(t, map[string]any{"like": 1}, orphans[0].Aggregate()) + + orphans[0].Discard() + assertFalse(t, testFilesystem().Exists(workspaceFilePath(stateDirectory, "recover-good"))) +} + +func TestRecover_Orphans_Bad_CorruptMetadataQuarantined(t *testing.T) { + stateDirectory := useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + corruptDatabasePath := workspaceFilePath(stateDirectory, "recover-bad") + requireCoreWriteBytes(t, corruptDatabasePath, []byte("not a duckdb database")) + requireCoreWriteBytes(t, corruptDatabasePath+".wal", []byte("wal")) + + orphans := storeInstance.RecoverOrphans(stateDirectory) + assertLen(t, orphans, 0) + assertFalse(t, testFilesystem().Exists(corruptDatabasePath)) + assertFalse(t, testFilesystem().Exists(corruptDatabasePath+".wal")) + + quarantinePath := workspaceQuarantineFilePath(stateDirectory, corruptDatabasePath) + assertTrue(t, testFilesystem().Exists(quarantinePath)) + assertTrue(t, testFilesystem().Exists(quarantinePath+".wal")) + assertEqual(t, "not a duckdb database", string(requireCoreReadBytes(t, quarantinePath))) +} + +func TestRecover_Orphans_Ugly_NoOrphansNoop(t *testing.T) { + stateDirectory := useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + orphans := storeInstance.RecoverOrphans(stateDirectory) + assertLen(t, orphans, 0) + assertFalse(t, testFilesystem().Exists(joinPath(stateDirectory, workspaceQuarantineDirName))) +} diff --git a/scope.go b/scope.go index 
8e4c68e..55faf46 100644 --- a/scope.go +++ b/scope.go @@ -1,9 +1,10 @@ package store import ( + "database/sql" "iter" "regexp" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural for store infrastructure (RFC §4 explicitly mandates). "time" core "dappco.re/go/core" @@ -14,6 +15,7 @@ var validNamespace = regexp.MustCompile(`^[a-zA-Z0-9-]+$`) const defaultScopedGroupName = "default" +// QuotaConfig sets per-namespace key and group limits. // Usage example: `quota := store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}` type QuotaConfig struct { // Usage example: `store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}` limits a namespace to 100 keys. @@ -22,28 +24,19 @@ type QuotaConfig struct { MaxGroups int } -// Usage example: `scopedStore, err := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{Namespace: "tenant-a", Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}}); if err != nil { return }; _ = scopedStore.Set("colour", "blue")` -// ScopedStore keeps one namespace isolated behind helpers such as Set and -// GetFrom so callers do not repeat the `tenant-a:` prefix manually. 
-type ScopedStore struct { - store *Store - namespace string - // Usage example: `scopedStore.MaxKeys = 100` - MaxKeys int - // Usage example: `scopedStore.MaxGroups = 10` - MaxGroups int - - scopedWatchersLock sync.Mutex - scopedWatchers map[uintptr]*scopedWatcherBinding -} - -// Usage example: `err := scopedStore.Transaction(func(transaction *store.ScopedStoreTransaction) error { return transaction.Set("colour", "blue") })` -// Usage example: `if err := transaction.Delete("config", "colour"); err != nil { return err }` -type ScopedStoreTransaction struct { - scopedStore *ScopedStore - storeTransaction *StoreTransaction +// Usage example: `if err := (store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}).Validate(); err != nil { return }` +func (quotaConfig QuotaConfig) Validate() error { + if quotaConfig.MaxKeys < 0 || quotaConfig.MaxGroups < 0 { + return core.E( + "store.QuotaConfig.Validate", + core.Sprintf("quota values must be zero or positive; got MaxKeys=%d MaxGroups=%d", quotaConfig.MaxKeys, quotaConfig.MaxGroups), + nil, + ) + } + return nil } +// ScopedStoreConfig combines namespace selection with optional quota limits. 
// Usage example: `config := store.ScopedStoreConfig{Namespace: "tenant-a", Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}}` type ScopedStoreConfig struct { // Usage example: `config := store.ScopedStoreConfig{Namespace: "tenant-a"}` @@ -61,50 +54,51 @@ func (scopedConfig ScopedStoreConfig) Validate() error { nil, ) } - if scopedConfig.Quota.MaxKeys < 0 || scopedConfig.Quota.MaxGroups < 0 { - return core.E( - "store.ScopedStoreConfig.Validate", - core.Sprintf("quota values must be zero or positive; got MaxKeys=%d MaxGroups=%d", scopedConfig.Quota.MaxKeys, scopedConfig.Quota.MaxGroups), - nil, - ) + if err := scopedConfig.Quota.Validate(); err != nil { + return core.E("store.ScopedStoreConfig.Validate", "quota", err) } return nil } -type scopedWatcherBinding struct { - store *Store - underlyingEvents <-chan Event - done chan struct{} - stop chan struct{} - stopOnce sync.Once +// Usage example: `scopedStore := store.NewScoped(storeInstance, "tenant-a")` +// Usage example: `if err := scopedStore.Set("colour", "blue"); err != nil { return } // writes tenant-a:default/colour` +// Usage example: `if err := scopedStore.SetIn("config", "colour", "blue"); err != nil { return } // writes tenant-a:config/colour` +type ScopedStore struct { + store *Store + namespace string + // Usage example: `scopedStore.MaxKeys = 100` + MaxKeys int + // Usage example: `scopedStore.MaxGroups = 10` + MaxGroups int + + watcherBridgeLock sync.Mutex + watcherBridges map[uintptr]scopedWatcherBridge } -func (scopedStore *ScopedStore) resolvedStore(operation string) (*Store, error) { - if scopedStore == nil { - return nil, core.E(operation, "scoped store is nil", nil) - } - if scopedStore.store == nil { - return nil, core.E(operation, "underlying store is nil", nil) - } - if err := scopedStore.store.ensureReady(operation); err != nil { - return nil, err - } - return scopedStore.store, nil +type scopedWatcherBridge struct { + sourceGroup string + sourceEvents <-chan Event + done chan struct{} 
} -// Usage example: `scopedStore := store.NewScoped(storeInstance, "tenant-a"); if scopedStore == nil { return }` +// Usage example: `scopedStore := store.NewScoped(storeInstance, "tenant-a")` +// Prefer `NewScopedConfigured(storeInstance, store.ScopedStoreConfig{Namespace: "tenant-a"})` +// when the namespace and quota are already known at the call site. func NewScoped(storeInstance *Store, namespace string) *ScopedStore { - if storeInstance == nil { + if storeInstance == nil || !validNamespace.MatchString(namespace) { return nil } - if !validNamespace.MatchString(namespace) { - return nil + scopedStore := &ScopedStore{ + store: storeInstance, + namespace: namespace, + watcherBridges: make(map[uintptr]scopedWatcherBridge), } - scopedStore := &ScopedStore{store: storeInstance, namespace: namespace} return scopedStore } // Usage example: `scopedStore, err := store.NewScopedConfigured(storeInstance, store.ScopedStoreConfig{Namespace: "tenant-a", Quota: store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}}); if err != nil { return }` +// This keeps the namespace and quota in one declarative literal instead of an +// option chain. 
func NewScopedConfigured(storeInstance *Store, scopedConfig ScopedStoreConfig) (*ScopedStore, error) { if storeInstance == nil { return nil, core.E("store.NewScopedConfigured", "store instance is nil", nil) @@ -113,12 +107,17 @@ func NewScopedConfigured(storeInstance *Store, scopedConfig ScopedStoreConfig) ( return nil, core.E("store.NewScopedConfigured", "validate config", err) } scopedStore := NewScoped(storeInstance, scopedConfig.Namespace) + if scopedStore == nil { + return nil, core.E("store.NewScopedConfigured", "construct scoped store", nil) + } scopedStore.MaxKeys = scopedConfig.Quota.MaxKeys scopedStore.MaxGroups = scopedConfig.Quota.MaxGroups return scopedStore, nil } // Usage example: `scopedStore, err := store.NewScopedWithQuota(storeInstance, "tenant-a", store.QuotaConfig{MaxKeys: 100, MaxGroups: 10}); if err != nil { return }` +// This is a convenience constructor for callers that already have the namespace +// and quota values split across separate inputs. func NewScopedWithQuota(storeInstance *Store, namespace string, quota QuotaConfig) (*ScopedStore, error) { return NewScopedConfigured(storeInstance, ScopedStoreConfig{ Namespace: namespace, @@ -134,11 +133,29 @@ func (scopedStore *ScopedStore) namespacePrefix() string { return scopedStore.namespace + ":" } +func (scopedStore *ScopedStore) defaultGroup() string { + return defaultScopedGroupName +} + func (scopedStore *ScopedStore) trimNamespacePrefix(groupName string) string { return core.TrimPrefix(groupName, scopedStore.namespacePrefix()) } -// Usage example: `scopedStore := store.NewScoped(storeInstance, "tenant-a"); if scopedStore == nil { return }; fmt.Println(scopedStore.Namespace())` +func (scopedStore *ScopedStore) ensureReady(operation string) error { + if scopedStore == nil { + return core.E(operation, "scoped store is nil", nil) + } + if scopedStore.store == nil { + return core.E(operation, "scoped store store is nil", nil) + } + if err := scopedStore.store.ensureReady(operation); err != 
nil { + return err + } + return nil +} + +// Namespace returns the namespace string. +// Usage example: `scopedStore := store.NewScoped(storeInstance, "tenant-a"); namespace := scopedStore.Namespace(); fmt.Println(namespace)` func (scopedStore *ScopedStore) Namespace() string { if scopedStore == nil { return "" @@ -146,148 +163,185 @@ func (scopedStore *ScopedStore) Namespace() string { return scopedStore.namespace } +// Config returns the namespace and quota settings as a single declarative struct. +// Usage example: `config := scopedStore.Config(); fmt.Println(config.Namespace, config.Quota.MaxKeys, config.Quota.MaxGroups)` +func (scopedStore *ScopedStore) Config() ScopedStoreConfig { + if scopedStore == nil { + return ScopedStoreConfig{} + } + return ScopedStoreConfig{ + Namespace: scopedStore.namespace, + Quota: QuotaConfig{ + MaxKeys: scopedStore.MaxKeys, + MaxGroups: scopedStore.MaxGroups, + }, + } +} + +// Usage example: `exists, err := scopedStore.Exists("colour")` +// Usage example: `if exists, _ := scopedStore.Exists("token"); !exists { fmt.Println("session expired") }` +func (scopedStore *ScopedStore) Exists(key string) (bool, error) { + if err := scopedStore.ensureReady("store.ScopedStore.Exists"); err != nil { + return false, err + } + return scopedStore.store.Exists(scopedStore.namespacedGroup(scopedStore.defaultGroup()), key) +} + +// Usage example: `exists, err := scopedStore.ExistsIn("config", "colour")` +// Usage example: `if exists, _ := scopedStore.ExistsIn("session", "token"); !exists { fmt.Println("session expired") }` +func (scopedStore *ScopedStore) ExistsIn(group, key string) (bool, error) { + if err := scopedStore.ensureReady("store.ScopedStore.ExistsIn"); err != nil { + return false, err + } + return scopedStore.store.Exists(scopedStore.namespacedGroup(group), key) +} + +// Usage example: `exists, err := scopedStore.GroupExists("config")` +// Usage example: `if exists, _ := scopedStore.GroupExists("cache"); !exists { fmt.Println("group is 
empty") }` +func (scopedStore *ScopedStore) GroupExists(group string) (bool, error) { + if err := scopedStore.ensureReady("store.ScopedStore.GroupExists"); err != nil { + return false, err + } + return scopedStore.store.GroupExists(scopedStore.namespacedGroup(group)) +} + // Usage example: `colourValue, err := scopedStore.Get("colour")` func (scopedStore *ScopedStore) Get(key string) (string, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Get") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.Get"); err != nil { return "", err } - return backingStore.Get(scopedStore.namespacedGroup(defaultScopedGroupName), key) + return scopedStore.store.Get(scopedStore.namespacedGroup(scopedStore.defaultGroup()), key) } +// GetFrom reads a key from an explicit namespaced group. // Usage example: `colourValue, err := scopedStore.GetFrom("config", "colour")` func (scopedStore *ScopedStore) GetFrom(group, key string) (string, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GetFrom") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GetFrom"); err != nil { return "", err } - return backingStore.Get(scopedStore.namespacedGroup(group), key) + return scopedStore.store.Get(scopedStore.namespacedGroup(group), key) } // Usage example: `if err := scopedStore.Set("colour", "blue"); err != nil { return }` func (scopedStore *ScopedStore) Set(key, value string) error { - return scopedStore.SetIn(defaultScopedGroupName, key, value) + if err := scopedStore.ensureReady("store.ScopedStore.Set"); err != nil { + return err + } + if err := scopedStore.Transaction(func(scopedTransaction *ScopedStoreTransaction) error { + return scopedTransaction.Set(key, value) + }); err != nil { + return core.E("store.ScopedStore.Set", "write scoped key", err) + } + return nil } +// SetIn writes a key to an explicit namespaced group. 
// Usage example: `if err := scopedStore.SetIn("config", "colour", "blue"); err != nil { return }` func (scopedStore *ScopedStore) SetIn(group, key, value string) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.SetIn") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.SetIn"); err != nil { return err } - if err := scopedStore.checkQuota("store.ScopedStore.SetIn", group, key); err != nil { - return err + if err := scopedStore.Transaction(func(scopedTransaction *ScopedStoreTransaction) error { + return scopedTransaction.SetIn(group, key, value) + }); err != nil { + return core.E("store.ScopedStore.SetIn", "write scoped group key", err) } - return backingStore.Set(scopedStore.namespacedGroup(group), key, value) + return nil } // Usage example: `if err := scopedStore.SetWithTTL("sessions", "token", "abc123", time.Hour); err != nil { return }` func (scopedStore *ScopedStore) SetWithTTL(group, key, value string, timeToLive time.Duration) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.SetWithTTL") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.SetWithTTL"); err != nil { return err } - if err := scopedStore.checkQuota("store.ScopedStore.SetWithTTL", group, key); err != nil { - return err + if err := scopedStore.Transaction(func(scopedTransaction *ScopedStoreTransaction) error { + return scopedTransaction.SetWithTTL(group, key, value, timeToLive) + }); err != nil { + return core.E("store.ScopedStore.SetWithTTL", "write scoped group key with TTL", err) } - return backingStore.SetWithTTL(scopedStore.namespacedGroup(group), key, value, timeToLive) + return nil } // Usage example: `if err := scopedStore.Delete("config", "colour"); err != nil { return }` func (scopedStore *ScopedStore) Delete(group, key string) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Delete") - if err != nil { + if err := 
scopedStore.ensureReady("store.ScopedStore.Delete"); err != nil { return err } - return backingStore.Delete(scopedStore.namespacedGroup(group), key) + return scopedStore.store.Delete(scopedStore.namespacedGroup(group), key) } // Usage example: `if err := scopedStore.DeleteGroup("cache"); err != nil { return }` func (scopedStore *ScopedStore) DeleteGroup(group string) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.DeleteGroup") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.DeleteGroup"); err != nil { return err } - return backingStore.DeleteGroup(scopedStore.namespacedGroup(group)) + return scopedStore.store.DeleteGroup(scopedStore.namespacedGroup(group)) } -// Usage example: `if err := scopedStore.DeletePrefix("config"); err != nil { return }` +// Usage example: `if err := scopedStore.DeletePrefix("cache"); err != nil { return }` +// Usage example: `if err := scopedStore.DeletePrefix(""); err != nil { return }` func (scopedStore *ScopedStore) DeletePrefix(groupPrefix string) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.DeletePrefix") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.DeletePrefix"); err != nil { return err } - return backingStore.DeletePrefix(scopedStore.namespacedGroup(groupPrefix)) + return scopedStore.store.DeletePrefix(scopedStore.namespacedGroup(groupPrefix)) } // Usage example: `colourEntries, err := scopedStore.GetAll("config")` func (scopedStore *ScopedStore) GetAll(group string) (map[string]string, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GetAll") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GetAll"); err != nil { return nil, err } - return backingStore.GetAll(scopedStore.namespacedGroup(group)) + return scopedStore.store.GetAll(scopedStore.namespacedGroup(group)) } // Usage example: `page, err := scopedStore.GetPage("config", 0, 25); if err != nil { return }; for _, 
entry := range page { fmt.Println(entry.Key, entry.Value) }` func (scopedStore *ScopedStore) GetPage(group string, offset, limit int) ([]KeyValue, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GetPage") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GetPage"); err != nil { return nil, err } - return backingStore.GetPage(scopedStore.namespacedGroup(group), offset, limit) + return scopedStore.store.GetPage(scopedStore.namespacedGroup(group), offset, limit) } // Usage example: `for entry, err := range scopedStore.All("config") { if err != nil { break }; fmt.Println(entry.Key, entry.Value) }` func (scopedStore *ScopedStore) All(group string) iter.Seq2[KeyValue, error] { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.All") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.All"); err != nil { return func(yield func(KeyValue, error) bool) { yield(KeyValue{}, err) } } - return backingStore.All(scopedStore.namespacedGroup(group)) + return scopedStore.store.All(scopedStore.namespacedGroup(group)) } // Usage example: `for entry, err := range scopedStore.AllSeq("config") { if err != nil { break }; fmt.Println(entry.Key, entry.Value) }` func (scopedStore *ScopedStore) AllSeq(group string) iter.Seq2[KeyValue, error] { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.AllSeq") - if err != nil { - return func(yield func(KeyValue, error) bool) { - yield(KeyValue{}, err) - } - } - return backingStore.AllSeq(scopedStore.namespacedGroup(group)) + return scopedStore.All(group) } // Usage example: `keyCount, err := scopedStore.Count("config")` func (scopedStore *ScopedStore) Count(group string) (int, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Count") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.Count"); err != nil { return 0, err } - return backingStore.Count(scopedStore.namespacedGroup(group)) + return 
scopedStore.store.Count(scopedStore.namespacedGroup(group)) } // Usage example: `keyCount, err := scopedStore.CountAll("config")` // Usage example: `keyCount, err := scopedStore.CountAll()` func (scopedStore *ScopedStore) CountAll(groupPrefix ...string) (int, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.CountAll") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.CountAll"); err != nil { return 0, err } - return backingStore.CountAll(scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) + return scopedStore.store.CountAll(scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) } // Usage example: `groupNames, err := scopedStore.Groups("config")` // Usage example: `groupNames, err := scopedStore.Groups()` func (scopedStore *ScopedStore) Groups(groupPrefix ...string) ([]string, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Groups") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.Groups"); err != nil { return nil, err } - - groupNames, err := backingStore.Groups(scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) + groupNames, err := scopedStore.store.Groups(scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) if err != nil { return nil, err } @@ -301,13 +355,12 @@ func (scopedStore *ScopedStore) Groups(groupPrefix ...string) ([]string, error) // Usage example: `for groupName, err := range scopedStore.GroupsSeq() { if err != nil { break }; fmt.Println(groupName) }` func (scopedStore *ScopedStore) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { return func(yield func(string, error) bool) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GroupsSeq") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GroupsSeq"); err != nil { yield("", err) return } namespacePrefix := scopedStore.namespacePrefix() - for groupName, err := range 
backingStore.GroupsSeq(scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) { + for groupName, err := range scopedStore.store.GroupsSeq(scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) { if err != nil { if !yield("", err) { return @@ -323,335 +376,414 @@ func (scopedStore *ScopedStore) GroupsSeq(groupPrefix ...string) iter.Seq2[strin // Usage example: `renderedTemplate, err := scopedStore.Render("Hello {{ .name }}", "user")` func (scopedStore *ScopedStore) Render(templateSource, group string) (string, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Render") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.Render"); err != nil { return "", err } - return backingStore.Render(templateSource, scopedStore.namespacedGroup(group)) + return scopedStore.store.Render(templateSource, scopedStore.namespacedGroup(group)) } // Usage example: `parts, err := scopedStore.GetSplit("config", "hosts", ","); if err != nil { return }; for part := range parts { fmt.Println(part) }` func (scopedStore *ScopedStore) GetSplit(group, key, separator string) (iter.Seq[string], error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GetSplit") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GetSplit"); err != nil { return nil, err } - return backingStore.GetSplit(scopedStore.namespacedGroup(group), key, separator) + return scopedStore.store.GetSplit(scopedStore.namespacedGroup(group), key, separator) } // Usage example: `fields, err := scopedStore.GetFields("config", "flags"); if err != nil { return }; for field := range fields { fmt.Println(field) }` func (scopedStore *ScopedStore) GetFields(group, key string) (iter.Seq[string], error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.GetFields") - if err != nil { + if err := scopedStore.ensureReady("store.ScopedStore.GetFields"); err != nil { return nil, err } - return 
backingStore.GetFields(scopedStore.namespacedGroup(group), key) + return scopedStore.store.GetFields(scopedStore.namespacedGroup(group), key) +} + +// Usage example: `removedRows, err := scopedStore.PurgeExpired(); if err != nil { return }; fmt.Println(removedRows)` +func (scopedStore *ScopedStore) PurgeExpired() (int64, error) { + if err := scopedStore.ensureReady("store.ScopedStore.PurgeExpired"); err != nil { + return 0, err + } + + cutoffUnixMilli := time.Now().UnixMilli() + expiredEntries, err := deleteExpiredEntriesMatchingGroupPrefix(scopedStore.store.sqliteDatabase, scopedStore.namespacePrefix(), cutoffUnixMilli) + if err != nil { + return 0, core.E("store.ScopedStore.PurgeExpired", "delete expired rows", err) + } + removedRows := int64(len(expiredEntries)) + if removedRows > 0 { + for _, expiredEntry := range expiredEntries { + scopedStore.store.notify(Event{ + Type: EventDelete, + Group: expiredEntry.group, + Key: expiredEntry.key, + Timestamp: time.Now(), + }) + } + } + return removedRows, nil } // Usage example: `events := scopedStore.Watch("config")` +// Usage example: `events := scopedStore.Watch("*")` +// A write to `tenant-a:config` is delivered back to this scoped watcher as +// `config`, so callers never have to strip the namespace themselves. 
func (scopedStore *ScopedStore) Watch(group string) <-chan Event { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Watch") - if err != nil { + if scopedStore == nil || scopedStore.store == nil { return closedEventChannel() } - if group != "*" { - return backingStore.Watch(scopedStore.namespacedGroup(group)) - } - forwardedEvents := make(chan Event, watcherEventBufferCapacity) - binding := &scopedWatcherBinding{ - store: backingStore, - underlyingEvents: backingStore.Watch("*"), - done: make(chan struct{}), - stop: make(chan struct{}), + sourceGroup := scopedStore.namespacedGroup(group) + if group == "*" { + sourceGroup = "*" } - scopedStore.scopedWatchersLock.Lock() - if scopedStore.scopedWatchers == nil { - scopedStore.scopedWatchers = make(map[uintptr]*scopedWatcherBinding) + sourceEvents := scopedStore.store.Watch(sourceGroup) + localEvents := make(chan Event, watcherEventBufferCapacity) + done := make(chan struct{}) + localEventsPointer := channelPointer(localEvents) + + scopedStore.watcherBridgeLock.Lock() + if scopedStore.watcherBridges == nil { + scopedStore.watcherBridges = make(map[uintptr]scopedWatcherBridge) + } + scopedStore.watcherBridges[localEventsPointer] = scopedWatcherBridge{ + sourceGroup: sourceGroup, + sourceEvents: sourceEvents, + done: done, } - scopedStore.scopedWatchers[channelPointer(forwardedEvents)] = binding - scopedStore.scopedWatchersLock.Unlock() + scopedStore.watcherBridgeLock.Unlock() - namespacePrefix := scopedStore.namespacePrefix() go func() { - defer close(forwardedEvents) - defer close(binding.done) - defer scopedStore.forgetScopedWatcher(forwardedEvents) + defer close(localEvents) + defer scopedStore.removeWatcherBridge(localEventsPointer) for { select { - case event, ok := <-binding.underlyingEvents: + case <-done: + return + case event, ok := <-sourceEvents: if !ok { return } - if !core.HasPrefix(event.Group, namespacePrefix) { + + localEvent, allowed := scopedStore.localiseWatchedEvent(event) + if 
!allowed { continue } + select { - case forwardedEvents <- event: + case localEvents <- localEvent: default: } - case <-binding.stop: - return - case <-backingStore.purgeContext.Done(): - return } } }() - return forwardedEvents + return localEvents } -// Usage example: `scopedStore.Unwatch("config", events)` +// Usage example: `events := scopedStore.Watch("config"); scopedStore.Unwatch("config", events)` +// Usage example: `events := scopedStore.Watch("*"); scopedStore.Unwatch("*", events)` func (scopedStore *ScopedStore) Unwatch(group string, events <-chan Event) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Unwatch") - if err != nil { + if scopedStore == nil || events == nil { return } - if group == "*" { - scopedStore.forgetAndStopScopedWatcher(events) + + scopedStore.watcherBridgeLock.Lock() + watcherBridge, ok := scopedStore.watcherBridges[channelPointer(events)] + if ok { + delete(scopedStore.watcherBridges, channelPointer(events)) + } + scopedStore.watcherBridgeLock.Unlock() + + if !ok { return } - backingStore.Unwatch(scopedStore.namespacedGroup(group), events) + + close(watcherBridge.done) + scopedStore.store.Unwatch(watcherBridge.sourceGroup, watcherBridge.sourceEvents) } -// Usage example: `unregister := scopedStore.OnChange(func(event store.Event) { fmt.Println(event.Group, event.Key) })` +func (scopedStore *ScopedStore) removeWatcherBridge(pointer uintptr) { + if scopedStore == nil { + return + } + + scopedStore.watcherBridgeLock.Lock() + delete(scopedStore.watcherBridges, pointer) + scopedStore.watcherBridgeLock.Unlock() +} + +func (scopedStore *ScopedStore) localiseWatchedEvent(event Event) (Event, bool) { + if scopedStore == nil { + return Event{}, false + } + + namespacePrefix := scopedStore.namespacePrefix() + if event.Group == "*" { + return event, true + } + if !core.HasPrefix(event.Group, namespacePrefix) { + return Event{}, false + } + + event.Group = core.TrimPrefix(event.Group, namespacePrefix) + return event, true +} 
+ +// Usage example: `unregister := scopedStore.OnChange(func(event store.Event) { fmt.Println(event.Group, event.Key, event.Value) })` +// A callback registered on `tenant-a` receives `config` rather than +// `tenant-a:config`. func (scopedStore *ScopedStore) OnChange(callback func(Event)) func() { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.OnChange") - if err != nil { + if scopedStore == nil || callback == nil { return func() {} } - if callback == nil { + if scopedStore.store == nil { return func() {} } namespacePrefix := scopedStore.namespacePrefix() - return backingStore.OnChange(func(event Event) { + return scopedStore.store.OnChange(func(event Event) { if !core.HasPrefix(event.Group, namespacePrefix) { return } + event.Group = core.TrimPrefix(event.Group, namespacePrefix) callback(event) }) } -// Usage example: `removedRows, err := scopedStore.PurgeExpired(); if err != nil { return }; fmt.Println(removedRows)` -func (scopedStore *ScopedStore) PurgeExpired() (int64, error) { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.PurgeExpired") - if err != nil { - return 0, err - } - removedRows, err := backingStore.purgeExpiredMatchingGroupPrefix(scopedStore.namespacePrefix()) - if err != nil { - return 0, core.E("store.ScopedStore.PurgeExpired", "delete expired rows", err) - } - return removedRows, nil +// ScopedStoreTransaction exposes namespace-local transaction helpers so callers +// can work inside a scoped namespace without manually prefixing group names. 
+// +// Usage example: `err := scopedStore.Transaction(func(transaction *store.ScopedStoreTransaction) error { return transaction.Set("theme", "dark") })` +type ScopedStoreTransaction struct { + scopedStore *ScopedStore + storeTransaction *StoreTransaction } -// Usage example: `err := scopedStore.Transaction(func(transaction *store.ScopedStoreTransaction) error { return transaction.SetIn("config", "colour", "blue") })` +// Usage example: `err := scopedStore.Transaction(func(transaction *store.ScopedStoreTransaction) error { return transaction.Set("theme", "dark") })` func (scopedStore *ScopedStore) Transaction(operation func(*ScopedStoreTransaction) error) error { - backingStore, err := scopedStore.resolvedStore("store.ScopedStore.Transaction") - if err != nil { - return err + if scopedStore == nil { + return core.E("store.ScopedStore.Transaction", "scoped store is nil", nil) } if operation == nil { return core.E("store.ScopedStore.Transaction", "operation is nil", nil) } + if scopedStore.store == nil { + return core.E("store.ScopedStore.Transaction", "scoped store store is nil", nil) + } - return backingStore.Transaction(func(transaction *StoreTransaction) error { - scopedTransaction := &ScopedStoreTransaction{ + return scopedStore.store.Transaction(func(storeTransaction *StoreTransaction) error { + return operation(&ScopedStoreTransaction{ scopedStore: scopedStore, - storeTransaction: transaction, - } - return operation(scopedTransaction) + storeTransaction: storeTransaction, + }) }) } -func (scopedTransaction *ScopedStoreTransaction) resolvedTransaction(operation string) (*StoreTransaction, error) { - if scopedTransaction == nil { - return nil, core.E(operation, "scoped transaction is nil", nil) +func (scopedStoreTransaction *ScopedStoreTransaction) ensureReady(operation string) error { + if scopedStoreTransaction == nil { + return core.E(operation, "scoped transaction is nil", nil) } - if scopedTransaction.scopedStore == nil { - return nil, core.E(operation, 
"scoped store is nil", nil) + if scopedStoreTransaction.scopedStore == nil { + return core.E(operation, "scoped transaction store is nil", nil) } - if scopedTransaction.storeTransaction == nil { - return nil, core.E(operation, "transaction is nil", nil) + if scopedStoreTransaction.storeTransaction == nil { + return core.E(operation, "scoped transaction database is nil", nil) } - if _, err := scopedTransaction.scopedStore.resolvedStore(operation); err != nil { - return nil, err + if err := scopedStoreTransaction.scopedStore.store.ensureReady(operation); err != nil { + return err } - return scopedTransaction.storeTransaction, nil + return scopedStoreTransaction.storeTransaction.ensureReady(operation) } -// Usage example: `value, err := transaction.Get("colour")` -func (scopedTransaction *ScopedStoreTransaction) Get(key string) (string, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.Get") - if err != nil { - return "", err +// Usage example: `exists, err := scopedStoreTransaction.Exists("colour")` +func (scopedStoreTransaction *ScopedStoreTransaction) Exists(key string) (bool, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Exists"); err != nil { + return false, err } - return storeTransaction.Get(scopedTransaction.scopedStore.namespacedGroup(defaultScopedGroupName), key) + return scopedStoreTransaction.storeTransaction.Exists( + scopedStoreTransaction.scopedStore.namespacedGroup(scopedStoreTransaction.scopedStore.defaultGroup()), + key, + ) } -// Usage example: `value, err := transaction.GetFrom("config", "colour")` -func (scopedTransaction *ScopedStoreTransaction) GetFrom(group, key string) (string, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GetFrom") - if err != nil { +// Usage example: `exists, err := scopedStoreTransaction.ExistsIn("config", "colour")` +func (scopedStoreTransaction *ScopedStoreTransaction) 
ExistsIn(group, key string) (bool, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.ExistsIn"); err != nil { + return false, err + } + return scopedStoreTransaction.storeTransaction.Exists(scopedStoreTransaction.scopedStore.namespacedGroup(group), key) +} + +// Usage example: `exists, err := scopedStoreTransaction.GroupExists("config")` +func (scopedStoreTransaction *ScopedStoreTransaction) GroupExists(group string) (bool, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GroupExists"); err != nil { + return false, err + } + return scopedStoreTransaction.storeTransaction.GroupExists(scopedStoreTransaction.scopedStore.namespacedGroup(group)) +} + +// Usage example: `colourValue, err := scopedStoreTransaction.Get("colour")` +func (scopedStoreTransaction *ScopedStoreTransaction) Get(key string) (string, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Get"); err != nil { return "", err } - return storeTransaction.Get(scopedTransaction.scopedStore.namespacedGroup(group), key) + return scopedStoreTransaction.storeTransaction.Get( + scopedStoreTransaction.scopedStore.namespacedGroup(scopedStoreTransaction.scopedStore.defaultGroup()), + key, + ) } -// Usage example: `if err := transaction.Set("colour", "blue"); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) Set(key, value string) error { - return scopedTransaction.SetIn(defaultScopedGroupName, key, value) +// Usage example: `colourValue, err := scopedStoreTransaction.GetFrom("config", "colour")` +func (scopedStoreTransaction *ScopedStoreTransaction) GetFrom(group, key string) (string, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GetFrom"); err != nil { + return "", err + } + return scopedStoreTransaction.storeTransaction.Get(scopedStoreTransaction.scopedStore.namespacedGroup(group), key) } -// Usage example: `if err := 
transaction.SetIn("config", "colour", "blue"); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) SetIn(group, key, value string) error { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.SetIn") - if err != nil { +// Usage example: `if err := scopedStoreTransaction.Set("theme", "dark"); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) Set(key, value string) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Set"); err != nil { return err } - if err := scopedTransaction.checkQuota("store.ScopedStoreTransaction.SetIn", group, key); err != nil { + defaultGroup := scopedStoreTransaction.scopedStore.defaultGroup() + if err := scopedStoreTransaction.checkQuota("store.ScopedStoreTransaction.Set", defaultGroup, key); err != nil { return err } - return storeTransaction.Set(scopedTransaction.scopedStore.namespacedGroup(group), key, value) + return scopedStoreTransaction.storeTransaction.Set( + scopedStoreTransaction.scopedStore.namespacedGroup(defaultGroup), + key, + value, + ) } -// Usage example: `if err := transaction.SetWithTTL("sessions", "token", "abc123", time.Hour); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) SetWithTTL(group, key, value string, timeToLive time.Duration) error { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.SetWithTTL") - if err != nil { +// Usage example: `if err := scopedStoreTransaction.SetIn("config", "colour", "blue"); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) SetIn(group, key, value string) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.SetIn"); err != nil { return err } - if err := scopedTransaction.checkQuota("store.ScopedStoreTransaction.SetWithTTL", group, key); err != nil { + if err := 
scopedStoreTransaction.checkQuota("store.ScopedStoreTransaction.SetIn", group, key); err != nil { return err } - return storeTransaction.SetWithTTL(scopedTransaction.scopedStore.namespacedGroup(group), key, value, timeToLive) + return scopedStoreTransaction.storeTransaction.Set(scopedStoreTransaction.scopedStore.namespacedGroup(group), key, value) } -// Usage example: `if err := transaction.Delete("config", "colour"); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) Delete(group, key string) error { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.Delete") - if err != nil { +// Usage example: `if err := scopedStoreTransaction.SetWithTTL("sessions", "token", "abc123", time.Hour); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) SetWithTTL(group, key, value string, timeToLive time.Duration) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.SetWithTTL"); err != nil { + return err + } + if err := scopedStoreTransaction.checkQuota("store.ScopedStoreTransaction.SetWithTTL", group, key); err != nil { return err } - return storeTransaction.Delete(scopedTransaction.scopedStore.namespacedGroup(group), key) + return scopedStoreTransaction.storeTransaction.SetWithTTL(scopedStoreTransaction.scopedStore.namespacedGroup(group), key, value, timeToLive) } -// Usage example: `if err := transaction.DeleteGroup("cache"); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) DeleteGroup(group string) error { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.DeleteGroup") - if err != nil { +// Usage example: `if err := scopedStoreTransaction.Delete("config", "colour"); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) Delete(group, key string) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Delete"); err != nil { 
return err } - return storeTransaction.DeleteGroup(scopedTransaction.scopedStore.namespacedGroup(group)) + return scopedStoreTransaction.storeTransaction.Delete(scopedStoreTransaction.scopedStore.namespacedGroup(group), key) } -// Usage example: `if err := transaction.DeletePrefix("config"); err != nil { return err }` -func (scopedTransaction *ScopedStoreTransaction) DeletePrefix(groupPrefix string) error { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.DeletePrefix") - if err != nil { +// Usage example: `if err := scopedStoreTransaction.DeleteGroup("cache"); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) DeleteGroup(group string) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.DeleteGroup"); err != nil { return err } - return storeTransaction.DeletePrefix(scopedTransaction.scopedStore.namespacedGroup(groupPrefix)) + return scopedStoreTransaction.storeTransaction.DeleteGroup(scopedStoreTransaction.scopedStore.namespacedGroup(group)) } -// Usage example: `entries, err := transaction.GetAll("config")` -func (scopedTransaction *ScopedStoreTransaction) GetAll(group string) (map[string]string, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GetAll") - if err != nil { - return nil, err +// Usage example: `if err := scopedStoreTransaction.DeletePrefix("cache"); err != nil { return err }` +// Usage example: `if err := scopedStoreTransaction.DeletePrefix(""); err != nil { return err }` +func (scopedStoreTransaction *ScopedStoreTransaction) DeletePrefix(groupPrefix string) error { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.DeletePrefix"); err != nil { + return err } - return storeTransaction.GetAll(scopedTransaction.scopedStore.namespacedGroup(group)) + return 
scopedStoreTransaction.storeTransaction.DeletePrefix(scopedStoreTransaction.scopedStore.namespacedGroup(groupPrefix)) } -// Usage example: `page, err := transaction.GetPage("config", 0, 25)` -func (scopedTransaction *ScopedStoreTransaction) GetPage(group string, offset, limit int) ([]KeyValue, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GetPage") - if err != nil { +// Usage example: `colourEntries, err := scopedStoreTransaction.GetAll("config")` +func (scopedStoreTransaction *ScopedStoreTransaction) GetAll(group string) (map[string]string, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GetAll"); err != nil { return nil, err } - return storeTransaction.GetPage(scopedTransaction.scopedStore.namespacedGroup(group), offset, limit) + return scopedStoreTransaction.storeTransaction.GetAll(scopedStoreTransaction.scopedStore.namespacedGroup(group)) } -// Usage example: `for entry, err := range transaction.All("config") { if err != nil { return }; fmt.Println(entry.Key, entry.Value) }` -func (scopedTransaction *ScopedStoreTransaction) All(group string) iter.Seq2[KeyValue, error] { - return scopedTransaction.AllSeq(group) +// Usage example: `page, err := scopedStoreTransaction.GetPage("config", 0, 25); if err != nil { return }; for _, entry := range page { fmt.Println(entry.Key, entry.Value) }` +func (scopedStoreTransaction *ScopedStoreTransaction) GetPage(group string, offset, limit int) ([]KeyValue, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GetPage"); err != nil { + return nil, err + } + return scopedStoreTransaction.storeTransaction.GetPage(scopedStoreTransaction.scopedStore.namespacedGroup(group), offset, limit) } -// Usage example: `for entry, err := range transaction.AllSeq("config") { if err != nil { return }; fmt.Println(entry.Key, entry.Value) }` -func (scopedTransaction *ScopedStoreTransaction) AllSeq(group string) 
iter.Seq2[KeyValue, error] { - return func(yield func(KeyValue, error) bool) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.AllSeq") - if err != nil { +// Usage example: `for entry, err := range scopedStoreTransaction.All("config") { if err != nil { break }; fmt.Println(entry.Key, entry.Value) }` +func (scopedStoreTransaction *ScopedStoreTransaction) All(group string) iter.Seq2[KeyValue, error] { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.All"); err != nil { + return func(yield func(KeyValue, error) bool) { yield(KeyValue{}, err) - return - } - for entry, iterationErr := range storeTransaction.AllSeq(scopedTransaction.scopedStore.namespacedGroup(group)) { - if iterationErr != nil { - if !yield(KeyValue{}, iterationErr) { - return - } - continue - } - if !yield(entry, nil) { - return - } } } + return scopedStoreTransaction.storeTransaction.All(scopedStoreTransaction.scopedStore.namespacedGroup(group)) } -// Usage example: `count, err := transaction.Count("config")` -func (scopedTransaction *ScopedStoreTransaction) Count(group string) (int, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.Count") - if err != nil { +// Usage example: `for entry, err := range scopedStoreTransaction.AllSeq("config") { if err != nil { break }; fmt.Println(entry.Key, entry.Value) }` +func (scopedStoreTransaction *ScopedStoreTransaction) AllSeq(group string) iter.Seq2[KeyValue, error] { + return scopedStoreTransaction.All(group) +} + +// Usage example: `keyCount, err := scopedStoreTransaction.Count("config")` +func (scopedStoreTransaction *ScopedStoreTransaction) Count(group string) (int, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Count"); err != nil { return 0, err } - return storeTransaction.Count(scopedTransaction.scopedStore.namespacedGroup(group)) + return 
scopedStoreTransaction.storeTransaction.Count(scopedStoreTransaction.scopedStore.namespacedGroup(group)) } -// Usage example: `count, err := transaction.CountAll("config")` -// Usage example: `count, err := transaction.CountAll()` -func (scopedTransaction *ScopedStoreTransaction) CountAll(groupPrefix ...string) (int, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.CountAll") - if err != nil { +// Usage example: `keyCount, err := scopedStoreTransaction.CountAll("config")` +// Usage example: `keyCount, err := scopedStoreTransaction.CountAll()` +func (scopedStoreTransaction *ScopedStoreTransaction) CountAll(groupPrefix ...string) (int, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.CountAll"); err != nil { return 0, err } - return storeTransaction.CountAll(scopedTransaction.scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) + return scopedStoreTransaction.storeTransaction.CountAll(scopedStoreTransaction.scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) } -// Usage example: `groups, err := transaction.Groups("config")` -// Usage example: `groups, err := transaction.Groups()` -func (scopedTransaction *ScopedStoreTransaction) Groups(groupPrefix ...string) ([]string, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.Groups") - if err != nil { +// Usage example: `groupNames, err := scopedStoreTransaction.Groups("config")` +// Usage example: `groupNames, err := scopedStoreTransaction.Groups()` +func (scopedStoreTransaction *ScopedStoreTransaction) Groups(groupPrefix ...string) ([]string, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Groups"); err != nil { return nil, err } - groupNames, err := storeTransaction.Groups(scopedTransaction.scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) + groupNames, err := 
scopedStoreTransaction.storeTransaction.Groups(scopedStoreTransaction.scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) if err != nil { return nil, err } - for index, groupName := range groupNames { - groupNames[index] = scopedTransaction.scopedStore.trimNamespacePrefix(groupName) + for i, groupName := range groupNames { + groupNames[i] = scopedStoreTransaction.scopedStore.trimNamespacePrefix(groupName) } return groupNames, nil } -// Usage example: `for groupName, err := range transaction.GroupsSeq("config") { if err != nil { return }; fmt.Println(groupName) }` -// Usage example: `for groupName, err := range transaction.GroupsSeq() { if err != nil { return }; fmt.Println(groupName) }` -func (scopedTransaction *ScopedStoreTransaction) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { +// Usage example: `for groupName, err := range scopedStoreTransaction.GroupsSeq("config") { if err != nil { break }; fmt.Println(groupName) }` +// Usage example: `for groupName, err := range scopedStoreTransaction.GroupsSeq() { if err != nil { break }; fmt.Println(groupName) }` +func (scopedStoreTransaction *ScopedStoreTransaction) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { return func(yield func(string, error) bool) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GroupsSeq") - if err != nil { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GroupsSeq"); err != nil { yield("", err) return } - namespacePrefix := scopedTransaction.scopedStore.namespacePrefix() - for groupName, iterationErr := range storeTransaction.GroupsSeq(scopedTransaction.scopedStore.namespacedGroup(firstOrEmptyString(groupPrefix))) { - if iterationErr != nil { - if !yield("", iterationErr) { + + namespacePrefix := scopedStoreTransaction.scopedStore.namespacePrefix() + for groupName, err := range 
scopedStoreTransaction.storeTransaction.GroupsSeq(scopedStoreTransaction.scopedStore.namespacedGroup(firstStringOrEmpty(groupPrefix))) { + if err != nil { + if !yield("", err) { return } continue @@ -663,140 +795,97 @@ func (scopedTransaction *ScopedStoreTransaction) GroupsSeq(groupPrefix ...string } } -// Usage example: `renderedTemplate, err := transaction.Render("Hello {{ .name }}", "user")` -func (scopedTransaction *ScopedStoreTransaction) Render(templateSource, group string) (string, error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.Render") - if err != nil { +// Usage example: `renderedTemplate, err := scopedStoreTransaction.Render("Hello {{ .name }}", "user")` +func (scopedStoreTransaction *ScopedStoreTransaction) Render(templateSource, group string) (string, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.Render"); err != nil { return "", err } - return storeTransaction.Render(templateSource, scopedTransaction.scopedStore.namespacedGroup(group)) + return scopedStoreTransaction.storeTransaction.Render(templateSource, scopedStoreTransaction.scopedStore.namespacedGroup(group)) } -// Usage example: `parts, err := transaction.GetSplit("config", "hosts", ",")` -func (scopedTransaction *ScopedStoreTransaction) GetSplit(group, key, separator string) (iter.Seq[string], error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GetSplit") - if err != nil { +// Usage example: `parts, err := scopedStoreTransaction.GetSplit("config", "hosts", ","); if err != nil { return }; for part := range parts { fmt.Println(part) }` +func (scopedStoreTransaction *ScopedStoreTransaction) GetSplit(group, key, separator string) (iter.Seq[string], error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GetSplit"); err != nil { return nil, err } - return 
storeTransaction.GetSplit(scopedTransaction.scopedStore.namespacedGroup(group), key, separator) + return scopedStoreTransaction.storeTransaction.GetSplit(scopedStoreTransaction.scopedStore.namespacedGroup(group), key, separator) } -// Usage example: `fields, err := transaction.GetFields("config", "flags")` -func (scopedTransaction *ScopedStoreTransaction) GetFields(group, key string) (iter.Seq[string], error) { - storeTransaction, err := scopedTransaction.resolvedTransaction("store.ScopedStoreTransaction.GetFields") - if err != nil { +// Usage example: `fields, err := scopedStoreTransaction.GetFields("config", "flags"); if err != nil { return }; for field := range fields { fmt.Println(field) }` +func (scopedStoreTransaction *ScopedStoreTransaction) GetFields(group, key string) (iter.Seq[string], error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.GetFields"); err != nil { return nil, err } - return storeTransaction.GetFields(scopedTransaction.scopedStore.namespacedGroup(group), key) -} - -// checkQuota("store.ScopedStoreTransaction.SetIn", "config", "colour") uses -// the transaction's own read state so staged writes inside the same -// transaction count towards the namespace limits. 
-func (scopedTransaction *ScopedStoreTransaction) checkQuota(operation, group, key string) error { - if scopedTransaction == nil { - return core.E(operation, "scoped transaction is nil", nil) - } - if scopedTransaction.scopedStore == nil { - return core.E(operation, "scoped store is nil", nil) - } - storeTransaction, err := scopedTransaction.resolvedTransaction(operation) - if err != nil { - return err - } - return checkNamespaceQuota( - operation, - group, - key, - scopedTransaction.scopedStore.namespacedGroup(group), - scopedTransaction.scopedStore.namespacePrefix(), - scopedTransaction.scopedStore.MaxKeys, - scopedTransaction.scopedStore.MaxGroups, - storeTransaction, - ) -} - -func (scopedStore *ScopedStore) forgetScopedWatcher(events <-chan Event) { - if scopedStore == nil || events == nil { - return - } - - scopedStore.scopedWatchersLock.Lock() - defer scopedStore.scopedWatchersLock.Unlock() - if scopedStore.scopedWatchers == nil { - return - } - delete(scopedStore.scopedWatchers, channelPointer(events)) + return scopedStoreTransaction.storeTransaction.GetFields(scopedStoreTransaction.scopedStore.namespacedGroup(group), key) } -func (scopedStore *ScopedStore) forgetAndStopScopedWatcher(events <-chan Event) { - if scopedStore == nil || events == nil { - return - } - - scopedStore.scopedWatchersLock.Lock() - binding := scopedStore.scopedWatchers[channelPointer(events)] - if binding != nil { - delete(scopedStore.scopedWatchers, channelPointer(events)) +// Usage example: `removedRows, err := scopedStoreTransaction.PurgeExpired(); if err != nil { return err }; fmt.Println(removedRows)` +func (scopedStoreTransaction *ScopedStoreTransaction) PurgeExpired() (int64, error) { + if err := scopedStoreTransaction.ensureReady("store.ScopedStoreTransaction.PurgeExpired"); err != nil { + return 0, err } - scopedStore.scopedWatchersLock.Unlock() - if binding == nil { - return + cutoffUnixMilli := time.Now().UnixMilli() + expiredEntries, err := 
deleteExpiredEntriesMatchingGroupPrefix(scopedStoreTransaction.storeTransaction.sqliteTransaction, scopedStoreTransaction.scopedStore.namespacePrefix(), cutoffUnixMilli) + if err != nil { + return 0, core.E("store.ScopedStoreTransaction.PurgeExpired", "delete expired rows", err) } - - binding.stopOnce.Do(func() { - close(binding.stop) - }) - if binding.store != nil { - binding.store.Unwatch("*", binding.underlyingEvents) + removedRows := int64(len(expiredEntries)) + if removedRows > 0 { + for _, expiredEntry := range expiredEntries { + scopedStoreTransaction.storeTransaction.recordEvent(Event{ + Type: EventDelete, + Group: expiredEntry.group, + Key: expiredEntry.key, + Timestamp: time.Now(), + }) + } } - <-binding.done + return removedRows, nil } -// checkQuota("store.ScopedStore.Set", "config", "colour") returns nil when the -// namespace still has quota available and QuotaExceededError when a new key or -// group would exceed the configured limit. Existing keys are treated as -// upserts and do not consume quota. 
-func (scopedStore *ScopedStore) checkQuota(operation, group, key string) error { - if scopedStore == nil { - return core.E(operation, "scoped store is nil", nil) - } - return checkNamespaceQuota( +func (scopedStoreTransaction *ScopedStoreTransaction) checkQuota(operation, group, key string) error { + return enforceQuota( operation, group, key, - scopedStore.namespacedGroup(group), - scopedStore.namespacePrefix(), - scopedStore.MaxKeys, - scopedStore.MaxGroups, - scopedStore.store, + scopedStoreTransaction.scopedStore.namespacePrefix(), + scopedStoreTransaction.scopedStore.namespacedGroup(group), + scopedStoreTransaction.scopedStore.MaxKeys, + scopedStoreTransaction.scopedStore.MaxGroups, + scopedStoreTransaction.storeTransaction.sqliteTransaction, + scopedStoreTransaction.storeTransaction, ) } -type namespaceQuotaReader interface { - Get(group, key string) (string, error) - Count(group string) (int, error) +type quotaCounter interface { CountAll(groupPrefix string) (int, error) - Groups(groupPrefix ...string) ([]string, error) + Count(group string) (int, error) + GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] } -func checkNamespaceQuota(operation, group, key, namespacedGroup, namespacePrefix string, maxKeys, maxGroups int, reader namespaceQuotaReader) error { +func enforceQuota( + operation, group, key, namespacePrefix, namespacedGroup string, + maxKeys, maxGroups int, + queryable keyExistenceQuery, + counter quotaCounter, +) error { if maxKeys == 0 && maxGroups == 0 { return nil } - // Upserts never consume quota. - _, err := reader.Get(namespacedGroup, key) - if err == nil { - return nil - } - if !core.Is(err, NotFoundError) { + exists, err := liveEntryExists(queryable, namespacedGroup, key) + if err != nil { + // A database error occurred, not just a "not found" result. return core.E(operation, "quota check", err) } + if exists { + // Key exists - this is an upsert, no quota check needed. 
+ return nil + } if maxKeys > 0 { - keyCount, err := reader.CountAll(namespacePrefix) + keyCount, err := counter.CountAll(namespacePrefix) if err != nil { return core.E(operation, "quota check", err) } @@ -806,16 +895,19 @@ func checkNamespaceQuota(operation, group, key, namespacedGroup, namespacePrefix } if maxGroups > 0 { - existingGroupCount, err := reader.Count(namespacedGroup) + existingGroupCount, err := counter.Count(namespacedGroup) if err != nil { return core.E(operation, "quota check", err) } if existingGroupCount == 0 { - groupNames, err := reader.Groups(namespacePrefix) - if err != nil { - return core.E(operation, "quota check", err) + knownGroupCount := 0 + for _, iterationErr := range counter.GroupsSeq(namespacePrefix) { + if iterationErr != nil { + return core.E(operation, "quota check", iterationErr) + } + knownGroupCount++ } - if len(groupNames) >= maxGroups { + if knownGroupCount >= maxGroups { return core.E(operation, core.Sprintf("group limit (%d)", maxGroups), QuotaExceededError) } } @@ -823,3 +915,24 @@ func checkNamespaceQuota(operation, group, key, namespacedGroup, namespacePrefix return nil } + +func liveEntryExists(queryable keyExistenceQuery, group, key string) (bool, error) { + var exists int + err := queryable.QueryRow( + "SELECT 1 FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? AND "+entryKeyColumn+" = ? AND (expires_at IS NULL OR expires_at > ?) 
LIMIT 1", + group, + key, + time.Now().UnixMilli(), + ).Scan(&exists) + if err == nil { + return true, nil + } + if err == sql.ErrNoRows { + return false, nil + } + return false, err +} + +type keyExistenceQuery interface { + QueryRow(query string, args ...any) *sql.Row +} diff --git a/scope_test.go b/scope_test.go index 1b72c88..6c54063 100644 --- a/scope_test.go +++ b/scope_test.go @@ -5,148 +5,210 @@ import ( "time" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func mustScoped(t *testing.T, storeInstance *Store, namespace string) *ScopedStore { - t.Helper() - - scopedStore := NewScoped(storeInstance, namespace) - require.NotNil(t, scopedStore) - return scopedStore -} - // --------------------------------------------------------------------------- // NewScoped — constructor validation // --------------------------------------------------------------------------- func TestScope_NewScoped_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-1") - assert.Equal(t, "tenant-1", scopedStore.Namespace()) + scopedStore := NewScoped(storeInstance, "tenant-1") + assertNotNil(t, scopedStore) + assertEqual(t, "tenant-1", scopedStore.Namespace()) +} + +func TestScope_ScopedStore_Good_Config(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, + }) + assertNoError(t, err) + + assertEqual(t, ScopedStoreConfig{Namespace: "tenant-a", Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}}, scopedStore.Config()) +} + +func TestScope_ScopedStore_Good_ConfigZeroValueFromNil(t *testing.T) { + var scopedStore *ScopedStore + + assertEqual(t, ScopedStoreConfig{}, scopedStore.Config()) } func 
TestScope_NewScoped_Good_AlphanumericHyphens(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - for _, namespace := range []string{"abc", "ABC", "123", "a-b-c", "tenant-42", "A1-B2"} { - require.NotNil(t, NewScoped(storeInstance, namespace), "namespace %q should be valid", namespace) + valid := []string{"abc", "ABC", "123", "a-b-c", "tenant-42", "A1-B2"} + for _, namespace := range valid { + scopedStore := NewScoped(storeInstance, namespace) + assertNotNil(t, scopedStore) } } func TestScope_NewScoped_Bad_Empty(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - assert.Nil(t, NewScoped(storeInstance, "")) + assertNil(t, NewScoped(storeInstance, "")) } func TestScope_NewScoped_Bad_NilStore(t *testing.T) { - assert.Nil(t, NewScoped(nil, "tenant-a")) + assertNil(t, NewScoped(nil, "tenant-a")) } func TestScope_NewScoped_Bad_InvalidChars(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - for _, namespace := range []string{"foo.bar", "foo:bar", "foo bar", "foo/bar", "foo_bar", "tenant!", "@ns"} { - assert.Nil(t, NewScoped(storeInstance, namespace), "namespace %q should be invalid", namespace) + invalid := []string{"foo.bar", "foo:bar", "foo bar", "foo/bar", "foo_bar", "tenant!", "@ns"} + for _, namespace := range invalid { + assertNilf(t, NewScoped(storeInstance, namespace), "namespace %q should be invalid", namespace) } } -func TestScope_NewScopedWithQuota_Bad_InvalidNamespace(t *testing.T) { +func TestScope_NewScopedConfigured_Bad_InvalidNamespaceFromQuotaConfig(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - _, err := NewScopedWithQuota(storeInstance, "tenant_a", QuotaConfig{MaxKeys: 1}) - require.Error(t, err) - assert.Contains(t, err.Error(), 
"namespace") + _, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant_a", + Quota: QuotaConfig{MaxKeys: 1}, + }) + assertError(t, err) + assertContainsString(t, err.Error(), "store.NewScoped") } -func TestScope_NewScopedWithQuota_Bad_NilStore(t *testing.T) { - _, err := NewScopedWithQuota(nil, "tenant-a", QuotaConfig{MaxKeys: 1}) - require.Error(t, err) - assert.Contains(t, err.Error(), "store instance is nil") +func TestScope_NewScopedConfigured_Bad_NilStoreFromQuotaConfig(t *testing.T) { + _, err := NewScopedConfigured(nil, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 1}, + }) + assertError(t, err) + assertContainsString(t, err.Error(), "store instance is nil") } -func TestScope_NewScopedWithQuota_Bad_NegativeMaxKeys(t *testing.T) { +func TestScope_NewScopedConfigured_Bad_NegativeMaxKeys(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - _, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: -1}) - require.Error(t, err) - assert.Contains(t, err.Error(), "zero or positive") + _, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: -1}, + }) + assertError(t, err) + assertContainsString(t, err.Error(), "zero or positive") } -func TestScope_NewScopedWithQuota_Bad_NegativeMaxGroups(t *testing.T) { +func TestScope_NewScopedConfigured_Bad_NegativeMaxGroups(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - _, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: -1}) - require.Error(t, err) - assert.Contains(t, err.Error(), "zero or positive") + _, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: -1}, + }) + assertError(t, err) + assertContainsString(t, err.Error(), "zero or 
positive") } -func TestScope_NewScopedWithQuota_Good_InlineQuotaFields(t *testing.T) { +func TestScope_NewScopedConfigured_Good_InlineQuotaFields(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 4, MaxGroups: 2}) - require.NoError(t, err) + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, + }) + assertNoError(t, err) - assert.Equal(t, 4, scopedStore.MaxKeys) - assert.Equal(t, 2, scopedStore.MaxGroups) + assertEqual(t, 4, scopedStore.MaxKeys) + assertEqual(t, 2, scopedStore.MaxGroups) +} + +func TestScope_ScopedStoreConfig_Good_Validate(t *testing.T) { + err := (ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, + }).Validate() + assertNoError(t, err) } func TestScope_NewScopedConfigured_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ Namespace: "tenant-a", Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, }) - require.NoError(t, err) + assertNoError(t, err) + assertNotNil(t, scopedStore) + assertEqual(t, 4, scopedStore.MaxKeys) + assertEqual(t, 2, scopedStore.MaxGroups) +} + +func TestScope_NewScopedWithQuota_Good(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 4, MaxGroups: 2}) + assertNoError(t, err) + assertNotNil(t, scopedStore) - assert.Equal(t, "tenant-a", scopedStore.Namespace()) - assert.Equal(t, 4, scopedStore.MaxKeys) - assert.Equal(t, 2, scopedStore.MaxGroups) + assertEqual(t, "tenant-a", scopedStore.Namespace()) + assertEqual(t, 4, scopedStore.MaxKeys) + 
assertEqual(t, 2, scopedStore.MaxGroups) } func TestScope_NewScopedConfigured_Bad_InvalidNamespace(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - _, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{Namespace: "tenant_a"}) - require.Error(t, err) - assert.Contains(t, err.Error(), "namespace") + _, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant_a", + Quota: QuotaConfig{MaxKeys: 1}, + }) + assertError(t, err) + assertContainsString(t, err.Error(), "namespace") } -func TestScope_ScopedStoreConfig_Good_Validate(t *testing.T) { - err := (ScopedStoreConfig{ - Namespace: "tenant-a", - Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, - }).Validate() - require.NoError(t, err) -} +func TestScope_ScopedStore_Good_NilReceiverReturnsErrors(t *testing.T) { + var scopedStore *ScopedStore -func TestScope_ScopedStoreConfig_Bad_InvalidNamespace(t *testing.T) { - err := (ScopedStoreConfig{Namespace: "tenant_a"}).Validate() - require.Error(t, err) - assert.Contains(t, err.Error(), "namespace") -} + _, err := scopedStore.Get("theme") + assertError(t, err) + assertContainsString(t, err.Error(), "scoped store is nil") -func TestScope_ScopedStoreConfig_Bad_NegativeQuota(t *testing.T) { - err := (ScopedStoreConfig{ - Namespace: "tenant-a", - Quota: QuotaConfig{MaxKeys: -1}, - }).Validate() - require.Error(t, err) - assert.Contains(t, err.Error(), "quota values must be zero or positive") + err = scopedStore.Set("theme", "dark") + assertError(t, err) + assertContainsString(t, err.Error(), "scoped store is nil") + + _, err = scopedStore.Count("config") + assertError(t, err) + assertContainsString(t, err.Error(), "scoped store is nil") + + _, err = scopedStore.Groups() + assertError(t, err) + assertContainsString(t, err.Error(), "scoped store is nil") + + for entry, iterationErr := range scopedStore.All("config") { + _ = entry + assertError(t, iterationErr) + 
assertContainsString(t, iterationErr.Error(), "scoped store is nil") + break + } + + for groupName, iterationErr := range scopedStore.GroupsSeq() { + _ = groupName + assertError(t, iterationErr) + assertContainsString(t, iterationErr.Error(), "scoped store is nil") + break + } } // --------------------------------------------------------------------------- @@ -155,545 +217,605 @@ func TestScope_ScopedStoreConfig_Bad_NegativeQuota(t *testing.T) { func TestScope_ScopedStore_Good_SetGet(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("config", "theme", "dark")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "theme", "dark")) value, err := scopedStore.GetFrom("config", "theme") - require.NoError(t, err) - assert.Equal(t, "dark", value) + assertNoError(t, err) + assertEqual(t, "dark", value) } func TestScope_ScopedStore_Good_DefaultGroupHelpers(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.Set("theme", "dark")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.Set("theme", "dark")) value, err := scopedStore.Get("theme") - require.NoError(t, err) - assert.Equal(t, "dark", value) + assertNoError(t, err) + assertEqual(t, "dark", value) rawValue, err := storeInstance.Get("tenant-a:default", "theme") - require.NoError(t, err) - assert.Equal(t, "dark", rawValue) + assertNoError(t, err) + assertEqual(t, "dark", rawValue) } -func TestScope_ScopedStore_Good_SetInAndGetFrom(t *testing.T) { +func TestScope_ScopedStore_Good_SetInGetFrom(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = 
storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("config", "colour", "blue")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "theme", "dark")) - value, err := scopedStore.GetFrom("config", "colour") - require.NoError(t, err) - assert.Equal(t, "blue", value) + value, err := scopedStore.GetFrom("config", "theme") + assertNoError(t, err) + assertEqual(t, "dark", value) } -func TestScope_ScopedStore_Good_AllSeq(t *testing.T) { +func TestScope_ScopedStore_Good_PrefixedInUnderlyingStore(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("items", "first", "1")) - require.NoError(t, scopedStore.SetIn("items", "second", "2")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "key", "val")) - var keys []string - for entry, err := range scopedStore.AllSeq("items") { - require.NoError(t, err) - keys = append(keys, entry.Key) - } + // The underlying store should have the prefixed group name. + value, err := storeInstance.Get("tenant-a:config", "key") + assertNoError(t, err) + assertEqual(t, "val", value) - assert.ElementsMatch(t, []string{"first", "second"}, keys) + // Direct access without prefix should fail. 
+ _, err = storeInstance.Get("config", "key") + assertTrue(t, core.Is(err, NotFoundError)) } -func TestScope_ScopedStore_Good_GetPage(t *testing.T) { +func TestScope_ScopedStore_Good_NamespaceIsolation(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("items", "charlie", "3")) - require.NoError(t, scopedStore.SetIn("items", "alpha", "1")) - require.NoError(t, scopedStore.SetIn("items", "bravo", "2")) + alphaStore := NewScoped(storeInstance, "tenant-a") + betaStore := NewScoped(storeInstance, "tenant-b") - page, err := scopedStore.GetPage("items", 0, 2) - require.NoError(t, err) - require.Len(t, page, 2) - assert.Equal(t, []KeyValue{{Key: "alpha", Value: "1"}, {Key: "bravo", Value: "2"}}, page) + assertNoError(t, alphaStore.SetIn("config", "colour", "blue")) + assertNoError(t, betaStore.SetIn("config", "colour", "red")) + + alphaValue, err := alphaStore.GetFrom("config", "colour") + assertNoError(t, err) + assertEqual(t, "blue", alphaValue) + + betaValue, err := betaStore.GetFrom("config", "colour") + assertNoError(t, err) + assertEqual(t, "red", betaValue) } -func TestScope_ScopedStore_Good_PrefixedInUnderlyingStore(t *testing.T) { +// --------------------------------------------------------------------------- +// ScopedStore — Exists / ExistsIn / GroupExists +// --------------------------------------------------------------------------- + +func TestScope_ScopedStore_Good_ExistsInDefaultGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("config", "key", "val")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.Set("colour", "blue")) - value, err := 
storeInstance.Get("tenant-a:config", "key") - require.NoError(t, err) - assert.Equal(t, "val", value) + exists, err := scopedStore.Exists("colour") + assertNoError(t, err) + assertTrue(t, exists) - _, err = storeInstance.Get("config", "key") - assert.True(t, core.Is(err, NotFoundError)) + exists, err = scopedStore.Exists("missing") + assertNoError(t, err) + assertFalse(t, exists) } -func TestScope_ScopedStore_Good_NamespaceIsolation(t *testing.T) { +func TestScope_ScopedStore_Good_ExistsInExplicitGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - alphaStore := mustScoped(t, storeInstance, "tenant-a") - betaStore := mustScoped(t, storeInstance, "tenant-b") + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) - require.NoError(t, alphaStore.SetIn("config", "colour", "blue")) - require.NoError(t, betaStore.SetIn("config", "colour", "red")) + exists, err := scopedStore.ExistsIn("config", "colour") + assertNoError(t, err) + assertTrue(t, exists) - alphaValue, err := alphaStore.GetFrom("config", "colour") - require.NoError(t, err) - assert.Equal(t, "blue", alphaValue) + exists, err = scopedStore.ExistsIn("config", "missing") + assertNoError(t, err) + assertFalse(t, exists) - betaValue, err := betaStore.GetFrom("config", "colour") - require.NoError(t, err) - assert.Equal(t, "red", betaValue) + exists, err = scopedStore.ExistsIn("other-group", "colour") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestScope_ScopedStore_Good_ExistsExpiredKeyReturnsFalse(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetWithTTL("session", "token", "abc123", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + exists, err := scopedStore.ExistsIn("session", "token") + 
assertNoError(t, err) + assertFalse(t, exists) +} + +func TestScope_ScopedStore_Good_GroupExists(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + + exists, err := scopedStore.GroupExists("config") + assertNoError(t, err) + assertTrue(t, exists) + + exists, err = scopedStore.GroupExists("missing-group") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestScope_ScopedStore_Good_GroupExistsAfterDelete(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + assertNoError(t, scopedStore.DeleteGroup("config")) + + exists, err := scopedStore.GroupExists("config") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestScope_ScopedStore_Bad_ExistsClosedStore(t *testing.T) { + storeInstance, _ := New(":memory:") + _ = storeInstance.Close() + scopedStore := NewScoped(storeInstance, "tenant-a") + + _, err := scopedStore.Exists("colour") + assertError(t, err) + + _, err = scopedStore.ExistsIn("config", "colour") + assertError(t, err) + + _, err = scopedStore.GroupExists("config") + assertError(t, err) } func TestScope_ScopedStore_Good_Delete(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("g", "k", "v")) - require.NoError(t, scopedStore.Delete("g", "k")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("g", "k", "v")) + assertNoError(t, scopedStore.Delete("g", "k")) _, err := scopedStore.GetFrom("g", "k") - assert.True(t, core.Is(err, NotFoundError)) + assertTrue(t, core.Is(err, NotFoundError)) } func 
TestScope_ScopedStore_Good_DeleteGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("g", "a", "1")) - require.NoError(t, scopedStore.SetIn("g", "b", "2")) - require.NoError(t, scopedStore.DeleteGroup("g")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("g", "a", "1")) + assertNoError(t, scopedStore.SetIn("g", "b", "2")) + assertNoError(t, scopedStore.DeleteGroup("g")) count, err := scopedStore.Count("g") - require.NoError(t, err) - assert.Equal(t, 0, count) + assertNoError(t, err) + assertEqual(t, 0, count) } func TestScope_ScopedStore_Good_DeletePrefix(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + otherScopedStore := NewScoped(storeInstance, "tenant-b") - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("config", "colour", "blue")) - require.NoError(t, scopedStore.SetIn("sessions", "token", "abc123")) - require.NoError(t, storeInstance.Set("tenant-b:config", "colour", "green")) + assertNoError(t, scopedStore.SetIn("config", "theme", "dark")) + assertNoError(t, scopedStore.SetIn("cache", "page", "home")) + assertNoError(t, scopedStore.SetIn("cache-warm", "status", "ready")) + assertNoError(t, otherScopedStore.SetIn("cache", "page", "keep")) - require.NoError(t, scopedStore.DeletePrefix("")) + assertNoError(t, scopedStore.DeletePrefix("cache")) - _, err := scopedStore.GetFrom("config", "colour") - assert.Error(t, err) - _, err = scopedStore.GetFrom("sessions", "token") - assert.Error(t, err) + _, err := scopedStore.GetFrom("cache", "page") + assertTrue(t, core.Is(err, NotFoundError)) + _, err = scopedStore.GetFrom("cache-warm", "status") + assertTrue(t, 
core.Is(err, NotFoundError)) - value, err := storeInstance.Get("tenant-b:config", "colour") - require.NoError(t, err) - assert.Equal(t, "green", value) + value, err := scopedStore.GetFrom("config", "theme") + assertNoError(t, err) + assertEqual(t, "dark", value) + + otherValue, err := otherScopedStore.GetFrom("cache", "page") + assertNoError(t, err) + assertEqual(t, "keep", otherValue) +} + +func TestScope_ScopedStore_Good_OnChange_NamespaceLocal(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + otherScopedStore := NewScoped(storeInstance, "tenant-b") + + var events []Event + unregister := scopedStore.OnChange(func(event Event) { + events = append(events, event) + }) + defer unregister() + + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + assertNoError(t, otherScopedStore.SetIn("config", "colour", "red")) + assertNoError(t, scopedStore.Delete("config", "colour")) + + assertLen(t, events, 2) + assertEqual(t, "config", events[0].Group) + assertEqual(t, "colour", events[0].Key) + assertEqual(t, "blue", events[0].Value) + assertEqual(t, "config", events[1].Group) + assertEqual(t, "colour", events[1].Key) + assertEqual(t, "", events[1].Value) +} + +func TestScope_ScopedStore_Good_Watch_NamespaceLocal(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + otherScopedStore := NewScoped(storeInstance, "tenant-b") + + events := scopedStore.Watch("config") + defer scopedStore.Unwatch("config", events) + + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + assertNoError(t, otherScopedStore.SetIn("config", "colour", "red")) + + select { + case event, ok := <-events: + assertTrue(t, ok) + assertEqual(t, EventSet, event.Type) + assertEqual(t, "config", event.Group) + assertEqual(t, "colour", event.Key) + assertEqual(t, "blue", 
event.Value) + case <-time.After(time.Second): + t.Fatal("timed out waiting for scoped watch event") + } + + select { + case event := <-events: + t.Fatalf("unexpected event from another namespace: %#v", event) + case <-time.After(50 * time.Millisecond): + } +} + +func TestScope_ScopedStore_Good_Watch_All_NamespaceLocal(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + otherScopedStore := NewScoped(storeInstance, "tenant-b") + + events := scopedStore.Watch("*") + defer scopedStore.Unwatch("*", events) + + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + assertNoError(t, scopedStore.SetIn("cache", "page", "home")) + assertNoError(t, otherScopedStore.SetIn("config", "colour", "red")) + + select { + case event, ok := <-events: + assertTrue(t, ok) + assertEqual(t, "config", event.Group) + assertEqual(t, "colour", event.Key) + case <-time.After(time.Second): + t.Fatal("timed out waiting for first wildcard scoped watch event") + } + + select { + case event, ok := <-events: + assertTrue(t, ok) + assertEqual(t, "cache", event.Group) + assertEqual(t, "page", event.Key) + case <-time.After(time.Second): + t.Fatal("timed out waiting for second wildcard scoped watch event") + } + + select { + case event := <-events: + t.Fatalf("unexpected wildcard event from another namespace: %#v", event) + case <-time.After(50 * time.Millisecond): + } +} + +func TestScope_ScopedStore_Good_Unwatch_ClosesLocalChannel(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + + events := scopedStore.Watch("config") + scopedStore.Unwatch("config", events) + + select { + case _, ok := <-events: + assertFalse(t, ok) + case <-time.After(time.Second): + t.Fatal("timed out waiting for scoped watch channel to close") + } } func TestScope_ScopedStore_Good_GetAll(t *testing.T) { 
storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - alphaStore := mustScoped(t, storeInstance, "tenant-a") - betaStore := mustScoped(t, storeInstance, "tenant-b") + alphaStore := NewScoped(storeInstance, "tenant-a") + betaStore := NewScoped(storeInstance, "tenant-b") - require.NoError(t, alphaStore.SetIn("items", "x", "1")) - require.NoError(t, alphaStore.SetIn("items", "y", "2")) - require.NoError(t, betaStore.SetIn("items", "z", "3")) + assertNoError(t, alphaStore.SetIn("items", "x", "1")) + assertNoError(t, alphaStore.SetIn("items", "y", "2")) + assertNoError(t, betaStore.SetIn("items", "z", "3")) all, err := alphaStore.GetAll("items") - require.NoError(t, err) - assert.Equal(t, map[string]string{"x": "1", "y": "2"}, all) + assertNoError(t, err) + assertEqual(t, map[string]string{"x": "1", "y": "2"}, all) betaEntries, err := betaStore.GetAll("items") - require.NoError(t, err) - assert.Equal(t, map[string]string{"z": "3"}, betaEntries) + assertNoError(t, err) + assertEqual(t, map[string]string{"z": "3"}, betaEntries) +} + +func TestScope_ScopedStore_Good_GetPage(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("items", "charlie", "3")) + assertNoError(t, scopedStore.SetIn("items", "alpha", "1")) + assertNoError(t, scopedStore.SetIn("items", "bravo", "2")) + + page, err := scopedStore.GetPage("items", 1, 1) + assertNoError(t, err) + assertLen(t, page, 1) + assertEqual(t, KeyValue{Key: "bravo", Value: "2"}, page[0]) } func TestScope_ScopedStore_Good_All(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("items", "first", "1")) - require.NoError(t, scopedStore.SetIn("items", "second", "2")) 
+ scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("items", "first", "1")) + assertNoError(t, scopedStore.SetIn("items", "second", "2")) var keys []string for entry, err := range scopedStore.All("items") { - require.NoError(t, err) + assertNoError(t, err) keys = append(keys, entry.Key) } - assert.ElementsMatch(t, []string{"first", "second"}, keys) + assertElementsMatch(t, []string{"first", "second"}, keys) } func TestScope_ScopedStore_Good_All_SortedByKey(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("items", "charlie", "3")) - require.NoError(t, scopedStore.SetIn("items", "alpha", "1")) - require.NoError(t, scopedStore.SetIn("items", "bravo", "2")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("items", "charlie", "3")) + assertNoError(t, scopedStore.SetIn("items", "alpha", "1")) + assertNoError(t, scopedStore.SetIn("items", "bravo", "2")) var keys []string for entry, err := range scopedStore.All("items") { - require.NoError(t, err) + assertNoError(t, err) keys = append(keys, entry.Key) } - assert.Equal(t, []string{"alpha", "bravo", "charlie"}, keys) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, keys) } func TestScope_ScopedStore_Good_Count(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("g", "a", "1")) - require.NoError(t, scopedStore.SetIn("g", "b", "2")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("g", "a", "1")) + assertNoError(t, scopedStore.SetIn("g", "b", "2")) count, err := scopedStore.Count("g") - require.NoError(t, err) - assert.Equal(t, 2, count) + 
assertNoError(t, err) + assertEqual(t, 2, count) } func TestScope_ScopedStore_Good_SetWithTTL(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetWithTTL("g", "k", "v", time.Hour)) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetWithTTL("g", "k", "v", time.Hour)) value, err := scopedStore.GetFrom("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) } func TestScope_ScopedStore_Good_SetWithTTL_Expires(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetWithTTL("g", "k", "v", 1*time.Millisecond)) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetWithTTL("g", "k", "v", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) _, err := scopedStore.GetFrom("g", "k") - assert.True(t, core.Is(err, NotFoundError)) + assertTrue(t, core.Is(err, NotFoundError)) } func TestScope_ScopedStore_Good_Render(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("user", "name", "Alice")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("user", "name", "Alice")) renderedTemplate, err := scopedStore.Render("Hello {{ .name }}", "user") - require.NoError(t, err) - assert.Equal(t, "Hello Alice", renderedTemplate) + assertNoError(t, err) + assertEqual(t, "Hello Alice", renderedTemplate) } func TestScope_ScopedStore_Good_BulkHelpers(t *testing.T) { storeInstance, _ := New(":memory:") - defer 
storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - alphaStore := mustScoped(t, storeInstance, "tenant-a") - betaStore := mustScoped(t, storeInstance, "tenant-b") + alphaStore := NewScoped(storeInstance, "tenant-a") + betaStore := NewScoped(storeInstance, "tenant-b") - require.NoError(t, alphaStore.SetIn("config", "colour", "blue")) - require.NoError(t, alphaStore.SetIn("sessions", "token", "abc123")) - require.NoError(t, betaStore.SetIn("config", "colour", "red")) + assertNoError(t, alphaStore.SetIn("config", "colour", "blue")) + assertNoError(t, alphaStore.SetIn("sessions", "token", "abc123")) + assertNoError(t, betaStore.SetIn("config", "colour", "red")) count, err := alphaStore.CountAll("") - require.NoError(t, err) - assert.Equal(t, 2, count) + assertNoError(t, err) + assertEqual(t, 2, count) count, err = alphaStore.CountAll("config") - require.NoError(t, err) - assert.Equal(t, 1, count) + assertNoError(t, err) + assertEqual(t, 1, count) groupNames, err := alphaStore.Groups("") - require.NoError(t, err) - assert.ElementsMatch(t, []string{"config", "sessions"}, groupNames) + assertNoError(t, err) + assertElementsMatch(t, []string{"config", "sessions"}, groupNames) groupNames, err = alphaStore.Groups("conf") - require.NoError(t, err) - assert.Equal(t, []string{"config"}, groupNames) + assertNoError(t, err) + assertEqual(t, []string{"config"}, groupNames) var streamedGroupNames []string for groupName, iterationErr := range alphaStore.GroupsSeq("") { - require.NoError(t, iterationErr) + assertNoError(t, iterationErr) streamedGroupNames = append(streamedGroupNames, groupName) } - assert.ElementsMatch(t, []string{"config", "sessions"}, streamedGroupNames) + assertElementsMatch(t, []string{"config", "sessions"}, streamedGroupNames) var filteredGroupNames []string for groupName, iterationErr := range alphaStore.GroupsSeq("config") { - require.NoError(t, iterationErr) + assertNoError(t, iterationErr) filteredGroupNames = append(filteredGroupNames, 
groupName) } - assert.Equal(t, []string{"config"}, filteredGroupNames) + assertEqual(t, []string{"config"}, filteredGroupNames) } func TestScope_ScopedStore_Good_GroupsSeqStopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("alpha", "a", "1")) - require.NoError(t, scopedStore.SetIn("beta", "b", "2")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("alpha", "a", "1")) + assertNoError(t, scopedStore.SetIn("beta", "b", "2")) groups := scopedStore.GroupsSeq("") var seen []string for groupName, iterationErr := range groups { - require.NoError(t, iterationErr) + assertNoError(t, iterationErr) seen = append(seen, groupName) break } - assert.Len(t, seen, 1) + assertLen(t, seen, 1) } func TestScope_ScopedStore_Good_GroupsSeqSorted(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("charlie", "c", "3")) - require.NoError(t, scopedStore.SetIn("alpha", "a", "1")) - require.NoError(t, scopedStore.SetIn("bravo", "b", "2")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("charlie", "c", "3")) + assertNoError(t, scopedStore.SetIn("alpha", "a", "1")) + assertNoError(t, scopedStore.SetIn("bravo", "b", "2")) var groupNames []string for groupName, iterationErr := range scopedStore.GroupsSeq("") { - require.NoError(t, iterationErr) + assertNoError(t, iterationErr) groupNames = append(groupNames, groupName) } - assert.Equal(t, []string{"alpha", "bravo", "charlie"}, groupNames) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, groupNames) } func TestScope_ScopedStore_Good_GetSplitAndGetFields(t *testing.T) { storeInstance, _ := New(":memory:") - 
defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetIn("config", "hosts", "alpha,beta,gamma")) - require.NoError(t, scopedStore.SetIn("config", "flags", "one two\tthree\n")) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetIn("config", "hosts", "alpha,beta,gamma")) + assertNoError(t, scopedStore.SetIn("config", "flags", "one two\tthree\n")) parts, err := scopedStore.GetSplit("config", "hosts", ",") - require.NoError(t, err) + assertNoError(t, err) var splitValues []string for value := range parts { splitValues = append(splitValues, value) } - assert.Equal(t, []string{"alpha", "beta", "gamma"}, splitValues) + assertEqual(t, []string{"alpha", "beta", "gamma"}, splitValues) fields, err := scopedStore.GetFields("config", "flags") - require.NoError(t, err) + assertNoError(t, err) var fieldValues []string for value := range fields { fieldValues = append(fieldValues, value) } - assert.Equal(t, []string{"one", "two", "three"}, fieldValues) + assertEqual(t, []string{"one", "two", "three"}, fieldValues) } func TestScope_ScopedStore_Good_PurgeExpired(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore := mustScoped(t, storeInstance, "tenant-a") - require.NoError(t, scopedStore.SetWithTTL("session", "token", "abc123", 1*time.Millisecond)) + scopedStore := NewScoped(storeInstance, "tenant-a") + assertNoError(t, scopedStore.SetWithTTL("session", "token", "abc123", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) removedRows, err := scopedStore.PurgeExpired() - require.NoError(t, err) - assert.Equal(t, int64(1), removedRows) + assertNoError(t, err) + assertEqual(t, int64(1), removedRows) _, err = scopedStore.GetFrom("session", "token") - assert.True(t, core.Is(err, NotFoundError)) + assertTrue(t, core.Is(err, NotFoundError)) } func 
TestScope_ScopedStore_Good_PurgeExpired_NamespaceLocal(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - alphaStore := mustScoped(t, storeInstance, "tenant-a") - betaStore := mustScoped(t, storeInstance, "tenant-b") + alphaStore := NewScoped(storeInstance, "tenant-a") + betaStore := NewScoped(storeInstance, "tenant-b") - require.NoError(t, alphaStore.SetWithTTL("session", "alpha-token", "alpha", 1*time.Millisecond)) - require.NoError(t, betaStore.SetWithTTL("session", "beta-token", "beta", 1*time.Millisecond)) + assertNoError(t, alphaStore.SetWithTTL("session", "alpha-token", "alpha", 1*time.Millisecond)) + assertNoError(t, betaStore.SetWithTTL("session", "beta-token", "beta", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) - assert.Equal(t, 1, rawEntryCount(t, storeInstance, "tenant-a:session")) - assert.Equal(t, 1, rawEntryCount(t, storeInstance, "tenant-b:session")) + assertEqual(t, 1, rawEntryCount(t, storeInstance, "tenant-a:session")) + assertEqual(t, 1, rawEntryCount(t, storeInstance, "tenant-b:session")) removedRows, err := alphaStore.PurgeExpired() - require.NoError(t, err) - assert.Equal(t, int64(1), removedRows) - - assert.Equal(t, 0, rawEntryCount(t, storeInstance, "tenant-a:session")) - assert.Equal(t, 1, rawEntryCount(t, storeInstance, "tenant-b:session")) -} - -func TestScope_ScopedStore_Good_WatchAndUnwatch(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore := mustScoped(t, storeInstance, "tenant-a") - events := scopedStore.Watch("config") - scopedStore.Unwatch("config", events) - - _, open := <-events - assert.False(t, open, "channel should be closed after Unwatch") - - require.NoError(t, scopedStore.SetIn("config", "theme", "dark")) -} - -func TestScope_ScopedStore_Good_WatchWildcardGroup(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore := mustScoped(t, 
storeInstance, "tenant-a") - events := scopedStore.Watch("*") - - require.NoError(t, scopedStore.SetIn("config", "theme", "dark")) - require.NoError(t, storeInstance.Set("other", "theme", "light")) - - received := drainEvents(events, 1, time.Second) - require.Len(t, received, 1) - assert.Equal(t, "tenant-a:config", received[0].Group) - assert.Equal(t, "theme", received[0].Key) - assert.Equal(t, "dark", received[0].Value) + assertNoError(t, err) + assertEqual(t, int64(1), removedRows) - scopedStore.Unwatch("*", events) - _, open := <-events - assert.False(t, open, "channel should be closed after wildcard Unwatch") -} - -func TestScope_ScopedStore_Good_OnChange(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore := mustScoped(t, storeInstance, "tenant-a") - - var seen []Event - unregister := scopedStore.OnChange(func(event Event) { - seen = append(seen, event) - }) - defer unregister() - - require.NoError(t, scopedStore.SetIn("config", "theme", "dark")) - require.NoError(t, storeInstance.Set("other", "key", "value")) - - require.Len(t, seen, 1) - assert.Equal(t, "tenant-a:config", seen[0].Group) - assert.Equal(t, "theme", seen[0].Key) - assert.Equal(t, "dark", seen[0].Value) -} - -func TestScope_ScopedStoreTransaction_Good_PrefixesAndReadsPendingWrites(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore := mustScoped(t, storeInstance, "tenant-a") - events := storeInstance.Watch("*") - defer storeInstance.Unwatch("*", events) - - err := scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { - require.NoError(t, transaction.Set("theme", "dark")) - require.NoError(t, transaction.SetIn("config", "colour", "blue")) - - value, err := transaction.Get("theme") - require.NoError(t, err) - assert.Equal(t, "dark", value) - - entriesByKey, err := transaction.GetAll("config") - require.NoError(t, err) - assert.Equal(t, map[string]string{"colour": "blue"}, 
entriesByKey) - - count, err := transaction.CountAll("") - require.NoError(t, err) - assert.Equal(t, 2, count) - - groupNames, err := transaction.Groups() - require.NoError(t, err) - assert.Equal(t, []string{"config", "default"}, groupNames) - - renderedTemplate, err := transaction.Render("{{ .theme }} / {{ .colour }}", "default") - require.NoError(t, err) - assert.Equal(t, "dark / ", renderedTemplate) - - return nil - }) - require.NoError(t, err) - - value, err := storeInstance.Get("tenant-a:default", "theme") - require.NoError(t, err) - assert.Equal(t, "dark", value) - - value, err = storeInstance.Get("tenant-a:config", "colour") - require.NoError(t, err) - assert.Equal(t, "blue", value) - - received := drainEvents(events, 2, time.Second) - require.Len(t, received, 2) - assert.Equal(t, "tenant-a:default", received[0].Group) - assert.Equal(t, "tenant-a:config", received[1].Group) -} - -func TestScope_Quota_Good_TransactionEnforcesMaxKeys(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 1}) - require.NoError(t, err) - - err = scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { - require.NoError(t, transaction.SetIn("config", "colour", "blue")) - return transaction.SetIn("config", "language", "en-GB") - }) - require.Error(t, err) - assert.True(t, core.Is(err, QuotaExceededError)) - - _, err = scopedStore.GetFrom("config", "colour") - assert.ErrorIs(t, err, NotFoundError) -} - -func TestScope_Quota_Good_TransactionEnforcesMaxGroups(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 1}) - require.NoError(t, err) - - err = scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { - require.NoError(t, transaction.SetIn("config", "colour", "blue")) - return 
transaction.SetWithTTL("preferences", "language", "en-GB", time.Hour) - }) - require.Error(t, err) - assert.True(t, core.Is(err, QuotaExceededError)) - - _, err = scopedStore.GetFrom("config", "colour") - assert.ErrorIs(t, err, NotFoundError) + assertEqual(t, 0, rawEntryCount(t, storeInstance, "tenant-a:session")) + assertEqual(t, 1, rawEntryCount(t, storeInstance, "tenant-b:session")) } // --------------------------------------------------------------------------- @@ -702,121 +824,195 @@ func TestScope_Quota_Good_TransactionEnforcesMaxGroups(t *testing.T) { func TestScope_Quota_Good_MaxKeys(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 5}) - require.NoError(t, err) + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 5}, + }) + assertNoError(t, err) + // Insert 5 keys across different groups — should be fine. for i := range 5 { - require.NoError(t, scopedStore.SetIn("g", keyName(i), "v")) + assertNoError(t, scopedStore.SetIn("g", keyName(i), "v")) } + // 6th key should fail. 
err = scopedStore.SetIn("g", "overflow", "v") - require.Error(t, err) - assert.True(t, core.Is(err, QuotaExceededError)) + assertError(t, err) + assertTruef(t, core.Is(err, QuotaExceededError), "expected QuotaExceededError, got: %v", err) } func TestScope_Quota_Bad_QuotaCheckQueryError(t *testing.T) { database, _ := openStubSQLiteDatabase(t, stubSQLiteScenario{}) - defer database.Close() + defer func() { _ = database.Close() }() storeInstance := &Store{ sqliteDatabase: database, cancelPurge: func() {}, } - scopedStore, err := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 1}) - require.NoError(t, err) + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 1}, + }) + assertNoError(t, err) err = scopedStore.SetIn("config", "theme", "dark") - require.Error(t, err) - assert.Contains(t, err.Error(), "quota check") + assertError(t, err) + assertContainsString(t, err.Error(), "quota check") } func TestScope_Quota_Good_MaxKeys_AcrossGroups(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 3}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 3}, + }) - require.NoError(t, scopedStore.SetIn("g1", "a", "1")) - require.NoError(t, scopedStore.SetIn("g2", "b", "2")) - require.NoError(t, scopedStore.SetIn("g3", "c", "3")) + assertNoError(t, scopedStore.SetIn("g1", "a", "1")) + assertNoError(t, scopedStore.SetIn("g2", "b", "2")) + assertNoError(t, scopedStore.SetIn("g3", "c", "3")) + // Total is now 3 — any new key should fail regardless of group. 
err := scopedStore.SetIn("g4", "d", "4") - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) } func TestScope_Quota_Good_UpsertDoesNotCount(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 3}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 3}, + }) + + assertNoError(t, scopedStore.SetIn("g", "a", "1")) + assertNoError(t, scopedStore.SetIn("g", "b", "2")) + assertNoError(t, scopedStore.SetIn("g", "c", "3")) - require.NoError(t, scopedStore.SetIn("g", "a", "1")) - require.NoError(t, scopedStore.SetIn("g", "b", "2")) - require.NoError(t, scopedStore.SetIn("g", "c", "3")) - require.NoError(t, scopedStore.SetIn("g", "a", "updated")) + // Upserting existing key should succeed. + assertNoError(t, scopedStore.SetIn("g", "a", "updated")) value, err := scopedStore.GetFrom("g", "a") - require.NoError(t, err) - assert.Equal(t, "updated", value) + assertNoError(t, err) + assertEqual(t, "updated", value) +} + +func TestScope_Quota_Good_ExpiredUpsertDoesNotEmitDeleteEvent(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 1}, + }) + + events := storeInstance.Watch("tenant-a:g") + defer storeInstance.Unwatch("tenant-a:g", events) + + assertNoError(t, scopedStore.SetWithTTL("g", "token", "old", 1*time.Millisecond)) + select { + case event := <-events: + assertEqual(t, EventSet, event.Type) + assertEqual(t, "old", event.Value) + case <-time.After(time.Second): + t.Fatal("timed out waiting for initial set event") + } + time.Sleep(5 * time.Millisecond) + + assertNoError(t, scopedStore.SetIn("g", "token", "new")) + 
+ select { + case event := <-events: + assertEqual(t, EventSet, event.Type) + assertEqual(t, "new", event.Value) + case <-time.After(time.Second): + t.Fatal("timed out waiting for upsert event") + } + + select { + case event := <-events: + t.Fatalf("unexpected extra event: %#v", event) + case <-time.After(50 * time.Millisecond): + } } func TestScope_Quota_Good_DeleteAndReInsert(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 3}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 3}, + }) + + assertNoError(t, scopedStore.SetIn("g", "a", "1")) + assertNoError(t, scopedStore.SetIn("g", "b", "2")) + assertNoError(t, scopedStore.SetIn("g", "c", "3")) - require.NoError(t, scopedStore.SetIn("g", "a", "1")) - require.NoError(t, scopedStore.SetIn("g", "b", "2")) - require.NoError(t, scopedStore.SetIn("g", "c", "3")) - require.NoError(t, scopedStore.Delete("g", "c")) - require.NoError(t, scopedStore.SetIn("g", "d", "4")) + // Delete one key, then insert a new one — should work. + assertNoError(t, scopedStore.Delete("g", "c")) + assertNoError(t, scopedStore.SetIn("g", "d", "4")) } func TestScope_Quota_Good_ZeroMeansUnlimited(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 0, MaxGroups: 0}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 0, MaxGroups: 0}, + }) + // Should be able to insert many keys and groups without error. 
for i := range 100 { - require.NoError(t, scopedStore.SetIn("g", keyName(i), "v")) + assertNoError(t, scopedStore.SetIn("g", keyName(i), "v")) } } func TestScope_Quota_Good_ExpiredKeysExcluded(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 3}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 3}, + }) - require.NoError(t, scopedStore.SetWithTTL("g", "temp1", "v", 1*time.Millisecond)) - require.NoError(t, scopedStore.SetWithTTL("g", "temp2", "v", 1*time.Millisecond)) - require.NoError(t, scopedStore.SetIn("g", "permanent", "v")) + // Insert 3 keys, 2 with short TTL. + assertNoError(t, scopedStore.SetWithTTL("g", "temp1", "v", 1*time.Millisecond)) + assertNoError(t, scopedStore.SetWithTTL("g", "temp2", "v", 1*time.Millisecond)) + assertNoError(t, scopedStore.SetIn("g", "permanent", "v")) time.Sleep(5 * time.Millisecond) - require.NoError(t, scopedStore.SetIn("g", "new1", "v")) - require.NoError(t, scopedStore.SetIn("g", "new2", "v")) + // After expiry, only 1 key counts — should be able to insert 2 more. + assertNoError(t, scopedStore.SetIn("g", "new1", "v")) + assertNoError(t, scopedStore.SetIn("g", "new2", "v")) + // Now at 3 — next should fail. 
err := scopedStore.SetIn("g", "new3", "v") - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) } func TestScope_Quota_Good_SetWithTTL_Enforced(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 2}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 2}, + }) - require.NoError(t, scopedStore.SetWithTTL("g", "a", "1", time.Hour)) - require.NoError(t, scopedStore.SetWithTTL("g", "b", "2", time.Hour)) + assertNoError(t, scopedStore.SetWithTTL("g", "a", "1", time.Hour)) + assertNoError(t, scopedStore.SetWithTTL("g", "b", "2", time.Hour)) err := scopedStore.SetWithTTL("g", "c", "3", time.Hour) - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) } // --------------------------------------------------------------------------- @@ -825,100 +1021,135 @@ func TestScope_Quota_Good_SetWithTTL_Enforced(t *testing.T) { func TestScope_Quota_Good_MaxGroups(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 3}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: 3}, + }) - require.NoError(t, scopedStore.SetIn("g1", "k", "v")) - require.NoError(t, scopedStore.SetIn("g2", "k", "v")) - require.NoError(t, scopedStore.SetIn("g3", "k", "v")) + assertNoError(t, scopedStore.SetIn("g1", "k", "v")) + assertNoError(t, scopedStore.SetIn("g2", "k", "v")) + assertNoError(t, scopedStore.SetIn("g3", "k", "v")) + // 4th group should fail. 
err := scopedStore.SetIn("g4", "k", "v") - require.Error(t, err) - assert.True(t, core.Is(err, QuotaExceededError)) + assertError(t, err) + assertTrue(t, core.Is(err, QuotaExceededError)) } func TestScope_Quota_Good_MaxGroups_ExistingGroupOK(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 2}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: 2}, + }) - require.NoError(t, scopedStore.SetIn("g1", "a", "1")) - require.NoError(t, scopedStore.SetIn("g2", "b", "2")) - require.NoError(t, scopedStore.SetIn("g1", "c", "3")) - require.NoError(t, scopedStore.SetIn("g2", "d", "4")) + assertNoError(t, scopedStore.SetIn("g1", "a", "1")) + assertNoError(t, scopedStore.SetIn("g2", "b", "2")) + + // Adding more keys to existing groups should be fine. + assertNoError(t, scopedStore.SetIn("g1", "c", "3")) + assertNoError(t, scopedStore.SetIn("g2", "d", "4")) } func TestScope_Quota_Good_MaxGroups_DeleteAndRecreate(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() + + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: 2}, + }) - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 2}) + assertNoError(t, scopedStore.SetIn("g1", "k", "v")) + assertNoError(t, scopedStore.SetIn("g2", "k", "v")) - require.NoError(t, scopedStore.SetIn("g1", "k", "v")) - require.NoError(t, scopedStore.SetIn("g2", "k", "v")) - require.NoError(t, scopedStore.DeleteGroup("g1")) - require.NoError(t, scopedStore.SetIn("g3", "k", "v")) + // Delete a group, then create a new one. 
+ assertNoError(t, scopedStore.DeleteGroup("g1")) + assertNoError(t, scopedStore.SetIn("g3", "k", "v")) } func TestScope_Quota_Good_MaxGroups_ZeroUnlimited(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 0}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: 0}, + }) for i := range 50 { - require.NoError(t, scopedStore.SetIn(keyName(i), "k", "v")) + assertNoError(t, scopedStore.SetIn(keyName(i), "k", "v")) } } func TestScope_Quota_Good_MaxGroups_ExpiredGroupExcluded(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxGroups: 2}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxGroups: 2}, + }) - require.NoError(t, scopedStore.SetWithTTL("g1", "k", "v", 1*time.Millisecond)) - require.NoError(t, scopedStore.SetIn("g2", "k", "v")) + // Create 2 groups, one with only TTL keys. + assertNoError(t, scopedStore.SetWithTTL("g1", "k", "v", 1*time.Millisecond)) + assertNoError(t, scopedStore.SetIn("g2", "k", "v")) time.Sleep(5 * time.Millisecond) - require.NoError(t, scopedStore.SetIn("g3", "k", "v")) + // g1's only key has expired, so group count should be 1 — we can create a new one. 
+ assertNoError(t, scopedStore.SetIn("g3", "k", "v")) } func TestScope_Quota_Good_BothLimits(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - scopedStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 10, MaxGroups: 2}) + scopedStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 10, MaxGroups: 2}, + }) - require.NoError(t, scopedStore.SetIn("g1", "a", "1")) - require.NoError(t, scopedStore.SetIn("g2", "b", "2")) + assertNoError(t, scopedStore.SetIn("g1", "a", "1")) + assertNoError(t, scopedStore.SetIn("g2", "b", "2")) + // Group limit hit. err := scopedStore.SetIn("g3", "c", "3") - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) - require.NoError(t, scopedStore.SetIn("g1", "d", "4")) + // But adding to existing groups is fine (within key limit). + assertNoError(t, scopedStore.SetIn("g1", "d", "4")) } func TestScope_Quota_Good_DoesNotAffectOtherNamespaces(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - alphaStore, _ := NewScopedWithQuota(storeInstance, "tenant-a", QuotaConfig{MaxKeys: 2}) - betaStore, _ := NewScopedWithQuota(storeInstance, "tenant-b", QuotaConfig{MaxKeys: 2}) + alphaStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 2}, + }) + betaStore, _ := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-b", + Quota: QuotaConfig{MaxKeys: 2}, + }) - require.NoError(t, alphaStore.SetIn("g", "a1", "v")) - require.NoError(t, alphaStore.SetIn("g", "a2", "v")) - require.NoError(t, betaStore.SetIn("g", "b1", "v")) - require.NoError(t, betaStore.SetIn("g", "b2", "v")) + assertNoError(t, alphaStore.SetIn("g", "a1", "v")) + assertNoError(t, alphaStore.SetIn("g", "a2", "v")) 
+ assertNoError(t, betaStore.SetIn("g", "b1", "v")) + assertNoError(t, betaStore.SetIn("g", "b2", "v")) + // alphaStore is at limit — but betaStore's keys don't count against alphaStore. err := alphaStore.SetIn("g", "a3", "v") - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) + // betaStore is also at limit independently. err = betaStore.SetIn("g", "b3", "v") - assert.True(t, core.Is(err, QuotaExceededError)) + assertTrue(t, core.Is(err, QuotaExceededError)) } // --------------------------------------------------------------------------- @@ -927,50 +1158,85 @@ func TestScope_Quota_Good_DoesNotAffectOtherNamespaces(t *testing.T) { func TestScope_CountAll_Good_WithPrefix(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("ns-a:g1", "k1", "v")) - require.NoError(t, storeInstance.Set("ns-a:g1", "k2", "v")) - require.NoError(t, storeInstance.Set("ns-a:g2", "k1", "v")) - require.NoError(t, storeInstance.Set("ns-b:g1", "k1", "v")) + assertNoError(t, storeInstance.Set("ns-a:g1", "k1", "v")) + assertNoError(t, storeInstance.Set("ns-a:g1", "k2", "v")) + assertNoError(t, storeInstance.Set("ns-a:g2", "k1", "v")) + assertNoError(t, storeInstance.Set("ns-b:g1", "k1", "v")) count, err := storeInstance.CountAll("ns-a:") - require.NoError(t, err) - assert.Equal(t, 3, count) + assertNoError(t, err) + assertEqual(t, 3, count) count, err = storeInstance.CountAll("ns-b:") - require.NoError(t, err) - assert.Equal(t, 1, count) + assertNoError(t, err) + assertEqual(t, 1, count) } func TestScope_CountAll_Good_WithPrefix_Wildcards(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() - - require.NoError(t, storeInstance.Set("user_1", "k", "v")) - require.NoError(t, storeInstance.Set("user_2", "k", "v")) - require.NoError(t, storeInstance.Set("user%test", "k", "v")) - require.NoError(t, 
storeInstance.Set("user_test", "k", "v")) - + defer func() { _ = storeInstance.Close() }() + + // Add keys in groups that look like wildcards. + assertNoError(t, storeInstance.Set("user_1", "k", "v")) + assertNoError(t, storeInstance.Set("user_2", "k", "v")) + assertNoError(t, storeInstance.Set("user%test", "k", "v")) + assertNoError(t, storeInstance.Set("user_test", "k", "v")) + + // Prefix "user_" should ONLY match groups starting with "user_". + // Since we escape "_", it matches literal "_". + // Groups: "user_1", "user_2", "user_test" (3 total). + // "user%test" is NOT matched because "_" is literal. count, err := storeInstance.CountAll("user_") - require.NoError(t, err) - assert.Equal(t, 3, count) + assertNoError(t, err) + assertEqual(t, 3, count) + // Prefix "user%" should ONLY match "user%test". count, err = storeInstance.CountAll("user%") - require.NoError(t, err) - assert.Equal(t, 1, count) + assertNoError(t, err) + assertEqual(t, 1, count) } func TestScope_CountAll_Good_EmptyPrefix(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g1", "k1", "v")) - require.NoError(t, storeInstance.Set("g2", "k2", "v")) + assertNoError(t, storeInstance.Set("g1", "k1", "v")) + assertNoError(t, storeInstance.Set("g2", "k2", "v")) count, err := storeInstance.CountAll("") - require.NoError(t, err) - assert.Equal(t, 2, count) + assertNoError(t, err) + assertEqual(t, 2, count) +} + +func TestScope_CountAll_Good_ExcludesExpired(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("ns:g", "permanent", "v")) + assertNoError(t, storeInstance.SetWithTTL("ns:g", "temp", "v", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + count, err := storeInstance.CountAll("ns:") + assertNoError(t, err) + assertEqualf(t, 1, count, "expired keys should not be counted") +} + +func 
TestScope_CountAll_Good_Empty(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + count, err := storeInstance.CountAll("nonexistent:") + assertNoError(t, err) + assertEqual(t, 0, count) +} + +func TestScope_CountAll_Bad_ClosedStore(t *testing.T) { + storeInstance, _ := New(":memory:") + _ = storeInstance.Close() + _, err := storeInstance.CountAll("") + assertError(t, err) } // --------------------------------------------------------------------------- @@ -979,59 +1245,107 @@ func TestScope_CountAll_Good_EmptyPrefix(t *testing.T) { func TestScope_Groups_Good_WithPrefix(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("ns-a:group-1", "k", "v")) - require.NoError(t, storeInstance.Set("ns-a:group-2", "k", "v")) - require.NoError(t, storeInstance.Set("ns-b:group-1", "k", "v")) + assertNoError(t, storeInstance.Set("ns-a:g1", "k", "v")) + assertNoError(t, storeInstance.Set("ns-a:g2", "k", "v")) + assertNoError(t, storeInstance.Set("ns-a:g2", "k2", "v")) // duplicate group + assertNoError(t, storeInstance.Set("ns-b:g1", "k", "v")) groups, err := storeInstance.Groups("ns-a:") - require.NoError(t, err) - assert.Equal(t, []string{"ns-a:group-1", "ns-a:group-2"}, groups) + assertNoError(t, err) + assertLen(t, groups, 2) + assertContainsElement(t, groups, "ns-a:g1") + assertContainsElement(t, groups, "ns-a:g2") } -func TestScope_GroupsSeq_Good_EmptyPrefix(t *testing.T) { +func TestScope_Groups_Good_EmptyPrefix(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g1", "k1", "v")) - require.NoError(t, storeInstance.Set("g2", "k2", "v")) + assertNoError(t, storeInstance.Set("g1", "k", "v")) + assertNoError(t, storeInstance.Set("g2", "k", "v")) + assertNoError(t, storeInstance.Set("g3", "k", "v")) - 
var groups []string - for groupName, err := range storeInstance.GroupsSeq("") { - require.NoError(t, err) - groups = append(groups, groupName) - } - assert.Equal(t, []string{"g1", "g2"}, groups) + groups, err := storeInstance.Groups("") + assertNoError(t, err) + assertLen(t, groups, 3) } -func TestScope_GroupsSeq_Good_StopsEarly(t *testing.T) { +func TestScope_Groups_Good_Distinct(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g1", "k1", "v")) - require.NoError(t, storeInstance.Set("g2", "k2", "v")) + // Multiple keys in the same group should produce one entry. + assertNoError(t, storeInstance.Set("g1", "a", "v")) + assertNoError(t, storeInstance.Set("g1", "b", "v")) + assertNoError(t, storeInstance.Set("g1", "c", "v")) - count := 0 - for range storeInstance.GroupsSeq("") { - count++ - break - } - assert.Equal(t, 1, count) + groups, err := storeInstance.Groups("") + assertNoError(t, err) + assertLen(t, groups, 1) + assertEqual(t, "g1", groups[0]) } -func keyName(index int) string { - return core.Sprintf("key-%02d", index) +func TestScope_Groups_Good_ExcludesExpired(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("ns:g1", "permanent", "v")) + assertNoError(t, storeInstance.SetWithTTL("ns:g2", "temp", "v", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + groups, err := storeInstance.Groups("ns:") + assertNoError(t, err) + assertLenf(t, groups, 1, "group with only expired keys should be excluded") + assertEqual(t, "ns:g1", groups[0]) } -func rawEntryCount(tb testing.TB, storeInstance *Store, group string) int { - tb.Helper() +func TestScope_Groups_Good_SortedByGroupName(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("charlie", "c", "3")) + assertNoError(t, 
storeInstance.Set("alpha", "a", "1")) + assertNoError(t, storeInstance.Set("bravo", "b", "2")) + + groups, err := storeInstance.Groups("") + assertNoError(t, err) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, groups) +} + +func TestScope_Groups_Good_Empty(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + groups, err := storeInstance.Groups("nonexistent:") + assertNoError(t, err) + assertEmpty(t, groups) +} + +func TestScope_Groups_Bad_ClosedStore(t *testing.T) { + storeInstance, _ := New(":memory:") + _ = storeInstance.Close() + _, err := storeInstance.Groups("") + assertError(t, err) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +func keyName(i int) string { + return core.Concat("key-", core.Sprint(i)) +} + +func rawEntryCount(t *testing.T, storeInstance *Store, group string) int { + t.Helper() var count int err := storeInstance.sqliteDatabase.QueryRow( "SELECT COUNT(*) FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ?", group, ).Scan(&count) - require.NoError(tb, err) + assertNoError(t, err) return count } diff --git a/store.go b/store.go index 360dfa7..ab9f316 100644 --- a/store.go +++ b/store.go @@ -4,12 +4,13 @@ import ( "context" "database/sql" "iter" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural for store infrastructure (RFC §4 explicitly mandates). 
"text/template" "time" "unicode" core "dappco.re/go/core" + influxdb2 "github.com/influxdata/influxdb-client-go/v2" _ "modernc.org/sqlite" ) @@ -25,14 +26,15 @@ const ( entryGroupColumn = "group_name" entryKeyColumn = "entry_key" entryValueColumn = "entry_value" + defaultPurgeInterval = 60 * time.Second ) // Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: "/tmp/go-store.db", Journal: store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}, PurgeInterval: 30 * time.Second})` -// Prefer `store.NewConfigured(store.StoreConfig{...})` when the configuration -// is already known as a struct literal. Use `StoreOption` only when values -// need to be assembled incrementally, such as when a caller receives them from +// Prefer `store.NewConfigured(store.StoreConfig{...})` when the full +// configuration is already known. Use `StoreOption` when values need to be +// assembled incrementally, such as when a caller receives them from // different sources. -type StoreOption func(*StoreConfig) +type StoreOption func(*Store) // Usage example: `config := store.StoreConfig{DatabasePath: ":memory:", PurgeInterval: 30 * time.Second}` type StoreConfig struct { @@ -44,6 +46,24 @@ type StoreConfig struct { PurgeInterval time.Duration // Usage example: `config := store.StoreConfig{WorkspaceStateDirectory: "/tmp/core-state"}` WorkspaceStateDirectory string + // Usage example: `medium, _ := local.New("/srv/core"); config := store.StoreConfig{DatabasePath: ":memory:", Medium: medium}` + // Medium overrides the raw filesystem for Compact archives and Import / + // Export helpers, letting tests and production swap the backing transport + // (memory, S3, cube) without touching the store API. 
+ Medium Medium +} + +// Usage example: `config := (store.StoreConfig{DatabasePath: ":memory:"}).Normalised(); fmt.Println(config.PurgeInterval, config.WorkspaceStateDirectory)` +func (storeConfig StoreConfig) Normalised() StoreConfig { + if storeConfig.PurgeInterval == 0 { + storeConfig.PurgeInterval = defaultPurgeInterval + } + if storeConfig.WorkspaceStateDirectory == "" { + storeConfig.WorkspaceStateDirectory = normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory) + } else { + storeConfig.WorkspaceStateDirectory = normaliseWorkspaceStateDirectory(storeConfig.WorkspaceStateDirectory) + } + return storeConfig } // Usage example: `if err := (store.StoreConfig{DatabasePath: ":memory:", PurgeInterval: 30 * time.Second}).Validate(); err != nil { return }` @@ -55,12 +75,10 @@ func (storeConfig StoreConfig) Validate() error { nil, ) } - if storeConfig.Journal != (JournalConfiguration{}) && !storeConfig.Journal.isConfigured() { - return core.E( - "store.StoreConfig.Validate", - "journal configuration must include endpoint URL, organisation, and bucket name", - nil, - ) + if storeConfig.Journal != (JournalConfiguration{}) { + if err := storeConfig.Journal.Validate(); err != nil { + return core.E("store.StoreConfig.Validate", "journal config", err) + } } if storeConfig.PurgeInterval < 0 { return core.E("store.StoreConfig.Validate", "purge interval must be zero or positive", nil) @@ -68,8 +86,10 @@ func (storeConfig StoreConfig) Validate() error { return nil } +// Usage example: `config := store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}` +// JournalConfiguration keeps the journal connection details in one literal so +// agents can pass a single struct to `StoreConfig.Journal` or `WithJournal`. 
// Usage example: `config := storeInstance.JournalConfiguration(); fmt.Println(config.EndpointURL, config.Organisation, config.BucketName)` -// Usage example: `store.New(":memory:", store.WithJournal("http://127.0.0.1:8086", "core", "events"))` type JournalConfiguration struct { // Usage example: `config := store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086"}` EndpointURL string @@ -79,18 +99,45 @@ type JournalConfiguration struct { BucketName string } +// Usage example: `if err := (store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}).Validate(); err != nil { return }` +func (journalConfig JournalConfiguration) Validate() error { + switch { + case journalConfig.EndpointURL == "": + return core.E( + "store.JournalConfiguration.Validate", + `endpoint URL is empty; use values like "http://127.0.0.1:8086"`, + nil, + ) + case journalConfig.Organisation == "": + return core.E( + "store.JournalConfiguration.Validate", + `organisation is empty; use values like "core"`, + nil, + ) + case journalConfig.BucketName == "": + return core.E( + "store.JournalConfiguration.Validate", + `bucket name is empty; use values like "events"`, + nil, + ) + default: + return nil + } +} + func (journalConfig JournalConfiguration) isConfigured() bool { return journalConfig.EndpointURL != "" && journalConfig.Organisation != "" && journalConfig.BucketName != "" } -// Store is the SQLite KV store with TTL expiry, namespace isolation, +// Store is the SQLite key-value store with TTL expiry, namespace isolation, // reactive events, SQLite journal writes, and orphan recovery. 
// // Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: ":memory:", Journal: store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}, PurgeInterval: 30 * time.Second})` // Usage example: `value, err := storeInstance.Get("config", "colour")` type Store struct { + db *sql.DB sqliteDatabase *sql.DB databasePath string workspaceStateDirectory string @@ -98,32 +145,47 @@ type Store struct { cancelPurge context.CancelFunc purgeWaitGroup sync.WaitGroup purgeInterval time.Duration // interval between background purge cycles + sqliteStoragePath string + sqliteStorageDirectory string + mediumBacked bool + journal influxdb2.Client + bucket string + org string journalConfiguration JournalConfiguration + medium Medium + lifecycleLock sync.Mutex closeLock sync.Mutex - closed bool + isClosed bool + isClosing bool // Event dispatch state. - watchers map[string][]chan Event - callbacks []changeCallbackRegistration - watchersLock sync.RWMutex // protects watcher registration and dispatch - callbacksLock sync.RWMutex // protects callback registration and dispatch - nextCallbackRegistrationID uint64 // monotonic ID for callback registrations - - orphanWorkspacesLock sync.Mutex - orphanWorkspaces []*Workspace + watchers map[string][]chan Event + callbacks []changeCallbackRegistration + watcherLock sync.RWMutex // protects watcher registration and dispatch + callbackLock sync.RWMutex // protects callback registration and dispatch + nextCallbackID uint64 // monotonic ID for callback registrations + + orphanWorkspaceLock sync.Mutex + cachedOrphanWorkspaces []*Workspace } func (storeInstance *Store) ensureReady(operation string) error { if storeInstance == nil { return core.E(operation, "store is nil", nil) } + if storeInstance.db == nil { + storeInstance.db = storeInstance.sqliteDatabase + } if storeInstance.sqliteDatabase == nil { + storeInstance.sqliteDatabase = storeInstance.db + } + if 
storeInstance.db == nil || storeInstance.sqliteDatabase == nil { return core.E(operation, "store is not initialised", nil) } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() + storeInstance.lifecycleLock.Lock() + closed := storeInstance.isClosed || storeInstance.isClosing + storeInstance.lifecycleLock.Unlock() if closed { return core.E(operation, "store is closed", nil) } @@ -133,15 +195,29 @@ func (storeInstance *Store) ensureReady(operation string) error { // Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: "/tmp/go-store.db", Journal: store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}})` func WithJournal(endpointURL, organisation, bucketName string) StoreOption { - return func(storeConfig *StoreConfig) { - if storeConfig == nil { + return func(storeInstance *Store) { + if storeInstance == nil { return } - storeConfig.Journal = JournalConfiguration{ + storeInstance.journalConfiguration = JournalConfiguration{ EndpointURL: endpointURL, Organisation: organisation, BucketName: bucketName, } + storeInstance.org = organisation + storeInstance.bucket = bucketName + } +} + +// Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: ":memory:", WorkspaceStateDirectory: "/tmp/core-state"})` +// Use this when the workspace state directory is being assembled +// incrementally; otherwise prefer a StoreConfig literal. 
+func WithWorkspaceStateDirectory(directory string) StoreOption { + return func(storeInstance *Store) { + if storeInstance == nil { + return + } + storeInstance.workspaceStateDirectory = directory } } @@ -170,7 +246,8 @@ func (storeInstance *Store) Config() StoreConfig { DatabasePath: storeInstance.databasePath, Journal: storeInstance.JournalConfiguration(), PurgeInterval: storeInstance.purgeInterval, - WorkspaceStateDirectory: storeInstance.workspaceStateDirectoryPath(), + WorkspaceStateDirectory: storeInstance.WorkspaceStateDirectory(), + Medium: storeInstance.medium, } } @@ -182,26 +259,36 @@ func (storeInstance *Store) DatabasePath() string { return storeInstance.databasePath } +// Usage example: `stateDirectory := storeInstance.WorkspaceStateDirectory(); fmt.Println(stateDirectory)` +func (storeInstance *Store) WorkspaceStateDirectory() string { + if storeInstance == nil { + return normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory) + } + return storeInstance.workspaceStateDirectoryPath() +} + // Usage example: `if storeInstance.IsClosed() { return }` func (storeInstance *Store) IsClosed() bool { if storeInstance == nil { return true } - storeInstance.closeLock.Lock() - closed := storeInstance.closed - storeInstance.closeLock.Unlock() + storeInstance.lifecycleLock.Lock() + closed := storeInstance.isClosed + storeInstance.lifecycleLock.Unlock() return closed } // Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: ":memory:", PurgeInterval: 20 * time.Millisecond})` +// Use this when the purge interval is being assembled incrementally; otherwise +// prefer a StoreConfig literal. 
func WithPurgeInterval(interval time.Duration) StoreOption { - return func(storeConfig *StoreConfig) { - if storeConfig == nil { + return func(storeInstance *Store) { + if storeInstance == nil { return } if interval > 0 { - storeConfig.PurgeInterval = interval + storeInstance.purgeInterval = interval } } } @@ -215,42 +302,78 @@ func openConfiguredStore(operation string, storeConfig StoreConfig) (*Store, err if err := storeConfig.Validate(); err != nil { return nil, core.E(operation, "validate config", err) } + storeConfig = storeConfig.Normalised() - storeInstance, err := openSQLiteStore(operation, storeConfig.DatabasePath) + storeInstance, err := openSQLiteStore(operation, storeConfig.DatabasePath, storeConfig.Medium) if err != nil { return nil, err } if storeConfig.Journal != (JournalConfiguration{}) { storeInstance.journalConfiguration = storeConfig.Journal + storeInstance.org = storeConfig.Journal.Organisation + storeInstance.bucket = storeConfig.Journal.BucketName + storeInstance.journal = influxdb2.NewClient(storeConfig.Journal.EndpointURL, "") } - if storeConfig.PurgeInterval > 0 { - storeInstance.purgeInterval = storeConfig.PurgeInterval - } - if storeConfig.WorkspaceStateDirectory != "" { - storeInstance.workspaceStateDirectory = normaliseWorkspaceStateDirectory(storeConfig.WorkspaceStateDirectory) - } + storeInstance.purgeInterval = storeConfig.PurgeInterval + storeInstance.workspaceStateDirectory = storeConfig.WorkspaceStateDirectory + storeInstance.medium = storeConfig.Medium // New() performs a non-destructive orphan scan so callers can discover // leftover workspaces via RecoverOrphans(). 
- storeInstance.orphanWorkspaces = discoverOrphanWorkspaces(storeInstance.workspaceStateDirectoryPath(), storeInstance) + storeInstance.cachedOrphanWorkspaces = discoverOrphanWorkspaces(storeInstance.workspaceStateDirectoryPath(), storeInstance) storeInstance.startBackgroundPurge() return storeInstance, nil } // Usage example: `storeInstance, err := store.NewConfigured(store.StoreConfig{DatabasePath: "/tmp/go-store.db", Journal: store.JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}})` func New(databasePath string, options ...StoreOption) (*Store, error) { - storeConfig := StoreConfig{DatabasePath: databasePath} + scratch := &Store{ + databasePath: databasePath, + workspaceStateDirectory: normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory), + purgeInterval: defaultPurgeInterval, + watchers: make(map[string][]chan Event), + } for _, option := range options { if option != nil { - option(&storeConfig) + option(scratch) } } - return openConfiguredStore("store.New", storeConfig) + + storeConfig := scratch.Config() + storeConfig.DatabasePath = databasePath + storeConfig.Journal = scratch.JournalConfiguration() + storeConfig.PurgeInterval = scratch.purgeInterval + storeConfig.WorkspaceStateDirectory = scratch.WorkspaceStateDirectory() + storeConfig.Medium = scratch.medium + + storeInstance, err := openConfiguredStore("store.New", storeConfig) + if err != nil { + return nil, err + } + return storeInstance, nil } -func openSQLiteStore(operation, databasePath string) (*Store, error) { - sqliteDatabase, err := sql.Open("sqlite", databasePath) +func openSQLiteStore(operation, databasePath string, medium Medium) (*Store, error) { + sqliteStoragePath := databasePath + sqliteStorageDirectory := "" + mediumBacked := medium != nil && databasePath != "" && databasePath != ":memory:" + if mediumBacked { + filesystem := (&core.Fs{}).NewUnrestricted() + sqliteStorageDirectory = filesystem.TempDir("go-store") + 
sqliteStoragePath = core.Path(sqliteStorageDirectory, "store.db") + if medium.Exists(databasePath) { + content, err := medium.Read(databasePath) + if err != nil { + return nil, core.E(operation, "read database from medium", err) + } + if result := filesystem.Write(sqliteStoragePath, content); !result.OK { + return nil, core.E(operation, "seed sqlite file from medium", result.Value.(error)) + } + } + } + + sqliteDatabase, err := sql.Open("sqlite", sqliteStoragePath) if err != nil { return nil, core.E(operation, "open database", err) } @@ -260,26 +383,31 @@ func openSQLiteStore(operation, databasePath string) (*Store, error) { // pool hands out different connections for each call. sqliteDatabase.SetMaxOpenConns(1) if _, err := sqliteDatabase.Exec("PRAGMA journal_mode=WAL"); err != nil { - sqliteDatabase.Close() + _ = sqliteDatabase.Close() return nil, core.E(operation, "set WAL journal mode", err) } if _, err := sqliteDatabase.Exec("PRAGMA busy_timeout=5000"); err != nil { - sqliteDatabase.Close() + _ = sqliteDatabase.Close() return nil, core.E(operation, "set busy timeout", err) } if err := ensureSchema(sqliteDatabase); err != nil { - sqliteDatabase.Close() + _ = sqliteDatabase.Close() return nil, core.E(operation, "ensure schema", err) } purgeContext, cancel := context.WithCancel(context.Background()) return &Store{ + db: sqliteDatabase, sqliteDatabase: sqliteDatabase, databasePath: databasePath, workspaceStateDirectory: normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory), purgeContext: purgeContext, cancelPurge: cancel, - purgeInterval: 60 * time.Second, + purgeInterval: defaultPurgeInterval, + sqliteStoragePath: sqliteStoragePath, + sqliteStorageDirectory: sqliteStorageDirectory, + mediumBacked: mediumBacked, + medium: medium, watchers: make(map[string][]chan Event), }, nil } @@ -291,60 +419,115 @@ func (storeInstance *Store) workspaceStateDirectoryPath() string { return normaliseWorkspaceStateDirectory(storeInstance.workspaceStateDirectory) } -// 
Usage example: `storeInstance, err := store.New(":memory:"); if err != nil { return }; defer storeInstance.Close()` +// Usage example: `storeInstance, err := store.New(":memory:"); if err != nil { return }; defer func() { _ = storeInstance.Close() }()` func (storeInstance *Store) Close() error { if storeInstance == nil { return nil } storeInstance.closeLock.Lock() - if storeInstance.closed { - storeInstance.closeLock.Unlock() + defer storeInstance.closeLock.Unlock() + + storeInstance.lifecycleLock.Lock() + if storeInstance.isClosed { + storeInstance.lifecycleLock.Unlock() return nil } - storeInstance.closed = true - storeInstance.closeLock.Unlock() + storeInstance.isClosing = true + storeInstance.lifecycleLock.Unlock() if storeInstance.cancelPurge != nil { storeInstance.cancelPurge() } storeInstance.purgeWaitGroup.Wait() - storeInstance.watchersLock.Lock() + if storeInstance.journal != nil { + storeInstance.journal.Close() + } + + storeInstance.watcherLock.Lock() for groupName, registeredEvents := range storeInstance.watchers { for _, registeredEventChannel := range registeredEvents { close(registeredEventChannel) } delete(storeInstance.watchers, groupName) } - storeInstance.watchersLock.Unlock() + storeInstance.watcherLock.Unlock() - storeInstance.callbacksLock.Lock() + storeInstance.callbackLock.Lock() storeInstance.callbacks = nil - storeInstance.callbacksLock.Unlock() + storeInstance.callbackLock.Unlock() - storeInstance.orphanWorkspacesLock.Lock() + storeInstance.orphanWorkspaceLock.Lock() var orphanCleanupErr error - for _, orphanWorkspace := range storeInstance.orphanWorkspaces { + for _, orphanWorkspace := range storeInstance.cachedOrphanWorkspaces { if err := orphanWorkspace.closeWithoutRemovingFiles(); err != nil && orphanCleanupErr == nil { orphanCleanupErr = err } } - storeInstance.orphanWorkspaces = nil - storeInstance.orphanWorkspacesLock.Unlock() + storeInstance.cachedOrphanWorkspaces = nil + storeInstance.orphanWorkspaceLock.Unlock() + if 
storeInstance.db == nil { + storeInstance.db = storeInstance.sqliteDatabase + } if storeInstance.sqliteDatabase == nil { + storeInstance.sqliteDatabase = storeInstance.db + } + if storeInstance.sqliteDatabase == nil { + storeInstance.markClosed() return orphanCleanupErr } if err := storeInstance.sqliteDatabase.Close(); err != nil { return core.E("store.Close", "database close", err) } + if err := storeInstance.syncMediumBackedDatabase(); err != nil { + return core.E("store.Close", "sync medium-backed database", err) + } + storeInstance.markClosed() if orphanCleanupErr != nil { return core.E("store.Close", "close orphan workspaces", orphanCleanupErr) } return orphanCleanupErr } +func (storeInstance *Store) markClosed() { + storeInstance.lifecycleLock.Lock() + storeInstance.isClosed = true + storeInstance.isClosing = false + storeInstance.lifecycleLock.Unlock() +} + +func (storeInstance *Store) syncMediumBackedDatabase() error { + if storeInstance == nil || !storeInstance.mediumBacked || storeInstance.medium == nil { + return nil + } + if storeInstance.databasePath == "" || storeInstance.databasePath == ":memory:" { + return nil + } + if storeInstance.sqliteStoragePath == "" { + return nil + } + + filesystem := (&core.Fs{}).NewUnrestricted() + readResult := filesystem.Read(storeInstance.sqliteStoragePath) + if !readResult.OK { + return readResult.Value.(error) + } + if err := storeInstance.medium.Write(storeInstance.databasePath, readResult.Value.(string)); err != nil { + return err + } + + if storeInstance.sqliteStorageDirectory != "" { + _ = filesystem.DeleteAll(storeInstance.sqliteStorageDirectory) + return nil + } + for _, path := range []string{storeInstance.sqliteStoragePath + "-wal", storeInstance.sqliteStoragePath + "-shm"} { + _ = filesystem.Delete(path) + } + return nil +} + // Usage example: `colourValue, err := storeInstance.Get("config", "colour")` func (storeInstance *Store) Get(group, key string) (string, error) { if err := 
storeInstance.ensureReady("store.Get"); err != nil { @@ -429,6 +612,30 @@ func (storeInstance *Store) Delete(group, key string) error { return nil } +// Usage example: `exists, err := storeInstance.Exists("config", "colour")` +// Usage example: `if exists, _ := storeInstance.Exists("session", "token"); !exists { fmt.Println("session expired") }` +func (storeInstance *Store) Exists(group, key string) (bool, error) { + if err := storeInstance.ensureReady("store.Exists"); err != nil { + return false, err + } + + return liveEntryExists(storeInstance.sqliteDatabase, group, key) +} + +// Usage example: `exists, err := storeInstance.GroupExists("config")` +// Usage example: `if exists, _ := storeInstance.GroupExists("tenant-a:config"); !exists { fmt.Println("group is empty") }` +func (storeInstance *Store) GroupExists(group string) (bool, error) { + if err := storeInstance.ensureReady("store.GroupExists"); err != nil { + return false, err + } + + count, err := storeInstance.Count(group) + if err != nil { + return false, err + } + return count > 0, nil +} + // Usage example: `keyCount, err := storeInstance.Count("config")` func (storeInstance *Store) Count(group string) (int, error) { if err := storeInstance.ensureReady("store.Count"); err != nil { @@ -487,7 +694,7 @@ func (storeInstance *Store) DeletePrefix(groupPrefix string) error { if err != nil { return core.E("store.DeletePrefix", "list groups", err) } - defer rows.Close() + defer func() { _ = rows.Close() }() var groupNames []string for rows.Next() { @@ -551,7 +758,7 @@ func (storeInstance *Store) GetPage(group string, offset, limit int) ([]KeyValue if err != nil { return nil, core.E("store.GetPage", "query rows", err) } - defer rows.Close() + defer func() { _ = rows.Close() }() page := make([]KeyValue, 0, limit) for rows.Next() { @@ -583,7 +790,7 @@ func (storeInstance *Store) AllSeq(group string) iter.Seq2[KeyValue, error] { yield(KeyValue{}, core.E("store.All", "query rows", err)) return } - defer rows.Close() + 
defer func() { _ = rows.Close() }() for rows.Next() { var entry KeyValue @@ -704,7 +911,7 @@ func (storeInstance *Store) Groups(groupPrefix ...string) ([]string, error) { // Usage example: `for tenantGroupName, err := range storeInstance.GroupsSeq("tenant-a:") { if err != nil { break }; fmt.Println(tenantGroupName) }` // Usage example: `for groupName, err := range storeInstance.GroupsSeq() { if err != nil { break }; fmt.Println(groupName) }` func (storeInstance *Store) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { - actualGroupPrefix := firstOrEmptyString(groupPrefix) + actualGroupPrefix := firstStringOrEmpty(groupPrefix) return func(yield func(string, error) bool) { if err := storeInstance.ensureReady("store.GroupsSeq"); err != nil { yield("", err) @@ -729,7 +936,7 @@ func (storeInstance *Store) GroupsSeq(groupPrefix ...string) iter.Seq2[string, e yield("", core.E("store.GroupsSeq", "query group names", err)) return } - defer rows.Close() + defer func() { _ = rows.Close() }() for rows.Next() { var groupName string @@ -749,7 +956,7 @@ func (storeInstance *Store) GroupsSeq(groupPrefix ...string) iter.Seq2[string, e } } -func firstOrEmptyString(values []string) string { +func firstStringOrEmpty(values []string) string { if len(values) == 0 { return "" } @@ -771,10 +978,22 @@ func (storeInstance *Store) PurgeExpired() (int64, error) { return 0, err } - removedRows, err := storeInstance.purgeExpiredMatchingGroupPrefix("") + cutoffUnixMilli := time.Now().UnixMilli() + expiredEntries, err := deleteExpiredEntriesMatchingGroupPrefix(storeInstance.sqliteDatabase, "", cutoffUnixMilli) if err != nil { return 0, core.E("store.PurgeExpired", "delete expired rows", err) } + removedRows := int64(len(expiredEntries)) + if removedRows > 0 { + for _, expiredEntry := range expiredEntries { + storeInstance.notify(Event{ + Type: EventDelete, + Group: expiredEntry.group, + Key: expiredEntry.key, + Timestamp: time.Now(), + }) + } + } return removedRows, nil } @@ -789,7 
+1008,7 @@ func (storeInstance *Store) startBackgroundPurge() { return } if storeInstance.purgeInterval <= 0 { - storeInstance.purgeInterval = 60 * time.Second + storeInstance.purgeInterval = defaultPurgeInterval } purgeInterval := storeInstance.purgeInterval @@ -804,6 +1023,7 @@ func (storeInstance *Store) startBackgroundPurge() { if _, err := storeInstance.PurgeExpired(); err != nil { // For example, a logger could record the failure here. The loop // keeps running so the next tick can retry. + _ = err } } } @@ -847,38 +1067,44 @@ func fieldsValueSeq(value string) iter.Seq[string] { } } -// purgeExpiredMatchingGroupPrefix deletes expired rows globally when -// groupPrefix is empty, otherwise only rows whose group starts with the given -// prefix. -func (storeInstance *Store) purgeExpiredMatchingGroupPrefix(groupPrefix string) (int64, error) { - if err := storeInstance.ensureReady("store.purgeExpiredMatchingGroupPrefix"); err != nil { - return 0, err - } +type expiredEntryRef struct { + group string + key string +} +func deleteExpiredEntriesMatchingGroupPrefix(database schemaDatabase, groupPrefix string, cutoffUnixMilli int64) ([]expiredEntryRef, error) { var ( - deleteResult sql.Result - err error + rows *sql.Rows + err error ) - now := time.Now().UnixMilli() if groupPrefix == "" { - deleteResult, err = storeInstance.sqliteDatabase.Exec( - "DELETE FROM "+entriesTableName+" WHERE expires_at IS NOT NULL AND expires_at <= ?", - now, + rows, err = database.Query( + "DELETE FROM "+entriesTableName+" WHERE expires_at IS NOT NULL AND expires_at <= ? RETURNING "+entryGroupColumn+", "+entryKeyColumn, + cutoffUnixMilli, ) } else { - deleteResult, err = storeInstance.sqliteDatabase.Exec( - "DELETE FROM "+entriesTableName+" WHERE expires_at IS NOT NULL AND expires_at <= ? AND "+entryGroupColumn+" LIKE ? ESCAPE '^'", - now, escapeLike(groupPrefix)+"%", + rows, err = database.Query( + "DELETE FROM "+entriesTableName+" WHERE expires_at IS NOT NULL AND expires_at <= ? 
AND "+entryGroupColumn+" LIKE ? ESCAPE '^' RETURNING "+entryGroupColumn+", "+entryKeyColumn, + cutoffUnixMilli, escapeLike(groupPrefix)+"%", ) } if err != nil { - return 0, err + return nil, err } - removedRows, rowsAffectedErr := deleteResult.RowsAffected() - if rowsAffectedErr != nil { - return 0, rowsAffectedErr + defer func() { _ = rows.Close() }() + + expiredEntries := make([]expiredEntryRef, 0) + for rows.Next() { + var expiredEntry expiredEntryRef + if err := rows.Scan(&expiredEntry.group, &expiredEntry.key); err != nil { + return nil, err + } + expiredEntries = append(expiredEntries, expiredEntry) } - return removedRows, nil + if err := rows.Err(); err != nil { + return nil, err + } + return expiredEntries, nil } type schemaDatabase interface { @@ -964,6 +1190,7 @@ func migrateLegacyEntriesTable(database *sql.DB) error { if !committed { if rollbackErr := transaction.Rollback(); rollbackErr != nil { // Ignore rollback failures; the original error is already being returned. + _ = rollbackErr } } }() @@ -1020,7 +1247,7 @@ func tableHasColumn(database schemaDatabase, tableName, columnName string) (bool if err != nil { return false, err } - defer rows.Close() + defer func() { _ = rows.Close() }() for rows.Next() { var ( diff --git a/store_test.go b/store_test.go index a59094d..527624d 100644 --- a/store_test.go +++ b/store_test.go @@ -10,8 +10,6 @@ import ( "time" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // --------------------------------------------------------------------------- @@ -20,37 +18,37 @@ import ( func TestStore_New_Good_Memory(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) - require.NotNil(t, storeInstance) - defer storeInstance.Close() + assertNoError(t, err) + assertNotNil(t, storeInstance) + defer func() { _ = storeInstance.Close() }() } func TestStore_New_Good_FileBacked(t *testing.T) { databasePath := testPath(t, "test.db") storeInstance, err := 
New(databasePath) - require.NoError(t, err) - require.NotNil(t, storeInstance) - defer storeInstance.Close() + assertNoError(t, err) + assertNotNil(t, storeInstance) + defer func() { _ = storeInstance.Close() }() // Verify data persists: write, close, reopen. - require.NoError(t, storeInstance.Set("g", "k", "v")) - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Close()) reopenedStore, err := New(databasePath) - require.NoError(t, err) - defer reopenedStore.Close() + assertNoError(t, err) + defer func() { _ = reopenedStore.Close() }() value, err := reopenedStore.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) } func TestStore_New_Bad_InvalidPath(t *testing.T) { // A path under a non-existent directory should fail at the WAL pragma step // because sql.Open is lazy and only validates on first use. _, err := New("/no/such/directory/test.db") - require.Error(t, err) - assert.Contains(t, err.Error(), "store.New") + assertError(t, err) + assertContainsString(t, err.Error(), "store.New") } func TestStore_New_Bad_CorruptFile(t *testing.T) { @@ -59,8 +57,8 @@ func TestStore_New_Bad_CorruptFile(t *testing.T) { requireCoreOK(t, testFilesystem().Write(databasePath, "not a sqlite database")) _, err := New(databasePath) - require.Error(t, err) - assert.Contains(t, err.Error(), "store.New") + assertError(t, err) + assertContainsString(t, err.Error(), "store.New") } func TestStore_New_Bad_ReadOnlyDir(t *testing.T) { @@ -70,42 +68,59 @@ func TestStore_New_Bad_ReadOnlyDir(t *testing.T) { // Create a valid database first, then make the directory read-only. storeInstance, err := New(databasePath) - require.NoError(t, err) - require.NoError(t, storeInstance.Close()) + assertNoError(t, err) + assertNoError(t, storeInstance.Close()) // Remove WAL/SHM files and make directory read-only. 
_ = testFilesystem().Delete(databasePath + "-wal") _ = testFilesystem().Delete(databasePath + "-shm") - require.NoError(t, syscall.Chmod(dir, 0555)) + assertNoError(t, syscall.Chmod(dir, 0555)) defer func() { _ = syscall.Chmod(dir, 0755) }() // restore for cleanup _, err = New(databasePath) // May or may not fail depending on OS/filesystem — just exercise the code path. if err != nil { - assert.Contains(t, err.Error(), "store.New") + assertContainsString(t, err.Error(), "store.New") } } func TestStore_New_Good_WALMode(t *testing.T) { databasePath := testPath(t, "wal.db") storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() var mode string err = storeInstance.sqliteDatabase.QueryRow("PRAGMA journal_mode").Scan(&mode) - require.NoError(t, err) - assert.Equal(t, "wal", mode, "journal_mode should be WAL") + assertNoError(t, err) + assertEqualf(t, "wal", mode, "journal_mode should be WAL") } func TestStore_New_Good_WithJournalOption(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.Equal(t, "events", storeInstance.journalConfiguration.BucketName) - assert.Equal(t, "core", storeInstance.journalConfiguration.Organisation) - assert.Equal(t, "http://127.0.0.1:8086", storeInstance.journalConfiguration.EndpointURL) + assertEqual(t, "events", storeInstance.journalConfiguration.BucketName) + assertEqual(t, "core", storeInstance.journalConfiguration.Organisation) + assertEqual(t, "http://127.0.0.1:8086", storeInstance.journalConfiguration.EndpointURL) +} + +func TestStore_New_Good_WithWorkspaceStateDirectoryOption(t *testing.T) { + workspaceStateDirectory := testPath(t, "workspace-state-option") + + storeInstance, err := New(":memory:", 
WithWorkspaceStateDirectory(workspaceStateDirectory)) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertEqual(t, workspaceStateDirectory, storeInstance.WorkspaceStateDirectory()) + + workspace, err := storeInstance.NewWorkspace("scroll-session") + assertNoError(t, err) + defer workspace.Discard() + + assertEqual(t, workspaceFilePath(workspaceStateDirectory, "scroll-session"), workspace.DatabasePath()) + assertTrue(t, testFilesystem().Exists(workspace.DatabasePath())) } func TestStore_NewConfigured_Good_WorkspaceStateDirectory(t *testing.T) { @@ -115,45 +130,87 @@ func TestStore_NewConfigured_Good_WorkspaceStateDirectory(t *testing.T) { DatabasePath: ":memory:", WorkspaceStateDirectory: workspaceStateDirectory, }) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.Equal(t, workspaceStateDirectory, storeInstance.Config().WorkspaceStateDirectory) + assertEqual(t, workspaceStateDirectory, storeInstance.Config().WorkspaceStateDirectory) workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) defer workspace.Discard() - assert.Equal(t, workspaceFilePath(workspaceStateDirectory, "scroll-session"), workspace.DatabasePath()) - assert.True(t, testFilesystem().Exists(workspace.DatabasePath())) + assertEqual(t, workspaceFilePath(workspaceStateDirectory, "scroll-session"), workspace.DatabasePath()) + assertTrue(t, testFilesystem().Exists(workspace.DatabasePath())) +} + +func TestStore_WorkspaceStateDirectory_Good_Default(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertEqual(t, normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory), storeInstance.WorkspaceStateDirectory()) + assertEqual(t, storeInstance.WorkspaceStateDirectory(), storeInstance.Config().WorkspaceStateDirectory) + assertEqual(t, 
defaultPurgeInterval, storeInstance.Config().PurgeInterval) } func TestStore_JournalConfiguration_Good(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() config := storeInstance.JournalConfiguration() - assert.Equal(t, JournalConfiguration{ + assertEqual(t, JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}, config) +} + +func TestStore_JournalConfiguration_Good_Validate(t *testing.T) { + err := (JournalConfiguration{ EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events", - }, config) + }).Validate() + assertNoError(t, err) +} + +func TestStore_JournalConfiguration_Bad_ValidateMissingEndpointURL(t *testing.T) { + err := (JournalConfiguration{ + Organisation: "core", + BucketName: "events", + }).Validate() + assertError(t, err) + assertContainsString(t, err.Error(), "endpoint URL is empty") +} + +func TestStore_JournalConfiguration_Bad_ValidateMissingOrganisation(t *testing.T) { + err := (JournalConfiguration{ + EndpointURL: "http://127.0.0.1:8086", + BucketName: "events", + }).Validate() + assertError(t, err) + assertContainsString(t, err.Error(), "organisation is empty") +} + +func TestStore_JournalConfiguration_Bad_ValidateMissingBucketName(t *testing.T) { + err := (JournalConfiguration{ + EndpointURL: "http://127.0.0.1:8086", + Organisation: "core", + }).Validate() + assertError(t, err) + assertContainsString(t, err.Error(), "bucket name is empty") } func TestStore_JournalConfigured_Good(t *testing.T) { storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.True(t, storeInstance.JournalConfigured()) - assert.False(t, 
(*Store)(nil).JournalConfigured()) + assertTrue(t, storeInstance.JournalConfigured()) + assertFalse(t, (*Store)(nil).JournalConfigured()) unconfiguredStore, err := New(":memory:") - require.NoError(t, err) - defer unconfiguredStore.Close() + assertNoError(t, err) + defer func() { _ = unconfiguredStore.Close() }() - assert.False(t, unconfiguredStore.JournalConfigured()) + assertFalse(t, unconfiguredStore.JournalConfigured()) } func TestStore_NewConfigured_Bad_PartialJournalConfiguration(t *testing.T) { @@ -164,8 +221,9 @@ func TestStore_NewConfigured_Bad_PartialJournalConfiguration(t *testing.T) { Organisation: "core", }, }) - require.Error(t, err) - assert.Contains(t, err.Error(), "journal configuration must include endpoint URL, organisation, and bucket name") + assertError(t, err) + assertContainsString(t, err.Error(), "journal config") + assertContainsString(t, err.Error(), "bucket name is empty") } func TestStore_StoreConfig_Good_Validate(t *testing.T) { @@ -178,7 +236,24 @@ func TestStore_StoreConfig_Good_Validate(t *testing.T) { }, PurgeInterval: 20 * time.Millisecond, }).Validate() - require.NoError(t, err) + assertNoError(t, err) +} + +func TestStore_StoreConfig_Good_NormalisedDefaults(t *testing.T) { + normalisedConfig := (StoreConfig{DatabasePath: ":memory:"}).Normalised() + + assertEqual(t, ":memory:", normalisedConfig.DatabasePath) + assertEqual(t, defaultPurgeInterval, normalisedConfig.PurgeInterval) + assertEqual(t, normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory), normalisedConfig.WorkspaceStateDirectory) +} + +func TestStore_StoreConfig_Good_NormalisedWorkspaceStateDirectory(t *testing.T) { + normalisedConfig := (StoreConfig{ + DatabasePath: ":memory:", + WorkspaceStateDirectory: ".core/state///", + }).Normalised() + + assertEqual(t, ".core/state", normalisedConfig.WorkspaceStateDirectory) } func TestStore_StoreConfig_Bad_NegativePurgeInterval(t *testing.T) { @@ -186,14 +261,14 @@ func 
TestStore_StoreConfig_Bad_NegativePurgeInterval(t *testing.T) { DatabasePath: ":memory:", PurgeInterval: -time.Second, }).Validate() - require.Error(t, err) - assert.Contains(t, err.Error(), "purge interval must be zero or positive") + assertError(t, err) + assertContainsString(t, err.Error(), "purge interval must be zero or positive") } func TestStore_StoreConfig_Bad_EmptyDatabasePath(t *testing.T) { err := (StoreConfig{}).Validate() - require.Error(t, err) - assert.Contains(t, err.Error(), "database path is empty") + assertError(t, err) + assertContainsString(t, err.Error(), "database path is empty") } func TestStore_NewConfigured_Bad_NegativePurgeInterval(t *testing.T) { @@ -201,15 +276,15 @@ func TestStore_NewConfigured_Bad_NegativePurgeInterval(t *testing.T) { DatabasePath: ":memory:", PurgeInterval: -time.Second, }) - require.Error(t, err) - assert.Contains(t, err.Error(), "validate config") - assert.Contains(t, err.Error(), "purge interval must be zero or positive") + assertError(t, err) + assertContainsString(t, err.Error(), "validate config") + assertContainsString(t, err.Error(), "purge interval must be zero or positive") } func TestStore_NewConfigured_Bad_EmptyDatabasePath(t *testing.T) { _, err := NewConfigured(StoreConfig{}) - require.Error(t, err) - assert.Contains(t, err.Error(), "database path is empty") + assertError(t, err) + assertContainsString(t, err.Error(), "database path is empty") } func TestStore_Config_Good(t *testing.T) { @@ -222,39 +297,30 @@ func TestStore_Config_Good(t *testing.T) { }, PurgeInterval: 20 * time.Millisecond, }) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.Equal(t, StoreConfig{ - DatabasePath: ":memory:", - Journal: JournalConfiguration{ - EndpointURL: "http://127.0.0.1:8086", - Organisation: "core", - BucketName: "events", - }, - PurgeInterval: 20 * time.Millisecond, - WorkspaceStateDirectory: 
normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory), - }, storeInstance.Config()) + assertEqual(t, StoreConfig{DatabasePath: ":memory:", Journal: JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}, PurgeInterval: 20 * time.Millisecond, WorkspaceStateDirectory: normaliseWorkspaceStateDirectory(defaultWorkspaceStateDirectory)}, storeInstance.Config()) } func TestStore_DatabasePath_Good(t *testing.T) { databasePath := testPath(t, "database-path.db") storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.Equal(t, databasePath, storeInstance.DatabasePath()) + assertEqual(t, databasePath, storeInstance.DatabasePath()) } func TestStore_IsClosed_Good(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) + assertNoError(t, err) - assert.False(t, storeInstance.IsClosed()) - require.NoError(t, storeInstance.Close()) - assert.True(t, storeInstance.IsClosed()) - assert.True(t, (*Store)(nil).IsClosed()) + assertFalse(t, storeInstance.IsClosed()) + assertNoError(t, storeInstance.Close()) + assertTrue(t, storeInstance.IsClosed()) + assertTrue(t, (*Store)(nil).IsClosed()) } func TestStore_NewConfigured_Good(t *testing.T) { @@ -267,20 +333,16 @@ func TestStore_NewConfigured_Good(t *testing.T) { }, PurgeInterval: 20 * time.Millisecond, }) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.Equal(t, JournalConfiguration{ - EndpointURL: "http://127.0.0.1:8086", - Organisation: "core", - BucketName: "events", - }, storeInstance.JournalConfiguration()) - assert.Equal(t, 20*time.Millisecond, storeInstance.purgeInterval) + assertEqual(t, JournalConfiguration{EndpointURL: "http://127.0.0.1:8086", Organisation: "core", BucketName: "events"}, storeInstance.JournalConfiguration()) + assertEqual(t, 
20*time.Millisecond, storeInstance.purgeInterval) - require.NoError(t, storeInstance.Set("g", "k", "v")) + assertNoError(t, storeInstance.Set("g", "k", "v")) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) } // --------------------------------------------------------------------------- @@ -289,65 +351,157 @@ func TestStore_NewConfigured_Good(t *testing.T) { func TestStore_SetGet_Good(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() err = storeInstance.Set("config", "theme", "dark") - require.NoError(t, err) + assertNoError(t, err) value, err := storeInstance.Get("config", "theme") - require.NoError(t, err) - assert.Equal(t, "dark", value) + assertNoError(t, err) + assertEqual(t, "dark", value) } func TestStore_Set_Good_Upsert(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "k", "v1")) - require.NoError(t, storeInstance.Set("g", "k", "v2")) + assertNoError(t, storeInstance.Set("g", "k", "v1")) + assertNoError(t, storeInstance.Set("g", "k", "v2")) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v2", value, "upsert should overwrite the value") + assertNoError(t, err) + assertEqualf(t, "v2", value, "upsert should overwrite the value") count, err := storeInstance.Count("g") - require.NoError(t, err) - assert.Equal(t, 1, count, "upsert should not duplicate keys") + assertNoError(t, err) + assertEqualf(t, 1, count, "upsert should not duplicate keys") } func TestStore_Get_Bad_NotFound(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _, err := storeInstance.Get("config", "missing") - require.Error(t, 
err) - assert.True(t, core.Is(err, NotFoundError), "should wrap NotFoundError") + assertError(t, err) + assertTruef(t, core.Is(err, NotFoundError), "should wrap NotFoundError") } func TestStore_Get_Bad_NonExistentGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _, err := storeInstance.Get("no-such-group", "key") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError)) + assertError(t, err) + assertTrue(t, core.Is(err, NotFoundError)) } func TestStore_Get_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() _, err := storeInstance.Get("g", "k") - require.Error(t, err) + assertError(t, err) } func TestStore_Set_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() err := storeInstance.Set("g", "k", "v") - require.Error(t, err) + assertError(t, err) +} + +// --------------------------------------------------------------------------- +// Exists +// --------------------------------------------------------------------------- + +func TestStore_Exists_Good_Present(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("config", "colour", "blue")) + + exists, err := storeInstance.Exists("config", "colour") + assertNoError(t, err) + assertTrue(t, exists) +} + +func TestStore_Exists_Good_Absent(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + exists, err := storeInstance.Exists("config", "colour") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestStore_Exists_Good_ExpiredKeyReturnsFalse(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, 
storeInstance.SetWithTTL("session", "token", "abc123", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + exists, err := storeInstance.Exists("session", "token") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestStore_Exists_Bad_ClosedStore(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + assertNoError(t, storeInstance.Close()) + _, err = storeInstance.Exists("g", "k") + assertError(t, err) +} + +// --------------------------------------------------------------------------- +// GroupExists +// --------------------------------------------------------------------------- + +func TestStore_GroupExists_Good_Present(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("config", "colour", "blue")) + + exists, err := storeInstance.GroupExists("config") + assertNoError(t, err) + assertTrue(t, exists) +} + +func TestStore_GroupExists_Good_Absent(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + exists, err := storeInstance.GroupExists("config") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestStore_GroupExists_Good_EmptyAfterDelete(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.Set("config", "colour", "blue")) + assertNoError(t, storeInstance.DeleteGroup("config")) + + exists, err := storeInstance.GroupExists("config") + assertNoError(t, err) + assertFalse(t, exists) +} + +func TestStore_GroupExists_Bad_ClosedStore(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + assertNoError(t, storeInstance.Close()) + _, err = storeInstance.GroupExists("config") + assertError(t, err) } // --------------------------------------------------------------------------- @@ -356,31 +510,30 
@@ func TestStore_Set_Bad_ClosedStore(t *testing.T) { func TestStore_Delete_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("config", "key", "val") err := storeInstance.Delete("config", "key") - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.Get("config", "key") - assert.Error(t, err) + assertError(t, err) } func TestStore_Delete_Good_NonExistent(t *testing.T) { // Deleting a key that does not exist should not error. storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() err := storeInstance.Delete("g", "nope") - assert.NoError(t, err) + assertNoError(t, err) } func TestStore_Delete_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() err := storeInstance.Delete("g", "k") - require.Error(t, err) + assertError(t, err) } // --------------------------------------------------------------------------- @@ -389,45 +542,44 @@ func TestStore_Delete_Bad_ClosedStore(t *testing.T) { func TestStore_Count_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("grp", "a", "1") _ = storeInstance.Set("grp", "b", "2") _ = storeInstance.Set("other", "c", "3") count, err := storeInstance.Count("grp") - require.NoError(t, err) - assert.Equal(t, 2, count) + assertNoError(t, err) + assertEqual(t, 2, count) } func TestStore_Count_Good_Empty(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() count, err := storeInstance.Count("empty") - require.NoError(t, err) - assert.Equal(t, 0, count) + assertNoError(t, err) + assertEqual(t, 0, count) } func TestStore_Count_Good_BulkInsert(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + 
defer func() { _ = storeInstance.Close() }() const total = 500 for i := range total { - require.NoError(t, storeInstance.Set("bulk", core.Sprintf("key-%04d", i), "v")) + assertNoError(t, storeInstance.Set("bulk", core.Sprintf("key-%04d", i), "v")) } count, err := storeInstance.Count("bulk") - require.NoError(t, err) - assert.Equal(t, total, count) + assertNoError(t, err) + assertEqual(t, total, count) } func TestStore_Count_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() _, err := storeInstance.Count("g") - require.Error(t, err) + assertError(t, err) } // --------------------------------------------------------------------------- @@ -436,72 +588,71 @@ func TestStore_Count_Bad_ClosedStore(t *testing.T) { func TestStore_DeleteGroup_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("grp", "a", "1") _ = storeInstance.Set("grp", "b", "2") err := storeInstance.DeleteGroup("grp") - require.NoError(t, err) + assertNoError(t, err) count, _ := storeInstance.Count("grp") - assert.Equal(t, 0, count) + assertEqual(t, 0, count) } func TestStore_DeleteGroup_Good_ThenGetAllEmpty(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("grp", "a", "1") _ = storeInstance.Set("grp", "b", "2") - require.NoError(t, storeInstance.DeleteGroup("grp")) + assertNoError(t, storeInstance.DeleteGroup("grp")) all, err := storeInstance.GetAll("grp") - require.NoError(t, err) - assert.Empty(t, all) + assertNoError(t, err) + assertEmpty(t, all) } func TestStore_DeleteGroup_Good_IsolatesOtherGroups(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("a", "k", "1") _ = storeInstance.Set("b", "k", "2") - require.NoError(t, 
storeInstance.DeleteGroup("a")) + assertNoError(t, storeInstance.DeleteGroup("a")) _, err := storeInstance.Get("a", "k") - assert.Error(t, err) + assertError(t, err) value, err := storeInstance.Get("b", "k") - require.NoError(t, err) - assert.Equal(t, "2", value, "other group should be untouched") + assertNoError(t, err) + assertEqualf(t, "2", value, "other group should be untouched") } func TestStore_DeletePrefix_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("tenant-a:config", "colour", "blue") _ = storeInstance.Set("tenant-a:sessions", "token", "abc123") _ = storeInstance.Set("tenant-b:config", "colour", "green") - require.NoError(t, storeInstance.DeletePrefix("tenant-a:")) + assertNoError(t, storeInstance.DeletePrefix("tenant-a:")) _, err := storeInstance.Get("tenant-a:config", "colour") - assert.Error(t, err) + assertError(t, err) _, err = storeInstance.Get("tenant-a:sessions", "token") - assert.Error(t, err) + assertError(t, err) value, err := storeInstance.Get("tenant-b:config", "colour") - require.NoError(t, err) - assert.Equal(t, "green", value) + assertNoError(t, err) + assertEqual(t, "green", value) } func TestStore_DeleteGroup_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() err := storeInstance.DeleteGroup("g") - require.Error(t, err) + assertError(t, err) } // --------------------------------------------------------------------------- @@ -510,61 +661,60 @@ func TestStore_DeleteGroup_Bad_ClosedStore(t *testing.T) { func TestStore_GetAll_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("grp", "a", "1") _ = storeInstance.Set("grp", "b", "2") _ = storeInstance.Set("other", "c", "3") all, err := storeInstance.GetAll("grp") - require.NoError(t, err) - assert.Equal(t, 
map[string]string{"a": "1", "b": "2"}, all) + assertNoError(t, err) + assertEqual(t, map[string]string{"a": "1", "b": "2"}, all) } func TestStore_GetAll_Good_Empty(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() all, err := storeInstance.GetAll("empty") - require.NoError(t, err) - assert.Empty(t, all) + assertNoError(t, err) + assertEmpty(t, all) } func TestStore_GetPage_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("grp", "charlie", "3")) - require.NoError(t, storeInstance.Set("grp", "alpha", "1")) - require.NoError(t, storeInstance.Set("grp", "bravo", "2")) + assertNoError(t, storeInstance.Set("grp", "charlie", "3")) + assertNoError(t, storeInstance.Set("grp", "alpha", "1")) + assertNoError(t, storeInstance.Set("grp", "bravo", "2")) page, err := storeInstance.GetPage("grp", 1, 2) - require.NoError(t, err) - require.Len(t, page, 2) - assert.Equal(t, []KeyValue{{Key: "bravo", Value: "2"}, {Key: "charlie", Value: "3"}}, page) + assertNoError(t, err) + assertLen(t, page, 2) + assertEqual(t, []KeyValue{{Key: "bravo", Value: "2"}, {Key: "charlie", Value: "3"}}, page) } func TestStore_GetPage_Good_EmptyAndBounds(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() page, err := storeInstance.GetPage("grp", 0, 0) - require.NoError(t, err) - assert.Empty(t, page) + assertNoError(t, err) + assertEmpty(t, page) _, err = storeInstance.GetPage("grp", -1, 1) - require.Error(t, err) + assertError(t, err) _, err = storeInstance.GetPage("grp", 0, -1) - require.Error(t, err) + assertError(t, err) } func TestStore_GetAll_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() _, err := storeInstance.GetAll("g") - require.Error(t, err) + 
assertError(t, err) } // --------------------------------------------------------------------------- @@ -573,140 +723,138 @@ func TestStore_GetAll_Bad_ClosedStore(t *testing.T) { func TestStore_All_Good_StopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "a", "1")) - require.NoError(t, storeInstance.Set("g", "b", "2")) + assertNoError(t, storeInstance.Set("g", "a", "1")) + assertNoError(t, storeInstance.Set("g", "b", "2")) entries := storeInstance.All("g") var seen []string for entry, err := range entries { - require.NoError(t, err) + assertNoError(t, err) seen = append(seen, entry.Key) break } - assert.Len(t, seen, 1) + assertLen(t, seen, 1) } func TestStore_All_Good_SortedByKey(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "charlie", "3")) - require.NoError(t, storeInstance.Set("g", "alpha", "1")) - require.NoError(t, storeInstance.Set("g", "bravo", "2")) + assertNoError(t, storeInstance.Set("g", "charlie", "3")) + assertNoError(t, storeInstance.Set("g", "alpha", "1")) + assertNoError(t, storeInstance.Set("g", "bravo", "2")) var keys []string for entry, err := range storeInstance.All("g") { - require.NoError(t, err) + assertNoError(t, err) keys = append(keys, entry.Key) } - assert.Equal(t, []string{"alpha", "bravo", "charlie"}, keys) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, keys) } func TestStore_AllSeq_Good_SortedByKey(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "charlie", "3")) - require.NoError(t, storeInstance.Set("g", "alpha", "1")) - require.NoError(t, storeInstance.Set("g", "bravo", "2")) + assertNoError(t, storeInstance.Set("g", "charlie", "3")) + 
assertNoError(t, storeInstance.Set("g", "alpha", "1")) + assertNoError(t, storeInstance.Set("g", "bravo", "2")) var keys []string for entry, err := range storeInstance.AllSeq("g") { - require.NoError(t, err) + assertNoError(t, err) keys = append(keys, entry.Key) } - assert.Equal(t, []string{"alpha", "bravo", "charlie"}, keys) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, keys) } func TestStore_All_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() for _, err := range storeInstance.All("g") { - require.Error(t, err) + assertError(t, err) } } func TestStore_GroupsSeq_Good_StopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("alpha", "a", "1")) - require.NoError(t, storeInstance.Set("beta", "b", "2")) + assertNoError(t, storeInstance.Set("alpha", "a", "1")) + assertNoError(t, storeInstance.Set("beta", "b", "2")) groups := storeInstance.GroupsSeq("") var seen []string for group, err := range groups { - require.NoError(t, err) + assertNoError(t, err) seen = append(seen, group) break } - assert.Len(t, seen, 1) + assertLen(t, seen, 1) } func TestStore_GroupsSeq_Good_PrefixStopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("alpha", "a", "1")) - require.NoError(t, storeInstance.Set("beta", "b", "2")) + assertNoError(t, storeInstance.Set("alpha", "a", "1")) + assertNoError(t, storeInstance.Set("beta", "b", "2")) groups := storeInstance.GroupsSeq("alpha") var seen []string for group, err := range groups { - require.NoError(t, err) + assertNoError(t, err) seen = append(seen, group) break } - assert.Equal(t, []string{"alpha"}, seen) + assertEqual(t, []string{"alpha"}, seen) } func TestStore_GroupsSeq_Good_SortedByGroupName(t *testing.T) { 
storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("charlie", "c", "3")) - require.NoError(t, storeInstance.Set("alpha", "a", "1")) - require.NoError(t, storeInstance.Set("bravo", "b", "2")) + assertNoError(t, storeInstance.Set("charlie", "c", "3")) + assertNoError(t, storeInstance.Set("alpha", "a", "1")) + assertNoError(t, storeInstance.Set("bravo", "b", "2")) var groups []string for group, err := range storeInstance.GroupsSeq("") { - require.NoError(t, err) + assertNoError(t, err) groups = append(groups, group) } - assert.Equal(t, []string{"alpha", "bravo", "charlie"}, groups) + assertEqual(t, []string{"alpha", "bravo", "charlie"}, groups) } func TestStore_GroupsSeq_Good_DefaultArgument(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("alpha", "a", "1")) - require.NoError(t, storeInstance.Set("beta", "b", "2")) + assertNoError(t, storeInstance.Set("alpha", "a", "1")) + assertNoError(t, storeInstance.Set("beta", "b", "2")) var groups []string for group, err := range storeInstance.GroupsSeq() { - require.NoError(t, err) + assertNoError(t, err) groups = append(groups, group) } - assert.Equal(t, []string{"alpha", "beta"}, groups) + assertEqual(t, []string{"alpha", "beta"}, groups) } func TestStore_GroupsSeq_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() for _, err := range storeInstance.GroupsSeq("") { - require.Error(t, err) + assertError(t, err) } } @@ -716,29 +864,29 @@ func TestStore_GroupsSeq_Bad_ClosedStore(t *testing.T) { func TestStore_GetSplit_Good_SplitsValue(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "comma", "alpha,beta,gamma")) + 
assertNoError(t, storeInstance.Set("g", "comma", "alpha,beta,gamma")) parts, err := storeInstance.GetSplit("g", "comma", ",") - require.NoError(t, err) + assertNoError(t, err) var values []string for value := range parts { values = append(values, value) } - assert.Equal(t, []string{"alpha", "beta", "gamma"}, values) + assertEqual(t, []string{"alpha", "beta", "gamma"}, values) } func TestStore_GetSplit_Good_StopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "comma", "alpha,beta,gamma")) + assertNoError(t, storeInstance.Set("g", "comma", "alpha,beta,gamma")) parts, err := storeInstance.GetSplit("g", "comma", ",") - require.NoError(t, err) + assertNoError(t, err) var values []string for value := range parts { @@ -746,43 +894,43 @@ func TestStore_GetSplit_Good_StopsEarly(t *testing.T) { break } - assert.Equal(t, []string{"alpha"}, values) + assertEqual(t, []string{"alpha"}, values) } func TestStore_GetSplit_Bad_MissingKey(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _, err := storeInstance.GetSplit("g", "missing", ",") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError)) + assertError(t, err) + assertTrue(t, core.Is(err, NotFoundError)) } func TestStore_GetFields_Good_SplitsWhitespace(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "fields", "alpha beta\tgamma\n")) + assertNoError(t, storeInstance.Set("g", "fields", "alpha beta\tgamma\n")) fields, err := storeInstance.GetFields("g", "fields") - require.NoError(t, err) + assertNoError(t, err) var values []string for value := range fields { values = append(values, value) } - assert.Equal(t, []string{"alpha", "beta", "gamma"}, values) + assertEqual(t, []string{"alpha", 
"beta", "gamma"}, values) } func TestStore_GetFields_Good_StopsEarly(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "fields", "alpha beta\tgamma\n")) + assertNoError(t, storeInstance.Set("g", "fields", "alpha beta\tgamma\n")) fields, err := storeInstance.GetFields("g", "fields") - require.NoError(t, err) + assertNoError(t, err) var values []string for value := range fields { @@ -790,16 +938,16 @@ func TestStore_GetFields_Good_StopsEarly(t *testing.T) { break } - assert.Equal(t, []string{"alpha"}, values) + assertEqual(t, []string{"alpha"}, values) } func TestStore_GetFields_Bad_MissingKey(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _, err := storeInstance.GetFields("g", "missing") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError)) + assertError(t, err) + assertTrue(t, core.Is(err, NotFoundError)) } // --------------------------------------------------------------------------- @@ -808,66 +956,65 @@ func TestStore_GetFields_Bad_MissingKey(t *testing.T) { func TestStore_Render_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _ = storeInstance.Set("user", "pool", "pool.lthn.io:3333") _ = storeInstance.Set("user", "wallet", "iz...") templateSource := `{"pool":"{{ .pool }}","wallet":"{{ .wallet }}"}` renderedTemplate, err := storeInstance.Render(templateSource, "user") - require.NoError(t, err) - assert.Contains(t, renderedTemplate, "pool.lthn.io:3333") - assert.Contains(t, renderedTemplate, "iz...") + assertNoError(t, err) + assertContainsString(t, renderedTemplate, "pool.lthn.io:3333") + assertContainsString(t, renderedTemplate, "iz...") } func TestStore_Render_Good_EmptyGroup(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + 
 	defer func() { _ = storeInstance.Close() }()
 
 	// Template that does not reference any variables.
 	renderedTemplate, err := storeInstance.Render("static content", "empty")
-	require.NoError(t, err)
-	assert.Equal(t, "static content", renderedTemplate)
+	assertNoError(t, err)
+	assertEqual(t, "static content", renderedTemplate)
 }
 
 func TestStore_Render_Bad_InvalidTemplateSyntax(t *testing.T) {
 	storeInstance, _ := New(":memory:")
-	defer storeInstance.Close()
+	defer func() { _ = storeInstance.Close() }()
 
 	_, err := storeInstance.Render("{{ .unclosed", "g")
-	require.Error(t, err)
-	assert.Contains(t, err.Error(), "store.Render: parse")
+	assertError(t, err)
+	assertContainsString(t, err.Error(), "store.Render: parse")
 }
 
 func TestStore_Render_Bad_MissingTemplateVar(t *testing.T) {
 	storeInstance, _ := New(":memory:")
-	defer storeInstance.Close()
+	defer func() { _ = storeInstance.Close() }()
 
 	// text/template with a missing key on a map returns "<no value>", not an error,
 	// unless Option("missingkey=error") is set. The default behaviour is no error.
 	renderedTemplate, err := storeInstance.Render("hello {{ .missing }}", "g")
-	require.NoError(t, err)
-	assert.Contains(t, renderedTemplate, "hello")
+	assertNoError(t, err)
+	assertContainsString(t, renderedTemplate, "hello")
 }
 
 func TestStore_Render_Bad_ExecError(t *testing.T) {
 	storeInstance, _ := New(":memory:")
-	defer storeInstance.Close()
+	defer func() { _ = storeInstance.Close() }()
 
 	_ = storeInstance.Set("g", "name", "hello")
 
 	// Calling a string as a function triggers a template execution error.
_, err := storeInstance.Render(`{{ call .name }}`, "g") - require.Error(t, err) - assert.Contains(t, err.Error(), "store.Render: exec") + assertError(t, err) + assertContainsString(t, err.Error(), "store.Render: exec") } func TestStore_Render_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() _, err := storeInstance.Render("{{ .x }}", "g") - require.Error(t, err) + assertError(t, err) } // --------------------------------------------------------------------------- @@ -877,41 +1024,58 @@ func TestStore_Render_Bad_ClosedStore(t *testing.T) { func TestStore_Close_Good(t *testing.T) { storeInstance, _ := New(":memory:") err := storeInstance.Close() - require.NoError(t, err) + assertNoError(t, err) } func TestStore_Close_Good_Idempotent(t *testing.T) { storeInstance, _ := New(":memory:") - require.NoError(t, storeInstance.Close()) - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) +} + +func TestStore_Close_Good_BackfillsDatabaseAlias(t *testing.T) { + database, err := sql.Open("sqlite", ":memory:") + assertNoError(t, err) + + storeInstance := &Store{ + db: database, + cancelPurge: func() {}, + purgeContext: context.Background(), + } + + assertNoError(t, storeInstance.Close()) + + _, err = database.Exec("SELECT 1") + assertError(t, err) + assertContainsString(t, err.Error(), "closed") } func TestStore_Close_Good_OperationsFailAfterClose(t *testing.T) { storeInstance, _ := New(":memory:") - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) // All operations on a closed store should fail. 
_, err := storeInstance.Get("g", "k") - assert.Error(t, err, "Get on closed store should fail") + assertError(t, err) err = storeInstance.Set("g", "k", "v") - assert.Error(t, err, "Set on closed store should fail") + assertError(t, err) err = storeInstance.Delete("g", "k") - assert.Error(t, err, "Delete on closed store should fail") + assertError(t, err) _, err = storeInstance.Count("g") - assert.Error(t, err, "Count on closed store should fail") + assertError(t, err) err = storeInstance.DeleteGroup("g") - assert.Error(t, err, "DeleteGroup on closed store should fail") + assertError(t, err) _, err = storeInstance.GetAll("g") - assert.Error(t, err, "GetAll on closed store should fail") + assertError(t, err) _, err = storeInstance.Render("{{ .x }}", "g") - assert.Error(t, err, "Render on closed store should fail") + assertError(t, err) } func TestStore_Close_Bad_DriverCloseError(t *testing.T) { @@ -922,8 +1086,29 @@ func TestStore_Close_Bad_DriverCloseError(t *testing.T) { } err := storeInstance.Close() - require.Error(t, err) - assert.Contains(t, err.Error(), "store.Close") + assertError(t, err) + assertContainsString(t, err.Error(), "store.Close") +} + +func TestStore_Close_Bad_MediumSyncFailureRetryable(t *testing.T) { + useWorkspaceStateDirectory(t) + + medium := &writeFailOnceMedium{memoryMedium: newMemoryMedium(), failures: 1} + storeInstance, err := New("retryable-close.db", WithMedium(medium)) + assertNoError(t, err) + assertNoError(t, storeInstance.Set("g", "k", "v")) + + err = storeInstance.Close() + assertError(t, err) + assertContainsString(t, err.Error(), "sync medium-backed database") + assertFalse(t, storeInstance.IsClosed()) + + _, err = storeInstance.Get("g", "k") + assertError(t, err) + + assertNoError(t, storeInstance.Close()) + assertTrue(t, storeInstance.IsClosed()) + assertTrue(t, medium.Exists("retryable-close.db")) } // --------------------------------------------------------------------------- @@ -940,8 +1125,8 @@ func 
testCloseErrorDatabase(t *testing.T) *sql.DB { }) database, err := sql.Open("test-close-error-driver", "") - require.NoError(t, err) - require.NoError(t, database.Ping()) + assertNoError(t, err) + assertNoError(t, database.Ping()) return database } @@ -979,7 +1164,7 @@ func testRowsAffectedErrorDatabase(t *testing.T) *sql.DB { }) database, err := sql.Open("test-rows-affected-error-driver", "") - require.NoError(t, err) + assertNoError(t, err) return database } @@ -1023,7 +1208,7 @@ func (testRowsAffectedErrorResult) RowsAffected() (int64, error) { func TestStore_SetGet_Good_EdgeCases(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() tests := []struct { name string @@ -1054,11 +1239,11 @@ func TestStore_SetGet_Good_EdgeCases(t *testing.T) { for _, testCase := range tests { t.Run(testCase.name, func(t *testing.T) { err := storeInstance.Set(testCase.group, testCase.key, testCase.value) - require.NoError(t, err, "Set should succeed") + assertNoErrorf(t, err, "Set should succeed") got, err := storeInstance.Get(testCase.group, testCase.key) - require.NoError(t, err, "Get should succeed") - assert.Equal(t, testCase.value, got, "round-trip should preserve value") + assertNoErrorf(t, err, "Get should succeed") + assertEqualf(t, testCase.value, got, "round-trip should preserve value") }) } } @@ -1069,27 +1254,27 @@ func TestStore_SetGet_Good_EdgeCases(t *testing.T) { func TestStore_GroupIsolation_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("alpha", "k", "a-val")) - require.NoError(t, storeInstance.Set("beta", "k", "b-val")) + assertNoError(t, storeInstance.Set("alpha", "k", "a-val")) + assertNoError(t, storeInstance.Set("beta", "k", "b-val")) alphaValue, err := storeInstance.Get("alpha", "k") - require.NoError(t, err) - assert.Equal(t, "a-val", alphaValue) + 
assertNoError(t, err) + assertEqual(t, "a-val", alphaValue) betaValue, err := storeInstance.Get("beta", "k") - require.NoError(t, err) - assert.Equal(t, "b-val", betaValue) + assertNoError(t, err) + assertEqual(t, "b-val", betaValue) // Delete from alpha should not affect beta. - require.NoError(t, storeInstance.Delete("alpha", "k")) + assertNoError(t, storeInstance.Delete("alpha", "k")) _, err = storeInstance.Get("alpha", "k") - assert.Error(t, err) + assertError(t, err) betaValueAfterDelete, err := storeInstance.Get("beta", "k") - require.NoError(t, err) - assert.Equal(t, "b-val", betaValueAfterDelete) + assertNoError(t, err) + assertEqual(t, "b-val", betaValueAfterDelete) } // --------------------------------------------------------------------------- @@ -1099,8 +1284,8 @@ func TestStore_GroupIsolation_Good(t *testing.T) { func TestStore_Concurrent_Good_ReadWrite(t *testing.T) { databasePath := testPath(t, "concurrent.db") storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() const goroutines = 10 const opsPerGoroutine = 100 @@ -1152,19 +1337,19 @@ func TestStore_Concurrent_Good_ReadWrite(t *testing.T) { for g := range goroutines { group := core.Sprintf("grp-%d", g) count, err := storeInstance.Count(group) - require.NoError(t, err) - assert.Equal(t, opsPerGoroutine, count, "group %s should have all keys", group) + assertNoError(t, err) + assertEqualf(t, opsPerGoroutine, count, "group %s should have all keys", group) } } func TestStore_Concurrent_Good_GetAll(t *testing.T) { storeInstance, err := New(testPath(t, "getall.db")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() // Seed data. 
for i := range 50 { - require.NoError(t, storeInstance.Set("shared", core.Sprintf("k%d", i), core.Sprintf("v%d", i))) + assertNoError(t, storeInstance.Set("shared", core.Sprintf("k%d", i), core.Sprintf("v%d", i))) } var waitGroup sync.WaitGroup @@ -1185,8 +1370,8 @@ func TestStore_Concurrent_Good_GetAll(t *testing.T) { func TestStore_Concurrent_Good_DeleteGroup(t *testing.T) { storeInstance, err := New(testPath(t, "delgrp.db")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() var waitGroup sync.WaitGroup for g := range 10 { @@ -1209,12 +1394,12 @@ func TestStore_Concurrent_Good_DeleteGroup(t *testing.T) { func TestStore_NotFoundError_Good_Is(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() _, err := storeInstance.Get("g", "k") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError), "error should be NotFoundError via core.Is") - assert.Contains(t, err.Error(), "g/k", "error message should include group/key") + assertError(t, err) + assertTruef(t, core.Is(err, NotFoundError), "error should be NotFoundError via core.Is") + assertContainsString(t, err.Error(), "g/k") } // --------------------------------------------------------------------------- @@ -1223,7 +1408,7 @@ func TestStore_NotFoundError_Good_Is(t *testing.T) { func BenchmarkSet(benchmark *testing.B) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() benchmark.ResetTimer() for i := range benchmark.N { @@ -1233,7 +1418,7 @@ func BenchmarkSet(benchmark *testing.B) { func BenchmarkGet(benchmark *testing.B) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() // Pre-populate. 
const keys = 10000 @@ -1249,7 +1434,7 @@ func BenchmarkGet(benchmark *testing.B) { func BenchmarkGetAll(benchmark *testing.B) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() const keys = 10000 for i := range keys { @@ -1265,7 +1450,7 @@ func BenchmarkGetAll(benchmark *testing.B) { func BenchmarkSet_FileBacked(benchmark *testing.B) { databasePath := testPath(benchmark, "bench.db") storeInstance, _ := New(databasePath) - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() benchmark.ResetTimer() for i := range benchmark.N { @@ -1279,69 +1464,74 @@ func BenchmarkSet_FileBacked(benchmark *testing.B) { func TestStore_SetWithTTL_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() err := storeInstance.SetWithTTL("g", "k", "v", 5*time.Second) - require.NoError(t, err) + assertNoError(t, err) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) } func TestStore_SetWithTTL_Good_Upsert(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.SetWithTTL("g", "k", "v1", time.Hour)) - require.NoError(t, storeInstance.SetWithTTL("g", "k", "v2", time.Hour)) + assertNoError(t, storeInstance.SetWithTTL("g", "k", "v1", time.Hour)) + assertNoError(t, storeInstance.SetWithTTL("g", "k", "v2", time.Hour)) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v2", value, "upsert should overwrite the value") + assertNoError(t, err) + assertEqualf(t, "v2", value, "upsert should overwrite the value") count, err := storeInstance.Count("g") - require.NoError(t, err) - assert.Equal(t, 1, count, "upsert should not duplicate keys") + assertNoError(t, err) + assertEqualf(t, 1, count, "upsert 
should not duplicate keys") } func TestStore_SetWithTTL_Good_ExpiresOnGet(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() // Set a key with a very short TTL. - require.NoError(t, storeInstance.SetWithTTL("g", "ephemeral", "gone-soon", 1*time.Millisecond)) + assertNoError(t, storeInstance.SetWithTTL("g", "ephemeral", "gone-soon", 1*time.Millisecond)) // Wait for it to expire. time.Sleep(5 * time.Millisecond) _, err := storeInstance.Get("g", "ephemeral") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError), "expired key should be NotFoundError") + assertError(t, err) + assertTruef(t, core.Is(err, NotFoundError), "expired key should be NotFoundError") } func TestStore_SetWithTTL_Good_ExpiresOnGetEmitsDeleteEvent(t *testing.T) { - storeInstance, _ := New(":memory:") - defer storeInstance.Close() + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("g") defer storeInstance.Unwatch("g", events) - require.NoError(t, storeInstance.SetWithTTL("g", "ephemeral", "gone-soon", 1*time.Millisecond)) - <-events + assertNoError(t, storeInstance.SetWithTTL("g", "ephemeral", "gone-soon", 1*time.Millisecond)) + select { + case <-events: + case <-time.After(time.Second): + t.Fatal("timed out waiting for initial TTL set event") + } time.Sleep(5 * time.Millisecond) - _, err := storeInstance.Get("g", "ephemeral") - require.Error(t, err) - assert.True(t, core.Is(err, NotFoundError), "expired key should be NotFoundError") + _, err = storeInstance.Get("g", "ephemeral") + assertError(t, err) + assertTruef(t, core.Is(err, NotFoundError), "expired key should be NotFoundError") select { case event := <-events: - assert.Equal(t, EventDelete, event.Type) - assert.Equal(t, "g", event.Group) - assert.Equal(t, "ephemeral", event.Key) - assert.Empty(t, event.Value) + assertEqual(t, EventDelete, event.Type) + 
assertEqual(t, "g", event.Group) + assertEqual(t, "ephemeral", event.Key) + assertEmpty(t, event.Value) case <-time.After(time.Second): t.Fatal("timed out waiting for lazy expiry delete event") } @@ -1349,78 +1539,77 @@ func TestStore_SetWithTTL_Good_ExpiresOnGetEmitsDeleteEvent(t *testing.T) { func TestStore_SetWithTTL_Good_ExcludedFromCount(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "permanent", "stays")) - require.NoError(t, storeInstance.SetWithTTL("g", "temp", "goes", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "permanent", "stays")) + assertNoError(t, storeInstance.SetWithTTL("g", "temp", "goes", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) count, err := storeInstance.Count("g") - require.NoError(t, err) - assert.Equal(t, 1, count, "expired key should not be counted") + assertNoError(t, err) + assertEqualf(t, 1, count, "expired key should not be counted") } func TestStore_SetWithTTL_Good_ExcludedFromGetAll(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "a", "1")) - require.NoError(t, storeInstance.SetWithTTL("g", "b", "2", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "a", "1")) + assertNoError(t, storeInstance.SetWithTTL("g", "b", "2", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) all, err := storeInstance.GetAll("g") - require.NoError(t, err) - assert.Equal(t, map[string]string{"a": "1"}, all, "expired key should be excluded") + assertNoError(t, err) + assertEqualf(t, map[string]string{"a": "1"}, all, "expired key should be excluded") } func TestStore_SetWithTTL_Good_ExcludedFromRender(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, 
storeInstance.Set("g", "name", "Alice")) - require.NoError(t, storeInstance.SetWithTTL("g", "temp", "gone", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "name", "Alice")) + assertNoError(t, storeInstance.SetWithTTL("g", "temp", "gone", 1*time.Millisecond)) time.Sleep(5 * time.Millisecond) renderedTemplate, err := storeInstance.Render("Hello {{ .name }}", "g") - require.NoError(t, err) - assert.Equal(t, "Hello Alice", renderedTemplate) + assertNoError(t, err) + assertEqual(t, "Hello Alice", renderedTemplate) } func TestStore_SetWithTTL_Good_SetClearsTTL(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() // Set with TTL, then overwrite with plain Set — TTL should be cleared. - require.NoError(t, storeInstance.SetWithTTL("g", "k", "temp", 1*time.Millisecond)) - require.NoError(t, storeInstance.Set("g", "k", "permanent")) + assertNoError(t, storeInstance.SetWithTTL("g", "k", "temp", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "k", "permanent")) time.Sleep(5 * time.Millisecond) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "permanent", value, "plain Set should clear TTL") + assertNoError(t, err) + assertEqualf(t, "permanent", value, "plain Set should clear TTL") } func TestStore_SetWithTTL_Good_FutureTTLAccessible(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.SetWithTTL("g", "k", "v", 1*time.Hour)) + assertNoError(t, storeInstance.SetWithTTL("g", "k", "v", 1*time.Hour)) value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value, "far-future TTL should be accessible") + assertNoError(t, err) + assertEqualf(t, "v", value, "far-future TTL should be accessible") count, err := storeInstance.Count("g") - require.NoError(t, err) - assert.Equal(t, 1, count) + 
assertNoError(t, err) + assertEqual(t, 1, count) } func TestStore_SetWithTTL_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() err := storeInstance.SetWithTTL("g", "k", "v", time.Hour) - require.Error(t, err) + assertError(t, err) } // --------------------------------------------------------------------------- @@ -1429,49 +1618,79 @@ func TestStore_SetWithTTL_Bad_ClosedStore(t *testing.T) { func TestStore_PurgeExpired_Good(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.SetWithTTL("g", "a", "1", 1*time.Millisecond)) - require.NoError(t, storeInstance.SetWithTTL("g", "b", "2", 1*time.Millisecond)) - require.NoError(t, storeInstance.Set("g", "c", "3")) + assertNoError(t, storeInstance.SetWithTTL("g", "a", "1", 1*time.Millisecond)) + assertNoError(t, storeInstance.SetWithTTL("g", "b", "2", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "c", "3")) time.Sleep(5 * time.Millisecond) removed, err := storeInstance.PurgeExpired() - require.NoError(t, err) - assert.Equal(t, int64(2), removed, "should purge 2 expired keys") + assertNoError(t, err) + assertEqualf(t, int64(2), removed, "should purge 2 expired keys") count, err := storeInstance.Count("g") - require.NoError(t, err) - assert.Equal(t, 1, count, "only non-expiring key should remain") + assertNoError(t, err) + assertEqualf(t, 1, count, "only non-expiring key should remain") +} + +func TestStore_PurgeExpired_Good_NotifiesDeletedRows(t *testing.T) { + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.SetWithTTL("g", "expired", "1", 1*time.Millisecond)) + assertNoError(t, storeInstance.SetWithTTL("g", "live", "2", time.Hour)) + time.Sleep(5 * time.Millisecond) + + events := storeInstance.Watch("*") + defer 
storeInstance.Unwatch("*", events) + + removed, err := storeInstance.PurgeExpired() + assertNoError(t, err) + assertEqual(t, int64(1), removed) + + select { + case event := <-events: + assertEqual(t, EventDelete, event.Type) + assertEqual(t, "g", event.Group) + assertEqual(t, "expired", event.Key) + case <-time.After(time.Second): + t.Fatal("timed out waiting for purge delete event") + } + select { + case extraEvent := <-events: + t.Fatalf("unexpected extra purge event: %#v", extraEvent) + default: + } } func TestStore_PurgeExpired_Good_NoneExpired(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("g", "a", "1")) - require.NoError(t, storeInstance.SetWithTTL("g", "b", "2", time.Hour)) + assertNoError(t, storeInstance.Set("g", "a", "1")) + assertNoError(t, storeInstance.SetWithTTL("g", "b", "2", time.Hour)) removed, err := storeInstance.PurgeExpired() - require.NoError(t, err) - assert.Equal(t, int64(0), removed) + assertNoError(t, err) + assertEqual(t, int64(0), removed) } func TestStore_PurgeExpired_Good_Empty(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() removed, err := storeInstance.PurgeExpired() - require.NoError(t, err) - assert.Equal(t, int64(0), removed) + assertNoError(t, err) + assertEqual(t, int64(0), removed) } func TestStore_PurgeExpired_Bad_ClosedStore(t *testing.T) { storeInstance, _ := New(":memory:") - storeInstance.Close() - + _ = storeInstance.Close() _, err := storeInstance.PurgeExpired() - require.Error(t, err) + assertError(t, err) } func TestStore_PurgeExpired_Bad_RowsAffectedError(t *testing.T) { @@ -1482,17 +1701,17 @@ func TestStore_PurgeExpired_Bad_RowsAffectedError(t *testing.T) { } _, err := storeInstance.PurgeExpired() - require.Error(t, err) - assert.Contains(t, err.Error(), "store.PurgeExpired") + assertError(t, err) + 
assertContainsString(t, err.Error(), "store.PurgeExpired") } func TestStore_PurgeExpired_Good_BackgroundPurge(t *testing.T) { storeInstance, err := New(":memory:", WithPurgeInterval(20*time.Millisecond)) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.SetWithTTL("g", "ephemeral", "v", 1*time.Millisecond)) - require.NoError(t, storeInstance.Set("g", "permanent", "stays")) + assertNoError(t, storeInstance.SetWithTTL("g", "ephemeral", "v", 1*time.Millisecond)) + assertNoError(t, storeInstance.Set("g", "permanent", "stays")) // Wait for the background purge to fire. time.Sleep(60 * time.Millisecond) @@ -1501,19 +1720,19 @@ func TestStore_PurgeExpired_Good_BackgroundPurge(t *testing.T) { // Use a raw query to check the row is actually gone (not just filtered by Get). var count int err = storeInstance.sqliteDatabase.QueryRow("SELECT COUNT(*) FROM entries WHERE group_name = ?", "g").Scan(&count) - require.NoError(t, err) - assert.Equal(t, 1, count, "background purge should have deleted the expired row") + assertNoError(t, err) + assertEqualf(t, 1, count, "background purge should have deleted the expired row") } func TestStore_StartBackgroundPurge_Good_DefaultsWhenIntervalUnset(t *testing.T) { storeInstance, err := New(":memory:") - require.NoError(t, err) + assertNoError(t, err) storeInstance.purgeInterval = 0 - require.NotPanics(t, func() { + assertNotPanics(t, func() { storeInstance.startBackgroundPurge() }) - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) } // --------------------------------------------------------------------------- @@ -1525,65 +1744,65 @@ func TestStore_SchemaUpgrade_Good_ExistingDB(t *testing.T) { // Open, write, close. 
initialStore, err := New(databasePath) - require.NoError(t, err) - require.NoError(t, initialStore.Set("g", "k", "v")) - require.NoError(t, initialStore.Close()) + assertNoError(t, err) + assertNoError(t, initialStore.Set("g", "k", "v")) + assertNoError(t, initialStore.Close()) // Reopen — the ALTER TABLE ADD COLUMN should be a no-op. reopenedStore, err := New(databasePath) - require.NoError(t, err) - defer reopenedStore.Close() + assertNoError(t, err) + defer func() { _ = reopenedStore.Close() }() value, err := reopenedStore.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) // TTL features should work on the reopened store. - require.NoError(t, reopenedStore.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) + assertNoError(t, reopenedStore.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) secondValue, err := reopenedStore.Get("g", "ttl-key") - require.NoError(t, err) - assert.Equal(t, "ttl-val", secondValue) + assertNoError(t, err) + assertEqual(t, "ttl-val", secondValue) } func TestStore_SchemaUpgrade_Good_EntriesWithoutExpiryColumn(t *testing.T) { databasePath := testPath(t, "entries-no-expiry.db") database, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) database.SetMaxOpenConns(1) _, err = database.Exec("PRAGMA journal_mode=WAL") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec(`CREATE TABLE entries ( group_name TEXT NOT NULL, entry_key TEXT NOT NULL, entry_value TEXT NOT NULL, PRIMARY KEY (group_name, entry_key) )`) - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('g', 'k', 'v')") - require.NoError(t, err) - require.NoError(t, database.Close()) + assertNoError(t, err) + assertNoError(t, database.Close()) storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer 
func() { _ = storeInstance.Close() }() value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) - require.NoError(t, storeInstance.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) + assertNoError(t, storeInstance.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) secondValue, err := storeInstance.Get("g", "ttl-key") - require.NoError(t, err) - assert.Equal(t, "ttl-val", secondValue) + assertNoError(t, err) + assertEqual(t, "ttl-val", secondValue) } func TestStore_SchemaUpgrade_Good_LegacyAndCurrentTables(t *testing.T) { databasePath := testPath(t, "entries-and-legacy.db") database, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) database.SetMaxOpenConns(1) _, err = database.Exec("PRAGMA journal_mode=WAL") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec(`CREATE TABLE entries ( group_name TEXT NOT NULL, entry_key TEXT NOT NULL, @@ -1591,31 +1810,31 @@ func TestStore_SchemaUpgrade_Good_LegacyAndCurrentTables(t *testing.T) { expires_at INTEGER, PRIMARY KEY (group_name, entry_key) )`) - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("INSERT INTO entries (group_name, entry_key, entry_value) VALUES ('existing', 'k', 'v')") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec(`CREATE TABLE kv ( grp TEXT NOT NULL, key TEXT NOT NULL, value TEXT NOT NULL, PRIMARY KEY (grp, key) )`) - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("INSERT INTO kv (grp, key, value) VALUES ('legacy', 'k', 'legacy-v')") - require.NoError(t, err) - require.NoError(t, database.Close()) + assertNoError(t, err) + assertNoError(t, database.Close()) storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() value, err := storeInstance.Get("existing", "k") - 
require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) legacyVal, err := storeInstance.Get("legacy", "k") - require.NoError(t, err) - assert.Equal(t, "legacy-v", legacyVal) + assertNoError(t, err) + assertEqual(t, "legacy-v", legacyVal) } func TestStore_SchemaUpgrade_Good_PreTTLDatabase(t *testing.T) { @@ -1623,36 +1842,36 @@ func TestStore_SchemaUpgrade_Good_PreTTLDatabase(t *testing.T) { // The legacy key-value table has no expires_at column yet. databasePath := testPath(t, "pre-ttl.db") database, err := sql.Open("sqlite", databasePath) - require.NoError(t, err) + assertNoError(t, err) database.SetMaxOpenConns(1) _, err = database.Exec("PRAGMA journal_mode=WAL") - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec(`CREATE TABLE kv ( grp TEXT NOT NULL, key TEXT NOT NULL, value TEXT NOT NULL, PRIMARY KEY (grp, key) )`) - require.NoError(t, err) + assertNoError(t, err) _, err = database.Exec("INSERT INTO kv (grp, key, value) VALUES ('g', 'k', 'v')") - require.NoError(t, err) - require.NoError(t, database.Close()) + assertNoError(t, err) + assertNoError(t, database.Close()) // Open with New — should migrate the legacy table into the descriptive schema. storeInstance, err := New(databasePath) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() // Existing data should be readable. value, err := storeInstance.Get("g", "k") - require.NoError(t, err) - assert.Equal(t, "v", value) + assertNoError(t, err) + assertEqual(t, "v", value) // TTL features should work after migration. 
- require.NoError(t, storeInstance.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) + assertNoError(t, storeInstance.SetWithTTL("g", "ttl-key", "ttl-val", time.Hour)) secondValue, err := storeInstance.Get("g", "ttl-key") - require.NoError(t, err) - assert.Equal(t, "ttl-val", secondValue) + assertNoError(t, err) + assertEqual(t, "ttl-val", secondValue) } // --------------------------------------------------------------------------- @@ -1661,8 +1880,8 @@ func TestStore_SchemaUpgrade_Good_PreTTLDatabase(t *testing.T) { func TestStore_Concurrent_Good_TTL(t *testing.T) { storeInstance, err := New(testPath(t, "concurrent-ttl.db")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() const goroutines = 10 const ops = 50 @@ -1691,7 +1910,7 @@ func TestStore_Concurrent_Good_TTL(t *testing.T) { for g := range goroutines { groupName := core.Sprintf("ttl-%d", g) count, err := storeInstance.Count(groupName) - require.NoError(t, err) - assert.Equal(t, ops/2, count, "only non-TTL keys should remain in %s", groupName) + assertNoError(t, err) + assertEqualf(t, ops/2, count, "only non-TTL keys should remain in %s", groupName) } } diff --git a/test_asserts_test.go b/test_asserts_test.go new file mode 100644 index 0000000..611ec6d --- /dev/null +++ b/test_asserts_test.go @@ -0,0 +1,342 @@ +package store + +import ( + "reflect" + "sort" + "testing" + + core "dappco.re/go/core" +) + +func assertNoError(t testing.TB, err error) { + t.Helper() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func assertNoErrorf(t testing.TB, err error, format string, args ...any) { + t.Helper() + if err != nil { + t.Fatalf("unexpected error: %v — "+format, append([]any{err}, args...)...) 
+ } +} + +func assertError(t testing.TB, err error) { + t.Helper() + if err == nil { + t.Fatal("expected error, got nil") + } +} + +func assertErrorIs(t testing.TB, err, target error) { + t.Helper() + if !errIs(err, target) { + t.Fatalf("expected error matching %v, got %v", target, err) + } +} + +func assertEqual(t testing.TB, want, got any) { + t.Helper() + if !reflect.DeepEqual(want, got) { + t.Fatalf("want %v, got %v", want, got) + } +} + +func assertEqualf(t testing.TB, want, got any, format string, args ...any) { + t.Helper() + if !reflect.DeepEqual(want, got) { + t.Fatalf("want %v, got %v — "+format, append([]any{want, got}, args...)...) + } +} + +func assertTrue(t testing.TB, cond bool) { + t.Helper() + if !cond { + t.Fatal("expected true") + } +} + +func assertTruef(t testing.TB, cond bool, format string, args ...any) { + t.Helper() + if !cond { + t.Fatalf("expected true — "+format, args...) + } +} + +func assertFalse(t testing.TB, cond bool) { + t.Helper() + if cond { + t.Fatal("expected false") + } +} + +func assertFalsef(t testing.TB, cond bool, format string, args ...any) { + t.Helper() + if cond { + t.Fatalf("expected false — "+format, args...) + } +} + +func assertNil(t testing.TB, value any) { + t.Helper() + if !isNil(value) { + t.Fatalf("expected nil, got %v", value) + } +} + +func assertNilf(t testing.TB, value any, format string, args ...any) { + t.Helper() + if !isNil(value) { + t.Fatalf("expected nil, got %v — "+format, append([]any{value}, args...)...) + } +} + +func assertNotNil(t testing.TB, value any) { + t.Helper() + if isNil(value) { + t.Fatal("expected non-nil") + } +} + +func assertEmpty(t testing.TB, value any) { + t.Helper() + if !isEmpty(value) { + t.Fatalf("expected empty, got %v", value) + } +} + +func assertEmptyf(t testing.TB, value any, format string, args ...any) { + t.Helper() + if !isEmpty(value) { + t.Fatalf("expected empty, got %v — "+format, append([]any{value}, args...)...) 
+ } +} + +func assertNotEmpty(t testing.TB, value any) { + t.Helper() + if isEmpty(value) { + t.Fatal("expected non-empty") + } +} + +func assertLen(t testing.TB, value any, want int) { + t.Helper() + got := lenOf(value) + if got != want { + t.Fatalf("expected len %d, got %d", want, got) + } +} + +func assertLenf(t testing.TB, value any, want int, format string, args ...any) { + t.Helper() + got := lenOf(value) + if got != want { + t.Fatalf("expected len %d, got %d — "+format, append([]any{want, got}, args...)...) + } +} + +func assertContainsString(t testing.TB, haystack, needle string) { + t.Helper() + if !stringContains(haystack, needle) { + t.Fatalf("expected %q to contain %q", haystack, needle) + } +} + +func assertContainsElement(t testing.TB, collection, element any) { + t.Helper() + if !containsElement(collection, element) { + t.Fatalf("expected collection to contain %v", element) + } +} + +func assertElementsMatch(t testing.TB, want, got any) { + t.Helper() + if !elementsMatch(want, got) { + t.Fatalf("expected same elements: want %v, got %v", want, got) + } +} + +func assertLessOrEqual(t testing.TB, got, want int) { + t.Helper() + if got > want { + t.Fatalf("expected %d <= %d", got, want) + } +} + +func assertSamef(t testing.TB, want, got any, format string, args ...any) { + t.Helper() + if !samePointer(want, got) { + t.Fatalf("expected same pointer, got %v vs %v — "+format, append([]any{want, got}, args...)...) + } +} + +func assertGreaterf(t testing.TB, got, want int, format string, args ...any) { + t.Helper() + if got <= want { + t.Fatalf("expected %d > %d — "+format, append([]any{got, want}, args...)...) 
+ } +} + +func assertNotPanics(t testing.TB, fn func()) { + t.Helper() + defer func() { + if r := recover(); r != nil { + t.Fatalf("unexpected panic: %v", r) + } + }() + fn() +} + +func errIs(err, target error) bool { + return core.Is(err, target) +} + +func isNil(value any) bool { + if value == nil { + return true + } + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + } + return false +} + +func isEmpty(value any) bool { + if value == nil { + return true + } + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() == 0 + case reflect.Ptr, reflect.Interface: + if rv.IsNil() { + return true + } + return isEmpty(rv.Elem().Interface()) + } + return reflect.DeepEqual(value, reflect.Zero(rv.Type()).Interface()) +} + +func lenOf(value any) int { + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return rv.Len() + } + return -1 +} + +func stringContains(haystack, needle string) bool { + if len(needle) == 0 { + return true + } + if len(needle) > len(haystack) { + return false + } + for i := 0; i+len(needle) <= len(haystack); i++ { + if haystack[i:i+len(needle)] == needle { + return true + } + } + return false +} + +func containsElement(collection, element any) bool { + rv := reflect.ValueOf(collection) + switch rv.Kind() { + case reflect.String: + needle, ok := element.(string) + if !ok { + return false + } + return stringContains(rv.String(), needle) + case reflect.Array, reflect.Slice: + for i := 0; i < rv.Len(); i++ { + if reflect.DeepEqual(rv.Index(i).Interface(), element) { + return true + } + } + return false + case reflect.Map: + for _, key := range rv.MapKeys() { + if reflect.DeepEqual(key.Interface(), element) { + return true + } + } + return false + } + return false +} + 
+func elementsMatch(want, got any) bool { + wantSlice := toAnySlice(want) + gotSlice := toAnySlice(got) + if wantSlice == nil || gotSlice == nil { + return false + } + if len(wantSlice) != len(gotSlice) { + return false + } + sortAny(wantSlice) + sortAny(gotSlice) + for i := range wantSlice { + if !reflect.DeepEqual(wantSlice[i], gotSlice[i]) { + return false + } + } + return true +} + +func toAnySlice(value any) []any { + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Array, reflect.Slice: + result := make([]any, rv.Len()) + for i := 0; i < rv.Len(); i++ { + result[i] = rv.Index(i).Interface() + } + return result + } + return nil +} + +func sortAny(values []any) { + sort.Slice(values, func(i, j int) bool { + return less(values[i], values[j]) + }) +} + +func less(a, b any) bool { + aValue := reflect.ValueOf(a) + bValue := reflect.ValueOf(b) + if aValue.Kind() != bValue.Kind() { + return aValue.Kind() < bValue.Kind() + } + switch aValue.Kind() { + case reflect.String: + return aValue.String() < bValue.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return aValue.Int() < bValue.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return aValue.Uint() < bValue.Uint() + case reflect.Float32, reflect.Float64: + return aValue.Float() < bValue.Float() + } + return false +} + +func samePointer(want, got any) bool { + wantValue := reflect.ValueOf(want) + gotValue := reflect.ValueOf(got) + if !wantValue.IsValid() || !gotValue.IsValid() { + return false + } + if wantValue.Kind() != reflect.Ptr || gotValue.Kind() != reflect.Ptr { + return false + } + return wantValue.Pointer() == gotValue.Pointer() +} diff --git a/test_helpers_test.go b/test_helpers_test.go index 8d4a052..1635d9c 100644 --- a/test_helpers_test.go +++ b/test_helpers_test.go @@ -4,7 +4,6 @@ import ( "testing" core "dappco.re/go/core" - "github.com/stretchr/testify/require" ) func testFilesystem() 
*core.Fs { @@ -18,7 +17,7 @@ func testPath(tb testing.TB, name string) string { func requireCoreOK(tb testing.TB, result core.Result) { tb.Helper() - require.True(tb, result.OK, "core result failed: %v", result.Value) + assertTruef(tb, result.OK, "core result failed: %v", result.Value) } func requireCoreReadBytes(tb testing.TB, path string) []byte { @@ -73,8 +72,8 @@ func useArchiveOutputDirectory(tb testing.TB) string { func requireResultRows(tb testing.TB, result core.Result) []map[string]any { tb.Helper() - require.True(tb, result.OK, "core result failed: %v", result.Value) + assertTruef(tb, result.OK, "core result failed: %v", result.Value) rows, ok := result.Value.([]map[string]any) - require.True(tb, ok, "unexpected row type: %T", result.Value) + assertTruef(tb, ok, "unexpected row type: %T", result.Value) return rows } diff --git a/tests/cli/store/Taskfile.yaml b/tests/cli/store/Taskfile.yaml new file mode 100644 index 0000000..5b694f8 --- /dev/null +++ b/tests/cli/store/Taskfile.yaml @@ -0,0 +1,30 @@ +version: "3" + +tasks: + default: + deps: [build, vet, test] + + build: + dir: ../../.. + cmds: + - go build ./... + + vet: + dir: ../../.. + cmds: + - go vet ./... + + test: + dir: ../../.. + cmds: + - go test -count=1 -race ./... + + test-memory: + dir: ../../.. + cmds: + - go test -count=1 -race -run "^TestStore_.*Memory" ./... + + test-workspace: + dir: ../../.. + cmds: + - go test -count=1 -race -run "^TestWorkspace_" ./... 
diff --git a/transaction.go b/transaction.go index 87de767..2bfef73 100644 --- a/transaction.go +++ b/transaction.go @@ -12,9 +12,9 @@ import ( // Usage example: `err := storeInstance.Transaction(func(transaction *store.StoreTransaction) error { return transaction.Set("config", "colour", "blue") })` // Usage example: `if err := transaction.Delete("config", "colour"); err != nil { return err }` type StoreTransaction struct { - store *Store - transaction *sql.Tx - pendingEvents []Event + storeInstance *Store + sqliteTransaction *sql.Tx + pendingEvents []Event } // Usage example: `err := storeInstance.Transaction(func(transaction *store.StoreTransaction) error { if err := transaction.Set("tenant-a:config", "colour", "blue"); err != nil { return err }; return transaction.Set("tenant-b:config", "language", "en-GB") })` @@ -32,8 +32,8 @@ func (storeInstance *Store) Transaction(operation func(*StoreTransaction) error) } storeTransaction := &StoreTransaction{ - store: storeInstance, - transaction: transaction, + storeInstance: storeInstance, + sqliteTransaction: transaction, } committed := false @@ -61,13 +61,13 @@ func (storeTransaction *StoreTransaction) ensureReady(operation string) error { if storeTransaction == nil { return core.E(operation, "transaction is nil", nil) } - if storeTransaction.store == nil { + if storeTransaction.storeInstance == nil { return core.E(operation, "transaction store is nil", nil) } - if storeTransaction.transaction == nil { + if storeTransaction.sqliteTransaction == nil { return core.E(operation, "transaction database is nil", nil) } - if err := storeTransaction.store.ensureReady(operation); err != nil { + if err := storeTransaction.storeInstance.ensureReady(operation); err != nil { return err } return nil @@ -80,6 +80,29 @@ func (storeTransaction *StoreTransaction) recordEvent(event Event) { storeTransaction.pendingEvents = append(storeTransaction.pendingEvents, event) } +// Usage example: `exists, err := transaction.Exists("config", 
"colour")` +// Usage example: `if exists, _ := transaction.Exists("session", "token"); !exists { return core.E("auth", "session expired", nil) }` +func (storeTransaction *StoreTransaction) Exists(group, key string) (bool, error) { + if err := storeTransaction.ensureReady("store.Transaction.Exists"); err != nil { + return false, err + } + + return liveEntryExists(storeTransaction.sqliteTransaction, group, key) +} + +// Usage example: `exists, err := transaction.GroupExists("config")` +func (storeTransaction *StoreTransaction) GroupExists(group string) (bool, error) { + if err := storeTransaction.ensureReady("store.Transaction.GroupExists"); err != nil { + return false, err + } + + count, err := storeTransaction.Count(group) + if err != nil { + return false, err + } + return count > 0, nil +} + // Usage example: `value, err := transaction.Get("config", "colour")` func (storeTransaction *StoreTransaction) Get(group, key string) (string, error) { if err := storeTransaction.ensureReady("store.Transaction.Get"); err != nil { @@ -88,7 +111,7 @@ func (storeTransaction *StoreTransaction) Get(group, key string) (string, error) var value string var expiresAt sql.NullInt64 - err := storeTransaction.transaction.QueryRow( + err := storeTransaction.sqliteTransaction.QueryRow( "SELECT "+entryValueColumn+", expires_at FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? 
AND "+entryKeyColumn+" = ?", group, key, ).Scan(&value, &expiresAt) @@ -113,7 +136,7 @@ func (storeTransaction *StoreTransaction) Set(group, key, value string) error { return err } - _, err := storeTransaction.transaction.Exec( + _, err := storeTransaction.sqliteTransaction.Exec( "INSERT INTO "+entriesTableName+" ("+entryGroupColumn+", "+entryKeyColumn+", "+entryValueColumn+", expires_at) VALUES (?, ?, ?, NULL) "+ "ON CONFLICT("+entryGroupColumn+", "+entryKeyColumn+") DO UPDATE SET "+entryValueColumn+" = excluded."+entryValueColumn+", expires_at = NULL", group, key, value, @@ -132,7 +155,7 @@ func (storeTransaction *StoreTransaction) SetWithTTL(group, key, value string, t } expiresAt := time.Now().Add(timeToLive).UnixMilli() - _, err := storeTransaction.transaction.Exec( + _, err := storeTransaction.sqliteTransaction.Exec( "INSERT INTO "+entriesTableName+" ("+entryGroupColumn+", "+entryKeyColumn+", "+entryValueColumn+", expires_at) VALUES (?, ?, ?, ?) "+ "ON CONFLICT("+entryGroupColumn+", "+entryKeyColumn+") DO UPDATE SET "+entryValueColumn+" = excluded."+entryValueColumn+", expires_at = excluded.expires_at", group, key, value, expiresAt, @@ -150,7 +173,7 @@ func (storeTransaction *StoreTransaction) Delete(group, key string) error { return err } - deleteResult, err := storeTransaction.transaction.Exec( + deleteResult, err := storeTransaction.sqliteTransaction.Exec( "DELETE FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? 
AND "+entryKeyColumn+" = ?", group, key, ) @@ -173,7 +196,7 @@ func (storeTransaction *StoreTransaction) DeleteGroup(group string) error { return err } - deleteResult, err := storeTransaction.transaction.Exec( + deleteResult, err := storeTransaction.sqliteTransaction.Exec( "DELETE FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ?", group, ) @@ -199,11 +222,11 @@ func (storeTransaction *StoreTransaction) DeletePrefix(groupPrefix string) error var rows *sql.Rows var err error if groupPrefix == "" { - rows, err = storeTransaction.transaction.Query( + rows, err = storeTransaction.sqliteTransaction.Query( "SELECT DISTINCT " + entryGroupColumn + " FROM " + entriesTableName + " ORDER BY " + entryGroupColumn, ) } else { - rows, err = storeTransaction.transaction.Query( + rows, err = storeTransaction.sqliteTransaction.Query( "SELECT DISTINCT "+entryGroupColumn+" FROM "+entriesTableName+" WHERE "+entryGroupColumn+" LIKE ? ESCAPE '^' ORDER BY "+entryGroupColumn, escapeLike(groupPrefix)+"%", ) @@ -211,7 +234,7 @@ func (storeTransaction *StoreTransaction) DeletePrefix(groupPrefix string) error if err != nil { return core.E("store.Transaction.DeletePrefix", "list groups", err) } - defer rows.Close() + defer func() { _ = rows.Close() }() var groupNames []string for rows.Next() { @@ -239,7 +262,7 @@ func (storeTransaction *StoreTransaction) Count(group string) (int, error) { } var count int - err := storeTransaction.transaction.QueryRow( + err := storeTransaction.sqliteTransaction.QueryRow( "SELECT COUNT(*) FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? 
AND (expires_at IS NULL OR expires_at > ?)", group, time.Now().UnixMilli(), ).Scan(&count) @@ -277,14 +300,14 @@ func (storeTransaction *StoreTransaction) GetPage(group string, offset, limit in return nil, core.E("store.Transaction.GetPage", "limit must be zero or positive", nil) } - rows, err := storeTransaction.transaction.Query( + rows, err := storeTransaction.sqliteTransaction.Query( "SELECT "+entryKeyColumn+", "+entryValueColumn+" FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? AND (expires_at IS NULL OR expires_at > ?) ORDER BY "+entryKeyColumn+" LIMIT ? OFFSET ?", group, time.Now().UnixMilli(), limit, offset, ) if err != nil { return nil, core.E("store.Transaction.GetPage", "query rows", err) } - defer rows.Close() + defer func() { _ = rows.Close() }() page := make([]KeyValue, 0, limit) for rows.Next() { @@ -313,7 +336,7 @@ func (storeTransaction *StoreTransaction) AllSeq(group string) iter.Seq2[KeyValu return } - rows, err := storeTransaction.transaction.Query( + rows, err := storeTransaction.sqliteTransaction.Query( "SELECT "+entryKeyColumn+", "+entryValueColumn+" FROM "+entriesTableName+" WHERE "+entryGroupColumn+" = ? AND (expires_at IS NULL OR expires_at > ?) 
ORDER BY "+entryKeyColumn, group, time.Now().UnixMilli(), ) @@ -321,7 +344,7 @@ func (storeTransaction *StoreTransaction) AllSeq(group string) iter.Seq2[KeyValu yield(KeyValue{}, core.E("store.Transaction.All", "query rows", err)) return } - defer rows.Close() + defer func() { _ = rows.Close() }() for rows.Next() { var entry KeyValue @@ -350,12 +373,12 @@ func (storeTransaction *StoreTransaction) CountAll(groupPrefix string) (int, err var count int var err error if groupPrefix == "" { - err = storeTransaction.transaction.QueryRow( + err = storeTransaction.sqliteTransaction.QueryRow( "SELECT COUNT(*) FROM "+entriesTableName+" WHERE (expires_at IS NULL OR expires_at > ?)", time.Now().UnixMilli(), ).Scan(&count) } else { - err = storeTransaction.transaction.QueryRow( + err = storeTransaction.sqliteTransaction.QueryRow( "SELECT COUNT(*) FROM "+entriesTableName+" WHERE "+entryGroupColumn+" LIKE ? ESCAPE '^' AND (expires_at IS NULL OR expires_at > ?)", escapeLike(groupPrefix)+"%", time.Now().UnixMilli(), ).Scan(&count) @@ -386,7 +409,7 @@ func (storeTransaction *StoreTransaction) Groups(groupPrefix ...string) ([]strin // Usage example: `for groupName, err := range transaction.GroupsSeq("tenant-a:") { if err != nil { break }; fmt.Println(groupName) }` // Usage example: `for groupName, err := range transaction.GroupsSeq() { if err != nil { break }; fmt.Println(groupName) }` func (storeTransaction *StoreTransaction) GroupsSeq(groupPrefix ...string) iter.Seq2[string, error] { - actualGroupPrefix := firstOrEmptyString(groupPrefix) + actualGroupPrefix := firstStringOrEmpty(groupPrefix) return func(yield func(string, error) bool) { if err := storeTransaction.ensureReady("store.Transaction.GroupsSeq"); err != nil { yield("", err) @@ -397,12 +420,12 @@ func (storeTransaction *StoreTransaction) GroupsSeq(groupPrefix ...string) iter. 
var err error now := time.Now().UnixMilli() if actualGroupPrefix == "" { - rows, err = storeTransaction.transaction.Query( + rows, err = storeTransaction.sqliteTransaction.Query( "SELECT DISTINCT "+entryGroupColumn+" FROM "+entriesTableName+" WHERE (expires_at IS NULL OR expires_at > ?) ORDER BY "+entryGroupColumn, now, ) } else { - rows, err = storeTransaction.transaction.Query( + rows, err = storeTransaction.sqliteTransaction.Query( "SELECT DISTINCT "+entryGroupColumn+" FROM "+entriesTableName+" WHERE "+entryGroupColumn+" LIKE ? ESCAPE '^' AND (expires_at IS NULL OR expires_at > ?) ORDER BY "+entryGroupColumn, escapeLike(actualGroupPrefix)+"%", now, ) @@ -411,7 +434,7 @@ func (storeTransaction *StoreTransaction) GroupsSeq(groupPrefix ...string) iter. yield("", core.E("store.Transaction.GroupsSeq", "query group names", err)) return } - defer rows.Close() + defer func() { _ = rows.Close() }() for rows.Next() { var groupName string @@ -481,3 +504,28 @@ func (storeTransaction *StoreTransaction) GetFields(group, key string) (iter.Seq } return fieldsValueSeq(value), nil } + +// Usage example: `removedRows, err := transaction.PurgeExpired(); if err != nil { return err }; fmt.Println(removedRows)` +func (storeTransaction *StoreTransaction) PurgeExpired() (int64, error) { + if err := storeTransaction.ensureReady("store.Transaction.PurgeExpired"); err != nil { + return 0, err + } + + cutoffUnixMilli := time.Now().UnixMilli() + expiredEntries, err := deleteExpiredEntriesMatchingGroupPrefix(storeTransaction.sqliteTransaction, "", cutoffUnixMilli) + if err != nil { + return 0, core.E("store.Transaction.PurgeExpired", "delete expired rows", err) + } + removedRows := int64(len(expiredEntries)) + if removedRows > 0 { + for _, expiredEntry := range expiredEntries { + storeTransaction.recordEvent(Event{ + Type: EventDelete, + Group: expiredEntry.group, + Key: expiredEntry.key, + Timestamp: time.Now(), + }) + } + } + return removedRows, nil +} diff --git a/transaction_test.go 
b/transaction_test.go index 73411d3..e74a1f6 100644 --- a/transaction_test.go +++ b/transaction_test.go @@ -6,13 +6,11 @@ import ( "time" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestTransaction_Transaction_Good_CommitsMultipleWrites(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch("*") defer storeInstance.Unwatch("*", events) @@ -26,29 +24,29 @@ func TestTransaction_Transaction_Good_CommitsMultipleWrites(t *testing.T) { } return nil }) - require.NoError(t, err) + assertNoError(t, err) firstValue, err := storeInstance.Get("alpha", "first") - require.NoError(t, err) - assert.Equal(t, "1", firstValue) + assertNoError(t, err) + assertEqual(t, "1", firstValue) secondValue, err := storeInstance.Get("beta", "second") - require.NoError(t, err) - assert.Equal(t, "2", secondValue) + assertNoError(t, err) + assertEqual(t, "2", secondValue) received := drainEvents(events, 2, time.Second) - require.Len(t, received, 2) - assert.Equal(t, EventSet, received[0].Type) - assert.Equal(t, "alpha", received[0].Group) - assert.Equal(t, "first", received[0].Key) - assert.Equal(t, EventSet, received[1].Type) - assert.Equal(t, "beta", received[1].Group) - assert.Equal(t, "second", received[1].Key) + assertLen(t, received, 2) + assertEqual(t, EventSet, received[0].Type) + assertEqual(t, "alpha", received[0].Group) + assertEqual(t, "first", received[0].Key) + assertEqual(t, EventSet, received[1].Type) + assertEqual(t, "beta", received[1].Group) + assertEqual(t, "second", received[1].Key) } func TestTransaction_Transaction_Good_RollbackOnError(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() err := storeInstance.Transaction(func(transaction *StoreTransaction) error { if err := transaction.Set("alpha", "first", "1"); err != nil { @@ 
-56,18 +54,18 @@ func TestTransaction_Transaction_Good_RollbackOnError(t *testing.T) { } return core.E("test", "force rollback", nil) }) - require.Error(t, err) + assertError(t, err) _, err = storeInstance.Get("alpha", "first") - assert.ErrorIs(t, err, NotFoundError) + assertErrorIs(t, err, NotFoundError) } func TestTransaction_Transaction_Good_DeletesAtomically(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() - require.NoError(t, storeInstance.Set("alpha", "first", "1")) - require.NoError(t, storeInstance.Set("beta", "second", "2")) + assertNoError(t, storeInstance.Set("alpha", "first", "1")) + assertNoError(t, storeInstance.Set("beta", "second", "2")) err := storeInstance.Transaction(func(transaction *StoreTransaction) error { if err := transaction.DeletePrefix(""); err != nil { @@ -75,17 +73,17 @@ func TestTransaction_Transaction_Good_DeletesAtomically(t *testing.T) { } return nil }) - require.NoError(t, err) + assertNoError(t, err) _, err = storeInstance.Get("alpha", "first") - assert.ErrorIs(t, err, NotFoundError) + assertErrorIs(t, err, NotFoundError) _, err = storeInstance.Get("beta", "second") - assert.ErrorIs(t, err, NotFoundError) + assertErrorIs(t, err, NotFoundError) } func TestTransaction_Transaction_Good_ReadHelpersSeePendingWrites(t *testing.T) { storeInstance, _ := New(":memory:") - defer storeInstance.Close() + defer func() { _ = storeInstance.Close() }() err := storeInstance.Transaction(func(transaction *StoreTransaction) error { if err := transaction.Set("config", "colour", "blue"); err != nil { @@ -99,32 +97,302 @@ func TestTransaction_Transaction_Good_ReadHelpersSeePendingWrites(t *testing.T) } entriesByKey, err := transaction.GetAll("config") - require.NoError(t, err) - assert.Equal(t, map[string]string{"colour": "blue", "hosts": "alpha beta"}, entriesByKey) + assertNoError(t, err) + assertEqual(t, map[string]string{"colour": "blue", "hosts": "alpha beta"}, 
entriesByKey) count, err := transaction.CountAll("") - require.NoError(t, err) - assert.Equal(t, 3, count) + assertNoError(t, err) + assertEqual(t, 3, count) groupNames, err := transaction.Groups() - require.NoError(t, err) - assert.Equal(t, []string{"audit", "config"}, groupNames) + assertNoError(t, err) + assertEqual(t, []string{"audit", "config"}, groupNames) renderedTemplate, err := transaction.Render("{{ .colour }} / {{ .hosts }}", "config") - require.NoError(t, err) - assert.Equal(t, "blue / alpha beta", renderedTemplate) + assertNoError(t, err) + assertEqual(t, "blue / alpha beta", renderedTemplate) splitParts, err := transaction.GetSplit("config", "hosts", " ") - require.NoError(t, err) - assert.Equal(t, []string{"alpha", "beta"}, collectSeq(t, splitParts)) + assertNoError(t, err) + assertEqual(t, []string{"alpha", "beta"}, collectSeq(t, splitParts)) fieldParts, err := transaction.GetFields("config", "hosts") - require.NoError(t, err) - assert.Equal(t, []string{"alpha", "beta"}, collectSeq(t, fieldParts)) + assertNoError(t, err) + assertEqual(t, []string{"alpha", "beta"}, collectSeq(t, fieldParts)) return nil }) - require.NoError(t, err) + assertNoError(t, err) +} + +func TestTransaction_Transaction_Good_PurgeExpired(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, storeInstance.SetWithTTL("alpha", "ephemeral", "gone", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + err := storeInstance.Transaction(func(transaction *StoreTransaction) error { + removedRows, err := transaction.PurgeExpired() + assertNoError(t, err) + assertEqual(t, int64(1), removedRows) + return nil + }) + assertNoError(t, err) + + _, err = storeInstance.Get("alpha", "ephemeral") + assertErrorIs(t, err, NotFoundError) +} + +func TestTransaction_Transaction_Good_Exists(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + assertNoError(t, 
storeInstance.Set("config", "colour", "blue")) + + err := storeInstance.Transaction(func(transaction *StoreTransaction) error { + exists, err := transaction.Exists("config", "colour") + assertNoError(t, err) + assertTrue(t, exists) + + exists, err = transaction.Exists("config", "missing") + assertNoError(t, err) + assertFalse(t, exists) + + return nil + }) + assertNoError(t, err) +} + +func TestTransaction_Transaction_Good_ExistsSeesPendingWrites(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + err := storeInstance.Transaction(func(transaction *StoreTransaction) error { + exists, err := transaction.Exists("config", "colour") + assertNoError(t, err) + assertFalse(t, exists) + + if err := transaction.Set("config", "colour", "blue"); err != nil { + return err + } + + exists, err = transaction.Exists("config", "colour") + assertNoError(t, err) + assertTrue(t, exists) + + return nil + }) + assertNoError(t, err) +} + +func TestTransaction_Transaction_Good_GroupExists(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + err := storeInstance.Transaction(func(transaction *StoreTransaction) error { + exists, err := transaction.GroupExists("config") + assertNoError(t, err) + assertFalse(t, exists) + + if err := transaction.Set("config", "colour", "blue"); err != nil { + return err + } + + exists, err = transaction.GroupExists("config") + assertNoError(t, err) + assertTrue(t, exists) + + return nil + }) + assertNoError(t, err) +} + +func TestTransaction_ScopedStoreTransaction_Good_ExistsAndGroupExists(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + + err := scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + exists, err := transaction.Exists("colour") + assertNoError(t, err) + assertFalse(t, exists) + + if err := transaction.Set("colour", 
"blue"); err != nil { + return err + } + + exists, err = transaction.Exists("colour") + assertNoError(t, err) + assertTrue(t, exists) + + exists, err = transaction.ExistsIn("other", "colour") + assertNoError(t, err) + assertFalse(t, exists) + + if err := transaction.SetIn("config", "theme", "dark"); err != nil { + return err + } + + groupExists, err := transaction.GroupExists("config") + assertNoError(t, err) + assertTrue(t, groupExists) + + groupExists, err = transaction.GroupExists("missing-group") + assertNoError(t, err) + assertFalse(t, groupExists) + + return nil + }) + assertNoError(t, err) +} + +func TestTransaction_ScopedStoreTransaction_Good_GetPage(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + + err := scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + if err := transaction.SetIn("items", "charlie", "3"); err != nil { + return err + } + if err := transaction.SetIn("items", "alpha", "1"); err != nil { + return err + } + if err := transaction.SetIn("items", "bravo", "2"); err != nil { + return err + } + + page, err := transaction.GetPage("items", 1, 1) + assertNoError(t, err) + assertLen(t, page, 1) + assertEqual(t, KeyValue{Key: "bravo", Value: "2"}, page[0]) + return nil + }) + assertNoError(t, err) +} + +func TestTransaction_ScopedStoreTransaction_Good_CommitsNamespacedWrites(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 4, MaxGroups: 2}, + }) + assertNoError(t, err) + + err = scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + if err := transaction.Set("theme", "dark"); err != nil { + return err + } + if err := transaction.SetIn("preferences", "locale", "en-GB"); err != nil { + return err + } + + themeValue, 
err := transaction.Get("theme") + assertNoError(t, err) + assertEqual(t, "dark", themeValue) + + localeValue, err := transaction.GetFrom("preferences", "locale") + assertNoError(t, err) + assertEqual(t, "en-GB", localeValue) + + groupNames, err := transaction.Groups() + assertNoError(t, err) + assertEqual(t, []string{"default", "preferences"}, groupNames) + + return nil + }) + assertNoError(t, err) + + themeValue, err := storeInstance.Get("tenant-a:default", "theme") + assertNoError(t, err) + assertEqual(t, "dark", themeValue) + + localeValue, err := storeInstance.Get("tenant-a:preferences", "locale") + assertNoError(t, err) + assertEqual(t, "en-GB", localeValue) +} + +func TestTransaction_ScopedStoreTransaction_Good_PurgeExpired(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + + assertNoError(t, scopedStore.SetWithTTL("session", "token", "abc123", 1*time.Millisecond)) + time.Sleep(5 * time.Millisecond) + + err := scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + removedRows, err := transaction.PurgeExpired() + assertNoError(t, err) + assertEqual(t, int64(1), removedRows) + return nil + }) + assertNoError(t, err) + + _, err = scopedStore.GetFrom("session", "token") + assertErrorIs(t, err, NotFoundError) +} + +func TestTransaction_ScopedStoreTransaction_Good_QuotaUsesPendingWrites(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore, err := NewScopedConfigured(storeInstance, ScopedStoreConfig{ + Namespace: "tenant-a", + Quota: QuotaConfig{MaxKeys: 2, MaxGroups: 2}, + }) + assertNoError(t, err) + + err = scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + assertNoError(t, transaction.SetIn("group-1", "first", "1")) + assertNoError(t, transaction.SetIn("group-2", "second", "2")) + + err := transaction.SetIn("group-2", "third", "3") + assertError(t, 
err) + assertTrue(t, core.Is(err, QuotaExceededError)) + return err + }) + assertError(t, err) + assertTrue(t, core.Is(err, QuotaExceededError)) + + _, getErr := storeInstance.Get("tenant-a:group-1", "first") + assertTrue(t, core.Is(getErr, NotFoundError)) +} + +func TestTransaction_ScopedStoreTransaction_Good_DeletePrefix(t *testing.T) { + storeInstance, _ := New(":memory:") + defer func() { _ = storeInstance.Close() }() + + scopedStore := NewScoped(storeInstance, "tenant-a") + otherScopedStore := NewScoped(storeInstance, "tenant-b") + + assertNoError(t, scopedStore.SetIn("cache", "theme", "dark")) + assertNoError(t, scopedStore.SetIn("cache-warm", "status", "ready")) + assertNoError(t, scopedStore.SetIn("config", "colour", "blue")) + assertNoError(t, otherScopedStore.SetIn("cache", "theme", "keep")) + + err := scopedStore.Transaction(func(transaction *ScopedStoreTransaction) error { + return transaction.DeletePrefix("cache") + }) + assertNoError(t, err) + + _, getErr := scopedStore.GetFrom("cache", "theme") + assertTrue(t, core.Is(getErr, NotFoundError)) + _, getErr = scopedStore.GetFrom("cache-warm", "status") + assertTrue(t, core.Is(getErr, NotFoundError)) + + colourValue, getErr := scopedStore.GetFrom("config", "colour") + assertNoError(t, getErr) + assertEqual(t, "blue", colourValue) + + otherValue, getErr := otherScopedStore.GetFrom("cache", "theme") + assertNoError(t, getErr) + assertEqual(t, "keep", otherValue) } func collectSeq[T any](t *testing.T, sequence iter.Seq[T]) []T { diff --git a/workspace.go b/workspace.go index cdb47bb..6c14dd4 100644 --- a/workspace.go +++ b/workspace.go @@ -5,7 +5,7 @@ import ( "io/fs" "maps" "slices" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural for store infrastructure (RFC §4 explicitly mandates). 
"time" core "dappco.re/go/core" @@ -14,13 +14,14 @@ import ( const ( workspaceEntriesTableName = "workspace_entries" workspaceSummaryGroupPrefix = "workspace" + workspaceQuarantineDirName = "quarantine" ) const createWorkspaceEntriesTableSQL = `CREATE TABLE IF NOT EXISTS workspace_entries ( - entry_id INTEGER PRIMARY KEY AUTOINCREMENT, + entry_id BIGINT PRIMARY KEY DEFAULT nextval('workspace_entries_entry_id_seq'), entry_kind TEXT NOT NULL, entry_data TEXT NOT NULL, - created_at INTEGER NOT NULL + created_at BIGINT NOT NULL )` const createWorkspaceEntriesViewSQL = `CREATE VIEW IF NOT EXISTS entries AS @@ -33,20 +34,21 @@ FROM workspace_entries` var defaultWorkspaceStateDirectory = ".core/state/" -// Workspace keeps mutable work-in-progress in a SQLite file such as -// `.core/state/scroll-session.duckdb` until Commit or Discard removes it. -// +// Usage example: `workspace, err := storeInstance.NewWorkspace("scroll-session"); if err != nil { return }; defer workspace.Discard()` // Usage example: `workspace, err := storeInstance.NewWorkspace("scroll-session-2026-03-30"); if err != nil { return }; defer workspace.Discard(); _ = workspace.Put("like", map[string]any{"user": "@alice"})` +// Each workspace keeps mutable work-in-progress in a DuckDB file such as +// `.core/state/scroll-session.duckdb` until `Commit()` or `Discard()` removes +// it. 
type Workspace struct { - name string - parentStore *Store - sqliteDatabase *sql.DB - databasePath string - filesystem *core.Fs - orphanAggregate map[string]any - - closeLock sync.Mutex - closed bool + name string + store *Store + db *sql.DB + databasePath string + filesystem *core.Fs + cachedOrphanAggregate map[string]any + + lifecycleLock sync.Mutex + isClosed bool } // Usage example: `workspaceName := workspace.Name(); fmt.Println(workspaceName)` @@ -65,10 +67,10 @@ func (workspace *Workspace) DatabasePath() string { return workspace.databasePath } -// Close keeps the workspace file on disk so `RecoverOrphans(".core/state/")` -// can reopen it later. -// +// Usage example: `if err := workspace.Close(); err != nil { return }` // Usage example: `if err := workspace.Close(); err != nil { return }; orphans := storeInstance.RecoverOrphans(".core/state"); _ = orphans` +// `Close()` keeps the `.duckdb` file on disk so `RecoverOrphans(".core/state")` +// can reopen it after a crash or interrupted agent run. 
func (workspace *Workspace) Close() error { return workspace.closeWithoutRemovingFiles() } @@ -77,22 +79,22 @@ func (workspace *Workspace) ensureReady(operation string) error { if workspace == nil { return core.E(operation, "workspace is nil", nil) } - if workspace.parentStore == nil { + if workspace.store == nil { return core.E(operation, "workspace store is nil", nil) } - if workspace.sqliteDatabase == nil { + if workspace.db == nil { return core.E(operation, "workspace database is nil", nil) } if workspace.filesystem == nil { return core.E(operation, "workspace filesystem is nil", nil) } - if err := workspace.parentStore.ensureReady(operation); err != nil { + if err := workspace.store.ensureReady(operation); err != nil { return err } - workspace.closeLock.Lock() - closed := workspace.closed - workspace.closeLock.Unlock() + workspace.lifecycleLock.Lock() + closed := workspace.isClosed + workspace.lifecycleLock.Unlock() if closed { return core.E(operation, "workspace is closed", nil) } @@ -100,19 +102,17 @@ func (workspace *Workspace) ensureReady(operation string) error { return nil } -// NewWorkspace opens a SQLite workspace file such as -// `.core/state/scroll-session-2026-03-30.duckdb` and removes it when the -// workspace is committed or discarded. -// // Usage example: `workspace, err := storeInstance.NewWorkspace("scroll-session-2026-03-30"); if err != nil { return }; defer workspace.Discard()` +// This creates `.core/state/scroll-session-2026-03-30.duckdb` by default and +// removes it when the workspace is committed or discarded. 
func (storeInstance *Store) NewWorkspace(name string) (*Workspace, error) { if err := storeInstance.ensureReady("store.NewWorkspace"); err != nil { return nil, err } - validation := core.ValidateName(name) - if !validation.OK { - return nil, core.E("store.NewWorkspace", "validate workspace name", validation.Value.(error)) + workspaceNameValidation := core.ValidateName(name) + if !workspaceNameValidation.OK { + return nil, core.E("store.NewWorkspace", "validate workspace name", workspaceNameValidation.Value.(error)) } filesystem := (&core.Fs{}).NewUnrestricted() @@ -125,17 +125,17 @@ func (storeInstance *Store) NewWorkspace(name string) (*Workspace, error) { return nil, core.E("store.NewWorkspace", "ensure state directory", result.Value.(error)) } - sqliteDatabase, err := openWorkspaceDatabase(databasePath) + database, err := openWorkspaceDatabase(databasePath) if err != nil { return nil, core.E("store.NewWorkspace", "open workspace database", err) } return &Workspace{ - name: name, - parentStore: storeInstance, - sqliteDatabase: sqliteDatabase, - databasePath: databasePath, - filesystem: filesystem, + name: name, + store: storeInstance, + db: database, + databasePath: databasePath, + filesystem: filesystem, }, nil } @@ -181,26 +181,38 @@ func discoverOrphanWorkspacePaths(stateDirectory string) []string { return orphanPaths } -func discoverOrphanWorkspaces(stateDirectory string, parentStore *Store) []*Workspace { - return loadRecoveredWorkspaces(stateDirectory, parentStore) +func discoverOrphanWorkspaces(stateDirectory string, store *Store) []*Workspace { + return loadRecoveredWorkspaces(stateDirectory, store) } -func loadRecoveredWorkspaces(stateDirectory string, parentStore *Store) []*Workspace { +func loadRecoveredWorkspaces(stateDirectory string, store *Store) []*Workspace { filesystem := (&core.Fs{}).NewUnrestricted() orphanWorkspaces := make([]*Workspace, 0) for _, databasePath := range discoverOrphanWorkspacePaths(stateDirectory) { - sqliteDatabase, err := 
openWorkspaceDatabase(databasePath) + workspaceName := workspaceNameFromPath(stateDirectory, databasePath) + if workspaceCommitMarkerExists(store, workspaceName) { + removeWorkspaceDatabaseFiles(filesystem, databasePath) + continue + } + database, err := openWorkspaceDatabase(databasePath) if err != nil { + quarantineOrphanWorkspaceFiles(filesystem, stateDirectory, databasePath) continue } orphanWorkspace := &Workspace{ - name: workspaceNameFromPath(stateDirectory, databasePath), - parentStore: parentStore, - sqliteDatabase: sqliteDatabase, - databasePath: databasePath, - filesystem: filesystem, + name: workspaceName, + store: store, + db: database, + databasePath: databasePath, + filesystem: filesystem, + } + aggregate, err := orphanWorkspace.aggregateFieldsWithoutReadiness() + if err != nil { + _ = orphanWorkspace.closeWithoutRemovingFiles() + quarantineOrphanWorkspaceFiles(filesystem, stateDirectory, databasePath) + continue } - orphanWorkspace.orphanAggregate = orphanWorkspace.captureAggregateSnapshot() + orphanWorkspace.cachedOrphanAggregate = aggregate orphanWorkspaces = append(orphanWorkspaces, orphanWorkspace) } return orphanWorkspaces @@ -215,11 +227,10 @@ func workspaceNameFromPath(stateDirectory, databasePath string) string { return core.TrimSuffix(relativePath, ".duckdb") } -// RecoverOrphans(".core/state") returns orphaned workspaces such as -// `scroll-session-2026-03-30.duckdb` so callers can inspect Aggregate() and -// choose Commit() or Discard(). -// // Usage example: `orphans := storeInstance.RecoverOrphans(".core/state"); for _, orphanWorkspace := range orphans { fmt.Println(orphanWorkspace.Name(), orphanWorkspace.Aggregate()) }` +// This reopens leftover `.duckdb` files such as `scroll-session-2026-03-30` +// so callers can inspect `Aggregate()` and choose `Commit()` or `Discard()`. +// Unreadable orphan files are moved under `.core/state/quarantine/`. 
func (storeInstance *Store) RecoverOrphans(stateDirectory string) []*Workspace { if storeInstance == nil { return nil @@ -231,10 +242,10 @@ func (storeInstance *Store) RecoverOrphans(stateDirectory string) []*Workspace { stateDirectory = normaliseWorkspaceStateDirectory(stateDirectory) if stateDirectory == storeInstance.workspaceStateDirectoryPath() { - storeInstance.orphanWorkspacesLock.Lock() - cachedWorkspaces := slices.Clone(storeInstance.orphanWorkspaces) - storeInstance.orphanWorkspaces = nil - storeInstance.orphanWorkspacesLock.Unlock() + storeInstance.orphanWorkspaceLock.Lock() + cachedWorkspaces := slices.Clone(storeInstance.cachedOrphanWorkspaces) + storeInstance.cachedOrphanWorkspaces = nil + storeInstance.orphanWorkspaceLock.Unlock() if len(cachedWorkspaces) > 0 { return cachedWorkspaces } @@ -260,7 +271,7 @@ func (workspace *Workspace) Put(kind string, data map[string]any) error { return err } - _, err = workspace.sqliteDatabase.Exec( + _, err = workspace.db.Exec( "INSERT INTO "+workspaceEntriesTableName+" (entry_kind, entry_data, created_at) VALUES (?, ?, ?)", kind, dataJSON, @@ -272,6 +283,22 @@ func (workspace *Workspace) Put(kind string, data map[string]any) error { return nil } +// Usage example: `entryCount, err := workspace.Count(); if err != nil { return }; fmt.Println(entryCount)` +func (workspace *Workspace) Count() (int, error) { + if err := workspace.ensureReady("store.Workspace.Count"); err != nil { + return 0, err + } + + var count int + err := workspace.db.QueryRow( + "SELECT COUNT(*) FROM " + workspaceEntriesTableName, + ).Scan(&count) + if err != nil { + return 0, core.E("store.Workspace.Count", "count entries", err) + } + return count, nil +} + // Usage example: `summary := workspace.Aggregate(); fmt.Println(summary["like"])` func (workspace *Workspace) Aggregate() map[string]any { if workspace.shouldUseOrphanAggregate() { @@ -288,10 +315,9 @@ func (workspace *Workspace) Aggregate() map[string]any { return fields } -// Commit writes 
one completed workspace row to the journal and upserts the -// summary entry in `workspace:NAME`. -// // Usage example: `result := workspace.Commit(); if !result.OK { return }; fmt.Println(result.Value)` +// `Commit()` writes one completed workspace row to the journal, upserts the +// `workspace:NAME/summary` entry, and removes the workspace file. func (workspace *Workspace) Commit() core.Result { if err := workspace.ensureReady("store.Workspace.Commit"); err != nil { return core.Result{Value: err, OK: false} @@ -301,11 +327,11 @@ func (workspace *Workspace) Commit() core.Result { if err != nil { return core.Result{Value: core.E("store.Workspace.Commit", "aggregate workspace", err), OK: false} } - if err := workspace.parentStore.commitWorkspaceAggregate(workspace.name, fields); err != nil { + if err := workspace.store.commitWorkspaceAggregate(workspace.name, fields); err != nil { return core.Result{Value: err, OK: false} } if err := workspace.closeAndRemoveFiles(); err != nil { - return core.Result{Value: err, OK: false} + return core.Result{Value: cloneAnyMap(fields), OK: true} } return core.Result{Value: cloneAnyMap(fields), OK: true} } @@ -318,20 +344,19 @@ func (workspace *Workspace) Discard() { _ = workspace.closeAndRemoveFiles() } -// Query runs SQL against the workspace buffer and returns rows as -// `[]map[string]any` for ad-hoc inspection. -// // Usage example: `result := workspace.Query("SELECT entry_kind, COUNT(*) AS count FROM workspace_entries GROUP BY entry_kind")` +// `result.Value` contains `[]map[string]any`, which lets an agent inspect the +// current buffer state without defining extra result types. 
func (workspace *Workspace) Query(query string) core.Result { if err := workspace.ensureReady("store.Workspace.Query"); err != nil { return core.Result{Value: err, OK: false} } - rows, err := workspace.sqliteDatabase.Query(query) + rows, err := workspace.db.Query(query) if err != nil { return core.Result{Value: core.E("store.Workspace.Query", "query workspace", err), OK: false} } - defer rows.Close() + defer func() { _ = rows.Close() }() rowMaps, err := queryRowsAsMaps(rows) if err != nil { @@ -347,27 +372,15 @@ func (workspace *Workspace) aggregateFields() (map[string]any, error) { return workspace.aggregateFieldsWithoutReadiness() } -func (workspace *Workspace) captureAggregateSnapshot() map[string]any { - if workspace == nil || workspace.sqliteDatabase == nil { - return nil - } - - fields, err := workspace.aggregateFieldsWithoutReadiness() - if err != nil { - return nil - } - return fields -} - func (workspace *Workspace) aggregateFallback() map[string]any { - if workspace == nil || workspace.orphanAggregate == nil { + if workspace == nil || workspace.cachedOrphanAggregate == nil { return map[string]any{} } - return maps.Clone(workspace.orphanAggregate) + return maps.Clone(workspace.cachedOrphanAggregate) } func (workspace *Workspace) shouldUseOrphanAggregate() bool { - if workspace == nil || workspace.orphanAggregate == nil { + if workspace == nil || workspace.cachedOrphanAggregate == nil { return false } if workspace.filesystem == nil || workspace.databasePath == "" { @@ -377,13 +390,13 @@ func (workspace *Workspace) shouldUseOrphanAggregate() bool { } func (workspace *Workspace) aggregateFieldsWithoutReadiness() (map[string]any, error) { - rows, err := workspace.sqliteDatabase.Query( + rows, err := workspace.db.Query( "SELECT entry_kind, COUNT(*) FROM " + workspaceEntriesTableName + " GROUP BY entry_kind ORDER BY entry_kind", ) if err != nil { return nil, err } - defer rows.Close() + defer func() { _ = rows.Close() }() fields := make(map[string]any) for 
rows.Next() { @@ -416,26 +429,26 @@ func (workspace *Workspace) closeAndCleanup(removeFiles bool) error { if workspace == nil { return nil } - if workspace.sqliteDatabase == nil || workspace.filesystem == nil { + if workspace.db == nil { return nil } - workspace.closeLock.Lock() - alreadyClosed := workspace.closed + workspace.lifecycleLock.Lock() + alreadyClosed := workspace.isClosed if !alreadyClosed { - workspace.closed = true + workspace.isClosed = true } - workspace.closeLock.Unlock() + workspace.lifecycleLock.Unlock() if !alreadyClosed { - if err := workspace.sqliteDatabase.Close(); err != nil { + if err := workspace.db.Close(); err != nil { return core.E("store.Workspace.closeAndCleanup", "close workspace database", err) } } - if !removeFiles { + if !removeFiles || workspace.filesystem == nil { return nil } - for _, path := range []string{workspace.databasePath, workspace.databasePath + "-wal", workspace.databasePath + "-shm"} { + for _, path := range workspaceDatabaseFilePaths(workspace.databasePath) { if result := workspace.filesystem.Delete(path); !result.OK && workspace.filesystem.Exists(path) { return core.E("store.Workspace.closeAndCleanup", "delete workspace file", result.Value.(error)) } @@ -508,28 +521,28 @@ func (storeInstance *Store) commitWorkspaceAggregate(workspaceName string, field } func openWorkspaceDatabase(databasePath string) (*sql.DB, error) { - sqliteDatabase, err := sql.Open("sqlite", databasePath) + database, err := sql.Open("duckdb", databasePath) if err != nil { return nil, core.E("store.openWorkspaceDatabase", "open workspace database", err) } - sqliteDatabase.SetMaxOpenConns(1) - if _, err := sqliteDatabase.Exec("PRAGMA journal_mode=WAL"); err != nil { - sqliteDatabase.Close() - return nil, core.E("store.openWorkspaceDatabase", "set WAL journal mode", err) + database.SetMaxOpenConns(1) + if err := database.Ping(); err != nil { + _ = database.Close() + return nil, core.E("store.openWorkspaceDatabase", "ping workspace database", err) 
} - if _, err := sqliteDatabase.Exec("PRAGMA busy_timeout=5000"); err != nil { - sqliteDatabase.Close() - return nil, core.E("store.openWorkspaceDatabase", "set busy timeout", err) + if _, err := database.Exec("CREATE SEQUENCE IF NOT EXISTS workspace_entries_entry_id_seq START 1"); err != nil { + _ = database.Close() + return nil, core.E("store.openWorkspaceDatabase", "create workspace entry sequence", err) } - if _, err := sqliteDatabase.Exec(createWorkspaceEntriesTableSQL); err != nil { - sqliteDatabase.Close() + if _, err := database.Exec(createWorkspaceEntriesTableSQL); err != nil { + _ = database.Close() return nil, core.E("store.openWorkspaceDatabase", "create workspace entries table", err) } - if _, err := sqliteDatabase.Exec(createWorkspaceEntriesViewSQL); err != nil { - sqliteDatabase.Close() + if _, err := database.Exec(createWorkspaceEntriesViewSQL); err != nil { + _ = database.Close() return nil, core.E("store.openWorkspaceDatabase", "create workspace entries view", err) } - return sqliteDatabase, nil + return database, nil } func workspaceSummaryGroup(workspaceName string) string { @@ -540,6 +553,85 @@ func workspaceFilePath(stateDirectory, name string) string { return joinPath(stateDirectory, core.Concat(name, ".duckdb")) } +func workspaceQuarantineFilePath(stateDirectory, databasePath string) string { + return joinPath( + joinPath(stateDirectory, workspaceQuarantineDirName), + core.Concat(workspaceNameFromPath(stateDirectory, databasePath), ".duckdb"), + ) +} + +func quarantineOrphanWorkspaceFiles(filesystem *core.Fs, stateDirectory, databasePath string) { + if filesystem == nil || databasePath == "" { + return + } + quarantineDirectory := joinPath(stateDirectory, workspaceQuarantineDirName) + if result := filesystem.EnsureDir(quarantineDirectory); !result.OK { + return + } + quarantinePath := availableQuarantineWorkspacePath( + filesystem, + workspaceQuarantineFilePath(stateDirectory, databasePath), + ) + sourcePaths := 
workspaceDatabaseFilePaths(databasePath) + quarantinePaths := workspaceDatabaseFilePaths(quarantinePath) + for index, sourcePath := range sourcePaths { + quarantineWorkspaceFile(filesystem, sourcePath, quarantinePaths[index]) + } +} + +func availableQuarantineWorkspacePath(filesystem *core.Fs, preferredPath string) string { + if !workspaceQuarantinePathExists(filesystem, preferredPath) { + return preferredPath + } + stem := core.TrimSuffix(preferredPath, ".duckdb") + for index := 1; ; index++ { + candidatePath := core.Concat(stem, ".", core.Sprint(index), ".duckdb") + if !workspaceQuarantinePathExists(filesystem, candidatePath) { + return candidatePath + } + } +} + +func workspaceQuarantinePathExists(filesystem *core.Fs, databasePath string) bool { + for _, path := range workspaceDatabaseFilePaths(databasePath) { + if filesystem.Exists(path) { + return true + } + } + return false +} + +func workspaceCommitMarkerExists(storeInstance *Store, workspaceName string) bool { + if storeInstance == nil || workspaceName == "" { + return false + } + exists, err := storeInstance.Exists(workspaceSummaryGroup(workspaceName), "summary") + return err == nil && exists +} + +func removeWorkspaceDatabaseFiles(filesystem *core.Fs, databasePath string) { + if filesystem == nil || databasePath == "" { + return + } + for _, path := range workspaceDatabaseFilePaths(databasePath) { + _ = filesystem.Delete(path) + } +} + +func workspaceDatabaseFilePaths(databasePath string) []string { + if core.HasSuffix(databasePath, ".duckdb") { + return []string{databasePath, databasePath + ".wal"} + } + return []string{databasePath, databasePath + "-wal", databasePath + "-shm"} +} + +func quarantineWorkspaceFile(filesystem *core.Fs, sourcePath, quarantinePath string) { + if filesystem == nil || !filesystem.Exists(sourcePath) { + return + } + _ = filesystem.Rename(sourcePath, quarantinePath) +} + func joinPath(base, name string) string { if base == "" { return name diff --git a/workspace_test.go 
b/workspace_test.go index 95eff0a..774b838 100644 --- a/workspace_test.go +++ b/workspace_test.go @@ -5,246 +5,341 @@ import ( "time" core "dappco.re/go/core" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestWorkspace_NewWorkspace_Good_CreatePutAggregateQuery(t *testing.T) { stateDirectory := useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) defer workspace.Discard() - assert.Equal(t, workspaceFilePath(stateDirectory, "scroll-session"), workspace.databasePath) - assert.True(t, testFilesystem().Exists(workspace.databasePath)) + assertEqual(t, workspaceFilePath(stateDirectory, "scroll-session"), workspace.databasePath) + assertTrue(t, testFilesystem().Exists(workspace.databasePath)) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) - require.NoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) - assert.Equal(t, map[string]any{"like": 2, "profile_match": 1}, workspace.Aggregate()) + assertEqual(t, map[string]any{"like": 2, "profile_match": 1}, workspace.Aggregate()) rows := requireResultRows( t, workspace.Query("SELECT entry_kind, COUNT(*) AS entry_count FROM workspace_entries GROUP BY entry_kind ORDER BY entry_kind"), ) - require.Len(t, rows, 2) - assert.Equal(t, "like", rows[0]["entry_kind"]) - assert.Equal(t, int64(2), rows[0]["entry_count"]) - 
assert.Equal(t, "profile_match", rows[1]["entry_kind"]) - assert.Equal(t, int64(1), rows[1]["entry_count"]) + assertLen(t, rows, 2) + assertEqual(t, "like", rows[0]["entry_kind"]) + assertEqual(t, int64(2), rows[0]["entry_count"]) + assertEqual(t, "profile_match", rows[1]["entry_kind"]) + assertEqual(t, int64(1), rows[1]["entry_count"]) } func TestWorkspace_DatabasePath_Good(t *testing.T) { stateDirectory := useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) defer workspace.Discard() - assert.Equal(t, workspaceFilePath(stateDirectory, "scroll-session"), workspace.DatabasePath()) + assertEqual(t, workspaceFilePath(stateDirectory, "scroll-session"), workspace.DatabasePath()) +} + +func TestWorkspace_Count_Good_Empty(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("count-empty") + assertNoError(t, err) + defer workspace.Discard() + + count, err := workspace.Count() + assertNoError(t, err) + assertEqual(t, 0, count) +} + +func TestWorkspace_Count_Good_AfterPuts(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("count-puts") + assertNoError(t, err) + defer workspace.Discard() + + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) + + count, err := workspace.Count() + assertNoError(t, err) + assertEqual(t, 3, count) +} + 
+func TestWorkspace_Count_Bad_ClosedWorkspace(t *testing.T) { + useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:") + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("count-closed") + assertNoError(t, err) + workspace.Discard() + + _, err = workspace.Count() + assertError(t, err) } func TestWorkspace_Query_Good_RFCEntriesView(t *testing.T) { useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) defer workspace.Discard() - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) - require.NoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) rows := requireResultRows( t, workspace.Query("SELECT kind, COUNT(*) AS entry_count FROM entries GROUP BY kind ORDER BY kind"), ) - require.Len(t, rows, 2) - assert.Equal(t, "like", rows[0]["kind"]) - assert.Equal(t, int64(2), rows[0]["entry_count"]) - assert.Equal(t, "profile_match", rows[1]["kind"]) - assert.Equal(t, int64(1), rows[1]["entry_count"]) + assertLen(t, rows, 2) + assertEqual(t, "like", rows[0]["kind"]) + assertEqual(t, int64(2), rows[0]["entry_count"]) + assertEqual(t, "profile_match", rows[1]["kind"]) + assertEqual(t, int64(1), rows[1]["entry_count"]) } func TestWorkspace_Commit_Good_JournalAndSummary(t *testing.T) { useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", 
"core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) - require.NoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@bob"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) result := workspace.Commit() - require.True(t, result.OK, "workspace commit failed: %v", result.Value) - assert.Equal(t, map[string]any{"like": 2, "profile_match": 1}, result.Value) - assert.False(t, testFilesystem().Exists(workspace.databasePath)) + assertTruef(t, result.OK, "workspace commit failed: %v", result.Value) + assertEqual(t, map[string]any{"like": 2, "profile_match": 1}, result.Value) + assertFalse(t, testFilesystem().Exists(workspace.databasePath)) summaryJSON, err := storeInstance.Get(workspaceSummaryGroup("scroll-session"), "summary") - require.NoError(t, err) + assertNoError(t, err) summary := make(map[string]any) summaryResult := core.JSONUnmarshalString(summaryJSON, &summary) - require.True(t, summaryResult.OK, "summary unmarshal failed: %v", summaryResult.Value) - assert.Equal(t, float64(2), summary["like"]) - assert.Equal(t, float64(1), summary["profile_match"]) + assertTruef(t, summaryResult.OK, "summary unmarshal failed: %v", summaryResult.Value) + assertEqual(t, float64(2), summary["like"]) + assertEqual(t, float64(1), summary["profile_match"]) rows := requireResultRows( t, storeInstance.QueryJournal(`from(bucket: "events") |> range(start: -24h) |> filter(fn: (r) => r._measurement == "scroll-session")`), ) - 
require.Len(t, rows, 1) - assert.Equal(t, "scroll-session", rows[0]["measurement"]) + assertLen(t, rows, 1) + assertEqual(t, "scroll-session", rows[0]["measurement"]) fields, ok := rows[0]["fields"].(map[string]any) - require.True(t, ok, "unexpected fields type: %T", rows[0]["fields"]) - assert.Equal(t, float64(2), fields["like"]) - assert.Equal(t, float64(1), fields["profile_match"]) + assertTruef(t, ok, "unexpected fields type: %T", rows[0]["fields"]) + assertEqual(t, float64(2), fields["like"]) + assertEqual(t, float64(1), fields["profile_match"]) tags, ok := rows[0]["tags"].(map[string]string) - require.True(t, ok, "unexpected tags type: %T", rows[0]["tags"]) - assert.Equal(t, "scroll-session", tags["workspace"]) + assertTruef(t, ok, "unexpected tags type: %T", rows[0]["tags"]) + assertEqual(t, "scroll-session", tags["workspace"]) } func TestWorkspace_Commit_Good_ResultCopiesAggregatedMap(t *testing.T) { useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) aggregateSource := map[string]any{"like": 1} - require.NoError(t, workspace.Put("like", aggregateSource)) + assertNoError(t, workspace.Put("like", aggregateSource)) result := workspace.Commit() - require.True(t, result.OK, "workspace commit failed: %v", result.Value) + assertTruef(t, result.OK, "workspace commit failed: %v", result.Value) aggregateSource["like"] = 99 value, ok := result.Value.(map[string]any) - require.True(t, ok, "unexpected result type: %T", result.Value) - assert.Equal(t, 1, value["like"]) + assertTruef(t, ok, "unexpected result type: %T", result.Value) + assertEqual(t, 1, value["like"]) } func TestWorkspace_Commit_Good_EmitsSummaryEvent(t *testing.T) { useWorkspaceStateDirectory(t) 
storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() events := storeInstance.Watch(workspaceSummaryGroup("scroll-session")) defer storeInstance.Unwatch(workspaceSummaryGroup("scroll-session"), events) workspace, err := storeInstance.NewWorkspace("scroll-session") - require.NoError(t, err) + assertNoError(t, err) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - require.NoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Put("profile_match", map[string]any{"user": "@charlie"})) result := workspace.Commit() - require.True(t, result.OK, "workspace commit failed: %v", result.Value) + assertTruef(t, result.OK, "workspace commit failed: %v", result.Value) select { case event := <-events: - assert.Equal(t, EventSet, event.Type) - assert.Equal(t, workspaceSummaryGroup("scroll-session"), event.Group) - assert.Equal(t, "summary", event.Key) - assert.False(t, event.Timestamp.IsZero()) + assertEqual(t, EventSet, event.Type) + assertEqual(t, workspaceSummaryGroup("scroll-session"), event.Group) + assertEqual(t, "summary", event.Key) + assertFalse(t, event.Timestamp.IsZero()) summary := make(map[string]any) summaryResult := core.JSONUnmarshalString(event.Value, &summary) - require.True(t, summaryResult.OK, "summary event unmarshal failed: %v", summaryResult.Value) - assert.Equal(t, float64(1), summary["like"]) - assert.Equal(t, float64(1), summary["profile_match"]) + assertTruef(t, summaryResult.OK, "summary event unmarshal failed: %v", summaryResult.Value) + assertEqual(t, float64(1), summary["like"]) + assertEqual(t, float64(1), summary["profile_match"]) case <-time.After(time.Second): t.Fatal("timed out waiting for workspace summary event") } } +func 
TestWorkspace_RecoverOrphans_Good_SkipsAlreadyCommittedWorkspaceFile(t *testing.T) { + stateDirectory := useWorkspaceStateDirectory(t) + + storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() + + workspace, err := storeInstance.NewWorkspace("committed-leftover") + assertNoError(t, err) + + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + fields, err := workspace.aggregateFields() + assertNoError(t, err) + assertNoError(t, storeInstance.commitWorkspaceAggregate(workspace.Name(), fields)) + assertNoError(t, workspace.closeWithoutRemovingFiles()) + assertTrue(t, testFilesystem().Exists(workspace.databasePath)) + + orphans := storeInstance.RecoverOrphans(stateDirectory) + assertLen(t, orphans, 0) + assertFalse(t, testFilesystem().Exists(workspace.databasePath)) +} + func TestWorkspace_Discard_Good_Idempotent(t *testing.T) { useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("discard-session") - require.NoError(t, err) + assertNoError(t, err) workspace.Discard() workspace.Discard() - assert.False(t, testFilesystem().Exists(workspace.databasePath)) + assertFalse(t, testFilesystem().Exists(workspace.databasePath)) } func TestWorkspace_Close_Good_PreservesFileForRecovery(t *testing.T) { stateDirectory := useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("close-session") - require.NoError(t, err) + assertNoError(t, err) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - require.NoError(t, workspace.Close()) + assertNoError(t, 
workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.Close()) - assert.True(t, testFilesystem().Exists(workspace.databasePath)) + assertTrue(t, testFilesystem().Exists(workspace.databasePath)) err = workspace.Put("like", map[string]any{"user": "@bob"}) - require.Error(t, err) + assertError(t, err) orphans := storeInstance.RecoverOrphans(stateDirectory) - require.Len(t, orphans, 1) - assert.Equal(t, "close-session", orphans[0].Name()) - assert.Equal(t, map[string]any{"like": 1}, orphans[0].Aggregate()) + assertLen(t, orphans, 1) + assertEqual(t, "close-session", orphans[0].Name()) + assertEqual(t, map[string]any{"like": 1}, orphans[0].Aggregate()) orphans[0].Discard() - assert.False(t, testFilesystem().Exists(workspace.databasePath)) + assertFalse(t, testFilesystem().Exists(workspace.databasePath)) +} + +func TestWorkspace_Close_Good_ClosesDatabaseWithoutFilesystem(t *testing.T) { + databasePath := testPath(t, "workspace-no-filesystem.duckdb") + + database, err := openWorkspaceDatabase(databasePath) + assertNoError(t, err) + + workspace := &Workspace{ + name: "partial-workspace", + db: database, + databasePath: databasePath, + } + + assertNoError(t, workspace.Close()) + + _, execErr := database.Exec("SELECT 1") + assertError(t, execErr) + assertContainsString(t, execErr.Error(), "closed") + + assertTrue(t, testFilesystem().Exists(databasePath)) + for _, path := range workspaceDatabaseFilePaths(databasePath) { + _ = testFilesystem().Delete(path) + } } func TestWorkspace_RecoverOrphans_Good(t *testing.T) { stateDirectory := useWorkspaceStateDirectory(t) storeInstance, err := New(":memory:", WithJournal("http://127.0.0.1:8086", "core", "events")) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() workspace, err := storeInstance.NewWorkspace("orphan-session") - require.NoError(t, err) - require.NoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) - 
require.NoError(t, workspace.sqliteDatabase.Close()) + assertNoError(t, err) + assertNoError(t, workspace.Put("like", map[string]any{"user": "@alice"})) + assertNoError(t, workspace.db.Close()) orphans := storeInstance.RecoverOrphans(stateDirectory) - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", orphans[0].Name()) - assert.Equal(t, map[string]any{"like": 1}, orphans[0].Aggregate()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) + assertEqual(t, map[string]any{"like": 1}, orphans[0].Aggregate()) orphans[0].Discard() - assert.False(t, testFilesystem().Exists(workspaceFilePath(stateDirectory, "orphan-session"))) + assertFalse(t, testFilesystem().Exists(workspaceFilePath(stateDirectory, "orphan-session"))) } func TestWorkspace_New_Good_LeavesOrphanedWorkspacesForRecovery(t *testing.T) { @@ -253,30 +348,30 @@ func TestWorkspace_New_Good_LeavesOrphanedWorkspacesForRecovery(t *testing.T) { orphanDatabasePath := workspaceFilePath(stateDirectory, "orphan-session") orphanDatabase, err := openWorkspaceDatabase(orphanDatabasePath) - require.NoError(t, err) + assertNoError(t, err) _, err = orphanDatabase.Exec( "INSERT INTO "+workspaceEntriesTableName+" (entry_kind, entry_data, created_at) VALUES (?, ?, ?)", "like", `{"user":"@alice"}`, time.Now().UnixMilli(), ) - require.NoError(t, err) - require.NoError(t, orphanDatabase.Close()) - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertNoError(t, err) + assertNoError(t, orphanDatabase.Close()) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) orphans := storeInstance.RecoverOrphans(stateDirectory) - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", 
orphans[0].Name()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) orphans[0].Discard() - assert.False(t, testFilesystem().Exists(orphanDatabasePath)) - assert.False(t, testFilesystem().Exists(orphanDatabasePath+"-wal")) - assert.False(t, testFilesystem().Exists(orphanDatabasePath+"-shm")) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath)) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath+"-wal")) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath+"-shm")) } func TestWorkspace_New_Good_CachesOrphansDuringConstruction(t *testing.T) { @@ -285,28 +380,28 @@ func TestWorkspace_New_Good_CachesOrphansDuringConstruction(t *testing.T) { orphanDatabasePath := workspaceFilePath(stateDirectory, "orphan-session") orphanDatabase, err := openWorkspaceDatabase(orphanDatabasePath) - require.NoError(t, err) + assertNoError(t, err) _, err = orphanDatabase.Exec( "INSERT INTO "+workspaceEntriesTableName+" (entry_kind, entry_data, created_at) VALUES (?, ?, ?)", "like", `{"user":"@alice"}`, time.Now().UnixMilli(), ) - require.NoError(t, err) - require.NoError(t, orphanDatabase.Close()) - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertNoError(t, err) + assertNoError(t, orphanDatabase.Close()) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() requireCoreOK(t, testFilesystem().DeleteAll(stateDirectory)) - assert.False(t, testFilesystem().Exists(orphanDatabasePath)) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath)) orphans := storeInstance.RecoverOrphans(stateDirectory) - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", orphans[0].Name()) - assert.Equal(t, map[string]any{"like": 1}, orphans[0].Aggregate()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) + assertEqual(t, 
map[string]any{"like": 1}, orphans[0].Aggregate()) orphans[0].Discard() } @@ -316,30 +411,30 @@ func TestWorkspace_NewConfigured_Good_CachesOrphansFromConfiguredStateDirectory( orphanDatabasePath := workspaceFilePath(stateDirectory, "orphan-session") orphanDatabase, err := openWorkspaceDatabase(orphanDatabasePath) - require.NoError(t, err) + assertNoError(t, err) _, err = orphanDatabase.Exec( "INSERT INTO "+workspaceEntriesTableName+" (entry_kind, entry_data, created_at) VALUES (?, ?, ?)", "like", `{"user":"@alice"}`, time.Now().UnixMilli(), ) - require.NoError(t, err) - require.NoError(t, orphanDatabase.Close()) + assertNoError(t, err) + assertNoError(t, orphanDatabase.Close()) storeInstance, err := NewConfigured(StoreConfig{ DatabasePath: ":memory:", WorkspaceStateDirectory: stateDirectory, }) - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() requireCoreOK(t, testFilesystem().DeleteAll(stateDirectory)) - assert.False(t, testFilesystem().Exists(orphanDatabasePath)) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath)) orphans := storeInstance.RecoverOrphans("") - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", orphans[0].Name()) - assert.Equal(t, map[string]any{"like": 1}, orphans[0].Aggregate()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) + assertEqual(t, map[string]any{"like": 1}, orphans[0].Aggregate()) orphans[0].Discard() } @@ -349,20 +444,20 @@ func TestWorkspace_RecoverOrphans_Good_TrailingSlashUsesCache(t *testing.T) { orphanDatabasePath := workspaceFilePath(stateDirectory, "orphan-session") orphanDatabase, err := openWorkspaceDatabase(orphanDatabasePath) - require.NoError(t, err) - require.NoError(t, orphanDatabase.Close()) - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertNoError(t, err) + assertNoError(t, orphanDatabase.Close()) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) 
storeInstance, err := New(":memory:") - require.NoError(t, err) - defer storeInstance.Close() + assertNoError(t, err) + defer func() { _ = storeInstance.Close() }() requireCoreOK(t, testFilesystem().DeleteAll(stateDirectory)) - assert.False(t, testFilesystem().Exists(orphanDatabasePath)) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath)) orphans := storeInstance.RecoverOrphans(stateDirectory + "/") - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", orphans[0].Name()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) orphans[0].Discard() } @@ -372,24 +467,24 @@ func TestWorkspace_Close_Good_PreservesOrphansForRecovery(t *testing.T) { orphanDatabasePath := workspaceFilePath(stateDirectory, "orphan-session") orphanDatabase, err := openWorkspaceDatabase(orphanDatabasePath) - require.NoError(t, err) - require.NoError(t, orphanDatabase.Close()) - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertNoError(t, err) + assertNoError(t, orphanDatabase.Close()) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) storeInstance, err := New(":memory:") - require.NoError(t, err) + assertNoError(t, err) - require.NoError(t, storeInstance.Close()) + assertNoError(t, storeInstance.Close()) - assert.True(t, testFilesystem().Exists(orphanDatabasePath)) + assertTrue(t, testFilesystem().Exists(orphanDatabasePath)) recoveryStore, err := New(":memory:") - require.NoError(t, err) - defer recoveryStore.Close() + assertNoError(t, err) + defer func() { _ = recoveryStore.Close() }() orphans := recoveryStore.RecoverOrphans(stateDirectory) - require.Len(t, orphans, 1) - assert.Equal(t, "orphan-session", orphans[0].Name()) + assertLen(t, orphans, 1) + assertEqual(t, "orphan-session", orphans[0].Name()) orphans[0].Discard() - assert.False(t, testFilesystem().Exists(orphanDatabasePath)) + assertFalse(t, testFilesystem().Exists(orphanDatabasePath)) }