From 6b74ae2afe3f867cabf3085042ee78c4360edfbc Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 23 Mar 2026 07:26:09 +0000 Subject: [PATCH 01/83] fix(io): address audit issue 4 findings Co-Authored-By: Virgil --- datanode/client.go | 144 ++++++++++++++++++--------- datanode/client_test.go | 88 +++++++++++++++++ go.mod | 6 +- go.sum | 12 +-- io.go | 10 +- local/client.go | 203 +++++++++++++++++++++++++++++++------- local/client_test.go | 28 ++++++ s3/s3.go | 33 ++++++- s3/s3_test.go | 53 ++++++++-- workspace/service.go | 69 ++++++++++--- workspace/service_test.go | 90 ++++++++++++----- 11 files changed, 590 insertions(+), 146 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index fcfe524..c4f09ad 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -17,14 +17,26 @@ import ( "sync" "time" + borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" coreerr "forge.lthn.ai/core/go-log" - "forge.lthn.ai/Snider/Borg/pkg/datanode" +) + +var ( + dataNodeWalkDir = func(fsys fs.FS, root string, fn fs.WalkDirFunc) error { + return fs.WalkDir(fsys, root, fn) + } + dataNodeOpen = func(dn *borgdatanode.DataNode, name string) (fs.File, error) { + return dn.Open(name) + } + dataNodeReadAll = func(r goio.Reader) ([]byte, error) { + return goio.ReadAll(r) + } ) // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dn *datanode.DataNode + dn *borgdatanode.DataNode dirs map[string]bool // explicit directory tracking mu sync.RWMutex } @@ -32,14 +44,14 @@ type Medium struct { // New creates a new empty DataNode Medium. func New() *Medium { return &Medium{ - dn: datanode.New(), + dn: borgdatanode.New(), dirs: make(map[string]bool), } } // FromTar creates a Medium from a tarball, restoring all files. 
func FromTar(data []byte) (*Medium, error) { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return nil, coreerr.E("datanode.FromTar", "failed to restore", err) } @@ -63,7 +75,7 @@ func (m *Medium) Snapshot() ([]byte, error) { // Restore replaces the filesystem contents from a tarball. func (m *Medium) Restore(data []byte) error { - dn, err := datanode.FromTar(data) + dn, err := borgdatanode.FromTar(data) if err != nil { return coreerr.E("datanode.Restore", "tar failed", err) } @@ -76,7 +88,7 @@ func (m *Medium) Restore(data []byte) error { // DataNode returns the underlying Borg DataNode. // Use this to wrap the filesystem in a TIM container. -func (m *Medium) DataNode() *datanode.DataNode { +func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() return m.dn @@ -195,7 +207,11 @@ func (m *Medium) Delete(p string) error { // Check explicit dirs if m.dirs[p] { // Check if dir is empty - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -205,7 +221,11 @@ func (m *Medium) Delete(p string) error { } if info.IsDir() { - if m.hasPrefixLocked(p + "/") { + hasChildren, err := m.hasPrefixLocked(p + "/") + if err != nil { + return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + } + if hasChildren { return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) } delete(m.dirs, p) @@ -213,7 +233,9 @@ func (m *Medium) Delete(p string) error { } // Remove the file by creating a new DataNode without it - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.Delete", "failed to delete file: "+p, err) + } return nil } @@ -232,15 +254,22 @@ func (m *Medium) DeleteAll(p string) error { 
// Check if p itself is a file info, err := m.dn.Stat(p) if err == nil && !info.IsDir() { - m.removeFileLocked(p) + if err := m.removeFileLocked(p); err != nil { + return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err) + } found = true } // Remove all files under prefix - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err) + } for _, name := range entries { if name == p || strings.HasPrefix(name, prefix) { - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err) + } found = true } } @@ -274,18 +303,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { if !info.IsDir() { // Read old, write new, delete old - f, err := m.dn.Open(oldPath) - if err != nil { - return coreerr.E("datanode.Rename", "open failed: "+oldPath, err) - } - data, err := goio.ReadAll(f) - f.Close() + data, err := m.readFileLocked(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "read failed: "+oldPath, err) + return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err) } m.dn.AddData(newPath, data) m.ensureDirsLocked(path.Dir(newPath)) - m.removeFileLocked(oldPath) + if err := m.removeFileLocked(oldPath); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err) + } return nil } @@ -293,18 +319,21 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPrefix := oldPath + "/" newPrefix := newPath + "/" - entries, _ := m.collectAllLocked() + entries, err := m.collectAllLocked() + if err != nil { + return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err) + } for _, name := range entries { if strings.HasPrefix(name, oldPrefix) { newName := newPrefix + strings.TrimPrefix(name, oldPrefix) - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil 
{ - continue + return coreerr.E("datanode.Rename", "failed to read source file: "+name, err) } - data, _ := goio.ReadAll(f) - f.Close() m.dn.AddData(newName, data) - m.removeFileLocked(name) + if err := m.removeFileLocked(name); err != nil { + return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err) + } } } @@ -416,10 +445,13 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // Read existing content var existing []byte m.mu.RLock() - f, err := m.dn.Open(p) - if err == nil { - existing, _ = goio.ReadAll(f) - f.Close() + if m.IsFile(p) { + data, err := m.readFileLocked(p) + if err != nil { + m.mu.RUnlock() + return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err) + } + existing = data } m.mu.RUnlock() @@ -475,27 +507,30 @@ func (m *Medium) IsDir(p string) bool { // --- internal helpers --- // hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. -func (m *Medium) hasPrefixLocked(prefix string) bool { - entries, _ := m.collectAllLocked() +func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { + entries, err := m.collectAllLocked() + if err != nil { + return false, err + } for _, name := range entries { if strings.HasPrefix(name, prefix) { - return true + return true, nil } } for d := range m.dirs { if strings.HasPrefix(d, prefix) { - return true + return true, nil } } - return false + return false, nil } // collectAllLocked returns all file paths in the DataNode. Caller holds lock. 
func (m *Medium) collectAllLocked() ([]string, error) { var names []string - err := fs.WalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { + err := dataNodeWalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { if err != nil { - return nil + return err } if !d.IsDir() { names = append(names, p) @@ -505,28 +540,43 @@ func (m *Medium) collectAllLocked() ([]string, error) { return names, err } +func (m *Medium) readFileLocked(name string) ([]byte, error) { + f, err := dataNodeOpen(m.dn, name) + if err != nil { + return nil, err + } + data, readErr := dataNodeReadAll(f) + closeErr := f.Close() + if readErr != nil { + return nil, readErr + } + if closeErr != nil { + return nil, closeErr + } + return data, nil +} + // removeFileLocked removes a single file by rebuilding the DataNode. // This is necessary because Borg's DataNode doesn't expose a Remove method. // Caller must hold m.mu write lock. -func (m *Medium) removeFileLocked(target string) { - entries, _ := m.collectAllLocked() - newDN := datanode.New() +func (m *Medium) removeFileLocked(target string) error { + entries, err := m.collectAllLocked() + if err != nil { + return err + } + newDN := borgdatanode.New() for _, name := range entries { if name == target { continue } - f, err := m.dn.Open(name) + data, err := m.readFileLocked(name) if err != nil { - continue - } - data, err := goio.ReadAll(f) - f.Close() - if err != nil { - continue + return err } newDN.AddData(name, data) } m.dn = newDN + return nil } // --- writeCloser buffers writes and flushes to DataNode on Close --- diff --git a/datanode/client_test.go b/datanode/client_test.go index 651d322..8beb6cd 100644 --- a/datanode/client_test.go +++ b/datanode/client_test.go @@ -1,7 +1,9 @@ package datanode import ( + "errors" "io" + "io/fs" "testing" coreio "dappco.re/go/core/io" @@ -102,6 +104,23 @@ func TestDelete_Bad(t *testing.T) { assert.Error(t, m.Delete("dir")) } +func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) { 
+ m := New() + require.NoError(t, m.Write("dir/file.txt", "content")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.Delete("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to inspect directory") +} + func TestDeleteAll_Good(t *testing.T) { m := New() @@ -116,6 +135,41 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("keep.txt")) } +func TestDeleteAll_Bad_WalkFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("tree/a.txt", "a")) + + original := dataNodeWalkDir + dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { + return errors.New("walk failed") + } + t.Cleanup(func() { + dataNodeWalkDir = original + }) + + err := m.DeleteAll("tree") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to inspect tree") +} + +func TestDelete_Bad_RemoveFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("keep.txt", "keep")) + require.NoError(t, m.Write("bad.txt", "bad")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Delete("bad.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete file") +} + func TestRename_Good(t *testing.T) { m := New() @@ -147,6 +201,23 @@ func TestRenameDir_Good(t *testing.T) { assert.Equal(t, "package b", got) } +func TestRenameDir_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("src/a.go", "package a")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + err := m.Rename("src", "dst") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read 
source file") +} + func TestList_Good(t *testing.T) { m := New() @@ -230,6 +301,23 @@ func TestCreateAppend_Good(t *testing.T) { assert.Equal(t, "hello world", got) } +func TestAppend_Bad_ReadFailure(t *testing.T) { + m := New() + require.NoError(t, m.Write("new.txt", "hello")) + + original := dataNodeReadAll + dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + return nil, errors.New("read failed") + } + t.Cleanup(func() { + dataNodeReadAll = original + }) + + _, err := m.Append("new.txt") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to read existing content") +} + func TestStreams_Good(t *testing.T) { m := New() diff --git a/go.mod b/go.mod index 6c77560..9135ed8 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,8 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.4.7 + dappco.re/go/core v0.6.0 forge.lthn.ai/Snider/Borg v0.3.1 - forge.lthn.ai/core/go-crypt v0.1.6 forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 @@ -15,8 +14,6 @@ require ( ) require ( - forge.lthn.ai/core/go v0.3.0 // indirect - github.com/ProtonMail/go-crypto v1.4.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.20 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.20 // indirect @@ -26,7 +23,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect github.com/aws/smithy-go v1.24.2 // indirect - github.com/cloudflare/circl v1.6.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/uuid v1.6.0 // indirect diff --git a/go.sum b/go.sum index d25c96d..87d11bc 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,9 @@ -dappco.re/go/core v0.4.7 
h1:KmIA/2lo6rl1NMtLrKqCWfMlUqpDZYH3q0/d10dTtGA= -dappco.re/go/core v0.4.7/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= +dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= -forge.lthn.ai/core/go v0.3.0 h1:mOG97ApMprwx9Ked62FdWVwXTGSF6JO6m0DrVpoH2Q4= -forge.lthn.ai/core/go v0.3.0/go.mod h1:gE6c8h+PJ2287qNhVUJ5SOe1kopEwHEquvinstpuyJc= -forge.lthn.ai/core/go-crypt v0.1.6 h1:jB7L/28S1NR+91u3GcOYuKfBLzPhhBUY1fRe6WkGVns= -forge.lthn.ai/core/go-crypt v0.1.6/go.mod h1:4VZAGqxlbadhSB66sJkdj54/HSJ+bSxVgwWK5kMMYDo= forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= -github.com/ProtonMail/go-crypto v1.4.0 h1:Zq/pbM3F5DFgJiMouxEdSVY44MVoQNEKp5d5QxIQceQ= -github.com/ProtonMail/go-crypto v1.4.0/go.mod h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -32,8 +26,6 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= -github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= -github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty 
v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/io.go b/io.go index c31592f..25fe801 100644 --- a/io.go +++ b/io.go @@ -4,12 +4,12 @@ import ( goio "io" "io/fs" "os" - "path/filepath" "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" "dappco.re/go/core/io/local" + coreerr "forge.lthn.ai/core/go-log" ) // Medium defines the standard interface for a storage backend. @@ -361,7 +361,7 @@ func (m *MockMedium) Open(path string) (fs.File, error) { return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist) } return &MockFile{ - name: filepath.Base(path), + name: core.PathBase(path), content: []byte(content), }, nil } @@ -556,7 +556,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { modTime = time.Now() } return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), size: int64(len(content)), mode: 0644, modTime: modTime, @@ -564,7 +564,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { } if _, ok := m.Dirs[path]; ok { return FileInfo{ - name: filepath.Base(path), + name: core.PathBase(path), isDir: true, mode: fs.ModeDir | 0755, }, nil diff --git a/local/client.go b/local/client.go index 22fd769..d4aaafc 100644 --- a/local/client.go +++ b/local/client.go @@ -6,11 +6,10 @@ import ( goio "io" "io/fs" "os" - "os/user" - "path/filepath" "strings" "time" + core "dappco.re/go/core" coreerr "forge.lthn.ai/core/go-log" ) @@ -22,20 +21,163 @@ type Medium struct { // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. 
func New(root string) (*Medium, error) { - abs, err := filepath.Abs(root) - if err != nil { - return nil, err - } + abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. // On macOS, /var is a symlink to /private/var — without this, - // EvalSymlinks on child paths resolves to /private/var/... while + // resolving child paths resolves to /private/var/... while // root stays /var/..., causing false sandbox escape detections. - if resolved, err := filepath.EvalSymlinks(abs); err == nil { + if resolved, err := resolveSymlinksPath(abs); err == nil { abs = resolved } return &Medium{root: abs}, nil } +func dirSeparator() string { + if sep := core.Env("DS"); sep != "" { + return sep + } + return string(os.PathSeparator) +} + +func normalisePath(p string) string { + sep := dirSeparator() + if sep == "/" { + return strings.ReplaceAll(p, "\\", sep) + } + return strings.ReplaceAll(p, "/", sep) +} + +func currentWorkingDir() string { + if cwd, err := os.Getwd(); err == nil && cwd != "" { + return cwd + } + if cwd := core.Env("DIR_CWD"); cwd != "" { + return cwd + } + return "." 
+} + +func absolutePath(p string) string { + p = normalisePath(p) + if core.PathIsAbs(p) { + return core.Path(p) + } + return core.Path(currentWorkingDir(), p) +} + +func cleanSandboxPath(p string) string { + return core.Path(dirSeparator() + normalisePath(p)) +} + +func splitPathParts(p string) []string { + trimmed := strings.TrimPrefix(p, dirSeparator()) + if trimmed == "" { + return nil + } + var parts []string + for _, part := range strings.Split(trimmed, dirSeparator()) { + if part == "" { + continue + } + parts = append(parts, part) + } + return parts +} + +func resolveSymlinksPath(p string) (string, error) { + return resolveSymlinksRecursive(absolutePath(p), map[string]struct{}{}) +} + +func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error) { + p = core.Path(p) + if p == dirSeparator() { + return p, nil + } + + current := dirSeparator() + for _, part := range splitPathParts(p) { + next := core.Path(current, part) + info, err := os.Lstat(next) + if err != nil { + if os.IsNotExist(err) { + current = next + continue + } + return "", err + } + if info.Mode()&os.ModeSymlink == 0 { + current = next + continue + } + + target, err := os.Readlink(next) + if err != nil { + return "", err + } + target = normalisePath(target) + if !core.PathIsAbs(target) { + target = core.Path(current, target) + } else { + target = core.Path(target) + } + if _, ok := seen[target]; ok { + return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid) + } + seen[target] = struct{}{} + resolved, err := resolveSymlinksRecursive(target, seen) + delete(seen, target) + if err != nil { + return "", err + } + current = resolved + } + + return current, nil +} + +func isWithinRoot(root, target string) bool { + root = core.Path(root) + target = core.Path(target) + if root == dirSeparator() { + return true + } + return target == root || strings.HasPrefix(target, root+dirSeparator()) +} + +func canonicalPath(p string) string { + if p == "" { + 
return "" + } + if resolved, err := resolveSymlinksPath(p); err == nil { + return resolved + } + return absolutePath(p) +} + +func isProtectedPath(full string) bool { + full = canonicalPath(full) + protected := map[string]struct{}{ + canonicalPath(dirSeparator()): {}, + } + for _, home := range []string{core.Env("HOME"), core.Env("DIR_HOME")} { + if home == "" { + continue + } + protected[canonicalPath(home)] = struct{}{} + } + _, ok := protected[full] + return ok +} + +func logSandboxEscape(root, path, attempted string) { + username := core.Env("USER") + if username == "" { + username = "unknown" + } + fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", + time.Now().Format(time.RFC3339), root, path, attempted, username) +} + // path sanitises and returns the full path. // Absolute paths are sandboxed under root (unless root is "/"). func (m *Medium) path(p string) string { @@ -46,41 +188,36 @@ func (m *Medium) path(p string) string { // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. - if m.root == "/" && !filepath.IsAbs(p) { - cwd, _ := os.Getwd() - return filepath.Join(cwd, p) + if m.root == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { + return core.Path(currentWorkingDir(), normalisePath(p)) } - // Use filepath.Clean with a leading slash to resolve all .. and . internally + // Use a cleaned absolute path to resolve all .. and . internally // before joining with the root. This is a standard way to sandbox paths. 
- clean := filepath.Clean("/" + p) + clean := cleanSandboxPath(p) // If root is "/", allow absolute paths through - if m.root == "/" { + if m.root == dirSeparator() { return clean } // Join cleaned relative path with root - return filepath.Join(m.root, clean) + return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator())) } // validatePath ensures the path is within the sandbox, following symlinks if they exist. func (m *Medium) validatePath(p string) (string, error) { - if m.root == "/" { + if m.root == dirSeparator() { return m.path(p), nil } // Split the cleaned path into components - parts := strings.Split(filepath.Clean("/"+p), string(os.PathSeparator)) + parts := splitPathParts(cleanSandboxPath(p)) current := m.root for _, part := range parts { - if part == "" { - continue - } - - next := filepath.Join(current, part) - realNext, err := filepath.EvalSymlinks(next) + next := core.Path(current, part) + realNext, err := resolveSymlinksPath(next) if err != nil { if os.IsNotExist(err) { // Part doesn't exist, we can't follow symlinks anymore. 
@@ -93,15 +230,9 @@ func (m *Medium) validatePath(p string) (string, error) { } // Verify the resolved part is still within the root - rel, err := filepath.Rel(m.root, realNext) - if err != nil || strings.HasPrefix(rel, "..") { + if !isWithinRoot(m.root, realNext) { // Security event: sandbox escape attempt - username := "unknown" - if u, err := user.Current(); err == nil { - username = u.Username - } - fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", - time.Now().Format(time.RFC3339), m.root, p, realNext, username) + logSandboxEscape(m.root, p, realNext) return "", os.ErrPermission // Path escapes sandbox } current = realNext @@ -137,7 +268,7 @@ func (m *Medium) WriteMode(p, content string, mode os.FileMode) error { if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return err } return os.WriteFile(full, []byte(content), mode) @@ -221,7 +352,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.Create(full) @@ -233,7 +364,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { if err != nil { return nil, err } - if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil { + if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { return nil, err } return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) @@ -265,7 +396,7 @@ func (m *Medium) Delete(p string) error { if err != nil { return err } - if full == "/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil) } return os.Remove(full) @@ -277,7 +408,7 @@ func (m *Medium) DeleteAll(p string) error { if err != nil { return err } - if full == 
"/" || full == os.Getenv("HOME") { + if isProtectedPath(full) { return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil) } return os.RemoveAll(full) diff --git a/local/client_test.go b/local/client_test.go index f3deb15..120ee0e 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNew(t *testing.T) { @@ -170,6 +171,33 @@ func TestDeleteAll(t *testing.T) { assert.False(t, m.Exists("dir")) } +func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) { + realHome := t.TempDir() + linkParent := t.TempDir() + homeLink := filepath.Join(linkParent, "home-link") + require.NoError(t, os.Symlink(realHome, homeLink)) + t.Setenv("HOME", homeLink) + + m, err := New("/") + require.NoError(t, err) + + err = m.Delete(realHome) + require.Error(t, err) + assert.DirExists(t, realHome) +} + +func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + m, err := New("/") + require.NoError(t, err) + + err = m.DeleteAll(tempHome) + require.Error(t, err) + assert.DirExists(t, tempHome) +} + func TestRename(t *testing.T) { root := t.TempDir() m, _ := New(root) diff --git a/s3/s3.go b/s3/s3.go index 86443fe..3ca4ab9 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -37,6 +37,29 @@ type Medium struct { prefix string } +func deleteObjectsError(prefix string, errs []types.Error) error { + if len(errs) == 0 { + return nil + } + details := make([]string, 0, len(errs)) + for _, item := range errs { + key := aws.ToString(item.Key) + code := aws.ToString(item.Code) + msg := aws.ToString(item.Message) + switch { + case code != "" && msg != "": + details = append(details, key+": "+code+" "+msg) + case code != "": + details = append(details, key+": "+code) + case msg != "": + details = append(details, key+": "+msg) + default: + details = append(details, key) + } + } + return coreerr.E("s3.DeleteAll", 
"partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil) +} + // Option configures a Medium. type Option func(*Medium) @@ -197,10 +220,13 @@ func (m *Medium) DeleteAll(p string) error { } // First, try deleting the exact key - _, _ = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) + if err != nil { + return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err) + } // Then delete all objects under the prefix prefix := key @@ -230,13 +256,16 @@ func (m *Medium) DeleteAll(p string) error { objects[i] = types.ObjectIdentifier{Key: obj.Key} } - _, err = m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + deleteOut, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ Bucket: aws.String(m.bucket), Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) if err != nil { return coreerr.E("s3.DeleteAll", "failed to delete objects", err) } + if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil { + return err + } if listOut.IsTruncated != nil && *listOut.IsTruncated { continuationToken = listOut.NextContinuationToken diff --git a/s3/s3_test.go b/s3/s3_test.go index 1f226e7..a81efff 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -3,6 +3,7 @@ package s3 import ( "bytes" "context" + "errors" "fmt" goio "io" "io/fs" @@ -21,15 +22,19 @@ import ( // mockS3 is an in-memory mock implementing the s3API interface. 
type mockS3 struct { - mu sync.RWMutex - objects map[string][]byte - mtimes map[string]time.Time + mu sync.RWMutex + objects map[string][]byte + mtimes map[string]time.Time + deleteObjectErrors map[string]error + deleteObjectsErrs map[string]types.Error } func newMockS3() *mockS3 { return &mockS3{ - objects: make(map[string][]byte), - mtimes: make(map[string]time.Time), + objects: make(map[string][]byte), + mtimes: make(map[string]time.Time), + deleteObjectErrors: make(map[string]error), + deleteObjectsErrs: make(map[string]types.Error), } } @@ -69,6 +74,9 @@ func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ defer m.mu.Unlock() key := aws.ToString(params.Key) + if err, ok := m.deleteObjectErrors[key]; ok { + return nil, err + } delete(m.objects, key) delete(m.mtimes, key) return &s3.DeleteObjectOutput{}, nil @@ -78,12 +86,17 @@ func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, m.mu.Lock() defer m.mu.Unlock() + var outErrs []types.Error for _, obj := range params.Delete.Objects { key := aws.ToString(obj.Key) + if errInfo, ok := m.deleteObjectsErrs[key]; ok { + outErrs = append(outErrs, errInfo) + continue + } delete(m.objects, key) delete(m.mtimes, key) } - return &s3.DeleteObjectsOutput{}, nil + return &s3.DeleteObjectsOutput{Errors: outErrs}, nil } func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { @@ -350,6 +363,34 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) { assert.Error(t, err) } +func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) { + m, mock := newTestMedium(t) + mock.deleteObjectErrors["dir"] = errors.New("boom") + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to delete object: dir") +} + +func TestDeleteAll_Bad_PartialDelete(t *testing.T) { + m, mock := newTestMedium(t) + + require.NoError(t, m.Write("dir/file1.txt", "a")) + require.NoError(t, 
m.Write("dir/file2.txt", "b")) + mock.deleteObjectsErrs["dir/file2.txt"] = types.Error{ + Key: aws.String("dir/file2.txt"), + Code: aws.String("AccessDenied"), + Message: aws.String("blocked"), + } + + err := m.DeleteAll("dir") + require.Error(t, err) + assert.Contains(t, err.Error(), "partial delete failed") + assert.Contains(t, err.Error(), "dir/file2.txt") + assert.True(t, m.IsFile("dir/file2.txt")) + assert.False(t, m.IsFile("dir/file1.txt")) +} + func TestRename_Good(t *testing.T) { m, _ := newTestMedium(t) diff --git a/workspace/service.go b/workspace/service.go index c1978a1..9e81764 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -4,7 +4,7 @@ import ( "crypto/sha256" "encoding/hex" "os" - "path/filepath" + "strings" "sync" core "dappco.re/go/core" @@ -39,11 +39,11 @@ type Service struct { // New creates a new Workspace service instance. // An optional cryptProvider can be passed to supply PGP key generation. func New(c *core.Core, crypt ...cryptProvider) (any, error) { - home, err := os.UserHomeDir() - if err != nil { - return nil, coreerr.E("workspace.New", "failed to determine home directory", err) + home := workspaceHome() + if home == "" { + return nil, coreerr.E("workspace.New", "failed to determine home directory", os.ErrNotExist) } - rootPath := filepath.Join(home, ".core", "workspaces") + rootPath := core.Path(home, ".core", "workspaces") s := &Service{ core: c, @@ -75,14 +75,17 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { hash := sha256.Sum256([]byte(identifier)) wsID := hex.EncodeToString(hash[:]) - wsPath := filepath.Join(s.rootPath, wsID) + wsPath, err := s.workspacePath("workspace.CreateWorkspace", wsID) + if err != nil { + return "", err + } if s.medium.Exists(wsPath) { return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range []string{"config", "log", "data", "files", "keys"} { - if err := s.medium.EnsureDir(filepath.Join(wsPath, d)); err != 
nil { + if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err) } } @@ -92,7 +95,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := s.medium.WriteMode(filepath.Join(wsPath, "keys", "private.key"), privKey, 0600); err != nil { + if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil { return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err) } @@ -104,12 +107,15 @@ func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() - wsPath := filepath.Join(s.rootPath, name) + wsPath, err := s.workspacePath("workspace.SwitchWorkspace", name) + if err != nil { + return err + } if !s.medium.IsDir(wsPath) { return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil) } - s.activeWorkspace = name + s.activeWorkspace = core.PathBase(wsPath) return nil } @@ -119,7 +125,15 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { if s.activeWorkspace == "" { return "", coreerr.E(op, "no active workspace", nil) } - return filepath.Join(s.rootPath, s.activeWorkspace, "files", filename), nil + filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") + path, err := joinWithinRoot(filesRoot, filename) + if err != nil { + return "", coreerr.E(op, "file path escapes workspace files", os.ErrPermission) + } + if path == filesRoot { + return "", coreerr.E(op, "filename is required", os.ErrInvalid) + } + return path, nil } // WorkspaceFileGet retrieves the content of a file from the active workspace. 
@@ -171,5 +185,38 @@ func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result { return core.Result{OK: true} } +func workspaceHome() string { + if home := core.Env("CORE_HOME"); home != "" { + return home + } + if home := core.Env("HOME"); home != "" { + return home + } + return core.Env("DIR_HOME") +} + +func joinWithinRoot(root string, parts ...string) (string, error) { + candidate := core.Path(append([]string{root}, parts...)...) + sep := core.Env("DS") + if candidate == root || strings.HasPrefix(candidate, root+sep) { + return candidate, nil + } + return "", os.ErrPermission +} + +func (s *Service) workspacePath(op, name string) (string, error) { + if name == "" { + return "", coreerr.E(op, "workspace name is required", os.ErrInvalid) + } + path, err := joinWithinRoot(s.rootPath, name) + if err != nil { + return "", coreerr.E(op, "workspace path escapes root", err) + } + if core.PathDir(path) != s.rootPath { + return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission) + } + return path, nil +} + // Ensure Service implements Workspace. 
var _ Workspace = (*Service)(nil) diff --git a/workspace/service_test.go b/workspace/service_test.go index 1cab667..1fc7abe 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -1,48 +1,90 @@ package workspace import ( - "path/filepath" + "os" "testing" core "dappco.re/go/core" - "forge.lthn.ai/core/go-crypt/crypt/openpgp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWorkspace(t *testing.T) { - c := core.New() - pgpSvc, err := openpgp.New(nil) - assert.NoError(t, err) +type stubCrypt struct { + key string + err error +} + +func (s stubCrypt) CreateKeyPair(_, _ string) (string, error) { + if s.err != nil { + return "", s.err + } + return s.key, nil +} + +func newTestService(t *testing.T) (*Service, string) { + t.Helper() tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(c, pgpSvc.(cryptProvider)) - assert.NoError(t, err) - s := svc.(*Service) + svc, err := New(core.New(), stubCrypt{key: "private-key"}) + require.NoError(t, err) + return svc.(*Service), tempHome +} + +func TestWorkspace(t *testing.T) { + s, tempHome := newTestService(t) - // Test CreateWorkspace id, err := s.CreateWorkspace("test-user", "pass123") - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, id) - wsPath := filepath.Join(tempHome, ".core", "workspaces", id) + wsPath := core.Path(tempHome, ".core", "workspaces", id) assert.DirExists(t, wsPath) - assert.DirExists(t, filepath.Join(wsPath, "keys")) - assert.FileExists(t, filepath.Join(wsPath, "keys", "private.key")) + assert.DirExists(t, core.Path(wsPath, "keys")) + assert.FileExists(t, core.Path(wsPath, "keys", "private.key")) - // Test SwitchWorkspace err = s.SwitchWorkspace(id) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, id, s.activeWorkspace) - // Test File operations - filename := "secret.txt" - content := "top secret info" - err = s.WorkspaceFileSet(filename, content) - assert.NoError(t, err) + err = 
s.WorkspaceFileSet("secret.txt", "top secret info") + require.NoError(t, err) + + got, err := s.WorkspaceFileGet("secret.txt") + require.NoError(t, err) + assert.Equal(t, "top secret info", got) +} + +func TestSwitchWorkspace_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + outside := core.Path(tempHome, ".core", "escaped") + require.NoError(t, os.MkdirAll(outside, 0755)) + + err := s.SwitchWorkspace("../escaped") + require.Error(t, err) + assert.Empty(t, s.activeWorkspace) +} + +func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) { + s, tempHome := newTestService(t) + + id, err := s.CreateWorkspace("test-user", "pass123") + require.NoError(t, err) + require.NoError(t, s.SwitchWorkspace(id)) + + keyPath := core.Path(tempHome, ".core", "workspaces", id, "keys", "private.key") + before, err := os.ReadFile(keyPath) + require.NoError(t, err) + + err = s.WorkspaceFileSet("../keys/private.key", "hijack") + require.Error(t, err) + + after, err := os.ReadFile(keyPath) + require.NoError(t, err) + assert.Equal(t, string(before), string(after)) - got, err := s.WorkspaceFileGet(filename) - assert.NoError(t, err) - assert.Equal(t, content, got) + _, err = s.WorkspaceFileGet("../keys/private.key") + require.Error(t, err) } From 238d6c6b9163a3dd0f4ad0bf549219d90125ac45 Mon Sep 17 00:00:00 2001 From: Virgil Date: Thu, 26 Mar 2026 10:54:10 +0000 Subject: [PATCH 02/83] chore(ax): align imports, tests, and usage comments Co-Authored-By: Virgil --- datanode/client.go | 47 +++++++------ io.go | 28 ++++---- local/client.go | 7 +- local/client_test.go | 137 +++++++++++++++++--------------------- node/node.go | 20 +++++- node/node_test.go | 16 ++--- s3/s3.go | 26 +++++--- sigil/crypto_sigil.go | 10 +++ sqlite/sqlite.go | 50 +++++++------- store/medium.go | 52 +++++++++------ store/store.go | 10 +++ workspace/service.go | 19 ++++-- workspace/service_test.go | 15 ++--- 13 files changed, 250 insertions(+), 187 deletions(-) diff --git a/datanode/client.go 
b/datanode/client.go index c4f09ad..943b7b8 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -1,6 +1,6 @@ // Package datanode provides an in-memory io.Medium backed by Borg's DataNode. // -// DataNode is an in-memory fs.FS that serializes to tar. Wrapping it as a +// DataNode is an in-memory fs.FS that serialises to tar. Wrapping it as a // Medium lets any code that works with io.Medium transparently operate on // an in-memory filesystem that can be snapshotted, shipped as a crash report, // or wrapped in a TIM container for runc execution. @@ -10,7 +10,6 @@ import ( "cmp" goio "io" "io/fs" - "os" "path" "slices" "strings" @@ -42,6 +41,11 @@ type Medium struct { } // New creates a new empty DataNode Medium. +// +// Example usage: +// +// m := datanode.New() +// _ = m.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ dn: borgdatanode.New(), @@ -50,6 +54,11 @@ func New() *Medium { } // FromTar creates a Medium from a tarball, restoring all files. +// +// Example usage: +// +// snapshot, _ := m.Snapshot() +// restored, _ := datanode.FromTar(snapshot) func FromTar(data []byte) (*Medium, error) { dn, err := borgdatanode.FromTar(data) if err != nil { @@ -61,7 +70,7 @@ func FromTar(data []byte) (*Medium, error) { }, nil } -// Snapshot serializes the entire filesystem to a tarball. +// Snapshot serialises the entire filesystem to a tarball. // Use this for crash reports, workspace packaging, or TIM creation. 
func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() @@ -113,7 +122,7 @@ func (m *Medium) Read(p string) (string, error) { p = clean(p) f, err := m.dn.Open(p) if err != nil { - return "", coreerr.E("datanode.Read", "not found: "+p, os.ErrNotExist) + return "", coreerr.E("datanode.Read", "not found: "+p, fs.ErrNotExist) } defer f.Close() @@ -122,7 +131,7 @@ func (m *Medium) Read(p string) (string, error) { return "", coreerr.E("datanode.Read", "stat failed: "+p, err) } if info.IsDir() { - return "", coreerr.E("datanode.Read", "is a directory: "+p, os.ErrInvalid) + return "", coreerr.E("datanode.Read", "is a directory: "+p, fs.ErrInvalid) } data, err := goio.ReadAll(f) @@ -138,7 +147,7 @@ func (m *Medium) Write(p, content string) error { p = clean(p) if p == "" { - return coreerr.E("datanode.Write", "empty path", os.ErrInvalid) + return coreerr.E("datanode.Write", "empty path", fs.ErrInvalid) } m.dn.AddData(p, []byte(content)) @@ -147,7 +156,7 @@ func (m *Medium) Write(p, content string) error { return nil } -func (m *Medium) WriteMode(p, content string, mode os.FileMode) error { +func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { return m.Write(p, content) } @@ -198,7 +207,7 @@ func (m *Medium) Delete(p string) error { p = clean(p) if p == "" { - return coreerr.E("datanode.Delete", "cannot delete root", os.ErrPermission) + return coreerr.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } // Check if it's a file in the DataNode @@ -212,12 +221,12 @@ func (m *Medium) Delete(p string) error { return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) } if hasChildren { - return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) + return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist) } delete(m.dirs, p) return nil } - return coreerr.E("datanode.Delete", "not found: "+p, os.ErrNotExist) + return coreerr.E("datanode.Delete", "not found: "+p, fs.ErrNotExist) } if info.IsDir() { 
@@ -226,7 +235,7 @@ func (m *Medium) Delete(p string) error { return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) } if hasChildren { - return coreerr.E("datanode.Delete", "directory not empty: "+p, os.ErrExist) + return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist) } delete(m.dirs, p) return nil @@ -245,7 +254,7 @@ func (m *Medium) DeleteAll(p string) error { p = clean(p) if p == "" { - return coreerr.E("datanode.DeleteAll", "cannot delete root", os.ErrPermission) + return coreerr.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) } prefix := p + "/" @@ -283,7 +292,7 @@ func (m *Medium) DeleteAll(p string) error { } if !found { - return coreerr.E("datanode.DeleteAll", "not found: "+p, os.ErrNotExist) + return coreerr.E("datanode.DeleteAll", "not found: "+p, fs.ErrNotExist) } return nil } @@ -298,7 +307,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Check if source is a file info, err := m.dn.Stat(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "not found: "+oldPath, os.ErrNotExist) + return coreerr.E("datanode.Rename", "not found: "+oldPath, fs.ErrNotExist) } if !info.IsDir() { @@ -365,7 +374,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { if p == "" || m.dirs[p] { return []fs.DirEntry{}, nil } - return nil, coreerr.E("datanode.List", "not found: "+p, os.ErrNotExist) + return nil, coreerr.E("datanode.List", "not found: "+p, fs.ErrNotExist) } // Also include explicit subdirectories not discovered via files @@ -417,7 +426,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { if m.dirs[p] { return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil } - return nil, coreerr.E("datanode.Stat", "not found: "+p, os.ErrNotExist) + return nil, coreerr.E("datanode.Stat", "not found: "+p, fs.ErrNotExist) } func (m *Medium) Open(p string) (fs.File, error) { @@ -431,7 +440,7 @@ func (m *Medium) Open(p string) (fs.File, error) { func (m *Medium) 
Create(p string) (goio.WriteCloser, error) { p = clean(p) if p == "" { - return nil, coreerr.E("datanode.Create", "empty path", os.ErrInvalid) + return nil, coreerr.E("datanode.Create", "empty path", fs.ErrInvalid) } return &writeCloser{m: m, path: p}, nil } @@ -439,7 +448,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { func (m *Medium) Append(p string) (goio.WriteCloser, error) { p = clean(p) if p == "" { - return nil, coreerr.E("datanode.Append", "empty path", os.ErrInvalid) + return nil, coreerr.E("datanode.Append", "empty path", fs.ErrInvalid) } // Read existing content @@ -465,7 +474,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { p = clean(p) f, err := m.dn.Open(p) if err != nil { - return nil, coreerr.E("datanode.ReadStream", "not found: "+p, os.ErrNotExist) + return nil, coreerr.E("datanode.ReadStream", "not found: "+p, fs.ErrNotExist) } return f.(goio.ReadCloser), nil } diff --git a/io.go b/io.go index 25fe801..8aef06e 100644 --- a/io.go +++ b/io.go @@ -3,7 +3,6 @@ package io import ( goio "io" "io/fs" - "os" "strings" "time" @@ -25,7 +24,7 @@ type Medium interface { // WriteMode saves content with explicit file permissions. // Use 0600 for sensitive files (keys, secrets, encrypted output). - WriteMode(path, content string, mode os.FileMode) error + WriteMode(path, content string, mode fs.FileMode) error // EnsureDir makes sure a directory exists, creating it if necessary. EnsureDir(path string) error @@ -123,6 +122,11 @@ func init() { // NewSandboxed creates a new Medium sandboxed to the given root directory. // All file operations are restricted to paths within the root. // The root directory will be created if it doesn't exist. 
+// +// Example usage: +// +// m, _ := io.NewSandboxed("/srv/app") +// _ = m.Write("config/app.yaml", "port: 8080") func NewSandboxed(root string) (Medium, error) { return local.New(root) } @@ -193,7 +197,7 @@ func NewMockMedium() *MockMedium { func (m *MockMedium) Read(path string) (string, error) { content, ok := m.Files[path] if !ok { - return "", coreerr.E("io.MockMedium.Read", "file not found: "+path, os.ErrNotExist) + return "", coreerr.E("io.MockMedium.Read", "file not found: "+path, fs.ErrNotExist) } return content, nil } @@ -205,7 +209,7 @@ func (m *MockMedium) Write(path, content string) error { return nil } -func (m *MockMedium) WriteMode(path, content string, mode os.FileMode) error { +func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { return m.Write(path, content) } @@ -245,18 +249,18 @@ func (m *MockMedium) Delete(path string) error { } for f := range m.Files { if strings.HasPrefix(f, prefix) { - return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, os.ErrExist) + return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist) } } for d := range m.Dirs { if d != path && strings.HasPrefix(d, prefix) { - return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, os.ErrExist) + return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist) } } delete(m.Dirs, path) return nil } - return coreerr.E("io.MockMedium.Delete", "path not found: "+path, os.ErrNotExist) + return coreerr.E("io.MockMedium.Delete", "path not found: "+path, fs.ErrNotExist) } // DeleteAll removes a file or directory and all contents from the mock filesystem. 
@@ -290,7 +294,7 @@ func (m *MockMedium) DeleteAll(path string) error { } if !found { - return coreerr.E("io.MockMedium.DeleteAll", "path not found: "+path, os.ErrNotExist) + return coreerr.E("io.MockMedium.DeleteAll", "path not found: "+path, fs.ErrNotExist) } return nil } @@ -351,14 +355,14 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { } return nil } - return coreerr.E("io.MockMedium.Rename", "path not found: "+oldPath, os.ErrNotExist) + return coreerr.E("io.MockMedium.Rename", "path not found: "+oldPath, fs.ErrNotExist) } // Open opens a file from the mock filesystem. func (m *MockMedium) Open(path string) (fs.File, error) { content, ok := m.Files[path] if !ok { - return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, os.ErrNotExist) + return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, fs.ErrNotExist) } return &MockFile{ name: core.PathBase(path), @@ -463,7 +467,7 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { } } if !hasChildren && path != "" { - return nil, coreerr.E("io.MockMedium.List", "directory not found: "+path, os.ErrNotExist) + return nil, coreerr.E("io.MockMedium.List", "directory not found: "+path, fs.ErrNotExist) } } @@ -569,7 +573,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { mode: fs.ModeDir | 0755, }, nil } - return nil, coreerr.E("io.MockMedium.Stat", "path not found: "+path, os.ErrNotExist) + return nil, coreerr.E("io.MockMedium.Stat", "path not found: "+path, fs.ErrNotExist) } // Exists checks if a path exists in the mock filesystem. diff --git a/local/client.go b/local/client.go index d4aaafc..da17039 100644 --- a/local/client.go +++ b/local/client.go @@ -20,6 +20,11 @@ type Medium struct { // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. 
+// +// Example usage: +// +// m, _ := local.New("/srv/app") +// _ = m.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. @@ -263,7 +268,7 @@ func (m *Medium) Write(p, content string) error { // WriteMode saves content to file with explicit permissions. // Use 0600 for sensitive files (encryption output, private keys, auth hashes). -func (m *Medium) WriteMode(p, content string, mode os.FileMode) error { +func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { full, err := m.validatePath(p) if err != nil { return err diff --git a/local/client_test.go b/local/client_test.go index 120ee0e..dfd8044 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -2,25 +2,27 @@ package local import ( "io" + "io/fs" "os" - "path/filepath" "strings" "testing" + core "dappco.re/go/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestNew(t *testing.T) { +func TestNew_Good_ResolvesRoot(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) // New() resolves symlinks (macOS /var → /private/var), so compare resolved paths. 
- resolved, _ := filepath.EvalSymlinks(root) + resolved, err := resolveSymlinksPath(root) + require.NoError(t, err) assert.Equal(t, resolved, m.root) } -func TestPath(t *testing.T) { +func TestPath_Good_Sandboxed(t *testing.T) { m := &Medium{root: "/home/user"} // Normal paths @@ -38,7 +40,7 @@ func TestPath(t *testing.T) { assert.Equal(t, "/home/user/etc/passwd", m.path("/etc/passwd")) } -func TestPath_RootFilesystem(t *testing.T) { +func TestPath_Good_RootFilesystem(t *testing.T) { m := &Medium{root: "/"} // When root is "/", absolute paths pass through @@ -46,11 +48,11 @@ func TestPath_RootFilesystem(t *testing.T) { assert.Equal(t, "/home/user/file.txt", m.path("/home/user/file.txt")) // Relative paths are relative to CWD when root is "/" - cwd, _ := os.Getwd() - assert.Equal(t, filepath.Join(cwd, "file.txt"), m.path("file.txt")) + cwd := currentWorkingDir() + assert.Equal(t, core.Path(cwd, "file.txt"), m.path("file.txt")) } -func TestReadWrite(t *testing.T) { +func TestReadWrite_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -75,24 +77,24 @@ func TestReadWrite(t *testing.T) { assert.Error(t, err) } -func TestEnsureDir(t *testing.T) { +func TestEnsureDir_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) err := m.EnsureDir("one/two/three") assert.NoError(t, err) - info, err := os.Stat(filepath.Join(root, "one/two/three")) + info, err := m.Stat("one/two/three") assert.NoError(t, err) assert.True(t, info.IsDir()) } -func TestIsDir(t *testing.T) { +func TestIsDir_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.Mkdir(filepath.Join(root, "mydir"), 0755) - _ = os.WriteFile(filepath.Join(root, "myfile"), []byte("x"), 0644) + _ = m.EnsureDir("mydir") + _ = m.Write("myfile", "x") assert.True(t, m.IsDir("mydir")) assert.False(t, m.IsDir("myfile")) @@ -100,12 +102,12 @@ func TestIsDir(t *testing.T) { assert.False(t, m.IsDir("")) } -func TestIsFile(t *testing.T) { +func TestIsFile_Good_Basic(t *testing.T) { root 
:= t.TempDir() m, _ := New(root) - _ = os.Mkdir(filepath.Join(root, "mydir"), 0755) - _ = os.WriteFile(filepath.Join(root, "myfile"), []byte("x"), 0644) + _ = m.EnsureDir("mydir") + _ = m.Write("myfile", "x") assert.True(t, m.IsFile("myfile")) assert.False(t, m.IsFile("mydir")) @@ -113,45 +115,45 @@ func TestIsFile(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestExists(t *testing.T) { +func TestExists_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.WriteFile(filepath.Join(root, "exists"), []byte("x"), 0644) + _ = m.Write("exists", "x") assert.True(t, m.Exists("exists")) assert.False(t, m.Exists("nope")) } -func TestList(t *testing.T) { +func TestList_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.WriteFile(filepath.Join(root, "a.txt"), []byte("a"), 0644) - _ = os.WriteFile(filepath.Join(root, "b.txt"), []byte("b"), 0644) - _ = os.Mkdir(filepath.Join(root, "subdir"), 0755) + _ = m.Write("a.txt", "a") + _ = m.Write("b.txt", "b") + _ = m.EnsureDir("subdir") entries, err := m.List("") assert.NoError(t, err) assert.Len(t, entries, 3) } -func TestStat(t *testing.T) { +func TestStat_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.WriteFile(filepath.Join(root, "file"), []byte("content"), 0644) + _ = m.Write("file", "content") info, err := m.Stat("file") assert.NoError(t, err) assert.Equal(t, int64(7), info.Size()) } -func TestDelete(t *testing.T) { +func TestDelete_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.WriteFile(filepath.Join(root, "todelete"), []byte("x"), 0644) + _ = m.Write("todelete", "x") assert.True(t, m.Exists("todelete")) err := m.Delete("todelete") @@ -159,22 +161,21 @@ func TestDelete(t *testing.T) { assert.False(t, m.Exists("todelete")) } -func TestDeleteAll(t *testing.T) { +func TestDeleteAll_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.MkdirAll(filepath.Join(root, "dir/sub"), 0755) - _ = 
os.WriteFile(filepath.Join(root, "dir/sub/file"), []byte("x"), 0644) + _ = m.Write("dir/sub/file", "x") err := m.DeleteAll("dir") assert.NoError(t, err) assert.False(t, m.Exists("dir")) } -func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) { +func TestDelete_Bad_ProtectedHomeViaSymlinkEnv(t *testing.T) { realHome := t.TempDir() linkParent := t.TempDir() - homeLink := filepath.Join(linkParent, "home-link") + homeLink := core.Path(linkParent, "home-link") require.NoError(t, os.Symlink(realHome, homeLink)) t.Setenv("HOME", homeLink) @@ -186,7 +187,7 @@ func TestDelete_ProtectedHomeViaSymlinkEnv(t *testing.T) { assert.DirExists(t, realHome) } -func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) { +func TestDeleteAll_Bad_ProtectedHomeViaEnv(t *testing.T) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) @@ -198,11 +199,11 @@ func TestDeleteAll_ProtectedHomeViaEnv(t *testing.T) { assert.DirExists(t, tempHome) } -func TestRename(t *testing.T) { +func TestRename_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) - _ = os.WriteFile(filepath.Join(root, "old"), []byte("x"), 0644) + _ = m.Write("old", "x") err := m.Rename("old", "new") assert.NoError(t, err) @@ -210,7 +211,7 @@ func TestRename(t *testing.T) { assert.True(t, m.Exists("new")) } -func TestFileGetFileSet(t *testing.T) { +func TestFileGetFileSet_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -223,9 +224,7 @@ func TestFileGetFileSet(t *testing.T) { } func TestDelete_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_delete_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -248,9 +247,7 @@ func TestDelete_Good(t *testing.T) { } func TestDelete_Bad_NotEmpty(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_delete_notempty_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := 
New(testRoot) assert.NoError(t, err) @@ -265,9 +262,7 @@ func TestDelete_Bad_NotEmpty(t *testing.T) { } func TestDeleteAll_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_deleteall_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -287,9 +282,7 @@ func TestDeleteAll_Good(t *testing.T) { } func TestRename_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_rename_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -307,10 +300,8 @@ func TestRename_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestRename_Traversal_Sanitised(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_rename_traversal_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() +func TestRename_Good_TraversalSanitised(t *testing.T) { + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -327,9 +318,7 @@ func TestRename_Traversal_Sanitised(t *testing.T) { } func TestList_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_list_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -357,9 +346,7 @@ func TestList_Good(t *testing.T) { } func TestStat_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_stat_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -383,9 +370,7 @@ func TestStat_Good(t *testing.T) { } func TestExists_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_exists_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, 
err) @@ -402,9 +387,7 @@ func TestExists_Good(t *testing.T) { } func TestIsDir_Good(t *testing.T) { - testRoot, err := os.MkdirTemp("", "local_isdir_test") - assert.NoError(t, err) - defer func() { _ = os.RemoveAll(testRoot) }() + testRoot := t.TempDir() medium, err := New(testRoot) assert.NoError(t, err) @@ -420,7 +403,7 @@ func TestIsDir_Good(t *testing.T) { assert.False(t, medium.IsDir("nonexistent")) } -func TestReadStream(t *testing.T) { +func TestReadStream_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -439,7 +422,7 @@ func TestReadStream(t *testing.T) { assert.Equal(t, "streaming", string(data)) } -func TestWriteStream(t *testing.T) { +func TestWriteStream_Good_Basic(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -456,7 +439,7 @@ func TestWriteStream(t *testing.T) { assert.Equal(t, "piped data", content) } -func TestPath_Traversal_Advanced(t *testing.T) { +func TestPath_Ugly_TraversalAdvanced(t *testing.T) { m := &Medium{root: "/sandbox"} // Multiple levels of traversal @@ -471,15 +454,17 @@ func TestPath_Traversal_Advanced(t *testing.T) { assert.Equal(t, "/sandbox/file\x00.txt", m.path("file\x00.txt")) } -func TestValidatePath_Security(t *testing.T) { +func TestValidatePath_Bad_SymlinkEscape(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) // Create a directory outside the sandbox outside := t.TempDir() - outsideFile := filepath.Join(outside, "secret.txt") - err = os.WriteFile(outsideFile, []byte("secret"), 0644) + outsideFile := core.Path(outside, "secret.txt") + outsideMedium, err := New("/") + require.NoError(t, err) + err = outsideMedium.Write(outsideFile, "secret") assert.NoError(t, err) // Test 1: Simple traversal @@ -488,29 +473,29 @@ func TestValidatePath_Security(t *testing.T) { // Test 2: Symlink escape // Create a symlink inside the sandbox pointing outside - linkPath := filepath.Join(root, "evil_link") + linkPath := core.Path(root, "evil_link") err = os.Symlink(outside, linkPath) 
assert.NoError(t, err) // Try to access a file through the symlink _, err = m.validatePath("evil_link/secret.txt") assert.Error(t, err) - assert.ErrorIs(t, err, os.ErrPermission) + assert.ErrorIs(t, err, fs.ErrPermission) // Test 3: Nested symlink escape - innerDir := filepath.Join(root, "inner") - err = os.Mkdir(innerDir, 0755) + err = m.EnsureDir("inner") assert.NoError(t, err) - nestedLink := filepath.Join(innerDir, "nested_evil") + innerDir := core.Path(root, "inner") + nestedLink := core.Path(innerDir, "nested_evil") err = os.Symlink(outside, nestedLink) assert.NoError(t, err) _, err = m.validatePath("inner/nested_evil/secret.txt") assert.Error(t, err) - assert.ErrorIs(t, err, os.ErrPermission) + assert.ErrorIs(t, err, fs.ErrPermission) } -func TestEmptyPaths(t *testing.T) { +func TestEmptyPaths_Ugly(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) diff --git a/node/node.go b/node/node.go index 418d590..2255f9c 100644 --- a/node/node.go +++ b/node/node.go @@ -9,12 +9,12 @@ import ( "cmp" goio "io" "io/fs" - "os" "path" "slices" "strings" "time" + core "dappco.re/go/core" coreio "dappco.re/go/core/io" ) @@ -30,6 +30,11 @@ var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) // New creates a new, empty Node. +// +// Example usage: +// +// n := node.New() +// n.AddData("config/app.yaml", []byte("port: 8080")) func New() *Node { return &Node{files: make(map[string]*dataFile)} } @@ -211,10 +216,19 @@ func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { } return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist} } - return os.WriteFile(dst, f.content, perm) + parent := core.PathDir(dst) + if parent != "." && parent != "" && parent != dst && !coreio.Local.IsDir(parent) { + return &fs.PathError{Op: "copyfile", Path: dst, Err: fs.ErrNotExist} + } + return coreio.Local.WriteMode(dst, string(f.content), perm) } // CopyTo copies a file (or directory tree) from the node to any Medium. 
+// +// Example usage: +// +// dst := io.NewMockMedium() +// _ = n.CopyTo(dst, "config", "backup/config") func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { sourcePath = strings.TrimPrefix(sourcePath, "/") info, err := n.Stat(sourcePath) @@ -362,7 +376,7 @@ func (n *Node) Write(p, content string) error { } // WriteMode saves content with explicit permissions (no-op for in-memory node). -func (n *Node) WriteMode(p, content string, mode os.FileMode) error { +func (n *Node) WriteMode(p, content string, mode fs.FileMode) error { return n.Write(p, content) } diff --git a/node/node_test.go b/node/node_test.go index 1ecbe3f..c6aa121 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -6,12 +6,12 @@ import ( "errors" "io" "io/fs" - "os" - "path/filepath" "sort" "strings" "testing" + core "dappco.re/go/core" + coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -314,7 +314,7 @@ func TestWalk_Ugly(t *testing.T) { assert.Equal(t, walkErr, err, "Walk must propagate the callback error") } -func TestWalk_Options(t *testing.T) { +func TestWalk_Good_Options(t *testing.T) { n := New() n.AddData("root.txt", []byte("root")) n.AddData("a/a1.txt", []byte("a1")) @@ -367,18 +367,18 @@ func TestCopyFile_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) - tmpfile := filepath.Join(t.TempDir(), "test.txt") + tmpfile := core.Path(t.TempDir(), "test.txt") err := n.CopyFile("foo.txt", tmpfile, 0644) require.NoError(t, err) - content, err := os.ReadFile(tmpfile) + content, err := coreio.Local.Read(tmpfile) require.NoError(t, err) - assert.Equal(t, "foo", string(content)) + assert.Equal(t, "foo", content) } func TestCopyFile_Bad(t *testing.T) { n := New() - tmpfile := filepath.Join(t.TempDir(), "test.txt") + tmpfile := core.Path(t.TempDir(), "test.txt") // Source does not exist. 
err := n.CopyFile("nonexistent.txt", tmpfile, 0644) @@ -393,7 +393,7 @@ func TestCopyFile_Bad(t *testing.T) { func TestCopyFile_Ugly(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) - tmpfile := filepath.Join(t.TempDir(), "test.txt") + tmpfile := core.Path(t.TempDir(), "test.txt") // Attempting to copy a directory should fail. err := n.CopyFile("bar", tmpfile, 0644) diff --git a/s3/s3.go b/s3/s3.go index 3ca4ab9..455b15f 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -6,7 +6,6 @@ import ( "context" goio "io" "io/fs" - "os" "path" "strings" "time" @@ -89,6 +88,11 @@ func withAPI(api s3API) Option { } // New creates a new S3 Medium for the given bucket. +// +// Example usage: +// +// awsClient := awss3.NewFromConfig(cfg) +// m, _ := s3.New("backups", s3.WithClient(awsClient), s3.WithPrefix("daily")) func New(bucket string, opts ...Option) (*Medium, error) { if bucket == "" { return nil, coreerr.E("s3.New", "bucket name is required", nil) @@ -126,7 +130,7 @@ func (m *Medium) key(p string) string { func (m *Medium) Read(p string) (string, error) { key := m.key(p) if key == "" { - return "", coreerr.E("s3.Read", "path is required", os.ErrInvalid) + return "", coreerr.E("s3.Read", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -149,7 +153,7 @@ func (m *Medium) Read(p string) (string, error) { func (m *Medium) Write(p, content string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.Write", "path is required", os.ErrInvalid) + return coreerr.E("s3.Write", "path is required", fs.ErrInvalid) } _, err := m.client.PutObject(context.Background(), &s3.PutObjectInput{ @@ -199,7 +203,7 @@ func (m *Medium) FileSet(p, content string) error { func (m *Medium) Delete(p string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.Delete", "path is required", os.ErrInvalid) + return coreerr.E("s3.Delete", "path is required", fs.ErrInvalid) } _, err := 
m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ @@ -216,7 +220,7 @@ func (m *Medium) Delete(p string) error { func (m *Medium) DeleteAll(p string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.DeleteAll", "path is required", os.ErrInvalid) + return coreerr.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } // First, try deleting the exact key @@ -282,7 +286,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldKey := m.key(oldPath) newKey := m.key(newPath) if oldKey == "" || newKey == "" { - return coreerr.E("s3.Rename", "both old and new paths are required", os.ErrInvalid) + return coreerr.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid) } copySource := m.bucket + "/" + oldKey @@ -384,7 +388,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { func (m *Medium) Stat(p string) (fs.FileInfo, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Stat", "path is required", os.ErrInvalid) + return nil, coreerr.E("s3.Stat", "path is required", fs.ErrInvalid) } out, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ @@ -417,7 +421,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { func (m *Medium) Open(p string) (fs.File, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Open", "path is required", os.ErrInvalid) + return nil, coreerr.E("s3.Open", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -456,7 +460,7 @@ func (m *Medium) Open(p string) (fs.File, error) { func (m *Medium) Create(p string) (goio.WriteCloser, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Create", "path is required", os.ErrInvalid) + return nil, coreerr.E("s3.Create", "path is required", fs.ErrInvalid) } return &s3WriteCloser{ medium: m, @@ -469,7 +473,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { func (m *Medium) Append(p string) (goio.WriteCloser, error) { 
key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Append", "path is required", os.ErrInvalid) + return nil, coreerr.E("s3.Append", "path is required", fs.ErrInvalid) } var existing []byte @@ -493,7 +497,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.ReadStream", "path is required", os.ErrInvalid) + return nil, coreerr.E("s3.ReadStream", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 8bacd44..2979b24 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -244,6 +244,11 @@ type ChaChaPolySigil struct { // NewChaChaPolySigil creates a new encryption sigil with the given key. // The key must be exactly 32 bytes. +// +// Example usage: +// +// key := []byte("0123456789abcdef0123456789abcdef") +// s, _ := sigil.NewChaChaPolySigil(key) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, ErrInvalidKey @@ -260,6 +265,11 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { } // NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. +// +// Example usage: +// +// key := []byte("0123456789abcdef0123456789abcdef") +// s, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { sigil, err := NewChaChaPolySigil(key) if err != nil { diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index fe1642d..bd36ba7 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -6,7 +6,6 @@ import ( "database/sql" goio "io" "io/fs" - "os" "path" "strings" "time" @@ -34,6 +33,11 @@ func WithTable(table string) Option { // New creates a new SQLite Medium at the given database path. 
// Use ":memory:" for an in-memory database. +// +// Example usage: +// +// m, _ := sqlite.New(":memory:", sqlite.WithTable("files")) +// _ = m.Write("config/app.yaml", "port: 8080") func New(dbPath string, opts ...Option) (*Medium, error) { if dbPath == "" { return nil, coreerr.E("sqlite.New", "database path is required", nil) @@ -94,7 +98,7 @@ func cleanPath(p string) string { func (m *Medium) Read(p string) (string, error) { key := cleanPath(p) if key == "" { - return "", coreerr.E("sqlite.Read", "path is required", os.ErrInvalid) + return "", coreerr.E("sqlite.Read", "path is required", fs.ErrInvalid) } var content []byte @@ -103,13 +107,13 @@ func (m *Medium) Read(p string) (string, error) { `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { - return "", coreerr.E("sqlite.Read", "file not found: "+key, os.ErrNotExist) + return "", coreerr.E("sqlite.Read", "file not found: "+key, fs.ErrNotExist) } if err != nil { return "", coreerr.E("sqlite.Read", "query failed: "+key, err) } if isDir { - return "", coreerr.E("sqlite.Read", "path is a directory: "+key, os.ErrInvalid) + return "", coreerr.E("sqlite.Read", "path is a directory: "+key, fs.ErrInvalid) } return string(content), nil } @@ -118,7 +122,7 @@ func (m *Medium) Read(p string) (string, error) { func (m *Medium) Write(p, content string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.Write", "path is required", os.ErrInvalid) + return coreerr.E("sqlite.Write", "path is required", fs.ErrInvalid) } _, err := m.db.Exec( @@ -182,7 +186,7 @@ func (m *Medium) FileSet(p, content string) error { func (m *Medium) Delete(p string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.Delete", "path is required", os.ErrInvalid) + return coreerr.E("sqlite.Delete", "path is required", fs.ErrInvalid) } // Check if it's a directory with children @@ -191,7 +195,7 @@ func (m *Medium) Delete(p string) error { `SELECT is_dir 
FROM `+m.table+` WHERE path = ?`, key, ).Scan(&isDir) if err == sql.ErrNoRows { - return coreerr.E("sqlite.Delete", "path not found: "+key, os.ErrNotExist) + return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist) } if err != nil { return coreerr.E("sqlite.Delete", "query failed: "+key, err) @@ -208,7 +212,7 @@ func (m *Medium) Delete(p string) error { return coreerr.E("sqlite.Delete", "count failed: "+key, err) } if count > 0 { - return coreerr.E("sqlite.Delete", "directory not empty: "+key, os.ErrExist) + return coreerr.E("sqlite.Delete", "directory not empty: "+key, fs.ErrExist) } } @@ -218,7 +222,7 @@ func (m *Medium) Delete(p string) error { } n, _ := res.RowsAffected() if n == 0 { - return coreerr.E("sqlite.Delete", "path not found: "+key, os.ErrNotExist) + return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist) } return nil } @@ -227,7 +231,7 @@ func (m *Medium) Delete(p string) error { func (m *Medium) DeleteAll(p string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.DeleteAll", "path is required", os.ErrInvalid) + return coreerr.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) } prefix := key + "/" @@ -242,7 +246,7 @@ func (m *Medium) DeleteAll(p string) error { } n, _ := res.RowsAffected() if n == 0 { - return coreerr.E("sqlite.DeleteAll", "path not found: "+key, os.ErrNotExist) + return coreerr.E("sqlite.DeleteAll", "path not found: "+key, fs.ErrNotExist) } return nil } @@ -252,7 +256,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldKey := cleanPath(oldPath) newKey := cleanPath(newPath) if oldKey == "" || newKey == "" { - return coreerr.E("sqlite.Rename", "both old and new paths are required", os.ErrInvalid) + return coreerr.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) } tx, err := m.db.Begin() @@ -270,7 +274,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, 
oldKey, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return coreerr.E("sqlite.Rename", "source not found: "+oldKey, os.ErrNotExist) + return coreerr.E("sqlite.Rename", "source not found: "+oldKey, fs.ErrNotExist) } if err != nil { return coreerr.E("sqlite.Rename", "query failed: "+oldKey, err) @@ -424,7 +428,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { func (m *Medium) Stat(p string) (fs.FileInfo, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Stat", "path is required", os.ErrInvalid) + return nil, coreerr.E("sqlite.Stat", "path is required", fs.ErrInvalid) } var content []byte @@ -435,7 +439,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.Stat", "path not found: "+key, os.ErrNotExist) + return nil, coreerr.E("sqlite.Stat", "path not found: "+key, fs.ErrNotExist) } if err != nil { return nil, coreerr.E("sqlite.Stat", "query failed: "+key, err) @@ -455,7 +459,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { func (m *Medium) Open(p string) (fs.File, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Open", "path is required", os.ErrInvalid) + return nil, coreerr.E("sqlite.Open", "path is required", fs.ErrInvalid) } var content []byte @@ -466,13 +470,13 @@ func (m *Medium) Open(p string) (fs.File, error) { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.Open", "file not found: "+key, os.ErrNotExist) + return nil, coreerr.E("sqlite.Open", "file not found: "+key, fs.ErrNotExist) } if err != nil { return nil, coreerr.E("sqlite.Open", "query failed: "+key, err) } if isDir { - return nil, coreerr.E("sqlite.Open", "path is a directory: "+key, os.ErrInvalid) + 
return nil, coreerr.E("sqlite.Open", "path is a directory: "+key, fs.ErrInvalid) } return &sqliteFile{ @@ -487,7 +491,7 @@ func (m *Medium) Open(p string) (fs.File, error) { func (m *Medium) Create(p string) (goio.WriteCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Create", "path is required", os.ErrInvalid) + return nil, coreerr.E("sqlite.Create", "path is required", fs.ErrInvalid) } return &sqliteWriteCloser{ medium: m, @@ -499,7 +503,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { func (m *Medium) Append(p string) (goio.WriteCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Append", "path is required", os.ErrInvalid) + return nil, coreerr.E("sqlite.Append", "path is required", fs.ErrInvalid) } var existing []byte @@ -521,7 +525,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.ReadStream", "path is required", os.ErrInvalid) + return nil, coreerr.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) } var content []byte @@ -530,13 +534,13 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.ReadStream", "file not found: "+key, os.ErrNotExist) + return nil, coreerr.E("sqlite.ReadStream", "file not found: "+key, fs.ErrNotExist) } if err != nil { return nil, coreerr.E("sqlite.ReadStream", "query failed: "+key, err) } if isDir { - return nil, coreerr.E("sqlite.ReadStream", "path is a directory: "+key, os.ErrInvalid) + return nil, coreerr.E("sqlite.ReadStream", "path is a directory: "+key, fs.ErrInvalid) } return goio.NopCloser(bytes.NewReader(content)), nil diff --git a/store/medium.go b/store/medium.go index 4363ca4..c1f5b45 100644 --- a/store/medium.go +++ 
b/store/medium.go @@ -3,7 +3,6 @@ package store import ( goio "io" "io/fs" - "os" "path" "strings" "time" @@ -20,6 +19,11 @@ type Medium struct { } // NewMedium creates an io.Medium backed by a KV store at the given SQLite path. +// +// Example usage: +// +// m, _ := store.NewMedium("config.db") +// _ = m.Write("app/theme", "midnight") func NewMedium(dbPath string) (*Medium, error) { s, err := New(dbPath) if err != nil { @@ -62,7 +66,7 @@ func splitPath(p string) (group, key string) { func (m *Medium) Read(p string) (string, error) { group, key := splitPath(p) if key == "" { - return "", coreerr.E("store.Read", "path must include group/key", os.ErrInvalid) + return "", coreerr.E("store.Read", "path must include group/key", fs.ErrInvalid) } return m.s.Get(group, key) } @@ -71,7 +75,7 @@ func (m *Medium) Read(p string) (string, error) { func (m *Medium) Write(p, content string) error { group, key := splitPath(p) if key == "" { - return coreerr.E("store.Write", "path must include group/key", os.ErrInvalid) + return coreerr.E("store.Write", "path must include group/key", fs.ErrInvalid) } return m.s.Set(group, key, content) } @@ -105,7 +109,7 @@ func (m *Medium) FileSet(p, content string) error { func (m *Medium) Delete(p string) error { group, key := splitPath(p) if group == "" { - return coreerr.E("store.Delete", "path is required", os.ErrInvalid) + return coreerr.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { n, err := m.s.Count(group) @@ -113,7 +117,7 @@ func (m *Medium) Delete(p string) error { return err } if n > 0 { - return coreerr.E("store.Delete", "group not empty: "+group, os.ErrExist) + return coreerr.E("store.Delete", "group not empty: "+group, fs.ErrExist) } return nil } @@ -124,7 +128,7 @@ func (m *Medium) Delete(p string) error { func (m *Medium) DeleteAll(p string) error { group, key := splitPath(p) if group == "" { - return coreerr.E("store.DeleteAll", "path is required", os.ErrInvalid) + return coreerr.E("store.DeleteAll", "path 
is required", fs.ErrInvalid) } if key == "" { return m.s.DeleteGroup(group) @@ -137,7 +141,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { og, ok := splitPath(oldPath) ng, nk := splitPath(newPath) if ok == "" || nk == "" { - return coreerr.E("store.Rename", "both paths must include group/key", os.ErrInvalid) + return coreerr.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } val, err := m.s.Get(og, ok) if err != nil { @@ -191,7 +195,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { func (m *Medium) Stat(p string) (fs.FileInfo, error) { group, key := splitPath(p) if group == "" { - return nil, coreerr.E("store.Stat", "path is required", os.ErrInvalid) + return nil, coreerr.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { n, err := m.s.Count(group) @@ -199,7 +203,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return nil, err } if n == 0 { - return nil, coreerr.E("store.Stat", "group not found: "+group, os.ErrNotExist) + return nil, coreerr.E("store.Stat", "group not found: "+group, fs.ErrNotExist) } return &kvFileInfo{name: group, isDir: true}, nil } @@ -214,7 +218,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { func (m *Medium) Open(p string) (fs.File, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Open", "path must include group/key", os.ErrInvalid) + return nil, coreerr.E("store.Open", "path must include group/key", fs.ErrInvalid) } val, err := m.s.Get(group, key) if err != nil { @@ -227,7 +231,7 @@ func (m *Medium) Open(p string) (fs.File, error) { func (m *Medium) Create(p string) (goio.WriteCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Create", "path must include group/key", os.ErrInvalid) + return nil, coreerr.E("store.Create", "path must include group/key", fs.ErrInvalid) } return &kvWriteCloser{s: m.s, group: group, key: key}, nil } @@ -236,7 +240,7 @@ func (m *Medium) Create(p string) 
(goio.WriteCloser, error) { func (m *Medium) Append(p string) (goio.WriteCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Append", "path must include group/key", os.ErrInvalid) + return nil, coreerr.E("store.Append", "path must include group/key", fs.ErrInvalid) } existing, _ := m.s.Get(group, key) return &kvWriteCloser{s: m.s, group: group, key: key, data: []byte(existing)}, nil @@ -246,7 +250,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.ReadStream", "path must include group/key", os.ErrInvalid) + return nil, coreerr.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } val, err := m.s.Get(group, key) if err != nil { @@ -292,9 +296,14 @@ type kvFileInfo struct { isDir bool } -func (fi *kvFileInfo) Name() string { return fi.name } -func (fi *kvFileInfo) Size() int64 { return fi.size } -func (fi *kvFileInfo) Mode() fs.FileMode { if fi.isDir { return fs.ModeDir | 0755 }; return 0644 } +func (fi *kvFileInfo) Name() string { return fi.name } +func (fi *kvFileInfo) Size() int64 { return fi.size } +func (fi *kvFileInfo) Mode() fs.FileMode { + if fi.isDir { + return fs.ModeDir | 0755 + } + return 0644 +} func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} } func (fi *kvFileInfo) IsDir() bool { return fi.isDir } func (fi *kvFileInfo) Sys() any { return nil } @@ -305,9 +314,14 @@ type kvDirEntry struct { size int64 } -func (de *kvDirEntry) Name() string { return de.name } -func (de *kvDirEntry) IsDir() bool { return de.isDir } -func (de *kvDirEntry) Type() fs.FileMode { if de.isDir { return fs.ModeDir }; return 0 } +func (de *kvDirEntry) Name() string { return de.name } +func (de *kvDirEntry) IsDir() bool { return de.isDir } +func (de *kvDirEntry) Type() fs.FileMode { + if de.isDir { + return fs.ModeDir + } + return 0 +} func (de *kvDirEntry) Info() 
(fs.FileInfo, error) { return &kvFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil } diff --git a/store/store.go b/store/store.go index 7c531fb..bc92a8f 100644 --- a/store/store.go +++ b/store/store.go @@ -19,6 +19,11 @@ type Store struct { } // New creates a Store at the given SQLite path. Use ":memory:" for tests. +// +// Example usage: +// +// s, _ := store.New(":memory:") +// _ = s.Set("app", "theme", "midnight") func New(dbPath string) (*Store, error) { db, err := sql.Open("sqlite", dbPath) if err != nil { @@ -122,6 +127,11 @@ func (s *Store) GetAll(group string) (map[string]string, error) { } // Render loads all key-value pairs from a group and renders a Go template. +// +// Example usage: +// +// _ = s.Set("user", "name", "alice") +// out, _ := s.Render("hello {{ .name }}", "user") func (s *Store) Render(tmplStr, group string) (string, error) { rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/service.go b/workspace/service.go index 9e81764..b17567f 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -3,7 +3,7 @@ package workspace import ( "crypto/sha256" "encoding/hex" - "os" + "io/fs" "strings" "sync" @@ -38,10 +38,15 @@ type Service struct { // New creates a new Workspace service instance. // An optional cryptProvider can be passed to supply PGP key generation. 
+// +// Example usage: +// +// svcAny, _ := workspace.New(core.New(), myCryptProvider) +// svc := svcAny.(*workspace.Service) func New(c *core.Core, crypt ...cryptProvider) (any, error) { home := workspaceHome() if home == "" { - return nil, coreerr.E("workspace.New", "failed to determine home directory", os.ErrNotExist) + return nil, coreerr.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) } rootPath := core.Path(home, ".core", "workspaces") @@ -128,10 +133,10 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") path, err := joinWithinRoot(filesRoot, filename) if err != nil { - return "", coreerr.E(op, "file path escapes workspace files", os.ErrPermission) + return "", coreerr.E(op, "file path escapes workspace files", fs.ErrPermission) } if path == filesRoot { - return "", coreerr.E(op, "filename is required", os.ErrInvalid) + return "", coreerr.E(op, "filename is required", fs.ErrInvalid) } return path, nil } @@ -201,19 +206,19 @@ func joinWithinRoot(root string, parts ...string) (string, error) { if candidate == root || strings.HasPrefix(candidate, root+sep) { return candidate, nil } - return "", os.ErrPermission + return "", fs.ErrPermission } func (s *Service) workspacePath(op, name string) (string, error) { if name == "" { - return "", coreerr.E(op, "workspace name is required", os.ErrInvalid) + return "", coreerr.E(op, "workspace name is required", fs.ErrInvalid) } path, err := joinWithinRoot(s.rootPath, name) if err != nil { return "", coreerr.E(op, "workspace path escapes root", err) } if core.PathDir(path) != s.rootPath { - return "", coreerr.E(op, "invalid workspace name: "+name, os.ErrPermission) + return "", coreerr.E(op, "invalid workspace name: "+name, fs.ErrPermission) } return path, nil } diff --git a/workspace/service_test.go b/workspace/service_test.go index 1fc7abe..de81b30 100644 --- a/workspace/service_test.go +++ 
b/workspace/service_test.go @@ -1,7 +1,6 @@ package workspace import ( - "os" "testing" core "dappco.re/go/core" @@ -32,7 +31,7 @@ func newTestService(t *testing.T) (*Service, string) { return svc.(*Service), tempHome } -func TestWorkspace(t *testing.T) { +func TestWorkspace_Good_RoundTrip(t *testing.T) { s, tempHome := newTestService(t) id, err := s.CreateWorkspace("test-user", "pass123") @@ -56,18 +55,18 @@ func TestWorkspace(t *testing.T) { assert.Equal(t, "top secret info", got) } -func TestSwitchWorkspace_TraversalBlocked(t *testing.T) { +func TestSwitchWorkspace_Bad_TraversalBlocked(t *testing.T) { s, tempHome := newTestService(t) outside := core.Path(tempHome, ".core", "escaped") - require.NoError(t, os.MkdirAll(outside, 0755)) + require.NoError(t, s.medium.EnsureDir(outside)) err := s.SwitchWorkspace("../escaped") require.Error(t, err) assert.Empty(t, s.activeWorkspace) } -func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) { +func TestWorkspaceFileSet_Bad_TraversalBlocked(t *testing.T) { s, tempHome := newTestService(t) id, err := s.CreateWorkspace("test-user", "pass123") @@ -75,15 +74,15 @@ func TestWorkspaceFileSet_TraversalBlocked(t *testing.T) { require.NoError(t, s.SwitchWorkspace(id)) keyPath := core.Path(tempHome, ".core", "workspaces", id, "keys", "private.key") - before, err := os.ReadFile(keyPath) + before, err := s.medium.Read(keyPath) require.NoError(t, err) err = s.WorkspaceFileSet("../keys/private.key", "hijack") require.Error(t, err) - after, err := os.ReadFile(keyPath) + after, err := s.medium.Read(keyPath) require.NoError(t, err) - assert.Equal(t, string(before), string(after)) + assert.Equal(t, before, after) _, err = s.WorkspaceFileGet("../keys/private.key") require.Error(t, err) From 514ecd7e7a5328a619e7a8272da65c1434a73fbf Mon Sep 17 00:00:00 2001 From: Virgil Date: Thu, 26 Mar 2026 16:23:45 +0000 Subject: [PATCH 03/83] fix(io): enforce ax v0.8.0 polish spec Co-Authored-By: Virgil --- client_test.go | 52 ++++---- 
datanode/client.go | 212 ++++++++++++++++++++++-------- datanode/client_test.go | 68 +++++----- go.mod | 3 +- go.sum | 6 +- io.go | 197 +++++++++++++++++++++------- local/client.go | 177 +++++++++++++++++-------- local/client_test.go | 71 +++++----- node/node.go | 256 ++++++++++++++++++++++++++++--------- node/node_test.go | 72 +++++------ s3/s3.go | 198 ++++++++++++++++++++-------- s3/s3_test.go | 103 ++++++++------- sigil/crypto_sigil.go | 32 +++-- sigil/crypto_sigil_test.go | 90 ++++++------- sigil/sigil.go | 10 +- sigil/sigil_test.go | 63 ++++----- sigil/sigils.go | 135 +++++++++++++++++-- sqlite/sqlite.go | 219 ++++++++++++++++++++++--------- sqlite/sqlite_test.go | 112 ++++++++-------- store/medium.go | 140 ++++++++++++++++---- store/medium_test.go | 32 ++--- store/store.go | 58 +++++---- store/store_test.go | 16 +-- workspace/service.go | 42 +++--- workspace/service_test.go | 6 +- 25 files changed, 1604 insertions(+), 766 deletions(-) diff --git a/client_test.go b/client_test.go index 69f625e..0383c5b 100644 --- a/client_test.go +++ b/client_test.go @@ -8,7 +8,7 @@ import ( // --- MockMedium Tests --- -func TestNewMockMedium_Good(t *testing.T) { +func TestClient_NewMockMedium_Good(t *testing.T) { m := NewMockMedium() assert.NotNil(t, m) assert.NotNil(t, m.Files) @@ -17,7 +17,7 @@ func TestNewMockMedium_Good(t *testing.T) { assert.Empty(t, m.Dirs) } -func TestMockMedium_Read_Good(t *testing.T) { +func TestClient_MockMedium_Read_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "hello world" content, err := m.Read("test.txt") @@ -25,13 +25,13 @@ func TestMockMedium_Read_Good(t *testing.T) { assert.Equal(t, "hello world", content) } -func TestMockMedium_Read_Bad(t *testing.T) { +func TestClient_MockMedium_Read_Bad(t *testing.T) { m := NewMockMedium() _, err := m.Read("nonexistent.txt") assert.Error(t, err) } -func TestMockMedium_Write_Good(t *testing.T) { +func TestClient_MockMedium_Write_Good(t *testing.T) { m := NewMockMedium() err := 
m.Write("test.txt", "content") assert.NoError(t, err) @@ -43,14 +43,14 @@ func TestMockMedium_Write_Good(t *testing.T) { assert.Equal(t, "new content", m.Files["test.txt"]) } -func TestMockMedium_EnsureDir_Good(t *testing.T) { +func TestClient_MockMedium_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := m.EnsureDir("/path/to/dir") assert.NoError(t, err) assert.True(t, m.Dirs["/path/to/dir"]) } -func TestMockMedium_IsFile_Good(t *testing.T) { +func TestClient_MockMedium_IsFile_Good(t *testing.T) { m := NewMockMedium() m.Files["exists.txt"] = "content" @@ -58,7 +58,7 @@ func TestMockMedium_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("nonexistent.txt")) } -func TestMockMedium_FileGet_Good(t *testing.T) { +func TestClient_MockMedium_FileGet_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "content" content, err := m.FileGet("test.txt") @@ -66,14 +66,14 @@ func TestMockMedium_FileGet_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestMockMedium_FileSet_Good(t *testing.T) { +func TestClient_MockMedium_FileSet_Good(t *testing.T) { m := NewMockMedium() err := m.FileSet("test.txt", "content") assert.NoError(t, err) assert.Equal(t, "content", m.Files["test.txt"]) } -func TestMockMedium_Delete_Good(t *testing.T) { +func TestClient_MockMedium_Delete_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "content" @@ -82,13 +82,13 @@ func TestMockMedium_Delete_Good(t *testing.T) { assert.False(t, m.IsFile("test.txt")) } -func TestMockMedium_Delete_Bad_NotFound(t *testing.T) { +func TestClient_MockMedium_Delete_NotFound_Bad(t *testing.T) { m := NewMockMedium() err := m.Delete("nonexistent.txt") assert.Error(t, err) } -func TestMockMedium_Delete_Bad_DirNotEmpty(t *testing.T) { +func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { m := NewMockMedium() m.Dirs["mydir"] = true m.Files["mydir/file.txt"] = "content" @@ -97,7 +97,7 @@ func TestMockMedium_Delete_Bad_DirNotEmpty(t *testing.T) { assert.Error(t, err) } 
-func TestMockMedium_DeleteAll_Good(t *testing.T) { +func TestClient_MockMedium_DeleteAll_Good(t *testing.T) { m := NewMockMedium() m.Dirs["mydir"] = true m.Dirs["mydir/subdir"] = true @@ -110,7 +110,7 @@ func TestMockMedium_DeleteAll_Good(t *testing.T) { assert.Empty(t, m.Files) } -func TestMockMedium_Rename_Good(t *testing.T) { +func TestClient_MockMedium_Rename_Good(t *testing.T) { m := NewMockMedium() m.Files["old.txt"] = "content" @@ -121,7 +121,7 @@ func TestMockMedium_Rename_Good(t *testing.T) { assert.Equal(t, "content", m.Files["new.txt"]) } -func TestMockMedium_Rename_Good_Dir(t *testing.T) { +func TestClient_MockMedium_Rename_Dir_Good(t *testing.T) { m := NewMockMedium() m.Dirs["olddir"] = true m.Files["olddir/file.txt"] = "content" @@ -133,7 +133,7 @@ func TestMockMedium_Rename_Good_Dir(t *testing.T) { assert.Equal(t, "content", m.Files["newdir/file.txt"]) } -func TestMockMedium_List_Good(t *testing.T) { +func TestClient_MockMedium_List_Good(t *testing.T) { m := NewMockMedium() m.Dirs["mydir"] = true m.Files["mydir/file1.txt"] = "content1" @@ -153,7 +153,7 @@ func TestMockMedium_List_Good(t *testing.T) { assert.True(t, names["subdir"]) } -func TestMockMedium_Stat_Good(t *testing.T) { +func TestClient_MockMedium_Stat_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "hello world" @@ -164,7 +164,7 @@ func TestMockMedium_Stat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestMockMedium_Stat_Good_Dir(t *testing.T) { +func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) { m := NewMockMedium() m.Dirs["mydir"] = true @@ -174,7 +174,7 @@ func TestMockMedium_Stat_Good_Dir(t *testing.T) { assert.True(t, info.IsDir()) } -func TestMockMedium_Exists_Good(t *testing.T) { +func TestClient_MockMedium_Exists_Good(t *testing.T) { m := NewMockMedium() m.Files["file.txt"] = "content" m.Dirs["mydir"] = true @@ -184,7 +184,7 @@ func TestMockMedium_Exists_Good(t *testing.T) { assert.False(t, m.Exists("nonexistent")) } -func 
TestMockMedium_IsDir_Good(t *testing.T) { +func TestClient_MockMedium_IsDir_Good(t *testing.T) { m := NewMockMedium() m.Files["file.txt"] = "content" m.Dirs["mydir"] = true @@ -196,7 +196,7 @@ func TestMockMedium_IsDir_Good(t *testing.T) { // --- Wrapper Function Tests --- -func TestRead_Good(t *testing.T) { +func TestClient_Read_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "hello" content, err := Read(m, "test.txt") @@ -204,21 +204,21 @@ func TestRead_Good(t *testing.T) { assert.Equal(t, "hello", content) } -func TestWrite_Good(t *testing.T) { +func TestClient_Write_Good(t *testing.T) { m := NewMockMedium() err := Write(m, "test.txt", "hello") assert.NoError(t, err) assert.Equal(t, "hello", m.Files["test.txt"]) } -func TestEnsureDir_Good(t *testing.T) { +func TestClient_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := EnsureDir(m, "/my/dir") assert.NoError(t, err) assert.True(t, m.Dirs["/my/dir"]) } -func TestIsFile_Good(t *testing.T) { +func TestClient_IsFile_Good(t *testing.T) { m := NewMockMedium() m.Files["exists.txt"] = "content" @@ -226,7 +226,7 @@ func TestIsFile_Good(t *testing.T) { assert.False(t, IsFile(m, "nonexistent.txt")) } -func TestCopy_Good(t *testing.T) { +func TestClient_Copy_Good(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() source.Files["test.txt"] = "hello" @@ -241,7 +241,7 @@ func TestCopy_Good(t *testing.T) { assert.Equal(t, "content", dest.Files["copied.txt"]) } -func TestCopy_Bad(t *testing.T) { +func TestClient_Copy_Bad(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() err := Copy(source, "nonexistent.txt", dest, "dest.txt") @@ -250,7 +250,7 @@ func TestCopy_Bad(t *testing.T) { // --- Local Global Tests --- -func TestLocalGlobal_Good(t *testing.T) { +func TestClient_LocalGlobal_Good(t *testing.T) { // io.Local should be initialised by init() assert.NotNil(t, Local, "io.Local should be initialised") diff --git a/datanode/client.go b/datanode/client.go index 943b7b8..0504cbb 
100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -12,12 +12,11 @@ import ( "io/fs" "path" "slices" - "strings" "sync" "time" + core "dappco.re/go/core" borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" - coreerr "forge.lthn.ai/core/go-log" ) var ( @@ -62,7 +61,7 @@ func New() *Medium { func FromTar(data []byte) (*Medium, error) { dn, err := borgdatanode.FromTar(data) if err != nil { - return nil, coreerr.E("datanode.FromTar", "failed to restore", err) + return nil, core.E("datanode.FromTar", "failed to restore", err) } return &Medium{ dn: dn, @@ -72,21 +71,25 @@ func FromTar(data []byte) (*Medium, error) { // Snapshot serialises the entire filesystem to a tarball. // Use this for crash reports, workspace packaging, or TIM creation. +// +// result := m.Snapshot(...) func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() defer m.mu.RUnlock() data, err := m.dn.ToTar() if err != nil { - return nil, coreerr.E("datanode.Snapshot", "tar failed", err) + return nil, core.E("datanode.Snapshot", "tar failed", err) } return data, nil } // Restore replaces the filesystem contents from a tarball. +// +// result := m.Restore(...) func (m *Medium) Restore(data []byte) error { dn, err := borgdatanode.FromTar(data) if err != nil { - return coreerr.E("datanode.Restore", "tar failed", err) + return core.E("datanode.Restore", "tar failed", err) } m.mu.Lock() defer m.mu.Unlock() @@ -97,6 +100,8 @@ func (m *Medium) Restore(data []byte) error { // DataNode returns the underlying Borg DataNode. // Use this to wrap the filesystem in a TIM container. +// +// result := m.DataNode(...) func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() @@ -105,7 +110,7 @@ func (m *Medium) DataNode() *borgdatanode.DataNode { // clean normalises a path: strips leading slash, cleans traversal. func clean(p string) string { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") p = path.Clean(p) if p == "." 
{ return "" @@ -115,6 +120,9 @@ func clean(p string) string { // --- io.Medium interface --- +// Read documents the Read operation. +// +// result := m.Read(...) func (m *Medium) Read(p string) (string, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -122,32 +130,35 @@ func (m *Medium) Read(p string) (string, error) { p = clean(p) f, err := m.dn.Open(p) if err != nil { - return "", coreerr.E("datanode.Read", "not found: "+p, fs.ErrNotExist) + return "", core.E("datanode.Read", core.Concat("not found: ", p), fs.ErrNotExist) } defer f.Close() info, err := f.Stat() if err != nil { - return "", coreerr.E("datanode.Read", "stat failed: "+p, err) + return "", core.E("datanode.Read", core.Concat("stat failed: ", p), err) } if info.IsDir() { - return "", coreerr.E("datanode.Read", "is a directory: "+p, fs.ErrInvalid) + return "", core.E("datanode.Read", core.Concat("is a directory: ", p), fs.ErrInvalid) } data, err := goio.ReadAll(f) if err != nil { - return "", coreerr.E("datanode.Read", "read failed: "+p, err) + return "", core.E("datanode.Read", core.Concat("read failed: ", p), err) } return string(data), nil } +// Write documents the Write operation. +// +// result := m.Write(...) func (m *Medium) Write(p, content string) error { m.mu.Lock() defer m.mu.Unlock() p = clean(p) if p == "" { - return coreerr.E("datanode.Write", "empty path", fs.ErrInvalid) + return core.E("datanode.Write", "empty path", fs.ErrInvalid) } m.dn.AddData(p, []byte(content)) @@ -156,10 +167,16 @@ func (m *Medium) Write(p, content string) error { return nil } +// WriteMode documents the WriteMode operation. +// +// result := m.WriteMode(...) func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { return m.Write(p, content) } +// EnsureDir documents the EnsureDir operation. +// +// result := m.EnsureDir(...) 
func (m *Medium) EnsureDir(p string) error { m.mu.Lock() defer m.mu.Unlock() @@ -184,6 +201,9 @@ func (m *Medium) ensureDirsLocked(p string) { } } +// IsFile documents the IsFile operation. +// +// result := m.IsFile(...) func (m *Medium) IsFile(p string) bool { m.mu.RLock() defer m.mu.RUnlock() @@ -193,21 +213,30 @@ func (m *Medium) IsFile(p string) bool { return err == nil && !info.IsDir() } +// FileGet documents the FileGet operation. +// +// result := m.FileGet(...) func (m *Medium) FileGet(p string) (string, error) { return m.Read(p) } +// FileSet documents the FileSet operation. +// +// result := m.FileSet(...) func (m *Medium) FileSet(p, content string) error { return m.Write(p, content) } +// Delete documents the Delete operation. +// +// result := m.Delete(...) func (m *Medium) Delete(p string) error { m.mu.Lock() defer m.mu.Unlock() p = clean(p) if p == "" { - return coreerr.E("datanode.Delete", "cannot delete root", fs.ErrPermission) + return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } // Check if it's a file in the DataNode @@ -218,24 +247,24 @@ func (m *Medium) Delete(p string) error { // Check if dir is empty hasChildren, err := m.hasPrefixLocked(p + "/") if err != nil { - return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", p), err) } if hasChildren { - return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist) + return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) } delete(m.dirs, p) return nil } - return coreerr.E("datanode.Delete", "not found: "+p, fs.ErrNotExist) + return core.E("datanode.Delete", core.Concat("not found: ", p), fs.ErrNotExist) } if info.IsDir() { hasChildren, err := m.hasPrefixLocked(p + "/") if err != nil { - return coreerr.E("datanode.Delete", "failed to inspect directory: "+p, err) + return core.E("datanode.Delete", core.Concat("failed to inspect 
directory: ", p), err) } if hasChildren { - return coreerr.E("datanode.Delete", "directory not empty: "+p, fs.ErrExist) + return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) } delete(m.dirs, p) return nil @@ -243,18 +272,21 @@ func (m *Medium) Delete(p string) error { // Remove the file by creating a new DataNode without it if err := m.removeFileLocked(p); err != nil { - return coreerr.E("datanode.Delete", "failed to delete file: "+p, err) + return core.E("datanode.Delete", core.Concat("failed to delete file: ", p), err) } return nil } +// DeleteAll documents the DeleteAll operation. +// +// result := m.DeleteAll(...) func (m *Medium) DeleteAll(p string) error { m.mu.Lock() defer m.mu.Unlock() p = clean(p) if p == "" { - return coreerr.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) + return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) } prefix := p + "/" @@ -264,7 +296,7 @@ func (m *Medium) DeleteAll(p string) error { info, err := m.dn.Stat(p) if err == nil && !info.IsDir() { if err := m.removeFileLocked(p); err != nil { - return coreerr.E("datanode.DeleteAll", "failed to delete file: "+p, err) + return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", p), err) } found = true } @@ -272,12 +304,12 @@ func (m *Medium) DeleteAll(p string) error { // Remove all files under prefix entries, err := m.collectAllLocked() if err != nil { - return coreerr.E("datanode.DeleteAll", "failed to inspect tree: "+p, err) + return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", p), err) } for _, name := range entries { - if name == p || strings.HasPrefix(name, prefix) { + if name == p || core.HasPrefix(name, prefix) { if err := m.removeFileLocked(name); err != nil { - return coreerr.E("datanode.DeleteAll", "failed to delete file: "+name, err) + return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", name), err) } found = true } @@ -285,18 +317,21 @@ 
func (m *Medium) DeleteAll(p string) error { // Remove explicit dirs under prefix for d := range m.dirs { - if d == p || strings.HasPrefix(d, prefix) { + if d == p || core.HasPrefix(d, prefix) { delete(m.dirs, d) found = true } } if !found { - return coreerr.E("datanode.DeleteAll", "not found: "+p, fs.ErrNotExist) + return core.E("datanode.DeleteAll", core.Concat("not found: ", p), fs.ErrNotExist) } return nil } +// Rename documents the Rename operation. +// +// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { m.mu.Lock() defer m.mu.Unlock() @@ -307,19 +342,19 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Check if source is a file info, err := m.dn.Stat(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "not found: "+oldPath, fs.ErrNotExist) + return core.E("datanode.Rename", core.Concat("not found: ", oldPath), fs.ErrNotExist) } if !info.IsDir() { // Read old, write new, delete old data, err := m.readFileLocked(oldPath) if err != nil { - return coreerr.E("datanode.Rename", "failed to read source file: "+oldPath, err) + return core.E("datanode.Rename", core.Concat("failed to read source file: ", oldPath), err) } m.dn.AddData(newPath, data) m.ensureDirsLocked(path.Dir(newPath)) if err := m.removeFileLocked(oldPath); err != nil { - return coreerr.E("datanode.Rename", "failed to remove source file: "+oldPath, err) + return core.E("datanode.Rename", core.Concat("failed to remove source file: ", oldPath), err) } return nil } @@ -330,18 +365,18 @@ func (m *Medium) Rename(oldPath, newPath string) error { entries, err := m.collectAllLocked() if err != nil { - return coreerr.E("datanode.Rename", "failed to inspect tree: "+oldPath, err) + return core.E("datanode.Rename", core.Concat("failed to inspect tree: ", oldPath), err) } for _, name := range entries { - if strings.HasPrefix(name, oldPrefix) { - newName := newPrefix + strings.TrimPrefix(name, oldPrefix) + if core.HasPrefix(name, oldPrefix) { + newName := 
core.Concat(newPrefix, core.TrimPrefix(name, oldPrefix)) data, err := m.readFileLocked(name) if err != nil { - return coreerr.E("datanode.Rename", "failed to read source file: "+name, err) + return core.E("datanode.Rename", core.Concat("failed to read source file: ", name), err) } m.dn.AddData(newName, data) if err := m.removeFileLocked(name); err != nil { - return coreerr.E("datanode.Rename", "failed to remove source file: "+name, err) + return core.E("datanode.Rename", core.Concat("failed to remove source file: ", name), err) } } } @@ -349,8 +384,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Move explicit dirs dirsToMove := make(map[string]string) for d := range m.dirs { - if d == oldPath || strings.HasPrefix(d, oldPrefix) { - newD := newPath + strings.TrimPrefix(d, oldPath) + if d == oldPath || core.HasPrefix(d, oldPrefix) { + newD := core.Concat(newPath, core.TrimPrefix(d, oldPath)) dirsToMove[d] = newD } } @@ -362,6 +397,9 @@ func (m *Medium) Rename(oldPath, newPath string) error { return nil } +// List documents the List operation. +// +// result := m.List(...) 
func (m *Medium) List(p string) ([]fs.DirEntry, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -374,7 +412,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { if p == "" || m.dirs[p] { return []fs.DirEntry{}, nil } - return nil, coreerr.E("datanode.List", "not found: "+p, fs.ErrNotExist) + return nil, core.E("datanode.List", core.Concat("not found: ", p), fs.ErrNotExist) } // Also include explicit subdirectories not discovered via files @@ -388,14 +426,14 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { } for d := range m.dirs { - if !strings.HasPrefix(d, prefix) { + if !core.HasPrefix(d, prefix) { continue } - rest := strings.TrimPrefix(d, prefix) + rest := core.TrimPrefix(d, prefix) if rest == "" { continue } - first := strings.SplitN(rest, "/", 2)[0] + first := core.SplitN(rest, "/", 2)[0] if !seen[first] { seen[first] = true entries = append(entries, &dirEntry{name: first}) @@ -409,6 +447,9 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { return entries, nil } +// Stat documents the Stat operation. +// +// result := m.Stat(...) func (m *Medium) Stat(p string) (fs.FileInfo, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -426,9 +467,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { if m.dirs[p] { return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil } - return nil, coreerr.E("datanode.Stat", "not found: "+p, fs.ErrNotExist) + return nil, core.E("datanode.Stat", core.Concat("not found: ", p), fs.ErrNotExist) } +// Open documents the Open operation. +// +// result := m.Open(...) func (m *Medium) Open(p string) (fs.File, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -437,18 +481,24 @@ func (m *Medium) Open(p string) (fs.File, error) { return m.dn.Open(p) } +// Create documents the Create operation. +// +// result := m.Create(...) 
func (m *Medium) Create(p string) (goio.WriteCloser, error) { p = clean(p) if p == "" { - return nil, coreerr.E("datanode.Create", "empty path", fs.ErrInvalid) + return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } return &writeCloser{m: m, path: p}, nil } +// Append documents the Append operation. +// +// result := m.Append(...) func (m *Medium) Append(p string) (goio.WriteCloser, error) { p = clean(p) if p == "" { - return nil, coreerr.E("datanode.Append", "empty path", fs.ErrInvalid) + return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) } // Read existing content @@ -458,7 +508,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { data, err := m.readFileLocked(p) if err != nil { m.mu.RUnlock() - return nil, coreerr.E("datanode.Append", "failed to read existing content: "+p, err) + return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", p), err) } existing = data } @@ -467,6 +517,9 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { return &writeCloser{m: m, path: p, buf: existing}, nil } +// ReadStream documents the ReadStream operation. +// +// result := m.ReadStream(...) func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -474,15 +527,21 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { p = clean(p) f, err := m.dn.Open(p) if err != nil { - return nil, coreerr.E("datanode.ReadStream", "not found: "+p, fs.ErrNotExist) + return nil, core.E("datanode.ReadStream", core.Concat("not found: ", p), fs.ErrNotExist) } return f.(goio.ReadCloser), nil } +// WriteStream documents the WriteStream operation. +// +// result := m.WriteStream(...) func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { return m.Create(p) } +// Exists documents the Exists operation. +// +// result := m.Exists(...) 
func (m *Medium) Exists(p string) bool { m.mu.RLock() defer m.mu.RUnlock() @@ -498,6 +557,9 @@ func (m *Medium) Exists(p string) bool { return m.dirs[p] } +// IsDir documents the IsDir operation. +// +// result := m.IsDir(...) func (m *Medium) IsDir(p string) bool { m.mu.RLock() defer m.mu.RUnlock() @@ -522,12 +584,12 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { return false, err } for _, name := range entries { - if strings.HasPrefix(name, prefix) { + if core.HasPrefix(name, prefix) { return true, nil } } for d := range m.dirs { - if strings.HasPrefix(d, prefix) { + if core.HasPrefix(d, prefix) { return true, nil } } @@ -596,11 +658,17 @@ type writeCloser struct { buf []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *writeCloser) Write(p []byte) (int, error) { w.buf = append(w.buf, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) func (w *writeCloser) Close() error { w.m.mu.Lock() defer w.m.mu.Unlock() @@ -616,9 +684,24 @@ type dirEntry struct { name string } -func (d *dirEntry) Name() string { return d.name } -func (d *dirEntry) IsDir() bool { return true } +// Name documents the Name operation. +// +// result := d.Name(...) +func (d *dirEntry) Name() string { return d.name } + +// IsDir documents the IsDir operation. +// +// result := d.IsDir(...) +func (d *dirEntry) IsDir() bool { return true } + +// Type documents the Type operation. +// +// result := d.Type(...) func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir } + +// Info documents the Info operation. +// +// result := d.Info(...) 
func (d *dirEntry) Info() (fs.FileInfo, error) { return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil } @@ -631,9 +714,32 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } -func (fi *fileInfo) Size() int64 { return fi.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +// Name documents the Name operation. +// +// result := fi.Name(...) +func (fi *fileInfo) Name() string { return fi.name } + +// Size documents the Size operation. +// +// result := fi.Size(...) +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode documents the Mode operation. +// +// result := fi.Mode(...) +func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } + +// ModTime documents the ModTime operation. +// +// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } -func (fi *fileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := fi.IsDir(...) +func (fi *fileInfo) IsDir() bool { return fi.isDir } + +// Sys documents the Sys operation. +// +// result := fi.Sys(...) +func (fi *fileInfo) Sys() any { return nil } diff --git a/datanode/client_test.go b/datanode/client_test.go index 8beb6cd..123e8c8 100644 --- a/datanode/client_test.go +++ b/datanode/client_test.go @@ -1,11 +1,11 @@ package datanode import ( - "errors" "io" "io/fs" "testing" + core "dappco.re/go/core" coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,7 +14,7 @@ import ( // Compile-time check: Medium implements io.Medium. 
var _ coreio.Medium = (*Medium)(nil) -func TestReadWrite_Good(t *testing.T) { +func TestClient_ReadWrite_Good(t *testing.T) { m := New() err := m.Write("hello.txt", "world") @@ -25,7 +25,7 @@ func TestReadWrite_Good(t *testing.T) { assert.Equal(t, "world", got) } -func TestReadWrite_Bad(t *testing.T) { +func TestClient_ReadWrite_Bad(t *testing.T) { m := New() _, err := m.Read("missing.txt") @@ -35,7 +35,7 @@ func TestReadWrite_Bad(t *testing.T) { assert.Error(t, err) } -func TestNestedPaths_Good(t *testing.T) { +func TestClient_NestedPaths_Good(t *testing.T) { m := New() require.NoError(t, m.Write("a/b/c/deep.txt", "deep")) @@ -49,7 +49,7 @@ func TestNestedPaths_Good(t *testing.T) { assert.True(t, m.IsDir("a/b/c")) } -func TestLeadingSlash_Good(t *testing.T) { +func TestClient_LeadingSlash_Good(t *testing.T) { m := New() require.NoError(t, m.Write("/leading/file.txt", "stripped")) @@ -62,7 +62,7 @@ func TestLeadingSlash_Good(t *testing.T) { assert.Equal(t, "stripped", got) } -func TestIsFile_Good(t *testing.T) { +func TestClient_IsFile_Good(t *testing.T) { m := New() require.NoError(t, m.Write("file.go", "package main")) @@ -72,7 +72,7 @@ func TestIsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) // empty path } -func TestEnsureDir_Good(t *testing.T) { +func TestClient_EnsureDir_Good(t *testing.T) { m := New() require.NoError(t, m.EnsureDir("foo/bar/baz")) @@ -83,7 +83,7 @@ func TestEnsureDir_Good(t *testing.T) { assert.True(t, m.Exists("foo/bar/baz")) } -func TestDelete_Good(t *testing.T) { +func TestClient_Delete_Good(t *testing.T) { m := New() require.NoError(t, m.Write("delete-me.txt", "bye")) @@ -93,7 +93,7 @@ func TestDelete_Good(t *testing.T) { assert.False(t, m.Exists("delete-me.txt")) } -func TestDelete_Bad(t *testing.T) { +func TestClient_Delete_Bad(t *testing.T) { m := New() // Delete non-existent @@ -104,13 +104,13 @@ func TestDelete_Bad(t *testing.T) { assert.Error(t, m.Delete("dir")) } -func TestDelete_Bad_DirectoryInspectionFailure(t 
*testing.T) { +func TestClient_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("dir/file.txt", "content")) original := dataNodeWalkDir dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { - return errors.New("walk failed") + return core.NewError("walk failed") } t.Cleanup(func() { dataNodeWalkDir = original @@ -121,7 +121,7 @@ func TestDelete_Bad_DirectoryInspectionFailure(t *testing.T) { assert.Contains(t, err.Error(), "failed to inspect directory") } -func TestDeleteAll_Good(t *testing.T) { +func TestClient_DeleteAll_Good(t *testing.T) { m := New() require.NoError(t, m.Write("tree/a.txt", "a")) @@ -135,13 +135,13 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("keep.txt")) } -func TestDeleteAll_Bad_WalkFailure(t *testing.T) { +func TestClient_DeleteAll_WalkFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("tree/a.txt", "a")) original := dataNodeWalkDir dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { - return errors.New("walk failed") + return core.NewError("walk failed") } t.Cleanup(func() { dataNodeWalkDir = original @@ -152,14 +152,14 @@ func TestDeleteAll_Bad_WalkFailure(t *testing.T) { assert.Contains(t, err.Error(), "failed to inspect tree") } -func TestDelete_Bad_RemoveFailure(t *testing.T) { +func TestClient_Delete_RemoveFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("keep.txt", "keep")) require.NoError(t, m.Write("bad.txt", "bad")) original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { - return nil, errors.New("read failed") + return nil, core.NewError("read failed") } t.Cleanup(func() { dataNodeReadAll = original @@ -170,7 +170,7 @@ func TestDelete_Bad_RemoveFailure(t *testing.T) { assert.Contains(t, err.Error(), "failed to delete file") } -func TestRename_Good(t *testing.T) { +func TestClient_Rename_Good(t *testing.T) { m := New() require.NoError(t, m.Write("old.txt", "content")) @@ -182,7 +182,7 
@@ func TestRename_Good(t *testing.T) { assert.Equal(t, "content", got) } -func TestRenameDir_Good(t *testing.T) { +func TestClient_RenameDir_Good(t *testing.T) { m := New() require.NoError(t, m.Write("src/a.go", "package a")) @@ -201,13 +201,13 @@ func TestRenameDir_Good(t *testing.T) { assert.Equal(t, "package b", got) } -func TestRenameDir_Bad_ReadFailure(t *testing.T) { +func TestClient_RenameDir_ReadFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("src/a.go", "package a")) original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { - return nil, errors.New("read failed") + return nil, core.NewError("read failed") } t.Cleanup(func() { dataNodeReadAll = original @@ -218,7 +218,7 @@ func TestRenameDir_Bad_ReadFailure(t *testing.T) { assert.Contains(t, err.Error(), "failed to read source file") } -func TestList_Good(t *testing.T) { +func TestClient_List_Good(t *testing.T) { m := New() require.NoError(t, m.Write("root.txt", "r")) @@ -247,7 +247,7 @@ func TestList_Good(t *testing.T) { assert.Contains(t, names, "sub") } -func TestStat_Good(t *testing.T) { +func TestClient_Stat_Good(t *testing.T) { m := New() require.NoError(t, m.Write("stat.txt", "hello")) @@ -263,7 +263,7 @@ func TestStat_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestOpen_Good(t *testing.T) { +func TestClient_Open_Good(t *testing.T) { m := New() require.NoError(t, m.Write("open.txt", "opened")) @@ -277,7 +277,7 @@ func TestOpen_Good(t *testing.T) { assert.Equal(t, "opened", string(data)) } -func TestCreateAppend_Good(t *testing.T) { +func TestClient_CreateAppend_Good(t *testing.T) { m := New() // Create @@ -301,13 +301,13 @@ func TestCreateAppend_Good(t *testing.T) { assert.Equal(t, "hello world", got) } -func TestAppend_Bad_ReadFailure(t *testing.T) { +func TestClient_Append_ReadFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("new.txt", "hello")) original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { 
- return nil, errors.New("read failed") + return nil, core.NewError("read failed") } t.Cleanup(func() { dataNodeReadAll = original @@ -318,7 +318,7 @@ func TestAppend_Bad_ReadFailure(t *testing.T) { assert.Contains(t, err.Error(), "failed to read existing content") } -func TestStreams_Good(t *testing.T) { +func TestClient_Streams_Good(t *testing.T) { m := New() // WriteStream @@ -336,7 +336,7 @@ func TestStreams_Good(t *testing.T) { rs.Close() } -func TestFileGetFileSet_Good(t *testing.T) { +func TestClient_FileGetFileSet_Good(t *testing.T) { m := New() require.NoError(t, m.FileSet("alias.txt", "via set")) @@ -346,7 +346,7 @@ func TestFileGetFileSet_Good(t *testing.T) { assert.Equal(t, "via set", got) } -func TestSnapshotRestore_Good(t *testing.T) { +func TestClient_SnapshotRestore_Good(t *testing.T) { m := New() require.NoError(t, m.Write("a.txt", "alpha")) @@ -369,7 +369,7 @@ func TestSnapshotRestore_Good(t *testing.T) { assert.Equal(t, "charlie", got) } -func TestRestore_Good(t *testing.T) { +func TestClient_Restore_Good(t *testing.T) { m := New() require.NoError(t, m.Write("original.txt", "before")) @@ -391,7 +391,7 @@ func TestRestore_Good(t *testing.T) { assert.False(t, m.Exists("extra.txt")) } -func TestDataNode_Good(t *testing.T) { +func TestClient_DataNode_Good(t *testing.T) { m := New() require.NoError(t, m.Write("test.txt", "borg")) @@ -409,7 +409,7 @@ func TestDataNode_Good(t *testing.T) { assert.Equal(t, "borg", string(data)) } -func TestOverwrite_Good(t *testing.T) { +func TestClient_Overwrite_Good(t *testing.T) { m := New() require.NoError(t, m.Write("file.txt", "v1")) @@ -420,7 +420,7 @@ func TestOverwrite_Good(t *testing.T) { assert.Equal(t, "v2", got) } -func TestExists_Good(t *testing.T) { +func TestClient_Exists_Good(t *testing.T) { m := New() assert.True(t, m.Exists("")) // root @@ -430,7 +430,7 @@ func TestExists_Good(t *testing.T) { assert.True(t, m.Exists("x")) } -func TestReadDir_Ugly(t *testing.T) { +func TestClient_ReadDir_Ugly(t 
*testing.T) { m := New() // Read from a file path (not a dir) should return empty or error diff --git a/go.mod b/go.mod index 9135ed8..b35de0d 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,8 @@ module dappco.re/go/core/io go 1.26.0 require ( - dappco.re/go/core v0.6.0 + dappco.re/go/core v0.8.0-alpha.1 forge.lthn.ai/Snider/Borg v0.3.1 - forge.lthn.ai/core/go-log v0.0.4 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 github.com/stretchr/testify v1.11.1 diff --git a/go.sum b/go.sum index 87d11bc..0164e68 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,7 @@ -dappco.re/go/core v0.6.0 h1:0wmuO/UmCWXxJkxQ6XvVLnqkAuWitbd49PhxjCsplyk= -dappco.re/go/core v0.6.0/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk= +dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= -forge.lthn.ai/core/go-log v0.0.4 h1:KTuCEPgFmuM8KJfnyQ8vPOU1Jg654W74h8IJvfQMfv0= -forge.lthn.ai/core/go-log v0.0.4/go.mod h1:r14MXKOD3LF/sI8XUJQhRk/SZHBE7jAFVuCfgkXoZPw= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= diff --git a/io.go b/io.go index 8aef06e..3b20131 100644 --- a/io.go +++ b/io.go @@ -1,14 +1,13 @@ package io import ( + "bytes" goio "io" "io/fs" - "strings" "time" core "dappco.re/go/core" "dappco.re/go/core/io/local" - coreerr "forge.lthn.ai/core/go-log" ) // Medium defines the standard interface for a storage backend. 
@@ -86,12 +85,35 @@ type FileInfo struct { isDir bool } -func (fi FileInfo) Name() string { return fi.name } -func (fi FileInfo) Size() int64 { return fi.size } -func (fi FileInfo) Mode() fs.FileMode { return fi.mode } +// Name documents the Name operation. +// +// result := fi.Name(...) +func (fi FileInfo) Name() string { return fi.name } + +// Size documents the Size operation. +// +// result := fi.Size(...) +func (fi FileInfo) Size() int64 { return fi.size } + +// Mode documents the Mode operation. +// +// result := fi.Mode(...) +func (fi FileInfo) Mode() fs.FileMode { return fi.mode } + +// ModTime documents the ModTime operation. +// +// result := fi.ModTime(...) func (fi FileInfo) ModTime() time.Time { return fi.modTime } -func (fi FileInfo) IsDir() bool { return fi.isDir } -func (fi FileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := fi.IsDir(...) +func (fi FileInfo) IsDir() bool { return fi.isDir } + +// Sys documents the Sys operation. +// +// result := fi.Sys(...) +func (fi FileInfo) Sys() any { return nil } // DirEntry provides a simple implementation of fs.DirEntry for mock testing. type DirEntry struct { @@ -101,9 +123,24 @@ type DirEntry struct { info fs.FileInfo } -func (de DirEntry) Name() string { return de.name } -func (de DirEntry) IsDir() bool { return de.isDir } -func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } +// Name documents the Name operation. +// +// result := de.Name(...) +func (de DirEntry) Name() string { return de.name } + +// IsDir documents the IsDir operation. +// +// result := de.IsDir(...) +func (de DirEntry) IsDir() bool { return de.isDir } + +// Type documents the Type operation. +// +// result := de.Type(...) +func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } + +// Info documents the Info operation. +// +// result := de.Info(...) 
func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // Local is a pre-initialised medium for the local filesystem. @@ -115,7 +152,7 @@ func init() { var err error Local, err = local.New("/") if err != nil { - coreerr.Warn("io: failed to initialise Local medium, io.Local will be nil", "error", err) + core.Warn("io: failed to initialise Local medium, io.Local will be nil", "error", err) } } @@ -134,43 +171,57 @@ func NewSandboxed(root string) (Medium, error) { // --- Helper Functions --- // Read retrieves the content of a file from the given medium. +// +// result := io.Read(...) func Read(m Medium, path string) (string, error) { return m.Read(path) } // Write saves the given content to a file in the given medium. +// +// result := io.Write(...) func Write(m Medium, path, content string) error { return m.Write(path, content) } // ReadStream returns a reader for the file content from the given medium. +// +// result := io.ReadStream(...) func ReadStream(m Medium, path string) (goio.ReadCloser, error) { return m.ReadStream(path) } // WriteStream returns a writer for the file content in the given medium. +// +// result := io.WriteStream(...) func WriteStream(m Medium, path string) (goio.WriteCloser, error) { return m.WriteStream(path) } // EnsureDir makes sure a directory exists in the given medium. +// +// result := io.EnsureDir(...) func EnsureDir(m Medium, path string) error { return m.EnsureDir(path) } // IsFile checks if a path exists and is a regular file in the given medium. +// +// result := io.IsFile(...) func IsFile(m Medium, path string) bool { return m.IsFile(path) } // Copy copies a file from one medium to another. +// +// result := io.Copy(...) 
func Copy(src Medium, srcPath string, dst Medium, dstPath string) error { content, err := src.Read(srcPath) if err != nil { - return coreerr.E("io.Copy", "read failed: "+srcPath, err) + return core.E("io.Copy", core.Concat("read failed: ", srcPath), err) } if err := dst.Write(dstPath, content); err != nil { - return coreerr.E("io.Copy", "write failed: "+dstPath, err) + return core.E("io.Copy", core.Concat("write failed: ", dstPath), err) } return nil } @@ -185,6 +236,8 @@ type MockMedium struct { } // NewMockMedium creates a new MockMedium instance. +// +// result := io.NewMockMedium(...) func NewMockMedium() *MockMedium { return &MockMedium{ Files: make(map[string]string), @@ -194,48 +247,65 @@ func NewMockMedium() *MockMedium { } // Read retrieves the content of a file from the mock filesystem. +// +// result := m.Read(...) func (m *MockMedium) Read(path string) (string, error) { content, ok := m.Files[path] if !ok { - return "", coreerr.E("io.MockMedium.Read", "file not found: "+path, fs.ErrNotExist) + return "", core.E("io.MockMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) } return content, nil } // Write saves the given content to a file in the mock filesystem. +// +// result := m.Write(...) func (m *MockMedium) Write(path, content string) error { m.Files[path] = content m.ModTimes[path] = time.Now() return nil } +// WriteMode documents the WriteMode operation. +// +// result := m.WriteMode(...) func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { return m.Write(path, content) } // EnsureDir records that a directory exists in the mock filesystem. +// +// result := m.EnsureDir(...) func (m *MockMedium) EnsureDir(path string) error { m.Dirs[path] = true return nil } // IsFile checks if a path exists as a file in the mock filesystem. +// +// result := m.IsFile(...) 
func (m *MockMedium) IsFile(path string) bool { _, ok := m.Files[path] return ok } // FileGet is a convenience function that reads a file from the mock filesystem. +// +// result := m.FileGet(...) func (m *MockMedium) FileGet(path string) (string, error) { return m.Read(path) } // FileSet is a convenience function that writes a file to the mock filesystem. +// +// result := m.FileSet(...) func (m *MockMedium) FileSet(path, content string) error { return m.Write(path, content) } // Delete removes a file or empty directory from the mock filesystem. +// +// result := m.Delete(...) func (m *MockMedium) Delete(path string) error { if _, ok := m.Files[path]; ok { delete(m.Files, path) @@ -244,26 +314,28 @@ func (m *MockMedium) Delete(path string) error { if _, ok := m.Dirs[path]; ok { // Check if directory is empty (no files or subdirs with this prefix) prefix := path - if !strings.HasSuffix(prefix, "/") { + if !core.HasSuffix(prefix, "/") { prefix += "/" } for f := range m.Files { - if strings.HasPrefix(f, prefix) { - return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist) + if core.HasPrefix(f, prefix) { + return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } for d := range m.Dirs { - if d != path && strings.HasPrefix(d, prefix) { - return coreerr.E("io.MockMedium.Delete", "directory not empty: "+path, fs.ErrExist) + if d != path && core.HasPrefix(d, prefix) { + return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } delete(m.Dirs, path) return nil } - return coreerr.E("io.MockMedium.Delete", "path not found: "+path, fs.ErrNotExist) + return core.E("io.MockMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) } // DeleteAll removes a file or directory and all contents from the mock filesystem. +// +// result := m.DeleteAll(...) 
func (m *MockMedium) DeleteAll(path string) error { found := false if _, ok := m.Files[path]; ok { @@ -277,29 +349,31 @@ func (m *MockMedium) DeleteAll(path string) error { // Delete all entries under this path prefix := path - if !strings.HasSuffix(prefix, "/") { + if !core.HasSuffix(prefix, "/") { prefix += "/" } for f := range m.Files { - if strings.HasPrefix(f, prefix) { + if core.HasPrefix(f, prefix) { delete(m.Files, f) found = true } } for d := range m.Dirs { - if strings.HasPrefix(d, prefix) { + if core.HasPrefix(d, prefix) { delete(m.Dirs, d) found = true } } if !found { - return coreerr.E("io.MockMedium.DeleteAll", "path not found: "+path, fs.ErrNotExist) + return core.E("io.MockMedium.DeleteAll", core.Concat("path not found: ", path), fs.ErrNotExist) } return nil } // Rename moves a file or directory in the mock filesystem. +// +// result := m.Rename(...) func (m *MockMedium) Rename(oldPath, newPath string) error { if content, ok := m.Files[oldPath]; ok { m.Files[newPath] = content @@ -316,19 +390,19 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { delete(m.Dirs, oldPath) oldPrefix := oldPath - if !strings.HasSuffix(oldPrefix, "/") { + if !core.HasSuffix(oldPrefix, "/") { oldPrefix += "/" } newPrefix := newPath - if !strings.HasSuffix(newPrefix, "/") { + if !core.HasSuffix(newPrefix, "/") { newPrefix += "/" } // Collect files to move first (don't mutate during iteration) filesToMove := make(map[string]string) for f := range m.Files { - if strings.HasPrefix(f, oldPrefix) { - newF := newPrefix + strings.TrimPrefix(f, oldPrefix) + if core.HasPrefix(f, oldPrefix) { + newF := core.Concat(newPrefix, core.TrimPrefix(f, oldPrefix)) filesToMove[f] = newF } } @@ -344,8 +418,8 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { // Collect directories to move first dirsToMove := make(map[string]string) for d := range m.Dirs { - if strings.HasPrefix(d, oldPrefix) { - newD := newPrefix + strings.TrimPrefix(d, oldPrefix) + if 
core.HasPrefix(d, oldPrefix) { + newD := core.Concat(newPrefix, core.TrimPrefix(d, oldPrefix)) dirsToMove[d] = newD } } @@ -355,14 +429,16 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { } return nil } - return coreerr.E("io.MockMedium.Rename", "path not found: "+oldPath, fs.ErrNotExist) + return core.E("io.MockMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } // Open opens a file from the mock filesystem. +// +// result := m.Open(...) func (m *MockMedium) Open(path string) (fs.File, error) { content, ok := m.Files[path] if !ok { - return nil, coreerr.E("io.MockMedium.Open", "file not found: "+path, fs.ErrNotExist) + return nil, core.E("io.MockMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } return &MockFile{ name: core.PathBase(path), @@ -371,6 +447,8 @@ func (m *MockMedium) Open(path string) (fs.File, error) { } // Create creates a file in the mock filesystem. +// +// result := m.Create(...) func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { return &MockWriteCloser{ medium: m, @@ -379,6 +457,8 @@ func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { } // Append opens a file for appending in the mock filesystem. +// +// result := m.Append(...) func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { content := m.Files[path] return &MockWriteCloser{ @@ -389,11 +469,15 @@ func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content in the mock filesystem. +// +// result := m.ReadStream(...) func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } // WriteStream returns a writer for the file content in the mock filesystem. +// +// result := m.WriteStream(...) func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } @@ -405,6 +489,9 @@ type MockFile struct { offset int64 } +// Stat documents the Stat operation. 
+// +// result := f.Stat(...) func (f *MockFile) Stat() (fs.FileInfo, error) { return FileInfo{ name: f.name, @@ -412,6 +499,9 @@ func (f *MockFile) Stat() (fs.FileInfo, error) { }, nil } +// Read documents the Read operation. +// +// result := f.Read(...) func (f *MockFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -421,6 +511,9 @@ func (f *MockFile) Read(b []byte) (int, error) { return n, nil } +// Close documents the Close operation. +// +// result := f.Close(...) func (f *MockFile) Close() error { return nil } @@ -432,11 +525,17 @@ type MockWriteCloser struct { data []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *MockWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) func (w *MockWriteCloser) Close() error { w.medium.Files[w.path] = string(w.data) w.medium.ModTimes[w.path] = time.Now() @@ -444,35 +543,37 @@ func (w *MockWriteCloser) Close() error { } // List returns directory entries for the mock filesystem. +// +// result := m.List(...) 
func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := m.Dirs[path]; !ok { // Check if it's the root or has children hasChildren := false prefix := path - if path != "" && !strings.HasSuffix(prefix, "/") { + if path != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } for f := range m.Files { - if strings.HasPrefix(f, prefix) { + if core.HasPrefix(f, prefix) { hasChildren = true break } } if !hasChildren { for d := range m.Dirs { - if strings.HasPrefix(d, prefix) { + if core.HasPrefix(d, prefix) { hasChildren = true break } } } if !hasChildren && path != "" { - return nil, coreerr.E("io.MockMedium.List", "directory not found: "+path, fs.ErrNotExist) + return nil, core.E("io.MockMedium.List", core.Concat("directory not found: ", path), fs.ErrNotExist) } } prefix := path - if path != "" && !strings.HasSuffix(prefix, "/") { + if path != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -481,13 +582,13 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { // Find immediate children (files) for f, content := range m.Files { - if !strings.HasPrefix(f, prefix) { + if !core.HasPrefix(f, prefix) { continue } - rest := strings.TrimPrefix(f, prefix) - if rest == "" || strings.Contains(rest, "/") { + rest := core.TrimPrefix(f, prefix) + if rest == "" || core.Contains(rest, "/") { // Skip if it's not an immediate child - if idx := strings.Index(rest, "/"); idx != -1 { + if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 { // This is a subdirectory dirName := rest[:idx] if !seen[dirName] { @@ -523,15 +624,15 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { // Find immediate subdirectories for d := range m.Dirs { - if !strings.HasPrefix(d, prefix) { + if !core.HasPrefix(d, prefix) { continue } - rest := strings.TrimPrefix(d, prefix) + rest := core.TrimPrefix(d, prefix) if rest == "" { continue } // Get only immediate child - if idx := strings.Index(rest, "/"); idx != -1 { + if idx := bytes.IndexByte([]byte(rest), 
'/'); idx != -1 { rest = rest[:idx] } if !seen[rest] { @@ -553,6 +654,8 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { } // Stat returns file information for the mock filesystem. +// +// result := m.Stat(...) func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { if content, ok := m.Files[path]; ok { modTime, ok := m.ModTimes[path] @@ -573,10 +676,12 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { mode: fs.ModeDir | 0755, }, nil } - return nil, coreerr.E("io.MockMedium.Stat", "path not found: "+path, fs.ErrNotExist) + return nil, core.E("io.MockMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } // Exists checks if a path exists in the mock filesystem. +// +// result := m.Exists(...) func (m *MockMedium) Exists(path string) bool { if _, ok := m.Files[path]; ok { return true @@ -588,6 +693,8 @@ func (m *MockMedium) Exists(path string) bool { } // IsDir checks if a path is a directory in the mock filesystem. +// +// result := m.IsDir(...) func (m *MockMedium) IsDir(path string) bool { _, ok := m.Dirs[path] return ok diff --git a/local/client.go b/local/client.go index da17039..5481079 100644 --- a/local/client.go +++ b/local/client.go @@ -2,15 +2,11 @@ package local import ( - "fmt" goio "io" "io/fs" - "os" - "strings" - "time" + "syscall" core "dappco.re/go/core" - coreerr "forge.lthn.ai/core/go-log" ) // Medium is a local filesystem storage backend. @@ -18,6 +14,8 @@ type Medium struct { root string } +var rawFS = (&core.Fs{}).NewUnrestricted() + // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. 
// @@ -41,21 +39,18 @@ func dirSeparator() string { if sep := core.Env("DS"); sep != "" { return sep } - return string(os.PathSeparator) + return "/" } func normalisePath(p string) string { sep := dirSeparator() if sep == "/" { - return strings.ReplaceAll(p, "\\", sep) + return core.Replace(p, "\\", sep) } - return strings.ReplaceAll(p, "/", sep) + return core.Replace(p, "/", sep) } func currentWorkingDir() string { - if cwd, err := os.Getwd(); err == nil && cwd != "" { - return cwd - } if cwd := core.Env("DIR_CWD"); cwd != "" { return cwd } @@ -75,12 +70,12 @@ func cleanSandboxPath(p string) string { } func splitPathParts(p string) []string { - trimmed := strings.TrimPrefix(p, dirSeparator()) + trimmed := core.TrimPrefix(p, dirSeparator()) if trimmed == "" { return nil } var parts []string - for _, part := range strings.Split(trimmed, dirSeparator()) { + for _, part := range core.Split(trimmed, dirSeparator()) { if part == "" { continue } @@ -102,20 +97,20 @@ func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error current := dirSeparator() for _, part := range splitPathParts(p) { next := core.Path(current, part) - info, err := os.Lstat(next) + info, err := lstat(next) if err != nil { - if os.IsNotExist(err) { + if core.Is(err, syscall.ENOENT) { current = next continue } return "", err } - if info.Mode()&os.ModeSymlink == 0 { + if !isSymlink(info.Mode) { current = next continue } - target, err := os.Readlink(next) + target, err := readlink(next) if err != nil { return "", err } @@ -126,7 +121,7 @@ func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error target = core.Path(target) } if _, ok := seen[target]; ok { - return "", coreerr.E("local.resolveSymlinksPath", "symlink cycle: "+target, os.ErrInvalid) + return "", core.E("local.resolveSymlinksPath", core.Concat("symlink cycle: ", target), fs.ErrInvalid) } seen[target] = struct{}{} resolved, err := resolveSymlinksRecursive(target, seen) @@ -146,7 +141,7 @@ func 
isWithinRoot(root, target string) bool { if root == dirSeparator() { return true } - return target == root || strings.HasPrefix(target, root+dirSeparator()) + return target == root || core.HasPrefix(target, root+dirSeparator()) } func canonicalPath(p string) string { @@ -179,8 +174,7 @@ func logSandboxEscape(root, path, attempted string) { if username == "" { username = "unknown" } - fmt.Fprintf(os.Stderr, "[%s] SECURITY sandbox escape detected root=%s path=%s attempted=%s user=%s\n", - time.Now().Format(time.RFC3339), root, path, attempted, username) + core.Security("sandbox escape detected", "root", root, "path", path, "attempted", attempted, "user", username) } // path sanitises and returns the full path. @@ -207,7 +201,7 @@ func (m *Medium) path(p string) string { } // Join cleaned relative path with root - return core.Path(m.root, strings.TrimPrefix(clean, dirSeparator())) + return core.Path(m.root, core.TrimPrefix(clean, dirSeparator())) } // validatePath ensures the path is within the sandbox, following symlinks if they exist. @@ -224,7 +218,7 @@ func (m *Medium) validatePath(p string) (string, error) { next := core.Path(current, part) realNext, err := resolveSymlinksPath(next) if err != nil { - if os.IsNotExist(err) { + if core.Is(err, syscall.ENOENT) { // Part doesn't exist, we can't follow symlinks anymore. // Since the path is already Cleaned and current is safe, // appending a component to current will not escape. @@ -238,7 +232,7 @@ func (m *Medium) validatePath(p string) (string, error) { if !isWithinRoot(m.root, realNext) { // Security event: sandbox escape attempt logSandboxEscape(m.root, p, realNext) - return "", os.ErrPermission // Path escapes sandbox + return "", fs.ErrPermission } current = realNext } @@ -247,48 +241,51 @@ func (m *Medium) validatePath(p string) (string, error) { } // Read returns file contents as string. +// +// result := m.Read(...) 
func (m *Medium) Read(p string) (string, error) { full, err := m.validatePath(p) if err != nil { return "", err } - data, err := os.ReadFile(full) - if err != nil { - return "", err - } - return string(data), nil + return resultValue[string]("local.Read", core.Concat("read failed: ", p), rawFS.Read(full)) } // Write saves content to file, creating parent directories as needed. // Files are created with mode 0644. For sensitive files (keys, secrets), // use WriteMode with 0600. +// +// result := m.Write(...) func (m *Medium) Write(p, content string) error { return m.WriteMode(p, content, 0644) } // WriteMode saves content to file with explicit permissions. // Use 0600 for sensitive files (encryption output, private keys, auth hashes). +// +// result := m.WriteMode(...) func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { full, err := m.validatePath(p) if err != nil { return err } - if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { - return err - } - return os.WriteFile(full, []byte(content), mode) + return resultErr("local.WriteMode", core.Concat("write failed: ", p), rawFS.WriteMode(full, content, mode)) } // EnsureDir creates directory if it doesn't exist. +// +// result := m.EnsureDir(...) func (m *Medium) EnsureDir(p string) error { full, err := m.validatePath(p) if err != nil { return err } - return os.MkdirAll(full, 0755) + return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", p), rawFS.EnsureDir(full)) } // IsDir returns true if path is a directory. +// +// result := m.IsDir(...) func (m *Medium) IsDir(p string) bool { if p == "" { return false @@ -297,11 +294,12 @@ func (m *Medium) IsDir(p string) bool { if err != nil { return false } - info, err := os.Stat(full) - return err == nil && info.IsDir() + return rawFS.IsDir(full) } // IsFile returns true if path is a regular file. +// +// result := m.IsFile(...) 
func (m *Medium) IsFile(p string) bool { if p == "" { return false @@ -310,69 +308,73 @@ func (m *Medium) IsFile(p string) bool { if err != nil { return false } - info, err := os.Stat(full) - return err == nil && info.Mode().IsRegular() + return rawFS.IsFile(full) } // Exists returns true if path exists. +// +// result := m.Exists(...) func (m *Medium) Exists(p string) bool { full, err := m.validatePath(p) if err != nil { return false } - _, err = os.Stat(full) - return err == nil + return rawFS.Exists(full) } // List returns directory entries. +// +// result := m.List(...) func (m *Medium) List(p string) ([]fs.DirEntry, error) { full, err := m.validatePath(p) if err != nil { return nil, err } - return os.ReadDir(full) + return resultValue[[]fs.DirEntry]("local.List", core.Concat("list failed: ", p), rawFS.List(full)) } // Stat returns file info. +// +// result := m.Stat(...) func (m *Medium) Stat(p string) (fs.FileInfo, error) { full, err := m.validatePath(p) if err != nil { return nil, err } - return os.Stat(full) + return resultValue[fs.FileInfo]("local.Stat", core.Concat("stat failed: ", p), rawFS.Stat(full)) } // Open opens the named file for reading. +// +// result := m.Open(...) func (m *Medium) Open(p string) (fs.File, error) { full, err := m.validatePath(p) if err != nil { return nil, err } - return os.Open(full) + return resultValue[fs.File]("local.Open", core.Concat("open failed: ", p), rawFS.Open(full)) } // Create creates or truncates the named file. +// +// result := m.Create(...) func (m *Medium) Create(p string) (goio.WriteCloser, error) { full, err := m.validatePath(p) if err != nil { return nil, err } - if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { - return nil, err - } - return os.Create(full) + return resultValue[goio.WriteCloser]("local.Create", core.Concat("create failed: ", p), rawFS.Create(full)) } // Append opens the named file for appending, creating it if it doesn't exist. +// +// result := m.Append(...) 
func (m *Medium) Append(p string) (goio.WriteCloser, error) { full, err := m.validatePath(p) if err != nil { return nil, err } - if err := os.MkdirAll(core.PathDir(full), 0755); err != nil { - return nil, err - } - return os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + return resultValue[goio.WriteCloser]("local.Append", core.Concat("append failed: ", p), rawFS.Append(full)) } // ReadStream returns a reader for the file content. @@ -381,6 +383,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // API, as required by the io.Medium interface, while Open provides the more // general filesystem-level operation. Both methods are kept for semantic // clarity and backward compatibility. +// +// result := m.ReadStream(...) func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } @@ -391,35 +395,43 @@ func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { // API, as required by the io.Medium interface, while Create provides the more // general filesystem-level operation. Both methods are kept for semantic // clarity and backward compatibility. +// +// result := m.WriteStream(...) func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } // Delete removes a file or empty directory. +// +// result := m.Delete(...) func (m *Medium) Delete(p string) error { full, err := m.validatePath(p) if err != nil { return err } if isProtectedPath(full) { - return coreerr.E("local.Delete", "refusing to delete protected path: "+full, nil) + return core.E("local.Delete", core.Concat("refusing to delete protected path: ", full), nil) } - return os.Remove(full) + return resultErr("local.Delete", core.Concat("delete failed: ", p), rawFS.Delete(full)) } // DeleteAll removes a file or directory recursively. +// +// result := m.DeleteAll(...) 
func (m *Medium) DeleteAll(p string) error { full, err := m.validatePath(p) if err != nil { return err } if isProtectedPath(full) { - return coreerr.E("local.DeleteAll", "refusing to delete protected path: "+full, nil) + return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", full), nil) } - return os.RemoveAll(full) + return resultErr("local.DeleteAll", core.Concat("delete all failed: ", p), rawFS.DeleteAll(full)) } // Rename moves a file or directory. +// +// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldFull, err := m.validatePath(oldPath) if err != nil { @@ -429,15 +441,68 @@ func (m *Medium) Rename(oldPath, newPath string) error { if err != nil { return err } - return os.Rename(oldFull, newFull) + return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), rawFS.Rename(oldFull, newFull)) } // FileGet is an alias for Read. +// +// result := m.FileGet(...) func (m *Medium) FileGet(p string) (string, error) { return m.Read(p) } // FileSet is an alias for Write. +// +// result := m.FileSet(...) 
func (m *Medium) FileSet(p, content string) error { return m.Write(p, content) } + +func lstat(path string) (*syscall.Stat_t, error) { + info := &syscall.Stat_t{} + if err := syscall.Lstat(path, info); err != nil { + return nil, err + } + return info, nil +} + +func isSymlink(mode uint32) bool { + return mode&syscall.S_IFMT == syscall.S_IFLNK +} + +func readlink(path string) (string, error) { + size := 256 + for { + buf := make([]byte, size) + n, err := syscall.Readlink(path, buf) + if err != nil { + return "", err + } + if n < len(buf) { + return string(buf[:n]), nil + } + size *= 2 + } +} + +func resultErr(op, msg string, result core.Result) error { + if result.OK { + return nil + } + if err, ok := result.Value.(error); ok { + return core.E(op, msg, err) + } + return core.E(op, msg, nil) +} + +func resultValue[T any](op, msg string, result core.Result) (T, error) { + var zero T + if !result.OK { + return zero, resultErr(op, msg, result) + } + value, ok := result.Value.(T) + if !ok { + return zero, core.E(op, "unexpected result type", nil) + } + return value, nil +} diff --git a/local/client_test.go b/local/client_test.go index dfd8044..9acd09c 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -3,8 +3,7 @@ package local import ( "io" "io/fs" - "os" - "strings" + "syscall" "testing" core "dappco.re/go/core" @@ -12,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestNew_Good_ResolvesRoot(t *testing.T) { +func TestClient_New_ResolvesRoot_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) @@ -22,7 +21,7 @@ func TestNew_Good_ResolvesRoot(t *testing.T) { assert.Equal(t, resolved, m.root) } -func TestPath_Good_Sandboxed(t *testing.T) { +func TestClient_Path_Sandboxed_Good(t *testing.T) { m := &Medium{root: "/home/user"} // Normal paths @@ -40,7 +39,7 @@ func TestPath_Good_Sandboxed(t *testing.T) { assert.Equal(t, "/home/user/etc/passwd", m.path("/etc/passwd")) } -func TestPath_Good_RootFilesystem(t 
*testing.T) { +func TestClient_Path_RootFilesystem_Good(t *testing.T) { m := &Medium{root: "/"} // When root is "/", absolute paths pass through @@ -52,7 +51,7 @@ func TestPath_Good_RootFilesystem(t *testing.T) { assert.Equal(t, core.Path(cwd, "file.txt"), m.path("file.txt")) } -func TestReadWrite_Good_Basic(t *testing.T) { +func TestClient_ReadWrite_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -77,7 +76,7 @@ func TestReadWrite_Good_Basic(t *testing.T) { assert.Error(t, err) } -func TestEnsureDir_Good_Basic(t *testing.T) { +func TestClient_EnsureDir_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -89,7 +88,7 @@ func TestEnsureDir_Good_Basic(t *testing.T) { assert.True(t, info.IsDir()) } -func TestIsDir_Good_Basic(t *testing.T) { +func TestClient_IsDir_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -102,7 +101,7 @@ func TestIsDir_Good_Basic(t *testing.T) { assert.False(t, m.IsDir("")) } -func TestIsFile_Good_Basic(t *testing.T) { +func TestClient_IsFile_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -115,7 +114,7 @@ func TestIsFile_Good_Basic(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestExists_Good_Basic(t *testing.T) { +func TestClient_Exists_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -125,7 +124,7 @@ func TestExists_Good_Basic(t *testing.T) { assert.False(t, m.Exists("nope")) } -func TestList_Good_Basic(t *testing.T) { +func TestClient_List_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -138,7 +137,7 @@ func TestList_Good_Basic(t *testing.T) { assert.Len(t, entries, 3) } -func TestStat_Good_Basic(t *testing.T) { +func TestClient_Stat_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -149,7 +148,7 @@ func TestStat_Good_Basic(t *testing.T) { assert.Equal(t, int64(7), info.Size()) } -func TestDelete_Good_Basic(t *testing.T) { +func TestClient_Delete_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := 
New(root) @@ -161,7 +160,7 @@ func TestDelete_Good_Basic(t *testing.T) { assert.False(t, m.Exists("todelete")) } -func TestDeleteAll_Good_Basic(t *testing.T) { +func TestClient_DeleteAll_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -172,11 +171,11 @@ func TestDeleteAll_Good_Basic(t *testing.T) { assert.False(t, m.Exists("dir")) } -func TestDelete_Bad_ProtectedHomeViaSymlinkEnv(t *testing.T) { +func TestClient_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { realHome := t.TempDir() linkParent := t.TempDir() homeLink := core.Path(linkParent, "home-link") - require.NoError(t, os.Symlink(realHome, homeLink)) + require.NoError(t, syscall.Symlink(realHome, homeLink)) t.Setenv("HOME", homeLink) m, err := New("/") @@ -187,7 +186,7 @@ func TestDelete_Bad_ProtectedHomeViaSymlinkEnv(t *testing.T) { assert.DirExists(t, realHome) } -func TestDeleteAll_Bad_ProtectedHomeViaEnv(t *testing.T) { +func TestClient_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) @@ -199,7 +198,7 @@ func TestDeleteAll_Bad_ProtectedHomeViaEnv(t *testing.T) { assert.DirExists(t, tempHome) } -func TestRename_Good_Basic(t *testing.T) { +func TestClient_Rename_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -211,7 +210,7 @@ func TestRename_Good_Basic(t *testing.T) { assert.True(t, m.Exists("new")) } -func TestFileGetFileSet_Good_Basic(t *testing.T) { +func TestClient_FileGetFileSet_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -223,7 +222,7 @@ func TestFileGetFileSet_Good_Basic(t *testing.T) { assert.Equal(t, "value", val) } -func TestDelete_Good(t *testing.T) { +func TestClient_Delete_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -246,7 +245,7 @@ func TestDelete_Good(t *testing.T) { assert.False(t, medium.IsDir("emptydir")) } -func TestDelete_Bad_NotEmpty(t *testing.T) { +func TestClient_Delete_NotEmpty_Bad(t *testing.T) { testRoot := t.TempDir() medium, 
err := New(testRoot) @@ -261,7 +260,7 @@ func TestDelete_Bad_NotEmpty(t *testing.T) { assert.Error(t, err) } -func TestDeleteAll_Good(t *testing.T) { +func TestClient_DeleteAll_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -281,7 +280,7 @@ func TestDeleteAll_Good(t *testing.T) { assert.False(t, medium.Exists("mydir/subdir/file2.txt")) } -func TestRename_Good(t *testing.T) { +func TestClient_Rename_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -300,7 +299,7 @@ func TestRename_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestRename_Good_TraversalSanitised(t *testing.T) { +func TestClient_Rename_TraversalSanitised_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -317,7 +316,7 @@ func TestRename_Good_TraversalSanitised(t *testing.T) { assert.True(t, medium.Exists("escaped.txt")) } -func TestList_Good(t *testing.T) { +func TestClient_List_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -345,7 +344,7 @@ func TestList_Good(t *testing.T) { assert.True(t, names["subdir"]) } -func TestStat_Good(t *testing.T) { +func TestClient_Stat_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -369,7 +368,7 @@ func TestStat_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestExists_Good(t *testing.T) { +func TestClient_Exists_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -386,7 +385,7 @@ func TestExists_Good(t *testing.T) { assert.True(t, medium.Exists("mydir")) } -func TestIsDir_Good(t *testing.T) { +func TestClient_IsDir_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -403,7 +402,7 @@ func TestIsDir_Good(t *testing.T) { assert.False(t, medium.IsDir("nonexistent")) } -func TestReadStream_Good_Basic(t *testing.T) { +func TestClient_ReadStream_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -422,14 +421,14 @@ func 
TestReadStream_Good_Basic(t *testing.T) { assert.Equal(t, "streaming", string(data)) } -func TestWriteStream_Good_Basic(t *testing.T) { +func TestClient_WriteStream_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) writer, err := m.WriteStream("output.txt") assert.NoError(t, err) - _, err = io.Copy(writer, strings.NewReader("piped data")) + _, err = io.Copy(writer, core.NewReader("piped data")) assert.NoError(t, err) err = writer.Close() assert.NoError(t, err) @@ -439,7 +438,7 @@ func TestWriteStream_Good_Basic(t *testing.T) { assert.Equal(t, "piped data", content) } -func TestPath_Ugly_TraversalAdvanced(t *testing.T) { +func TestClient_Path_TraversalAdvanced_Ugly(t *testing.T) { m := &Medium{root: "/sandbox"} // Multiple levels of traversal @@ -454,7 +453,7 @@ func TestPath_Ugly_TraversalAdvanced(t *testing.T) { assert.Equal(t, "/sandbox/file\x00.txt", m.path("file\x00.txt")) } -func TestValidatePath_Bad_SymlinkEscape(t *testing.T) { +func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) @@ -474,7 +473,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) { // Test 2: Symlink escape // Create a symlink inside the sandbox pointing outside linkPath := core.Path(root, "evil_link") - err = os.Symlink(outside, linkPath) + err = syscall.Symlink(outside, linkPath) assert.NoError(t, err) // Try to access a file through the symlink @@ -487,7 +486,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) { assert.NoError(t, err) innerDir := core.Path(root, "inner") nestedLink := core.Path(innerDir, "nested_evil") - err = os.Symlink(outside, nestedLink) + err = syscall.Symlink(outside, nestedLink) assert.NoError(t, err) _, err = m.validatePath("inner/nested_evil/secret.txt") @@ -495,7 +494,7 @@ func TestValidatePath_Bad_SymlinkEscape(t *testing.T) { assert.ErrorIs(t, err, fs.ErrPermission) } -func TestEmptyPaths_Ugly(t *testing.T) { +func TestClient_EmptyPaths_Ugly(t *testing.T) { 
root := t.TempDir() m, err := New(root) assert.NoError(t, err) diff --git a/node/node.go b/node/node.go index 2255f9c..d59964c 100644 --- a/node/node.go +++ b/node/node.go @@ -11,7 +11,6 @@ import ( "io/fs" "path" "slices" - "strings" "time" core "dappco.re/go/core" @@ -42,13 +41,15 @@ func New() *Node { // ---------- Node-specific methods ---------- // AddData stages content in the in-memory filesystem. +// +// result := n.AddData(...) func (n *Node) AddData(name string, content []byte) { - name = strings.TrimPrefix(name, "/") + name = core.TrimPrefix(name, "/") if name == "" { return } // Directories are implicit, so we don't store them. - if strings.HasSuffix(name, "/") { + if core.HasSuffix(name, "/") { return } n.files[name] = &dataFile{ @@ -59,6 +60,8 @@ func (n *Node) AddData(name string, content []byte) { } // ToTar serialises the entire in-memory tree to a tar archive. +// +// result := n.ToTar(...) func (n *Node) ToTar() ([]byte, error) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -86,6 +89,8 @@ func (n *Node) ToTar() ([]byte, error) { } // FromTar creates a new Node from a tar archive. +// +// result := node.FromTar(...) func FromTar(data []byte) (*Node, error) { n := New() if err := n.LoadTar(data); err != nil { @@ -95,6 +100,8 @@ func FromTar(data []byte) (*Node, error) { } // LoadTar replaces the in-memory tree with the contents of a tar archive. +// +// result := n.LoadTar(...) 
func (n *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) tr := tar.NewReader(bytes.NewReader(data)) @@ -111,10 +118,10 @@ func (n *Node) LoadTar(data []byte) error { if header.Typeflag == tar.TypeReg { content, err := goio.ReadAll(tr) if err != nil { - return err + return core.E("node.LoadTar", "read tar entry", err) } - name := strings.TrimPrefix(header.Name, "/") - if name == "" || strings.HasSuffix(name, "/") { + name := core.TrimPrefix(header.Name, "/") + if name == "" || core.HasSuffix(name, "/") { continue } newFiles[name] = &dataFile{ @@ -130,6 +137,8 @@ func (n *Node) LoadTar(data []byte) error { } // WalkNode walks the in-memory tree, calling fn for each entry. +// +// result := n.WalkNode(...) func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(n, root, fn) } @@ -147,6 +156,8 @@ type WalkOptions struct { } // Walk walks the in-memory tree with optional WalkOptions. +// +// result := n.Walk(...) func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { var opt WalkOptions if len(opts) > 0 { @@ -175,9 +186,9 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { // After visiting a directory at MaxDepth, prevent descending further. if result == nil && opt.MaxDepth > 0 && d != nil && d.IsDir() && p != root { - rel := strings.TrimPrefix(p, root) - rel = strings.TrimPrefix(rel, "/") - depth := strings.Count(rel, "/") + 1 + rel := core.TrimPrefix(p, root) + rel = core.TrimPrefix(rel, "/") + depth := len(core.Split(rel, "/")) if depth >= opt.MaxDepth { return fs.SkipDir } @@ -189,11 +200,13 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { // ReadFile returns the content of the named file as a byte slice. // Implements fs.ReadFileFS. +// +// result := n.ReadFile(...) 
func (n *Node) ReadFile(name string) ([]byte, error) { - name = strings.TrimPrefix(name, "/") + name = core.TrimPrefix(name, "/") f, ok := n.files[name] if !ok { - return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrNotExist} + return nil, core.E("node.ReadFile", core.Concat("path not found: ", name), fs.ErrNotExist) } // Return a copy to prevent callers from mutating internal state. result := make([]byte, len(f.content)) @@ -202,19 +215,21 @@ func (n *Node) ReadFile(name string) ([]byte, error) { } // CopyFile copies a file from the in-memory tree to the local filesystem. +// +// result := n.CopyFile(...) func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { - src = strings.TrimPrefix(src, "/") + src = core.TrimPrefix(src, "/") f, ok := n.files[src] if !ok { // Check if it's a directory — can't copy directories this way. info, err := n.Stat(src) if err != nil { - return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist} + return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist) } if info.IsDir() { - return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrInvalid} + return core.E("node.CopyFile", core.Concat("source is a directory: ", src), fs.ErrInvalid) } - return &fs.PathError{Op: "copyfile", Path: src, Err: fs.ErrNotExist} + return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist) } parent := core.PathDir(dst) if parent != "." 
&& parent != "" && parent != dst && !coreio.Local.IsDir(parent) { @@ -230,7 +245,7 @@ func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { // dst := io.NewMockMedium() // _ = n.CopyTo(dst, "config", "backup/config") func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { - sourcePath = strings.TrimPrefix(sourcePath, "/") + sourcePath = core.TrimPrefix(sourcePath, "/") info, err := n.Stat(sourcePath) if err != nil { return err @@ -240,25 +255,25 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // Single file copy f, ok := n.files[sourcePath] if !ok { - return fs.ErrNotExist + return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist) } return target.Write(destPath, string(f.content)) } // Directory: walk and copy all files underneath prefix := sourcePath - if prefix != "" && !strings.HasSuffix(prefix, "/") { + if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } for p, f := range n.files { - if !strings.HasPrefix(p, prefix) && p != sourcePath { + if !core.HasPrefix(p, prefix) && p != sourcePath { continue } - rel := strings.TrimPrefix(p, prefix) + rel := core.TrimPrefix(p, prefix) dest := destPath if rel != "" { - dest = destPath + "/" + rel + dest = core.Concat(destPath, "/", rel) } if err := target.Write(dest, string(f.content)); err != nil { return err @@ -270,8 +285,10 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // ---------- Medium interface: fs.FS methods ---------- // Open opens a file from the Node. Implements fs.FS. +// +// result := n.Open(...) 
func (n *Node) Open(name string) (fs.File, error) { - name = strings.TrimPrefix(name, "/") + name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { return &dataFileReader{file: file}, nil } @@ -281,16 +298,18 @@ func (n *Node) Open(name string) (fs.File, error) { prefix = "" } for p := range n.files { - if strings.HasPrefix(p, prefix) { + if core.HasPrefix(p, prefix) { return &dirFile{path: name, modTime: time.Now()}, nil } } - return nil, fs.ErrNotExist + return nil, core.E("node.Open", core.Concat("path not found: ", name), fs.ErrNotExist) } // Stat returns file information for the given path. +// +// result := n.Stat(...) func (n *Node) Stat(name string) (fs.FileInfo, error) { - name = strings.TrimPrefix(name, "/") + name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { return file.Stat() } @@ -300,16 +319,18 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { prefix = "" } for p := range n.files { - if strings.HasPrefix(p, prefix) { + if core.HasPrefix(p, prefix) { return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil } } - return nil, fs.ErrNotExist + return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist) } // ReadDir reads and returns all directory entries for the named directory. +// +// result := n.ReadDir(...) func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { - name = strings.TrimPrefix(name, "/") + name = core.TrimPrefix(name, "/") if name == "." 
{ name = "" } @@ -328,19 +349,19 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { } for p := range n.files { - if !strings.HasPrefix(p, prefix) { + if !core.HasPrefix(p, prefix) { continue } - relPath := strings.TrimPrefix(p, prefix) - firstComponent := strings.Split(relPath, "/")[0] + relPath := core.TrimPrefix(p, prefix) + firstComponent := core.SplitN(relPath, "/", 2)[0] if seen[firstComponent] { continue } seen[firstComponent] = true - if strings.Contains(relPath, "/") { + if core.Contains(relPath, "/") { dir := &dirInfo{name: firstComponent, modTime: time.Now()} entries = append(entries, fs.FileInfoToDirEntry(dir)) } else { @@ -360,37 +381,49 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // ---------- Medium interface: read/write ---------- // Read retrieves the content of a file as a string. +// +// result := n.Read(...) func (n *Node) Read(p string) (string, error) { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") f, ok := n.files[p] if !ok { - return "", fs.ErrNotExist + return "", core.E("node.Read", core.Concat("path not found: ", p), fs.ErrNotExist) } return string(f.content), nil } // Write saves the given content to a file, overwriting it if it exists. +// +// result := n.Write(...) func (n *Node) Write(p, content string) error { n.AddData(p, []byte(content)) return nil } // WriteMode saves content with explicit permissions (no-op for in-memory node). +// +// result := n.WriteMode(...) func (n *Node) WriteMode(p, content string, mode fs.FileMode) error { return n.Write(p, content) } // FileGet is an alias for Read. +// +// result := n.FileGet(...) func (n *Node) FileGet(p string) (string, error) { return n.Read(p) } // FileSet is an alias for Write. +// +// result := n.FileSet(...) func (n *Node) FileSet(p, content string) error { return n.Write(p, content) } // EnsureDir is a no-op because directories are implicit in Node. +// +// result := n.EnsureDir(...) 
func (n *Node) EnsureDir(_ string) error { return nil } @@ -398,19 +431,25 @@ func (n *Node) EnsureDir(_ string) error { // ---------- Medium interface: existence checks ---------- // Exists checks if a path exists (file or directory). +// +// result := n.Exists(...) func (n *Node) Exists(p string) bool { _, err := n.Stat(p) return err == nil } // IsFile checks if a path exists and is a regular file. +// +// result := n.IsFile(...) func (n *Node) IsFile(p string) bool { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") _, ok := n.files[p] return ok } // IsDir checks if a path exists and is a directory. +// +// result := n.IsDir(...) func (n *Node) IsDir(p string) bool { info, err := n.Stat(p) if err != nil { @@ -422,18 +461,22 @@ func (n *Node) IsDir(p string) bool { // ---------- Medium interface: mutations ---------- // Delete removes a single file. +// +// result := n.Delete(...) func (n *Node) Delete(p string) error { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") if _, ok := n.files[p]; ok { delete(n.files, p) return nil } - return fs.ErrNotExist + return core.E("node.Delete", core.Concat("path not found: ", p), fs.ErrNotExist) } // DeleteAll removes a file or directory and all children. +// +// result := n.DeleteAll(...) func (n *Node) DeleteAll(p string) error { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") found := false if _, ok := n.files[p]; ok { @@ -443,26 +486,28 @@ func (n *Node) DeleteAll(p string) error { prefix := p + "/" for k := range n.files { - if strings.HasPrefix(k, prefix) { + if core.HasPrefix(k, prefix) { delete(n.files, k) found = true } } if !found { - return fs.ErrNotExist + return core.E("node.DeleteAll", core.Concat("path not found: ", p), fs.ErrNotExist) } return nil } // Rename moves a file from oldPath to newPath. +// +// result := n.Rename(...) 
func (n *Node) Rename(oldPath, newPath string) error { - oldPath = strings.TrimPrefix(oldPath, "/") - newPath = strings.TrimPrefix(newPath, "/") + oldPath = core.TrimPrefix(oldPath, "/") + newPath = core.TrimPrefix(newPath, "/") f, ok := n.files[oldPath] if !ok { - return fs.ErrNotExist + return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } f.name = newPath @@ -472,8 +517,10 @@ func (n *Node) Rename(oldPath, newPath string) error { } // List returns directory entries for the given path. +// +// result := n.List(...) func (n *Node) List(p string) ([]fs.DirEntry, error) { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") if p == "" || p == "." { return n.ReadDir(".") } @@ -484,15 +531,19 @@ func (n *Node) List(p string) ([]fs.DirEntry, error) { // Create creates or truncates the named file, returning a WriteCloser. // Content is committed to the Node on Close. +// +// result := n.Create(...) func (n *Node) Create(p string) (goio.WriteCloser, error) { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") return &nodeWriter{node: n, path: p}, nil } // Append opens the named file for appending, creating it if needed. // Content is committed to the Node on Close. +// +// result := n.Append(...) func (n *Node) Append(p string) (goio.WriteCloser, error) { - p = strings.TrimPrefix(p, "/") + p = core.TrimPrefix(p, "/") var existing []byte if f, ok := n.files[p]; ok { existing = make([]byte, len(f.content)) @@ -502,6 +553,8 @@ func (n *Node) Append(p string) (goio.WriteCloser, error) { } // ReadStream returns a ReadCloser for the file content. +// +// result := n.ReadStream(...) func (n *Node) ReadStream(p string) (goio.ReadCloser, error) { f, err := n.Open(p) if err != nil { @@ -511,6 +564,8 @@ func (n *Node) ReadStream(p string) (goio.ReadCloser, error) { } // WriteStream returns a WriteCloser for the file content. +// +// result := n.WriteStream(...) 
func (n *Node) WriteStream(p string) (goio.WriteCloser, error) { return n.Create(p) } @@ -524,11 +579,17 @@ type nodeWriter struct { buf []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *nodeWriter) Write(p []byte) (int, error) { w.buf = append(w.buf, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) func (w *nodeWriter) Close() error { w.node.files[w.path] = &dataFile{ name: w.path, @@ -545,19 +606,53 @@ type dataFile struct { modTime time.Time } +// Stat documents the Stat operation. +// +// result := d.Stat(...) func (d *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: d}, nil } + +// Read documents the Read operation. +// +// result := d.Read(...) func (d *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } -func (d *dataFile) Close() error { return nil } + +// Close documents the Close operation. +// +// result := d.Close(...) +func (d *dataFile) Close() error { return nil } // dataFileInfo implements fs.FileInfo for a dataFile. type dataFileInfo struct{ file *dataFile } -func (d *dataFileInfo) Name() string { return path.Base(d.file.name) } -func (d *dataFileInfo) Size() int64 { return int64(len(d.file.content)) } -func (d *dataFileInfo) Mode() fs.FileMode { return 0444 } +// Name documents the Name operation. +// +// result := d.Name(...) +func (d *dataFileInfo) Name() string { return path.Base(d.file.name) } + +// Size documents the Size operation. +// +// result := d.Size(...) +func (d *dataFileInfo) Size() int64 { return int64(len(d.file.content)) } + +// Mode documents the Mode operation. +// +// result := d.Mode(...) +func (d *dataFileInfo) Mode() fs.FileMode { return 0444 } + +// ModTime documents the ModTime operation. +// +// result := d.ModTime(...) 
func (d *dataFileInfo) ModTime() time.Time { return d.file.modTime } -func (d *dataFileInfo) IsDir() bool { return false } -func (d *dataFileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := d.IsDir(...) +func (d *dataFileInfo) IsDir() bool { return false } + +// Sys documents the Sys operation. +// +// result := d.Sys(...) +func (d *dataFileInfo) Sys() any { return nil } // dataFileReader implements fs.File for reading a dataFile. type dataFileReader struct { @@ -565,13 +660,24 @@ type dataFileReader struct { reader *bytes.Reader } +// Stat documents the Stat operation. +// +// result := d.Stat(...) func (d *dataFileReader) Stat() (fs.FileInfo, error) { return d.file.Stat() } + +// Read documents the Read operation. +// +// result := d.Read(...) func (d *dataFileReader) Read(p []byte) (int, error) { if d.reader == nil { d.reader = bytes.NewReader(d.file.content) } return d.reader.Read(p) } + +// Close documents the Close operation. +// +// result := d.Close(...) func (d *dataFileReader) Close() error { return nil } // dirInfo implements fs.FileInfo for an implicit directory. @@ -580,12 +686,35 @@ type dirInfo struct { modTime time.Time } -func (d *dirInfo) Name() string { return d.name } -func (d *dirInfo) Size() int64 { return 0 } -func (d *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } +// Name documents the Name operation. +// +// result := d.Name(...) +func (d *dirInfo) Name() string { return d.name } + +// Size documents the Size operation. +// +// result := d.Size(...) +func (d *dirInfo) Size() int64 { return 0 } + +// Mode documents the Mode operation. +// +// result := d.Mode(...) +func (d *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } + +// ModTime documents the ModTime operation. +// +// result := d.ModTime(...) 
func (d *dirInfo) ModTime() time.Time { return d.modTime } -func (d *dirInfo) IsDir() bool { return true } -func (d *dirInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := d.IsDir(...) +func (d *dirInfo) IsDir() bool { return true } + +// Sys documents the Sys operation. +// +// result := d.Sys(...) +func (d *dirInfo) Sys() any { return nil } // dirFile implements fs.File for a directory. type dirFile struct { @@ -593,12 +722,23 @@ type dirFile struct { modTime time.Time } +// Stat documents the Stat operation. +// +// result := d.Stat(...) func (d *dirFile) Stat() (fs.FileInfo, error) { return &dirInfo{name: path.Base(d.path), modTime: d.modTime}, nil } + +// Read documents the Read operation. +// +// result := d.Read(...) func (d *dirFile) Read([]byte) (int, error) { - return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid} + return 0, core.E("node.dirFile.Read", core.Concat("cannot read directory: ", d.path), &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}) } + +// Close documents the Close operation. +// +// result := d.Close(...) func (d *dirFile) Close() error { return nil } // Ensure Node implements fs.FS so WalkDir works. 
diff --git a/node/node_test.go b/node/node_test.go index c6aa121..277c8a1 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3,11 +3,9 @@ package node import ( "archive/tar" "bytes" - "errors" "io" "io/fs" "sort" - "strings" "testing" core "dappco.re/go/core" @@ -20,7 +18,7 @@ import ( // New // --------------------------------------------------------------------------- -func TestNew_Good(t *testing.T) { +func TestNode_New_Good(t *testing.T) { n := New() require.NotNil(t, n, "New() must not return nil") assert.NotNil(t, n.files, "New() must initialise the files map") @@ -30,7 +28,7 @@ func TestNew_Good(t *testing.T) { // AddData // --------------------------------------------------------------------------- -func TestAddData_Good(t *testing.T) { +func TestNode_AddData_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -43,7 +41,7 @@ func TestAddData_Good(t *testing.T) { assert.Equal(t, "foo.txt", info.Name()) } -func TestAddData_Bad(t *testing.T) { +func TestNode_AddData_Bad(t *testing.T) { n := New() // Empty name is silently ignored. 
@@ -55,7 +53,7 @@ func TestAddData_Bad(t *testing.T) { assert.Empty(t, n.files, "directory entry must not be stored") } -func TestAddData_Ugly(t *testing.T) { +func TestNode_AddData_Ugly(t *testing.T) { t.Run("Overwrite", func(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -77,7 +75,7 @@ func TestAddData_Ugly(t *testing.T) { // Open // --------------------------------------------------------------------------- -func TestOpen_Good(t *testing.T) { +func TestNode_Open_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -91,14 +89,14 @@ func TestOpen_Good(t *testing.T) { assert.Equal(t, "foo", string(buf[:nr])) } -func TestOpen_Bad(t *testing.T) { +func TestNode_Open_Bad(t *testing.T) { n := New() _, err := n.Open("nonexistent.txt") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestOpen_Ugly(t *testing.T) { +func TestNode_Open_Ugly(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) @@ -112,7 +110,7 @@ func TestOpen_Ugly(t *testing.T) { require.Error(t, err) var pathErr *fs.PathError - require.True(t, errors.As(err, &pathErr)) + require.True(t, core.As(err, &pathErr)) assert.Equal(t, fs.ErrInvalid, pathErr.Err) } @@ -120,7 +118,7 @@ func TestOpen_Ugly(t *testing.T) { // Stat // --------------------------------------------------------------------------- -func TestStat_Good(t *testing.T) { +func TestNode_Stat_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) @@ -139,14 +137,14 @@ func TestStat_Good(t *testing.T) { assert.Equal(t, "bar", dirInfo.Name()) } -func TestStat_Bad(t *testing.T) { +func TestNode_Stat_Bad(t *testing.T) { n := New() _, err := n.Stat("nonexistent") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestStat_Ugly(t *testing.T) { +func TestNode_Stat_Ugly(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -161,7 +159,7 @@ func TestStat_Ugly(t *testing.T) { // ReadFile // 
--------------------------------------------------------------------------- -func TestReadFile_Good(t *testing.T) { +func TestNode_ReadFile_Good(t *testing.T) { n := New() n.AddData("hello.txt", []byte("hello world")) @@ -170,14 +168,14 @@ func TestReadFile_Good(t *testing.T) { assert.Equal(t, []byte("hello world"), data) } -func TestReadFile_Bad(t *testing.T) { +func TestNode_ReadFile_Bad(t *testing.T) { n := New() _, err := n.ReadFile("missing.txt") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestReadFile_Ugly(t *testing.T) { +func TestNode_ReadFile_Ugly(t *testing.T) { n := New() n.AddData("data.bin", []byte("original")) @@ -195,7 +193,7 @@ func TestReadFile_Ugly(t *testing.T) { // ReadDir // --------------------------------------------------------------------------- -func TestReadDir_Good(t *testing.T) { +func TestNode_ReadDir_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) @@ -212,7 +210,7 @@ func TestReadDir_Good(t *testing.T) { assert.Equal(t, []string{"baz.txt", "qux.txt"}, sortedNames(barEntries)) } -func TestReadDir_Bad(t *testing.T) { +func TestNode_ReadDir_Bad(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -220,11 +218,11 @@ func TestReadDir_Bad(t *testing.T) { _, err := n.ReadDir("foo.txt") require.Error(t, err) var pathErr *fs.PathError - require.True(t, errors.As(err, &pathErr)) + require.True(t, core.As(err, &pathErr)) assert.Equal(t, fs.ErrInvalid, pathErr.Err) } -func TestReadDir_Ugly(t *testing.T) { +func TestNode_ReadDir_Ugly(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) n.AddData("empty_dir/", nil) // Ignored by AddData. 
@@ -238,7 +236,7 @@ func TestReadDir_Ugly(t *testing.T) { // Exists // --------------------------------------------------------------------------- -func TestExists_Good(t *testing.T) { +func TestNode_Exists_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) @@ -247,12 +245,12 @@ func TestExists_Good(t *testing.T) { assert.True(t, n.Exists("bar")) } -func TestExists_Bad(t *testing.T) { +func TestNode_Exists_Bad(t *testing.T) { n := New() assert.False(t, n.Exists("nonexistent")) } -func TestExists_Ugly(t *testing.T) { +func TestNode_Exists_Ugly(t *testing.T) { n := New() n.AddData("dummy.txt", []byte("dummy")) @@ -264,7 +262,7 @@ func TestExists_Ugly(t *testing.T) { // Walk // --------------------------------------------------------------------------- -func TestWalk_Good(t *testing.T) { +func TestNode_Walk_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) @@ -281,7 +279,7 @@ func TestWalk_Good(t *testing.T) { assert.Equal(t, []string{".", "bar", "bar/baz.txt", "bar/qux.txt", "foo.txt"}, paths) } -func TestWalk_Bad(t *testing.T) { +func TestNode_Walk_Bad(t *testing.T) { n := New() var called bool @@ -295,13 +293,13 @@ func TestWalk_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestWalk_Ugly(t *testing.T) { +func TestNode_Walk_Ugly(t *testing.T) { n := New() n.AddData("a/b.txt", []byte("b")) n.AddData("a/c.txt", []byte("c")) // Stop walk early with a custom error. 
- walkErr := errors.New("stop walking") + walkErr := core.NewError("stop walking") var paths []string err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { if p == "a/b.txt" { @@ -314,7 +312,7 @@ func TestWalk_Ugly(t *testing.T) { assert.Equal(t, walkErr, err, "Walk must propagate the callback error") } -func TestWalk_Good_Options(t *testing.T) { +func TestNode_Walk_Options_Good(t *testing.T) { n := New() n.AddData("root.txt", []byte("root")) n.AddData("a/a1.txt", []byte("a1")) @@ -339,7 +337,7 @@ func TestWalk_Good_Options(t *testing.T) { paths = append(paths, p) return nil }, WalkOptions{Filter: func(p string, d fs.DirEntry) bool { - return !strings.HasPrefix(p, "a") + return !core.HasPrefix(p, "a") }}) require.NoError(t, err) @@ -363,7 +361,7 @@ func TestWalk_Good_Options(t *testing.T) { // CopyFile // --------------------------------------------------------------------------- -func TestCopyFile_Good(t *testing.T) { +func TestNode_CopyFile_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -376,7 +374,7 @@ func TestCopyFile_Good(t *testing.T) { assert.Equal(t, "foo", content) } -func TestCopyFile_Bad(t *testing.T) { +func TestNode_CopyFile_Bad(t *testing.T) { n := New() tmpfile := core.Path(t.TempDir(), "test.txt") @@ -390,7 +388,7 @@ func TestCopyFile_Bad(t *testing.T) { assert.Error(t, err) } -func TestCopyFile_Ugly(t *testing.T) { +func TestNode_CopyFile_Ugly(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) tmpfile := core.Path(t.TempDir(), "test.txt") @@ -404,7 +402,7 @@ func TestCopyFile_Ugly(t *testing.T) { // ToTar / FromTar // --------------------------------------------------------------------------- -func TestToTar_Good(t *testing.T) { +func TestNode_ToTar_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) @@ -431,7 +429,7 @@ func TestToTar_Good(t *testing.T) { assert.Equal(t, "baz", files["bar/baz.txt"]) } -func TestFromTar_Good(t *testing.T) { 
+func TestNode_FromTar_Good(t *testing.T) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -458,14 +456,14 @@ func TestFromTar_Good(t *testing.T) { assert.True(t, n.Exists("bar/baz.txt"), "bar/baz.txt should exist") } -func TestFromTar_Bad(t *testing.T) { +func TestNode_FromTar_Bad(t *testing.T) { // Truncated data that cannot be a valid tar. truncated := make([]byte, 100) _, err := FromTar(truncated) assert.Error(t, err, "truncated data should produce an error") } -func TestTarRoundTrip_Good(t *testing.T) { +func TestNode_TarRoundTrip_Good(t *testing.T) { n1 := New() n1.AddData("a.txt", []byte("alpha")) n1.AddData("b/c.txt", []byte("charlie")) @@ -490,7 +488,7 @@ func TestTarRoundTrip_Good(t *testing.T) { // fs.FS interface compliance // --------------------------------------------------------------------------- -func TestFSInterface_Good(t *testing.T) { +func TestNode_FSInterface_Good(t *testing.T) { n := New() n.AddData("hello.txt", []byte("world")) diff --git a/s3/s3.go b/s3/s3.go index 455b15f..a0e4074 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -7,14 +7,13 @@ import ( goio "io" "io/fs" "path" - "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" ) // s3API is the subset of the S3 client API used by this package. 
@@ -47,26 +46,28 @@ func deleteObjectsError(prefix string, errs []types.Error) error { msg := aws.ToString(item.Message) switch { case code != "" && msg != "": - details = append(details, key+": "+code+" "+msg) + details = append(details, core.Concat(key, ": ", code, " ", msg)) case code != "": - details = append(details, key+": "+code) + details = append(details, core.Concat(key, ": ", code)) case msg != "": - details = append(details, key+": "+msg) + details = append(details, core.Concat(key, ": ", msg)) default: details = append(details, key) } } - return coreerr.E("s3.DeleteAll", "partial delete failed under "+prefix+": "+strings.Join(details, "; "), nil) + return core.E("s3.DeleteAll", core.Concat("partial delete failed under ", prefix, ": ", core.Join("; ", details...)), nil) } // Option configures a Medium. type Option func(*Medium) // WithPrefix sets an optional key prefix for all operations. +// +// result := s3.WithPrefix(...) func WithPrefix(prefix string) Option { return func(m *Medium) { // Ensure prefix ends with "/" if non-empty - if prefix != "" && !strings.HasSuffix(prefix, "/") { + if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } m.prefix = prefix @@ -74,6 +75,8 @@ func WithPrefix(prefix string) Option { } // WithClient sets the S3 client for dependency injection. +// +// result := s3.WithClient(...) 
func WithClient(client *s3.Client) Option { return func(m *Medium) { m.client = client @@ -95,14 +98,14 @@ func withAPI(api s3API) Option { // m, _ := s3.New("backups", s3.WithClient(awsClient), s3.WithPrefix("daily")) func New(bucket string, opts ...Option) (*Medium, error) { if bucket == "" { - return nil, coreerr.E("s3.New", "bucket name is required", nil) + return nil, core.E("s3.New", "bucket name is required", nil) } m := &Medium{bucket: bucket} for _, opt := range opts { opt(m) } if m.client == nil { - return nil, coreerr.E("s3.New", "S3 client is required (use WithClient option)", nil) + return nil, core.E("s3.New", "S3 client is required (use WithClient option)", nil) } return m, nil } @@ -115,7 +118,7 @@ func (m *Medium) key(p string) string { if clean == "/" { clean = "" } - clean = strings.TrimPrefix(clean, "/") + clean = core.TrimPrefix(clean, "/") if m.prefix == "" { return clean @@ -127,10 +130,12 @@ func (m *Medium) key(p string) string { } // Read retrieves the content of a file as a string. +// +// result := m.Read(...) func (m *Medium) Read(p string) (string, error) { key := m.key(p) if key == "" { - return "", coreerr.E("s3.Read", "path is required", fs.ErrInvalid) + return "", core.E("s3.Read", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -138,48 +143,54 @@ func (m *Medium) Read(p string) (string, error) { Key: aws.String(key), }) if err != nil { - return "", coreerr.E("s3.Read", "failed to get object: "+key, err) + return "", core.E("s3.Read", core.Concat("failed to get object: ", key), err) } defer out.Body.Close() data, err := goio.ReadAll(out.Body) if err != nil { - return "", coreerr.E("s3.Read", "failed to read body: "+key, err) + return "", core.E("s3.Read", core.Concat("failed to read body: ", key), err) } return string(data), nil } // Write saves the given content to a file, overwriting it if it exists. +// +// result := m.Write(...) 
func (m *Medium) Write(p, content string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.Write", "path is required", fs.ErrInvalid) + return core.E("s3.Write", "path is required", fs.ErrInvalid) } _, err := m.client.PutObject(context.Background(), &s3.PutObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), - Body: strings.NewReader(content), + Body: core.NewReader(content), }) if err != nil { - return coreerr.E("s3.Write", "failed to put object: "+key, err) + return core.E("s3.Write", core.Concat("failed to put object: ", key), err) } return nil } // EnsureDir is a no-op for S3 (S3 has no real directories). +// +// result := m.EnsureDir(...) func (m *Medium) EnsureDir(_ string) error { return nil } // IsFile checks if a path exists and is a regular file (not a "directory" prefix). +// +// result := m.IsFile(...) func (m *Medium) IsFile(p string) bool { key := m.key(p) if key == "" { return false } // A "file" in S3 is an object whose key does not end with "/" - if strings.HasSuffix(key, "/") { + if core.HasSuffix(key, "/") { return false } _, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ @@ -190,20 +201,26 @@ func (m *Medium) IsFile(p string) bool { } // FileGet is a convenience function that reads a file from the medium. +// +// result := m.FileGet(...) func (m *Medium) FileGet(p string) (string, error) { return m.Read(p) } // FileSet is a convenience function that writes a file to the medium. +// +// result := m.FileSet(...) func (m *Medium) FileSet(p, content string) error { return m.Write(p, content) } // Delete removes a single object. +// +// result := m.Delete(...) 
func (m *Medium) Delete(p string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.Delete", "path is required", fs.ErrInvalid) + return core.E("s3.Delete", "path is required", fs.ErrInvalid) } _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ @@ -211,16 +228,18 @@ func (m *Medium) Delete(p string) error { Key: aws.String(key), }) if err != nil { - return coreerr.E("s3.Delete", "failed to delete object: "+key, err) + return core.E("s3.Delete", core.Concat("failed to delete object: ", key), err) } return nil } // DeleteAll removes all objects under the given prefix. +// +// result := m.DeleteAll(...) func (m *Medium) DeleteAll(p string) error { key := m.key(p) if key == "" { - return coreerr.E("s3.DeleteAll", "path is required", fs.ErrInvalid) + return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } // First, try deleting the exact key @@ -229,12 +248,12 @@ func (m *Medium) DeleteAll(p string) error { Key: aws.String(key), }) if err != nil { - return coreerr.E("s3.DeleteAll", "failed to delete object: "+key, err) + return core.E("s3.DeleteAll", core.Concat("failed to delete object: ", key), err) } // Then delete all objects under the prefix prefix := key - if !strings.HasSuffix(prefix, "/") { + if !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -248,7 +267,7 @@ func (m *Medium) DeleteAll(p string) error { ContinuationToken: continuationToken, }) if err != nil { - return coreerr.E("s3.DeleteAll", "failed to list objects: "+prefix, err) + return core.E("s3.DeleteAll", core.Concat("failed to list objects: ", prefix), err) } if len(listOut.Contents) == 0 { @@ -265,7 +284,7 @@ func (m *Medium) DeleteAll(p string) error { Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) if err != nil { - return coreerr.E("s3.DeleteAll", "failed to delete objects", err) + return core.E("s3.DeleteAll", "failed to delete objects", err) } if err := deleteObjectsError(prefix, deleteOut.Errors); err != nil { return err @@ 
-282,11 +301,13 @@ func (m *Medium) DeleteAll(p string) error { } // Rename moves an object by copying then deleting the original. +// +// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldKey := m.key(oldPath) newKey := m.key(newPath) if oldKey == "" || newKey == "" { - return coreerr.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid) + return core.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid) } copySource := m.bucket + "/" + oldKey @@ -297,7 +318,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { Key: aws.String(newKey), }) if err != nil { - return coreerr.E("s3.Rename", "failed to copy object: "+oldKey+" -> "+newKey, err) + return core.E("s3.Rename", core.Concat("failed to copy object: ", oldKey, " -> ", newKey), err) } _, err = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ @@ -305,16 +326,18 @@ func (m *Medium) Rename(oldPath, newPath string) error { Key: aws.String(oldKey), }) if err != nil { - return coreerr.E("s3.Rename", "failed to delete source object: "+oldKey, err) + return core.E("s3.Rename", core.Concat("failed to delete source object: ", oldKey), err) } return nil } // List returns directory entries for the given path using ListObjectsV2 with delimiter. +// +// result := m.List(...) 
func (m *Medium) List(p string) ([]fs.DirEntry, error) { prefix := m.key(p) - if prefix != "" && !strings.HasSuffix(prefix, "/") { + if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -326,7 +349,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { Delimiter: aws.String("/"), }) if err != nil { - return nil, coreerr.E("s3.List", "failed to list objects: "+prefix, err) + return nil, core.E("s3.List", core.Concat("failed to list objects: ", prefix), err) } // Common prefixes are "directories" @@ -334,8 +357,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { if cp.Prefix == nil { continue } - name := strings.TrimPrefix(*cp.Prefix, prefix) - name = strings.TrimSuffix(name, "/") + name := core.TrimPrefix(*cp.Prefix, prefix) + name = core.TrimSuffix(name, "/") if name == "" { continue } @@ -356,8 +379,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { if obj.Key == nil { continue } - name := strings.TrimPrefix(*obj.Key, prefix) - if name == "" || strings.Contains(name, "/") { + name := core.TrimPrefix(*obj.Key, prefix) + if name == "" || core.Contains(name, "/") { continue } var size int64 @@ -385,10 +408,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { } // Stat returns file information for the given path using HeadObject. +// +// result := m.Stat(...) 
func (m *Medium) Stat(p string) (fs.FileInfo, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Stat", "path is required", fs.ErrInvalid) + return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid) } out, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ @@ -396,7 +421,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { Key: aws.String(key), }) if err != nil { - return nil, coreerr.E("s3.Stat", "failed to head object: "+key, err) + return nil, core.E("s3.Stat", core.Concat("failed to head object: ", key), err) } var size int64 @@ -418,10 +443,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { } // Open opens the named file for reading. +// +// result := m.Open(...) func (m *Medium) Open(p string) (fs.File, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Open", "path is required", fs.ErrInvalid) + return nil, core.E("s3.Open", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -429,13 +456,13 @@ func (m *Medium) Open(p string) (fs.File, error) { Key: aws.String(key), }) if err != nil { - return nil, coreerr.E("s3.Open", "failed to get object: "+key, err) + return nil, core.E("s3.Open", core.Concat("failed to get object: ", key), err) } data, err := goio.ReadAll(out.Body) out.Body.Close() if err != nil { - return nil, coreerr.E("s3.Open", "failed to read body: "+key, err) + return nil, core.E("s3.Open", core.Concat("failed to read body: ", key), err) } var size int64 @@ -457,10 +484,12 @@ func (m *Medium) Open(p string) (fs.File, error) { // Create creates or truncates the named file. Returns a writer that // uploads the content on Close. +// +// result := m.Create(...) 
func (m *Medium) Create(p string) (goio.WriteCloser, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Create", "path is required", fs.ErrInvalid) + return nil, core.E("s3.Create", "path is required", fs.ErrInvalid) } return &s3WriteCloser{ medium: m, @@ -470,10 +499,12 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { // Append opens the named file for appending. It downloads the existing // content (if any) and re-uploads the combined content on Close. +// +// result := m.Append(...) func (m *Medium) Append(p string) (goio.WriteCloser, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.Append", "path is required", fs.ErrInvalid) + return nil, core.E("s3.Append", "path is required", fs.ErrInvalid) } var existing []byte @@ -494,10 +525,12 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content. +// +// result := m.ReadStream(...) func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { key := m.key(p) if key == "" { - return nil, coreerr.E("s3.ReadStream", "path is required", fs.ErrInvalid) + return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid) } out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ @@ -505,17 +538,21 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { Key: aws.String(key), }) if err != nil { - return nil, coreerr.E("s3.ReadStream", "failed to get object: "+key, err) + return nil, core.E("s3.ReadStream", core.Concat("failed to get object: ", key), err) } return out.Body, nil } // WriteStream returns a writer for the file content. Content is uploaded on Close. +// +// result := m.WriteStream(...) func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { return m.Create(p) } // Exists checks if a path exists (file or directory prefix). +// +// result := m.Exists(...) 
func (m *Medium) Exists(p string) bool { key := m.key(p) if key == "" { @@ -533,7 +570,7 @@ func (m *Medium) Exists(p string) bool { // Check as a "directory" prefix prefix := key - if !strings.HasSuffix(prefix, "/") { + if !core.HasSuffix(prefix, "/") { prefix += "/" } listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ @@ -548,6 +585,8 @@ func (m *Medium) Exists(p string) bool { } // IsDir checks if a path exists and is a directory (has objects under it as a prefix). +// +// result := m.IsDir(...) func (m *Medium) IsDir(p string) bool { key := m.key(p) if key == "" { @@ -555,7 +594,7 @@ func (m *Medium) IsDir(p string) bool { } prefix := key - if !strings.HasSuffix(prefix, "/") { + if !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -581,12 +620,35 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } -func (fi *fileInfo) Size() int64 { return fi.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +// Name documents the Name operation. +// +// result := fi.Name(...) +func (fi *fileInfo) Name() string { return fi.name } + +// Size documents the Size operation. +// +// result := fi.Size(...) +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode documents the Mode operation. +// +// result := fi.Mode(...) +func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } + +// ModTime documents the ModTime operation. +// +// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } -func (fi *fileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := fi.IsDir(...) +func (fi *fileInfo) IsDir() bool { return fi.isDir } + +// Sys documents the Sys operation. +// +// result := fi.Sys(...) +func (fi *fileInfo) Sys() any { return nil } // dirEntry implements fs.DirEntry for S3 listings. 
type dirEntry struct { @@ -596,9 +658,24 @@ type dirEntry struct { info fs.FileInfo } -func (de *dirEntry) Name() string { return de.name } -func (de *dirEntry) IsDir() bool { return de.isDir } -func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } +// Name documents the Name operation. +// +// result := de.Name(...) +func (de *dirEntry) Name() string { return de.name } + +// IsDir documents the IsDir operation. +// +// result := de.IsDir(...) +func (de *dirEntry) IsDir() bool { return de.isDir } + +// Type documents the Type operation. +// +// result := de.Type(...) +func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } + +// Info documents the Info operation. +// +// result := de.Info(...) func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // s3File implements fs.File for S3 objects. @@ -610,6 +687,9 @@ type s3File struct { modTime time.Time } +// Stat documents the Stat operation. +// +// result := f.Stat(...) func (f *s3File) Stat() (fs.FileInfo, error) { return &fileInfo{ name: f.name, @@ -619,6 +699,9 @@ func (f *s3File) Stat() (fs.FileInfo, error) { }, nil } +// Read documents the Read operation. +// +// result := f.Read(...) func (f *s3File) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -628,6 +711,9 @@ func (f *s3File) Read(b []byte) (int, error) { return n, nil } +// Close documents the Close operation. +// +// result := f.Close(...) func (f *s3File) Close() error { return nil } @@ -639,11 +725,17 @@ type s3WriteCloser struct { data []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *s3WriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) 
func (w *s3WriteCloser) Close() error { _, err := w.medium.client.PutObject(context.Background(), &s3.PutObjectInput{ Bucket: aws.String(w.medium.bucket), @@ -651,7 +743,7 @@ func (w *s3WriteCloser) Close() error { Body: bytes.NewReader(w.data), }) if err != nil { - return coreerr.E("s3.writeCloser.Close", "failed to upload on close", err) + return core.E("s3.writeCloser.Close", "failed to upload on close", err) } return nil } diff --git a/s3/s3_test.go b/s3/s3_test.go index a81efff..b46e47b 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -3,16 +3,14 @@ package s3 import ( "bytes" "context" - "errors" - "fmt" goio "io" "io/fs" "sort" - "strings" "sync" "testing" "time" + core "dappco.re/go/core" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" @@ -45,7 +43,7 @@ func (m *mockS3) GetObject(_ context.Context, params *s3.GetObjectInput, _ ...fu key := aws.ToString(params.Key) data, ok := m.objects[key] if !ok { - return nil, fmt.Errorf("NoSuchKey: key %q not found", key) + return nil, core.E("s3test.mockS3.GetObject", core.Sprintf("NoSuchKey: key %q not found", key), fs.ErrNotExist) } mtime := m.mtimes[key] return &s3.GetObjectOutput{ @@ -106,7 +104,7 @@ func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ... 
key := aws.ToString(params.Key) data, ok := m.objects[key] if !ok { - return nil, fmt.Errorf("NotFound: key %q not found", key) + return nil, core.E("s3test.mockS3.HeadObject", core.Sprintf("NotFound: key %q not found", key), fs.ErrNotExist) } mtime := m.mtimes[key] return &s3.HeadObjectOutput{ @@ -129,7 +127,7 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input, // Collect all matching keys sorted var allKeys []string for k := range m.objects { - if strings.HasPrefix(k, prefix) { + if core.HasPrefix(k, prefix) { allKeys = append(allKeys, k) } } @@ -139,12 +137,13 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input, commonPrefixes := make(map[string]bool) for _, k := range allKeys { - rest := strings.TrimPrefix(k, prefix) + rest := core.TrimPrefix(k, prefix) if delimiter != "" { - if idx := strings.Index(rest, delimiter); idx >= 0 { + parts := core.SplitN(rest, delimiter, 2) + if len(parts) == 2 { // This key has a delimiter after the prefix -> common prefix - cp := prefix + rest[:idx+len(delimiter)] + cp := core.Concat(prefix, parts[0], delimiter) commonPrefixes[cp] = true continue } @@ -187,15 +186,15 @@ func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ... 
// CopySource is "bucket/key" source := aws.ToString(params.CopySource) - parts := strings.SplitN(source, "/", 2) + parts := core.SplitN(source, "/", 2) if len(parts) != 2 { - return nil, fmt.Errorf("invalid CopySource: %s", source) + return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("invalid CopySource: %s", source), fs.ErrInvalid) } srcKey := parts[1] data, ok := m.objects[srcKey] if !ok { - return nil, fmt.Errorf("NoSuchKey: source key %q not found", srcKey) + return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("NoSuchKey: source key %q not found", srcKey), fs.ErrNotExist) } destKey := aws.ToString(params.Key) @@ -217,7 +216,7 @@ func newTestMedium(t *testing.T) (*Medium, *mockS3) { // --- Tests --- -func TestNew_Good(t *testing.T) { +func TestS3_New_Good(t *testing.T) { mock := newMockS3() m, err := New("my-bucket", withAPI(mock)) require.NoError(t, err) @@ -225,19 +224,19 @@ func TestNew_Good(t *testing.T) { assert.Equal(t, "", m.prefix) } -func TestNew_Bad_NoBucket(t *testing.T) { +func TestS3_New_NoBucket_Bad(t *testing.T) { _, err := New("") assert.Error(t, err) assert.Contains(t, err.Error(), "bucket name is required") } -func TestNew_Bad_NoClient(t *testing.T) { +func TestS3_New_NoClient_Bad(t *testing.T) { _, err := New("bucket") assert.Error(t, err) assert.Contains(t, err.Error(), "S3 client is required") } -func TestWithPrefix_Good(t *testing.T) { +func TestS3_WithPrefix_Good(t *testing.T) { mock := newMockS3() m, err := New("bucket", withAPI(mock), WithPrefix("data/")) require.NoError(t, err) @@ -249,7 +248,7 @@ func TestWithPrefix_Good(t *testing.T) { assert.Equal(t, "data/", m2.prefix) } -func TestReadWrite_Good(t *testing.T) { +func TestS3_ReadWrite_Good(t *testing.T) { m, _ := newTestMedium(t) err := m.Write("hello.txt", "world") @@ -260,14 +259,14 @@ func TestReadWrite_Good(t *testing.T) { assert.Equal(t, "world", content) } -func TestReadWrite_Bad_NotFound(t *testing.T) { +func TestS3_ReadWrite_NotFound_Bad(t *testing.T) { m, _ 
:= newTestMedium(t) _, err := m.Read("nonexistent.txt") assert.Error(t, err) } -func TestReadWrite_Bad_EmptyPath(t *testing.T) { +func TestS3_ReadWrite_EmptyPath_Bad(t *testing.T) { m, _ := newTestMedium(t) _, err := m.Read("") @@ -277,7 +276,7 @@ func TestReadWrite_Bad_EmptyPath(t *testing.T) { assert.Error(t, err) } -func TestReadWrite_Good_WithPrefix(t *testing.T) { +func TestS3_ReadWrite_WithPrefix_Good(t *testing.T) { mock := newMockS3() m, err := New("bucket", withAPI(mock), WithPrefix("pfx")) require.NoError(t, err) @@ -294,14 +293,14 @@ func TestReadWrite_Good_WithPrefix(t *testing.T) { assert.Equal(t, "data", content) } -func TestEnsureDir_Good(t *testing.T) { +func TestS3_EnsureDir_Good(t *testing.T) { m, _ := newTestMedium(t) // EnsureDir is a no-op for S3 err := m.EnsureDir("any/path") assert.NoError(t, err) } -func TestIsFile_Good(t *testing.T) { +func TestS3_IsFile_Good(t *testing.T) { m, _ := newTestMedium(t) err := m.Write("file.txt", "content") @@ -312,7 +311,7 @@ func TestIsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestFileGetFileSet_Good(t *testing.T) { +func TestS3_FileGetFileSet_Good(t *testing.T) { m, _ := newTestMedium(t) err := m.FileSet("key.txt", "value") @@ -323,7 +322,7 @@ func TestFileGetFileSet_Good(t *testing.T) { assert.Equal(t, "value", val) } -func TestDelete_Good(t *testing.T) { +func TestS3_Delete_Good(t *testing.T) { m, _ := newTestMedium(t) err := m.Write("to-delete.txt", "content") @@ -335,13 +334,13 @@ func TestDelete_Good(t *testing.T) { assert.False(t, m.IsFile("to-delete.txt")) } -func TestDelete_Bad_EmptyPath(t *testing.T) { +func TestS3_Delete_EmptyPath_Bad(t *testing.T) { m, _ := newTestMedium(t) err := m.Delete("") assert.Error(t, err) } -func TestDeleteAll_Good(t *testing.T) { +func TestS3_DeleteAll_Good(t *testing.T) { m, _ := newTestMedium(t) // Create nested structure @@ -357,22 +356,22 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.IsFile("other.txt")) } -func 
TestDeleteAll_Bad_EmptyPath(t *testing.T) { +func TestS3_DeleteAll_EmptyPath_Bad(t *testing.T) { m, _ := newTestMedium(t) err := m.DeleteAll("") assert.Error(t, err) } -func TestDeleteAll_Bad_DeleteObjectError(t *testing.T) { +func TestS3_DeleteAll_DeleteObjectError_Bad(t *testing.T) { m, mock := newTestMedium(t) - mock.deleteObjectErrors["dir"] = errors.New("boom") + mock.deleteObjectErrors["dir"] = core.NewError("boom") err := m.DeleteAll("dir") require.Error(t, err) assert.Contains(t, err.Error(), "failed to delete object: dir") } -func TestDeleteAll_Bad_PartialDelete(t *testing.T) { +func TestS3_DeleteAll_PartialDelete_Bad(t *testing.T) { m, mock := newTestMedium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) @@ -391,7 +390,7 @@ func TestDeleteAll_Bad_PartialDelete(t *testing.T) { assert.False(t, m.IsFile("dir/file1.txt")) } -func TestRename_Good(t *testing.T) { +func TestS3_Rename_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("old.txt", "content")) @@ -408,7 +407,7 @@ func TestRename_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestRename_Bad_EmptyPath(t *testing.T) { +func TestS3_Rename_EmptyPath_Bad(t *testing.T) { m, _ := newTestMedium(t) err := m.Rename("", "new.txt") assert.Error(t, err) @@ -417,13 +416,13 @@ func TestRename_Bad_EmptyPath(t *testing.T) { assert.Error(t, err) } -func TestRename_Bad_SourceNotFound(t *testing.T) { +func TestS3_Rename_SourceNotFound_Bad(t *testing.T) { m, _ := newTestMedium(t) err := m.Rename("nonexistent.txt", "new.txt") assert.Error(t, err) } -func TestList_Good(t *testing.T) { +func TestS3_List_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) @@ -454,7 +453,7 @@ func TestList_Good(t *testing.T) { } } -func TestList_Good_Root(t *testing.T) { +func TestS3_List_Root_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("root.txt", "content")) @@ -472,7 +471,7 @@ func TestList_Good_Root(t *testing.T) { 
assert.True(t, names["dir"]) } -func TestStat_Good(t *testing.T) { +func TestS3_Stat_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("file.txt", "hello world")) @@ -484,20 +483,20 @@ func TestStat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestStat_Bad_NotFound(t *testing.T) { +func TestS3_Stat_NotFound_Bad(t *testing.T) { m, _ := newTestMedium(t) _, err := m.Stat("nonexistent.txt") assert.Error(t, err) } -func TestStat_Bad_EmptyPath(t *testing.T) { +func TestS3_Stat_EmptyPath_Bad(t *testing.T) { m, _ := newTestMedium(t) _, err := m.Stat("") assert.Error(t, err) } -func TestOpen_Good(t *testing.T) { +func TestS3_Open_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("file.txt", "open me")) @@ -515,14 +514,14 @@ func TestOpen_Good(t *testing.T) { assert.Equal(t, "file.txt", stat.Name()) } -func TestOpen_Bad_NotFound(t *testing.T) { +func TestS3_Open_NotFound_Bad(t *testing.T) { m, _ := newTestMedium(t) _, err := m.Open("nonexistent.txt") assert.Error(t, err) } -func TestCreate_Good(t *testing.T) { +func TestS3_Create_Good(t *testing.T) { m, _ := newTestMedium(t) w, err := m.Create("new.txt") @@ -540,7 +539,7 @@ func TestCreate_Good(t *testing.T) { assert.Equal(t, "created", content) } -func TestAppend_Good(t *testing.T) { +func TestS3_Append_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("append.txt", "hello")) @@ -558,7 +557,7 @@ func TestAppend_Good(t *testing.T) { assert.Equal(t, "hello world", content) } -func TestAppend_Good_NewFile(t *testing.T) { +func TestS3_Append_NewFile_Good(t *testing.T) { m, _ := newTestMedium(t) w, err := m.Append("new.txt") @@ -574,7 +573,7 @@ func TestAppend_Good_NewFile(t *testing.T) { assert.Equal(t, "fresh", content) } -func TestReadStream_Good(t *testing.T) { +func TestS3_ReadStream_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("stream.txt", "streaming content")) @@ -588,19 +587,19 @@ func TestReadStream_Good(t 
*testing.T) { assert.Equal(t, "streaming content", string(data)) } -func TestReadStream_Bad_NotFound(t *testing.T) { +func TestS3_ReadStream_NotFound_Bad(t *testing.T) { m, _ := newTestMedium(t) _, err := m.ReadStream("nonexistent.txt") assert.Error(t, err) } -func TestWriteStream_Good(t *testing.T) { +func TestS3_WriteStream_Good(t *testing.T) { m, _ := newTestMedium(t) writer, err := m.WriteStream("output.txt") require.NoError(t, err) - _, err = goio.Copy(writer, strings.NewReader("piped data")) + _, err = goio.Copy(writer, core.NewReader("piped data")) require.NoError(t, err) err = writer.Close() require.NoError(t, err) @@ -610,7 +609,7 @@ func TestWriteStream_Good(t *testing.T) { assert.Equal(t, "piped data", content) } -func TestExists_Good(t *testing.T) { +func TestS3_Exists_Good(t *testing.T) { m, _ := newTestMedium(t) assert.False(t, m.Exists("nonexistent.txt")) @@ -619,7 +618,7 @@ func TestExists_Good(t *testing.T) { assert.True(t, m.Exists("file.txt")) } -func TestExists_Good_DirectoryPrefix(t *testing.T) { +func TestS3_Exists_DirectoryPrefix_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("dir/file.txt", "content")) @@ -627,7 +626,7 @@ func TestExists_Good_DirectoryPrefix(t *testing.T) { assert.True(t, m.Exists("dir")) } -func TestIsDir_Good(t *testing.T) { +func TestS3_IsDir_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("dir/file.txt", "content")) @@ -638,7 +637,7 @@ func TestIsDir_Good(t *testing.T) { assert.False(t, m.IsDir("")) } -func TestKey_Good(t *testing.T) { +func TestS3_Key_Good(t *testing.T) { mock := newMockS3() // No prefix @@ -657,7 +656,7 @@ func TestKey_Good(t *testing.T) { } // Ugly: verify the Medium interface is satisfied at compile time. 
-func TestInterfaceCompliance_Ugly(t *testing.T) { +func TestS3_InterfaceCompliance_Ugly(t *testing.T) { mock := newMockS3() m, err := New("bucket", withAPI(mock)) require.NoError(t, err) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 2979b24..b04e0b2 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -16,21 +16,21 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" - "errors" "io" + core "dappco.re/go/core" "golang.org/x/crypto/chacha20poly1305" ) var ( // ErrInvalidKey is returned when the encryption key is invalid. - ErrInvalidKey = errors.New("sigil: invalid key size, must be 32 bytes") + ErrInvalidKey = core.E("sigil.ErrInvalidKey", "invalid key size, must be 32 bytes", nil) // ErrCiphertextTooShort is returned when the ciphertext is too short to decrypt. - ErrCiphertextTooShort = errors.New("sigil: ciphertext too short") + ErrCiphertextTooShort = core.E("sigil.ErrCiphertextTooShort", "ciphertext too short", nil) // ErrDecryptionFailed is returned when decryption or authentication fails. - ErrDecryptionFailed = errors.New("sigil: decryption failed") + ErrDecryptionFailed = core.E("sigil.ErrDecryptionFailed", "decryption failed", nil) // ErrNoKeyConfigured is returned when no encryption key has been set. - ErrNoKeyConfigured = errors.New("sigil: no encryption key configured") + ErrNoKeyConfigured = core.E("sigil.ErrNoKeyConfigured", "no encryption key configured", nil) ) // PreObfuscator applies a reversible transformation to data before encryption. @@ -62,6 +62,8 @@ type PreObfuscator interface { type XORObfuscator struct{} // Obfuscate XORs the data with a key stream derived from the entropy. +// +// result := x.Obfuscate(...) func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -70,6 +72,8 @@ func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { } // Deobfuscate reverses the XOR transformation (XOR is symmetric). 
+// +// result := x.Deobfuscate(...) func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -124,6 +128,8 @@ func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { type ShuffleMaskObfuscator struct{} // Obfuscate shuffles bytes and applies a mask derived from entropy. +// +// result := s.Obfuscate(...) func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -151,6 +157,8 @@ func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { } // Deobfuscate reverses the shuffle and mask operations. +// +// result := s.Deobfuscate(...) func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -283,6 +291,8 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch // In encrypts the data with pre-obfuscation. // The flow is: plaintext -> obfuscate -> encrypt +// +// result := s.In(...) func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { if s.Key == nil { return nil, ErrNoKeyConfigured @@ -293,7 +303,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { aead, err := chacha20poly1305.NewX(s.Key) if err != nil { - return nil, err + return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err) } // Generate nonce @@ -303,7 +313,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { reader = rand.Reader } if _, err := io.ReadFull(reader, nonce); err != nil { - return nil, err + return nil, core.E("sigil.ChaChaPolySigil.In", "read nonce", err) } // Pre-obfuscate the plaintext using nonce as entropy @@ -322,6 +332,8 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { // Out decrypts the data and reverses obfuscation. // The flow is: decrypt -> deobfuscate -> plaintext +// +// result := s.Out(...) 
func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { if s.Key == nil { return nil, ErrNoKeyConfigured @@ -332,7 +344,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { aead, err := chacha20poly1305.NewX(s.Key) if err != nil { - return nil, err + return nil, core.E("sigil.ChaChaPolySigil.Out", "create cipher", err) } minLen := aead.NonceSize() + aead.Overhead() @@ -347,7 +359,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { // Decrypt obfuscated, err := aead.Open(nil, nonce, ciphertext, nil) if err != nil { - return nil, ErrDecryptionFailed + return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", ErrDecryptionFailed) } // Deobfuscate using the same nonce as entropy @@ -366,6 +378,8 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { // GetNonceFromCiphertext extracts the nonce from encrypted output. // This is provided for debugging/logging purposes only. // The nonce should NOT be stored separately in headers. +// +// result := sigil.GetNonceFromCiphertext(...) 
func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index c87a368..5c33f5b 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -3,17 +3,17 @@ package sigil import ( "bytes" "crypto/rand" - "errors" "io" "testing" + core "dappco.re/go/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // ── XORObfuscator ────────────────────────────────────────────────── -func TestXORObfuscator_Good_RoundTrip(t *testing.T) { +func TestCryptoSigil_XORObfuscator_RoundTrip_Good(t *testing.T) { ob := &XORObfuscator{} data := []byte("the axioms are in the weights") entropy := []byte("deterministic-nonce-24bytes!") @@ -26,7 +26,7 @@ func TestXORObfuscator_Good_RoundTrip(t *testing.T) { assert.Equal(t, data, restored) } -func TestXORObfuscator_Good_DifferentEntropyDifferentOutput(t *testing.T) { +func TestCryptoSigil_XORObfuscator_DifferentEntropyDifferentOutput_Good(t *testing.T) { ob := &XORObfuscator{} data := []byte("same plaintext") @@ -35,7 +35,7 @@ func TestXORObfuscator_Good_DifferentEntropyDifferentOutput(t *testing.T) { assert.NotEqual(t, out1, out2) } -func TestXORObfuscator_Good_Deterministic(t *testing.T) { +func TestCryptoSigil_XORObfuscator_Deterministic_Good(t *testing.T) { ob := &XORObfuscator{} data := []byte("reproducible") entropy := []byte("fixed-seed") @@ -45,7 +45,7 @@ func TestXORObfuscator_Good_Deterministic(t *testing.T) { assert.Equal(t, out1, out2) } -func TestXORObfuscator_Good_LargeData(t *testing.T) { +func TestCryptoSigil_XORObfuscator_LargeData_Good(t *testing.T) { ob := &XORObfuscator{} // Larger than one SHA-256 block (32 bytes) to test multi-block key stream. 
data := make([]byte, 256) @@ -59,7 +59,7 @@ func TestXORObfuscator_Good_LargeData(t *testing.T) { assert.Equal(t, data, restored) } -func TestXORObfuscator_Good_EmptyData(t *testing.T) { +func TestCryptoSigil_XORObfuscator_EmptyData_Good(t *testing.T) { ob := &XORObfuscator{} result := ob.Obfuscate([]byte{}, []byte("entropy")) assert.Equal(t, []byte{}, result) @@ -68,7 +68,7 @@ func TestXORObfuscator_Good_EmptyData(t *testing.T) { assert.Equal(t, []byte{}, result) } -func TestXORObfuscator_Good_SymmetricProperty(t *testing.T) { +func TestCryptoSigil_XORObfuscator_SymmetricProperty_Good(t *testing.T) { ob := &XORObfuscator{} data := []byte("XOR is its own inverse") entropy := []byte("nonce") @@ -80,7 +80,7 @@ func TestXORObfuscator_Good_SymmetricProperty(t *testing.T) { // ── ShuffleMaskObfuscator ────────────────────────────────────────── -func TestShuffleMaskObfuscator_Good_RoundTrip(t *testing.T) { +func TestCryptoSigil_ShuffleMaskObfuscator_RoundTrip_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := []byte("shuffle and mask protect patterns") entropy := []byte("deterministic-entropy") @@ -93,7 +93,7 @@ func TestShuffleMaskObfuscator_Good_RoundTrip(t *testing.T) { assert.Equal(t, data, restored) } -func TestShuffleMaskObfuscator_Good_DifferentEntropy(t *testing.T) { +func TestCryptoSigil_ShuffleMaskObfuscator_DifferentEntropy_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := []byte("same data") @@ -102,7 +102,7 @@ func TestShuffleMaskObfuscator_Good_DifferentEntropy(t *testing.T) { assert.NotEqual(t, out1, out2) } -func TestShuffleMaskObfuscator_Good_Deterministic(t *testing.T) { +func TestCryptoSigil_ShuffleMaskObfuscator_Deterministic_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := []byte("reproducible shuffle") entropy := []byte("fixed") @@ -112,7 +112,7 @@ func TestShuffleMaskObfuscator_Good_Deterministic(t *testing.T) { assert.Equal(t, out1, out2) } -func TestShuffleMaskObfuscator_Good_LargeData(t *testing.T) { +func 
TestCryptoSigil_ShuffleMaskObfuscator_LargeData_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := make([]byte, 512) for i := range data { @@ -125,7 +125,7 @@ func TestShuffleMaskObfuscator_Good_LargeData(t *testing.T) { assert.Equal(t, data, restored) } -func TestShuffleMaskObfuscator_Good_EmptyData(t *testing.T) { +func TestCryptoSigil_ShuffleMaskObfuscator_EmptyData_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} result := ob.Obfuscate([]byte{}, []byte("entropy")) assert.Equal(t, []byte{}, result) @@ -134,7 +134,7 @@ func TestShuffleMaskObfuscator_Good_EmptyData(t *testing.T) { assert.Equal(t, []byte{}, result) } -func TestShuffleMaskObfuscator_Good_SingleByte(t *testing.T) { +func TestCryptoSigil_ShuffleMaskObfuscator_SingleByte_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := []byte{0x42} entropy := []byte("single") @@ -146,7 +146,7 @@ func TestShuffleMaskObfuscator_Good_SingleByte(t *testing.T) { // ── NewChaChaPolySigil ───────────────────────────────────────────── -func TestNewChaChaPolySigil_Good(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -157,7 +157,7 @@ func TestNewChaChaPolySigil_Good(t *testing.T) { assert.NotNil(t, s.Obfuscator) } -func TestNewChaChaPolySigil_Good_KeyIsCopied(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) original := make([]byte, 32) @@ -171,24 +171,24 @@ func TestNewChaChaPolySigil_Good_KeyIsCopied(t *testing.T) { assert.Equal(t, original, s.Key) } -func TestNewChaChaPolySigil_Bad_ShortKey(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil([]byte("too short")) assert.ErrorIs(t, err, ErrInvalidKey) } -func TestNewChaChaPolySigil_Bad_LongKey(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_LongKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil(make([]byte, 64)) 
assert.ErrorIs(t, err, ErrInvalidKey) } -func TestNewChaChaPolySigil_Bad_EmptyKey(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil(nil) assert.ErrorIs(t, err, ErrInvalidKey) } // ── NewChaChaPolySigilWithObfuscator ─────────────────────────────── -func TestNewChaChaPolySigilWithObfuscator_Good(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -198,7 +198,7 @@ func TestNewChaChaPolySigilWithObfuscator_Good(t *testing.T) { assert.Equal(t, ob, s.Obfuscator) } -func TestNewChaChaPolySigilWithObfuscator_Good_NilObfuscator(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_NilObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -208,14 +208,14 @@ func TestNewChaChaPolySigilWithObfuscator_Good_NilObfuscator(t *testing.T) { assert.IsType(t, &XORObfuscator{}, s.Obfuscator) } -func TestNewChaChaPolySigilWithObfuscator_Bad_InvalidKey(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_InvalidKey_Bad(t *testing.T) { _, err := NewChaChaPolySigilWithObfuscator([]byte("bad"), &XORObfuscator{}) assert.ErrorIs(t, err, ErrInvalidKey) } // ── ChaChaPolySigil In/Out (encrypt/decrypt) ─────────────────────── -func TestChaChaPolySigil_Good_RoundTrip(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -233,7 +233,7 @@ func TestChaChaPolySigil_Good_RoundTrip(t *testing.T) { assert.Equal(t, plaintext, decrypted) } -func TestChaChaPolySigil_Good_WithShuffleMask(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_WithShuffleMask_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -249,7 +249,7 @@ func TestChaChaPolySigil_Good_WithShuffleMask(t *testing.T) { assert.Equal(t, plaintext, decrypted) } -func TestChaChaPolySigil_Good_NilData(t *testing.T) { +func 
TestCryptoSigil_ChaChaPolySigil_NilData_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -265,7 +265,7 @@ func TestChaChaPolySigil_Good_NilData(t *testing.T) { assert.Nil(t, dec) } -func TestChaChaPolySigil_Good_EmptyPlaintext(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_EmptyPlaintext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -281,7 +281,7 @@ func TestChaChaPolySigil_Good_EmptyPlaintext(t *testing.T) { assert.Equal(t, []byte{}, decrypted) } -func TestChaChaPolySigil_Good_DifferentCiphertextsPerCall(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -296,7 +296,7 @@ func TestChaChaPolySigil_Good_DifferentCiphertextsPerCall(t *testing.T) { assert.NotEqual(t, ct1, ct2) } -func TestChaChaPolySigil_Bad_NoKey(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_NoKey_Bad(t *testing.T) { s := &ChaChaPolySigil{} _, err := s.In([]byte("data")) @@ -306,7 +306,7 @@ func TestChaChaPolySigil_Bad_NoKey(t *testing.T) { assert.ErrorIs(t, err, ErrNoKeyConfigured) } -func TestChaChaPolySigil_Bad_WrongKey(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) { key1 := make([]byte, 32) key2 := make([]byte, 32) _, _ = rand.Read(key1) @@ -322,7 +322,7 @@ func TestChaChaPolySigil_Bad_WrongKey(t *testing.T) { assert.ErrorIs(t, err, ErrDecryptionFailed) } -func TestChaChaPolySigil_Bad_TruncatedCiphertext(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -331,7 +331,7 @@ func TestChaChaPolySigil_Bad_TruncatedCiphertext(t *testing.T) { assert.ErrorIs(t, err, ErrCiphertextTooShort) } -func TestChaChaPolySigil_Bad_TamperedCiphertext(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -349,10 +349,10 @@ func 
TestChaChaPolySigil_Bad_TamperedCiphertext(t *testing.T) { type failReader struct{} func (f *failReader) Read([]byte) (int, error) { - return 0, errors.New("entropy source failed") + return 0, core.NewError("entropy source failed") } -func TestChaChaPolySigil_Bad_RandReaderFailure(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_RandReaderFailure_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -365,7 +365,7 @@ func TestChaChaPolySigil_Bad_RandReaderFailure(t *testing.T) { // ── ChaChaPolySigil without obfuscator ───────────────────────────── -func TestChaChaPolySigil_Good_NoObfuscator(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -383,7 +383,7 @@ func TestChaChaPolySigil_Good_NoObfuscator(t *testing.T) { // ── GetNonceFromCiphertext ───────────────────────────────────────── -func TestGetNonceFromCiphertext_Good(t *testing.T) { +func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -398,7 +398,7 @@ func TestGetNonceFromCiphertext_Good(t *testing.T) { assert.Equal(t, ciphertext[:24], nonce) } -func TestGetNonceFromCiphertext_Good_NonceCopied(t *testing.T) { +func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -414,19 +414,19 @@ func TestGetNonceFromCiphertext_Good_NonceCopied(t *testing.T) { assert.Equal(t, original, ciphertext[:24]) } -func TestGetNonceFromCiphertext_Bad_TooShort(t *testing.T) { +func TestCryptoSigil_GetNonceFromCiphertext_TooShort_Bad(t *testing.T) { _, err := GetNonceFromCiphertext([]byte("short")) assert.ErrorIs(t, err, ErrCiphertextTooShort) } -func TestGetNonceFromCiphertext_Bad_Empty(t *testing.T) { +func TestCryptoSigil_GetNonceFromCiphertext_Empty_Bad(t *testing.T) { _, err := GetNonceFromCiphertext(nil) assert.ErrorIs(t, err, ErrCiphertextTooShort) } // ── ChaChaPolySigil in Transmute pipeline 
────────────────────────── -func TestChaChaPolySigil_Good_InTransmutePipeline(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -460,16 +460,16 @@ func isHex(data []byte) bool { type failSigil struct{} -func (f *failSigil) In([]byte) ([]byte, error) { return nil, errors.New("fail in") } -func (f *failSigil) Out([]byte) ([]byte, error) { return nil, errors.New("fail out") } +func (f *failSigil) In([]byte) ([]byte, error) { return nil, core.NewError("fail in") } +func (f *failSigil) Out([]byte) ([]byte, error) { return nil, core.NewError("fail out") } -func TestTransmute_Bad_ErrorPropagation(t *testing.T) { +func TestCryptoSigil_Transmute_ErrorPropagation_Bad(t *testing.T) { _, err := Transmute([]byte("data"), []Sigil{&failSigil{}}) assert.Error(t, err) assert.Contains(t, err.Error(), "fail in") } -func TestUntransmute_Bad_ErrorPropagation(t *testing.T) { +func TestCryptoSigil_Untransmute_ErrorPropagation_Bad(t *testing.T) { _, err := Untransmute([]byte("data"), []Sigil{&failSigil{}}) assert.Error(t, err) assert.Contains(t, err.Error(), "fail out") @@ -477,7 +477,7 @@ func TestUntransmute_Bad_ErrorPropagation(t *testing.T) { // ── GzipSigil with custom writer (edge case) ────────────────────── -func TestGzipSigil_Good_CustomWriter(t *testing.T) { +func TestCryptoSigil_GzipSigil_CustomWriter_Good(t *testing.T) { var buf bytes.Buffer s := &GzipSigil{writer: &buf} @@ -490,7 +490,7 @@ func TestGzipSigil_Good_CustomWriter(t *testing.T) { // ── deriveKeyStream edge: exactly 32 bytes ───────────────────────── -func TestDeriveKeyStream_Good_ExactBlockSize(t *testing.T) { +func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) { ob := &XORObfuscator{} data := make([]byte, 32) // Exactly one SHA-256 block. 
for i := range data { @@ -505,7 +505,7 @@ func TestDeriveKeyStream_Good_ExactBlockSize(t *testing.T) { // ── io.Reader fallback in In ─────────────────────────────────────── -func TestChaChaPolySigil_Good_NilRandReader(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_NilRandReader_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) diff --git a/sigil/sigil.go b/sigil/sigil.go index d7a39dc..46f7990 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -12,6 +12,8 @@ // result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil}) package sigil +import core "dappco.re/go/core" + // Sigil defines the interface for a data transformer. // // A Sigil represents a single transformation unit that can be applied to byte data. @@ -43,12 +45,14 @@ type Sigil interface { // stops immediately and returns nil with that error. // // To reverse a transmutation, call each sigil's Out method in reverse order. +// +// result := sigil.Transmute(...) func Transmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for _, s := range sigils { data, err = s.In(data) if err != nil { - return nil, err + return nil, core.E("sigil.Transmute", "sigil in failed", err) } } return data, nil @@ -59,12 +63,14 @@ func Transmute(data []byte, sigils []Sigil) ([]byte, error) { // Each sigil's Out method is called in reverse order, with the output of one sigil // becoming the input of the next. If any sigil returns an error, Untransmute // stops immediately and returns nil with that error. +// +// result := sigil.Untransmute(...) 
func Untransmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for i := len(sigils) - 1; i >= 0; i-- { data, err = sigils[i].Out(data) if err != nil { - return nil, err + return nil, core.E("sigil.Untransmute", "sigil out failed", err) } } return data, nil diff --git a/sigil/sigil_test.go b/sigil/sigil_test.go index 46627e1..98c9759 100644 --- a/sigil/sigil_test.go +++ b/sigil/sigil_test.go @@ -17,7 +17,7 @@ import ( // ReverseSigil // --------------------------------------------------------------------------- -func TestReverseSigil_Good(t *testing.T) { +func TestSigil_ReverseSigil_Good(t *testing.T) { s := &ReverseSigil{} out, err := s.In([]byte("hello")) @@ -30,7 +30,7 @@ func TestReverseSigil_Good(t *testing.T) { assert.Equal(t, []byte("hello"), restored) } -func TestReverseSigil_Bad(t *testing.T) { +func TestSigil_ReverseSigil_Bad(t *testing.T) { s := &ReverseSigil{} // Empty input returns empty. @@ -39,7 +39,7 @@ func TestReverseSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestReverseSigil_Ugly(t *testing.T) { +func TestSigil_ReverseSigil_Ugly(t *testing.T) { s := &ReverseSigil{} // Nil input returns nil. @@ -56,7 +56,7 @@ func TestReverseSigil_Ugly(t *testing.T) { // HexSigil // --------------------------------------------------------------------------- -func TestHexSigil_Good(t *testing.T) { +func TestSigil_HexSigil_Good(t *testing.T) { s := &HexSigil{} data := []byte("hello world") @@ -69,7 +69,7 @@ func TestHexSigil_Good(t *testing.T) { assert.Equal(t, data, decoded) } -func TestHexSigil_Bad(t *testing.T) { +func TestSigil_HexSigil_Bad(t *testing.T) { s := &HexSigil{} // Invalid hex input. 
@@ -82,7 +82,7 @@ func TestHexSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestHexSigil_Ugly(t *testing.T) { +func TestSigil_HexSigil_Ugly(t *testing.T) { s := &HexSigil{} out, err := s.In(nil) @@ -98,7 +98,7 @@ func TestHexSigil_Ugly(t *testing.T) { // Base64Sigil // --------------------------------------------------------------------------- -func TestBase64Sigil_Good(t *testing.T) { +func TestSigil_Base64Sigil_Good(t *testing.T) { s := &Base64Sigil{} data := []byte("composable transforms") @@ -111,7 +111,7 @@ func TestBase64Sigil_Good(t *testing.T) { assert.Equal(t, data, decoded) } -func TestBase64Sigil_Bad(t *testing.T) { +func TestSigil_Base64Sigil_Bad(t *testing.T) { s := &Base64Sigil{} // Invalid base64 (wrong padding). @@ -124,7 +124,7 @@ func TestBase64Sigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestBase64Sigil_Ugly(t *testing.T) { +func TestSigil_Base64Sigil_Ugly(t *testing.T) { s := &Base64Sigil{} out, err := s.In(nil) @@ -140,7 +140,7 @@ func TestBase64Sigil_Ugly(t *testing.T) { // GzipSigil // --------------------------------------------------------------------------- -func TestGzipSigil_Good(t *testing.T) { +func TestSigil_GzipSigil_Good(t *testing.T) { s := &GzipSigil{} data := []byte("the quick brown fox jumps over the lazy dog") @@ -153,7 +153,7 @@ func TestGzipSigil_Good(t *testing.T) { assert.Equal(t, data, decompressed) } -func TestGzipSigil_Bad(t *testing.T) { +func TestSigil_GzipSigil_Bad(t *testing.T) { s := &GzipSigil{} // Invalid gzip data. 
@@ -170,7 +170,7 @@ func TestGzipSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, decompressed) } -func TestGzipSigil_Ugly(t *testing.T) { +func TestSigil_GzipSigil_Ugly(t *testing.T) { s := &GzipSigil{} out, err := s.In(nil) @@ -186,7 +186,7 @@ func TestGzipSigil_Ugly(t *testing.T) { // JSONSigil // --------------------------------------------------------------------------- -func TestJSONSigil_Good(t *testing.T) { +func TestSigil_JSONSigil_Good(t *testing.T) { s := &JSONSigil{Indent: false} data := []byte(`{ "key" : "value" }`) @@ -200,7 +200,7 @@ func TestJSONSigil_Good(t *testing.T) { assert.Equal(t, compacted, passthrough) } -func TestJSONSigil_Good_Indent(t *testing.T) { +func TestSigil_JSONSigil_Indent_Good(t *testing.T) { s := &JSONSigil{Indent: true} data := []byte(`{"key":"value"}`) @@ -210,7 +210,7 @@ func TestJSONSigil_Good_Indent(t *testing.T) { assert.Contains(t, string(indented), " ") } -func TestJSONSigil_Bad(t *testing.T) { +func TestSigil_JSONSigil_Bad(t *testing.T) { s := &JSONSigil{Indent: false} // Invalid JSON. @@ -218,15 +218,16 @@ func TestJSONSigil_Bad(t *testing.T) { assert.Error(t, err) } -func TestJSONSigil_Ugly(t *testing.T) { +func TestSigil_JSONSigil_Ugly(t *testing.T) { s := &JSONSigil{Indent: false} - // json.Compact on nil/empty will produce an error (invalid JSON). - _, err := s.In(nil) - assert.Error(t, err) + // Nil input is passed through without error, matching the Sigil contract. + out, err := s.In(nil) + require.NoError(t, err) + assert.Nil(t, out) // Out with nil is passthrough. 
- out, err := s.Out(nil) + out, err = s.Out(nil) require.NoError(t, err) assert.Nil(t, out) } @@ -235,7 +236,7 @@ func TestJSONSigil_Ugly(t *testing.T) { // HashSigil // --------------------------------------------------------------------------- -func TestHashSigil_Good(t *testing.T) { +func TestSigil_HashSigil_Good(t *testing.T) { data := []byte("hash me") tests := []struct { @@ -280,7 +281,7 @@ func TestHashSigil_Good(t *testing.T) { } } -func TestHashSigil_Bad(t *testing.T) { +func TestSigil_HashSigil_Bad(t *testing.T) { // Unsupported hash constant. s := &HashSigil{Hash: 0} _, err := s.In([]byte("data")) @@ -288,7 +289,7 @@ func TestHashSigil_Bad(t *testing.T) { assert.Contains(t, err.Error(), "not available") } -func TestHashSigil_Ugly(t *testing.T) { +func TestSigil_HashSigil_Ugly(t *testing.T) { // Hashing empty data should still produce a valid digest. s, err := NewSigil("sha256") require.NoError(t, err) @@ -302,7 +303,7 @@ func TestHashSigil_Ugly(t *testing.T) { // NewSigil factory // --------------------------------------------------------------------------- -func TestNewSigil_Good(t *testing.T) { +func TestSigil_NewSigil_Good(t *testing.T) { names := []string{ "reverse", "hex", "base64", "gzip", "json", "json-indent", "md4", "md5", "sha1", "sha224", "sha256", "sha384", "sha512", @@ -321,13 +322,13 @@ func TestNewSigil_Good(t *testing.T) { } } -func TestNewSigil_Bad(t *testing.T) { +func TestSigil_NewSigil_Bad(t *testing.T) { _, err := NewSigil("nonexistent") assert.Error(t, err) assert.Contains(t, err.Error(), "unknown sigil name") } -func TestNewSigil_Ugly(t *testing.T) { +func TestSigil_NewSigil_Ugly(t *testing.T) { _, err := NewSigil("") assert.Error(t, err) } @@ -336,7 +337,7 @@ func TestNewSigil_Ugly(t *testing.T) { // Transmute / Untransmute // --------------------------------------------------------------------------- -func TestTransmute_Good(t *testing.T) { +func TestSigil_Transmute_Good(t *testing.T) { data := []byte("round trip") hexSigil, err 
:= NewSigil("hex") @@ -355,7 +356,7 @@ func TestTransmute_Good(t *testing.T) { assert.Equal(t, data, decoded) } -func TestTransmute_Good_MultiSigil(t *testing.T) { +func TestSigil_Transmute_MultiSigil_Good(t *testing.T) { data := []byte("multi sigil pipeline test data") reverseSigil, err := NewSigil("reverse") @@ -375,7 +376,7 @@ func TestTransmute_Good_MultiSigil(t *testing.T) { assert.Equal(t, data, decoded) } -func TestTransmute_Good_GzipRoundTrip(t *testing.T) { +func TestSigil_Transmute_GzipRoundTrip_Good(t *testing.T) { data := []byte("compress then encode then decode then decompress") gzipSigil, err := NewSigil("gzip") @@ -393,7 +394,7 @@ func TestTransmute_Good_GzipRoundTrip(t *testing.T) { assert.Equal(t, data, decoded) } -func TestTransmute_Bad(t *testing.T) { +func TestSigil_Transmute_Bad(t *testing.T) { // Transmute with a sigil that will fail: hex decode on non-hex input. hexSigil := &HexSigil{} @@ -402,7 +403,7 @@ func TestTransmute_Bad(t *testing.T) { assert.Error(t, err) } -func TestTransmute_Ugly(t *testing.T) { +func TestSigil_Transmute_Ugly(t *testing.T) { // Empty sigil chain is a no-op. data := []byte("unchanged") diff --git a/sigil/sigils.go b/sigil/sigils.go index 2baffff..d5a66ab 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -10,10 +10,9 @@ import ( "crypto/sha512" "encoding/base64" "encoding/hex" - "encoding/json" "io" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2s" "golang.org/x/crypto/md4" @@ -26,6 +25,8 @@ import ( type ReverseSigil struct{} // In reverses the bytes of the data. +// +// result := s.In(...) func (s *ReverseSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -38,6 +39,8 @@ func (s *ReverseSigil) In(data []byte) ([]byte, error) { } // Out reverses the bytes of the data. +// +// result := s.Out(...) 
func (s *ReverseSigil) Out(data []byte) ([]byte, error) { return s.In(data) } @@ -47,6 +50,8 @@ func (s *ReverseSigil) Out(data []byte) ([]byte, error) { type HexSigil struct{} // In encodes the data to hexadecimal. +// +// result := s.In(...) func (s *HexSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -57,6 +62,8 @@ func (s *HexSigil) In(data []byte) ([]byte, error) { } // Out decodes the data from hexadecimal. +// +// result := s.Out(...) func (s *HexSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -71,6 +78,8 @@ func (s *HexSigil) Out(data []byte) ([]byte, error) { type Base64Sigil struct{} // In encodes the data to base64. +// +// result := s.In(...) func (s *Base64Sigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -81,6 +90,8 @@ func (s *Base64Sigil) In(data []byte) ([]byte, error) { } // Out decodes the data from base64. +// +// result := s.Out(...) func (s *Base64Sigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -97,6 +108,8 @@ type GzipSigil struct { } // In compresses the data using gzip. +// +// result := s.In(...) func (s *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -108,25 +121,31 @@ func (s *GzipSigil) In(data []byte) ([]byte, error) { } gz := gzip.NewWriter(w) if _, err := gz.Write(data); err != nil { - return nil, err + return nil, core.E("sigil.GzipSigil.In", "write gzip payload", err) } if err := gz.Close(); err != nil { - return nil, err + return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err) } return b.Bytes(), nil } // Out decompresses the data using gzip. +// +// result := s.Out(...) 
func (s *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } r, err := gzip.NewReader(bytes.NewReader(data)) if err != nil { - return nil, err + return nil, core.E("sigil.GzipSigil.Out", "open gzip reader", err) } defer r.Close() - return io.ReadAll(r) + out, err := io.ReadAll(r) + if err != nil { + return nil, core.E("sigil.GzipSigil.Out", "read gzip payload", err) + } + return out, nil } // JSONSigil is a Sigil that compacts or indents JSON data. @@ -134,18 +153,32 @@ func (s *GzipSigil) Out(data []byte) ([]byte, error) { type JSONSigil struct{ Indent bool } // In compacts or indents the JSON data. +// +// result := s.In(...) func (s *JSONSigil) In(data []byte) ([]byte, error) { + if data == nil { + return nil, nil + } + + var decoded any + result := core.JSONUnmarshal(data, &decoded) + if !result.OK { + if err, ok := result.Value.(error); ok { + return nil, core.E("sigil.JSONSigil.In", "decode json", err) + } + return nil, core.E("sigil.JSONSigil.In", "decode json", nil) + } + + compact := core.JSONMarshalString(decoded) if s.Indent { - var out bytes.Buffer - err := json.Indent(&out, data, "", " ") - return out.Bytes(), err + return []byte(indentJSON(compact)), nil } - var out bytes.Buffer - err := json.Compact(&out, data) - return out.Bytes(), err + return []byte(compact), nil } // Out is a no-op for JSONSigil. +// +// result := s.Out(...) func (s *JSONSigil) Out(data []byte) ([]byte, error) { // For simplicity, Out is a no-op. The primary use is formatting. return data, nil @@ -158,11 +191,15 @@ type HashSigil struct { } // NewHashSigil creates a new HashSigil. +// +// result := sigil.NewHashSigil(...) func NewHashSigil(h crypto.Hash) *HashSigil { return &HashSigil{Hash: h} } // In hashes the data. +// +// result := s.In(...) 
func (s *HashSigil) In(data []byte) ([]byte, error) { var h io.Writer switch s.Hash { @@ -204,7 +241,7 @@ func (s *HashSigil) In(data []byte) ([]byte, error) { h, _ = blake2b.New512(nil) default: // MD5SHA1 is not supported as a direct hash - return nil, coreerr.E("sigil.HashSigil.In", "hash algorithm not available", nil) + return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", nil) } h.Write(data) @@ -212,12 +249,16 @@ func (s *HashSigil) In(data []byte) ([]byte, error) { } // Out is a no-op for HashSigil. +// +// result := s.Out(...) func (s *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } // NewSigil is a factory function that returns a Sigil based on a string name. // It is the primary way to create Sigil instances. +// +// result := sigil.NewSigil(...) func NewSigil(name string) (Sigil, error) { switch name { case "reverse": @@ -269,6 +310,72 @@ func NewSigil(name string) (Sigil, error) { case "blake2b-512": return NewHashSigil(crypto.BLAKE2b_512), nil default: - return nil, coreerr.E("sigil.NewSigil", "unknown sigil name: "+name, nil) + return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", name), nil) + } +} + +func indentJSON(compact string) string { + if compact == "" { + return "" + } + + builder := core.NewBuilder() + indent := 0 + inString := false + escaped := false + + writeIndent := func(level int) { + for i := 0; i < level; i++ { + builder.WriteString(" ") + } } + + for i := 0; i < len(compact); i++ { + ch := compact[i] + if inString { + builder.WriteByte(ch) + if escaped { + escaped = false + continue + } + if ch == '\\' { + escaped = true + continue + } + if ch == '"' { + inString = false + } + continue + } + + switch ch { + case '"': + inString = true + builder.WriteByte(ch) + case '{', '[': + builder.WriteByte(ch) + if i+1 < len(compact) && compact[i+1] != '}' && compact[i+1] != ']' { + indent++ + builder.WriteByte('\n') + writeIndent(indent) + } + case '}', ']': + if i > 0 && compact[i-1] != 
'{' && compact[i-1] != '[' { + indent-- + builder.WriteByte('\n') + writeIndent(indent) + } + builder.WriteByte(ch) + case ',': + builder.WriteByte(ch) + builder.WriteByte('\n') + writeIndent(indent) + case ':': + builder.WriteString(": ") + default: + builder.WriteByte(ch) + } + } + + return builder.String() } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index bd36ba7..e948fa0 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -7,10 +7,9 @@ import ( goio "io" "io/fs" "path" - "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" _ "modernc.org/sqlite" // Pure Go SQLite driver ) @@ -25,6 +24,8 @@ type Medium struct { type Option func(*Medium) // WithTable sets the table name (default: "files"). +// +// result := sqlite.WithTable(...) func WithTable(table string) Option { return func(m *Medium) { m.table = table @@ -40,7 +41,7 @@ func WithTable(table string) Option { // _ = m.Write("config/app.yaml", "port: 8080") func New(dbPath string, opts ...Option) (*Medium, error) { if dbPath == "" { - return nil, coreerr.E("sqlite.New", "database path is required", nil) + return nil, core.E("sqlite.New", "database path is required", nil) } m := &Medium{table: "files"} @@ -50,13 +51,13 @@ func New(dbPath string, opts ...Option) (*Medium, error) { db, err := sql.Open("sqlite", dbPath) if err != nil { - return nil, coreerr.E("sqlite.New", "failed to open database", err) + return nil, core.E("sqlite.New", "failed to open database", err) } // Enable WAL mode for better concurrency if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { db.Close() - return nil, coreerr.E("sqlite.New", "failed to set WAL mode", err) + return nil, core.E("sqlite.New", "failed to set WAL mode", err) } // Create the schema @@ -69,7 +70,7 @@ func New(dbPath string, opts ...Option) (*Medium, error) { )` if _, err := db.Exec(createSQL); err != nil { db.Close() - return nil, coreerr.E("sqlite.New", "failed to create table", err) + return nil, core.E("sqlite.New", 
"failed to create table", err) } m.db = db @@ -77,6 +78,8 @@ func New(dbPath string, opts ...Option) (*Medium, error) { } // Close closes the underlying database connection. +// +// result := m.Close(...) func (m *Medium) Close() error { if m.db != nil { return m.db.Close() @@ -91,14 +94,16 @@ func cleanPath(p string) string { if clean == "/" { return "" } - return strings.TrimPrefix(clean, "/") + return core.TrimPrefix(clean, "/") } // Read retrieves the content of a file as a string. +// +// result := m.Read(...) func (m *Medium) Read(p string) (string, error) { key := cleanPath(p) if key == "" { - return "", coreerr.E("sqlite.Read", "path is required", fs.ErrInvalid) + return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid) } var content []byte @@ -107,22 +112,24 @@ func (m *Medium) Read(p string) (string, error) { `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { - return "", coreerr.E("sqlite.Read", "file not found: "+key, fs.ErrNotExist) + return "", core.E("sqlite.Read", core.Concat("file not found: ", key), fs.ErrNotExist) } if err != nil { - return "", coreerr.E("sqlite.Read", "query failed: "+key, err) + return "", core.E("sqlite.Read", core.Concat("query failed: ", key), err) } if isDir { - return "", coreerr.E("sqlite.Read", "path is a directory: "+key, fs.ErrInvalid) + return "", core.E("sqlite.Read", core.Concat("path is a directory: ", key), fs.ErrInvalid) } return string(content), nil } // Write saves the given content to a file, overwriting it if it exists. +// +// result := m.Write(...) 
func (m *Medium) Write(p, content string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.Write", "path is required", fs.ErrInvalid) + return core.E("sqlite.Write", "path is required", fs.ErrInvalid) } _, err := m.db.Exec( @@ -131,12 +138,14 @@ func (m *Medium) Write(p, content string) error { key, []byte(content), time.Now().UTC(), ) if err != nil { - return coreerr.E("sqlite.Write", "insert failed: "+key, err) + return core.E("sqlite.Write", core.Concat("insert failed: ", key), err) } return nil } // EnsureDir makes sure a directory exists, creating it if necessary. +// +// result := m.EnsureDir(...) func (m *Medium) EnsureDir(p string) error { key := cleanPath(p) if key == "" { @@ -150,12 +159,14 @@ func (m *Medium) EnsureDir(p string) error { key, time.Now().UTC(), ) if err != nil { - return coreerr.E("sqlite.EnsureDir", "insert failed: "+key, err) + return core.E("sqlite.EnsureDir", core.Concat("insert failed: ", key), err) } return nil } // IsFile checks if a path exists and is a regular file. +// +// result := m.IsFile(...) func (m *Medium) IsFile(p string) bool { key := cleanPath(p) if key == "" { @@ -173,20 +184,26 @@ func (m *Medium) IsFile(p string) bool { } // FileGet is a convenience function that reads a file from the medium. +// +// result := m.FileGet(...) func (m *Medium) FileGet(p string) (string, error) { return m.Read(p) } // FileSet is a convenience function that writes a file to the medium. +// +// result := m.FileSet(...) func (m *Medium) FileSet(p, content string) error { return m.Write(p, content) } // Delete removes a file or empty directory. +// +// result := m.Delete(...) 
func (m *Medium) Delete(p string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.Delete", "path is required", fs.ErrInvalid) + return core.E("sqlite.Delete", "path is required", fs.ErrInvalid) } // Check if it's a directory with children @@ -195,10 +212,10 @@ func (m *Medium) Delete(p string) error { `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&isDir) if err == sql.ErrNoRows { - return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist) + return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist) } if err != nil { - return coreerr.E("sqlite.Delete", "query failed: "+key, err) + return core.E("sqlite.Delete", core.Concat("query failed: ", key), err) } if isDir { @@ -209,29 +226,31 @@ func (m *Medium) Delete(p string) error { `SELECT COUNT(*) FROM `+m.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key, ).Scan(&count) if err != nil { - return coreerr.E("sqlite.Delete", "count failed: "+key, err) + return core.E("sqlite.Delete", core.Concat("count failed: ", key), err) } if count > 0 { - return coreerr.E("sqlite.Delete", "directory not empty: "+key, fs.ErrExist) + return core.E("sqlite.Delete", core.Concat("directory not empty: ", key), fs.ErrExist) } } res, err := m.db.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key) if err != nil { - return coreerr.E("sqlite.Delete", "delete failed: "+key, err) + return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err) } n, _ := res.RowsAffected() if n == 0 { - return coreerr.E("sqlite.Delete", "path not found: "+key, fs.ErrNotExist) + return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist) } return nil } // DeleteAll removes a file or directory and all its contents recursively. +// +// result := m.DeleteAll(...) 
func (m *Medium) DeleteAll(p string) error { key := cleanPath(p) if key == "" { - return coreerr.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) + return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) } prefix := key + "/" @@ -242,26 +261,28 @@ func (m *Medium) DeleteAll(p string) error { key, prefix+"%", ) if err != nil { - return coreerr.E("sqlite.DeleteAll", "delete failed: "+key, err) + return core.E("sqlite.DeleteAll", core.Concat("delete failed: ", key), err) } n, _ := res.RowsAffected() if n == 0 { - return coreerr.E("sqlite.DeleteAll", "path not found: "+key, fs.ErrNotExist) + return core.E("sqlite.DeleteAll", core.Concat("path not found: ", key), fs.ErrNotExist) } return nil } // Rename moves a file or directory from oldPath to newPath. +// +// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldKey := cleanPath(oldPath) newKey := cleanPath(newPath) if oldKey == "" || newKey == "" { - return coreerr.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) + return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) } tx, err := m.db.Begin() if err != nil { - return coreerr.E("sqlite.Rename", "begin tx failed", err) + return core.E("sqlite.Rename", "begin tx failed", err) } defer tx.Rollback() @@ -274,10 +295,10 @@ func (m *Medium) Rename(oldPath, newPath string) error { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, oldKey, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return coreerr.E("sqlite.Rename", "source not found: "+oldKey, fs.ErrNotExist) + return core.E("sqlite.Rename", core.Concat("source not found: ", oldKey), fs.ErrNotExist) } if err != nil { - return coreerr.E("sqlite.Rename", "query failed: "+oldKey, err) + return core.E("sqlite.Rename", core.Concat("query failed: ", oldKey), err) } // Insert or replace at new path @@ -287,13 +308,13 @@ func (m *Medium) Rename(oldPath, newPath string) error { newKey, 
content, mode, isDir, mtime, ) if err != nil { - return coreerr.E("sqlite.Rename", "insert at new path failed: "+newKey, err) + return core.E("sqlite.Rename", core.Concat("insert at new path failed: ", newKey), err) } // Delete old path _, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, oldKey) if err != nil { - return coreerr.E("sqlite.Rename", "delete old path failed: "+oldKey, err) + return core.E("sqlite.Rename", core.Concat("delete old path failed: ", oldKey), err) } // If it's a directory, move all children @@ -306,7 +327,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPrefix+"%", ) if err != nil { - return coreerr.E("sqlite.Rename", "query children failed", err) + return core.E("sqlite.Rename", "query children failed", err) } type child struct { @@ -321,28 +342,28 @@ func (m *Medium) Rename(oldPath, newPath string) error { var c child if err := rows.Scan(&c.path, &c.content, &c.mode, &c.isDir, &c.mtime); err != nil { rows.Close() - return coreerr.E("sqlite.Rename", "scan child failed", err) + return core.E("sqlite.Rename", "scan child failed", err) } children = append(children, c) } rows.Close() for _, c := range children { - newChildPath := newPrefix + strings.TrimPrefix(c.path, oldPrefix) + newChildPath := core.Concat(newPrefix, core.TrimPrefix(c.path, oldPrefix)) _, err = tx.Exec( `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`, newChildPath, c.content, c.mode, c.isDir, c.mtime, ) if err != nil { - return coreerr.E("sqlite.Rename", "insert child failed", err) + return core.E("sqlite.Rename", "insert child failed", err) } } // Delete old children _, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path LIKE ?`, oldPrefix+"%") if err != nil { - return coreerr.E("sqlite.Rename", "delete old children failed", err) + return core.E("sqlite.Rename", "delete old children failed", err) } } @@ -350,6 +371,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // List returns the directory entries for the given path. +// +// result := m.List(...) func (m *Medium) List(p string) ([]fs.DirEntry, error) { prefix := cleanPath(p) if prefix != "" { @@ -362,7 +385,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { prefix+"%", prefix+"%", ) if err != nil { - return nil, coreerr.E("sqlite.List", "query failed", err) + return nil, core.E("sqlite.List", "query failed", err) } defer rows.Close() @@ -376,18 +399,19 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { var isDir bool var mtime time.Time if err := rows.Scan(&rowPath, &content, &mode, &isDir, &mtime); err != nil { - return nil, coreerr.E("sqlite.List", "scan failed", err) + return nil, core.E("sqlite.List", "scan failed", err) } - rest := strings.TrimPrefix(rowPath, prefix) + rest := core.TrimPrefix(rowPath, prefix) if rest == "" { continue } // Check if this is a direct child or nested - if idx := strings.Index(rest, "/"); idx >= 0 { + parts := core.SplitN(rest, "/", 2) + if len(parts) == 2 { // Nested - register as a directory - dirName := rest[:idx] + dirName := parts[0] if !seen[dirName] { seen[dirName] = true entries = append(entries, &dirEntry{ @@ -425,10 +449,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { } // Stat returns file information for the given path. 
+// +// result := m.Stat(...) func (m *Medium) Stat(p string) (fs.FileInfo, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Stat", "path is required", fs.ErrInvalid) + return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid) } var content []byte @@ -439,10 +465,10 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.Stat", "path not found: "+key, fs.ErrNotExist) + return nil, core.E("sqlite.Stat", core.Concat("path not found: ", key), fs.ErrNotExist) } if err != nil { - return nil, coreerr.E("sqlite.Stat", "query failed: "+key, err) + return nil, core.E("sqlite.Stat", core.Concat("query failed: ", key), err) } name := path.Base(key) @@ -456,10 +482,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { } // Open opens the named file for reading. +// +// result := m.Open(...) 
func (m *Medium) Open(p string) (fs.File, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Open", "path is required", fs.ErrInvalid) + return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid) } var content []byte @@ -470,13 +498,13 @@ func (m *Medium) Open(p string) (fs.File, error) { `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.Open", "file not found: "+key, fs.ErrNotExist) + return nil, core.E("sqlite.Open", core.Concat("file not found: ", key), fs.ErrNotExist) } if err != nil { - return nil, coreerr.E("sqlite.Open", "query failed: "+key, err) + return nil, core.E("sqlite.Open", core.Concat("query failed: ", key), err) } if isDir { - return nil, coreerr.E("sqlite.Open", "path is a directory: "+key, fs.ErrInvalid) + return nil, core.E("sqlite.Open", core.Concat("path is a directory: ", key), fs.ErrInvalid) } return &sqliteFile{ @@ -488,10 +516,12 @@ func (m *Medium) Open(p string) (fs.File, error) { } // Create creates or truncates the named file. +// +// result := m.Create(...) func (m *Medium) Create(p string) (goio.WriteCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Create", "path is required", fs.ErrInvalid) + return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid) } return &sqliteWriteCloser{ medium: m, @@ -500,10 +530,12 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { } // Append opens the named file for appending, creating it if it doesn't exist. +// +// result := m.Append(...) 
func (m *Medium) Append(p string) (goio.WriteCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.Append", "path is required", fs.ErrInvalid) + return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid) } var existing []byte @@ -511,7 +543,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { `SELECT content FROM `+m.table+` WHERE path = ? AND is_dir = FALSE`, key, ).Scan(&existing) if err != nil && err != sql.ErrNoRows { - return nil, coreerr.E("sqlite.Append", "query failed: "+key, err) + return nil, core.E("sqlite.Append", core.Concat("query failed: ", key), err) } return &sqliteWriteCloser{ @@ -522,10 +554,12 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content. +// +// result := m.ReadStream(...) func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { key := cleanPath(p) if key == "" { - return nil, coreerr.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) + return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) } var content []byte @@ -534,24 +568,28 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { - return nil, coreerr.E("sqlite.ReadStream", "file not found: "+key, fs.ErrNotExist) + return nil, core.E("sqlite.ReadStream", core.Concat("file not found: ", key), fs.ErrNotExist) } if err != nil { - return nil, coreerr.E("sqlite.ReadStream", "query failed: "+key, err) + return nil, core.E("sqlite.ReadStream", core.Concat("query failed: ", key), err) } if isDir { - return nil, coreerr.E("sqlite.ReadStream", "path is a directory: "+key, fs.ErrInvalid) + return nil, core.E("sqlite.ReadStream", core.Concat("path is a directory: ", key), fs.ErrInvalid) } return goio.NopCloser(bytes.NewReader(content)), nil } // WriteStream returns a writer for the file content. 
Content is stored on Close. +// +// result := m.WriteStream(...) func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { return m.Create(p) } // Exists checks if a path exists (file or directory). +// +// result := m.Exists(...) func (m *Medium) Exists(p string) bool { key := cleanPath(p) if key == "" { @@ -570,6 +608,8 @@ func (m *Medium) Exists(p string) bool { } // IsDir checks if a path exists and is a directory. +// +// result := m.IsDir(...) func (m *Medium) IsDir(p string) bool { key := cleanPath(p) if key == "" { @@ -597,12 +637,35 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } -func (fi *fileInfo) Size() int64 { return fi.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +// Name documents the Name operation. +// +// result := fi.Name(...) +func (fi *fileInfo) Name() string { return fi.name } + +// Size documents the Size operation. +// +// result := fi.Size(...) +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode documents the Mode operation. +// +// result := fi.Mode(...) +func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } + +// ModTime documents the ModTime operation. +// +// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } -func (fi *fileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := fi.IsDir(...) +func (fi *fileInfo) IsDir() bool { return fi.isDir } + +// Sys documents the Sys operation. +// +// result := fi.Sys(...) +func (fi *fileInfo) Sys() any { return nil } // dirEntry implements fs.DirEntry for SQLite listings. type dirEntry struct { @@ -612,9 +675,24 @@ type dirEntry struct { info fs.FileInfo } -func (de *dirEntry) Name() string { return de.name } -func (de *dirEntry) IsDir() bool { return de.isDir } -func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } +// Name documents the Name operation. 
+// +// result := de.Name(...) +func (de *dirEntry) Name() string { return de.name } + +// IsDir documents the IsDir operation. +// +// result := de.IsDir(...) +func (de *dirEntry) IsDir() bool { return de.isDir } + +// Type documents the Type operation. +// +// result := de.Type(...) +func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } + +// Info documents the Info operation. +// +// result := de.Info(...) func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // sqliteFile implements fs.File for SQLite entries. @@ -626,6 +704,9 @@ type sqliteFile struct { modTime time.Time } +// Stat documents the Stat operation. +// +// result := f.Stat(...) func (f *sqliteFile) Stat() (fs.FileInfo, error) { return &fileInfo{ name: f.name, @@ -635,6 +716,9 @@ func (f *sqliteFile) Stat() (fs.FileInfo, error) { }, nil } +// Read documents the Read operation. +// +// result := f.Read(...) func (f *sqliteFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -644,6 +728,9 @@ func (f *sqliteFile) Read(b []byte) (int, error) { return n, nil } +// Close documents the Close operation. +// +// result := f.Close(...) func (f *sqliteFile) Close() error { return nil } @@ -655,11 +742,17 @@ type sqliteWriteCloser struct { data []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *sqliteWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) func (w *sqliteWriteCloser) Close() error { _, err := w.medium.db.Exec( `INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) 
@@ -667,7 +760,7 @@ func (w *sqliteWriteCloser) Close() error { w.path, w.data, time.Now().UTC(), ) if err != nil { - return coreerr.E("sqlite.WriteCloser.Close", "store failed: "+w.path, err) + return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", w.path), err) } return nil } diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 97d6304..6a1e592 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -3,9 +3,9 @@ package sqlite import ( goio "io" "io/fs" - "strings" "testing" + core "dappco.re/go/core" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,21 +20,21 @@ func newTestMedium(t *testing.T) *Medium { // --- Constructor Tests --- -func TestNew_Good(t *testing.T) { +func TestSqlite_New_Good(t *testing.T) { m, err := New(":memory:") require.NoError(t, err) defer m.Close() assert.Equal(t, "files", m.table) } -func TestNew_Good_WithTable(t *testing.T) { +func TestSqlite_New_WithTable_Good(t *testing.T) { m, err := New(":memory:", WithTable("custom")) require.NoError(t, err) defer m.Close() assert.Equal(t, "custom", m.table) } -func TestNew_Bad_EmptyPath(t *testing.T) { +func TestSqlite_New_EmptyPath_Bad(t *testing.T) { _, err := New("") assert.Error(t, err) assert.Contains(t, err.Error(), "database path is required") @@ -42,7 +42,7 @@ func TestNew_Bad_EmptyPath(t *testing.T) { // --- Read/Write Tests --- -func TestReadWrite_Good(t *testing.T) { +func TestSqlite_ReadWrite_Good(t *testing.T) { m := newTestMedium(t) err := m.Write("hello.txt", "world") @@ -53,7 +53,7 @@ func TestReadWrite_Good(t *testing.T) { assert.Equal(t, "world", content) } -func TestReadWrite_Good_Overwrite(t *testing.T) { +func TestSqlite_ReadWrite_Overwrite_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "first")) @@ -64,7 +64,7 @@ func TestReadWrite_Good_Overwrite(t *testing.T) { assert.Equal(t, "second", content) } -func TestReadWrite_Good_NestedPath(t *testing.T) { +func 
TestSqlite_ReadWrite_NestedPath_Good(t *testing.T) { m := newTestMedium(t) err := m.Write("a/b/c.txt", "nested") @@ -75,28 +75,28 @@ func TestReadWrite_Good_NestedPath(t *testing.T) { assert.Equal(t, "nested", content) } -func TestRead_Bad_NotFound(t *testing.T) { +func TestSqlite_Read_NotFound_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Read("nonexistent.txt") assert.Error(t, err) } -func TestRead_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Read_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Read("") assert.Error(t, err) } -func TestWrite_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Write_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) err := m.Write("", "content") assert.Error(t, err) } -func TestRead_Bad_IsDirectory(t *testing.T) { +func TestSqlite_Read_IsDirectory_Bad(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -106,7 +106,7 @@ func TestRead_Bad_IsDirectory(t *testing.T) { // --- EnsureDir Tests --- -func TestEnsureDir_Good(t *testing.T) { +func TestSqlite_EnsureDir_Good(t *testing.T) { m := newTestMedium(t) err := m.EnsureDir("mydir") @@ -114,14 +114,14 @@ func TestEnsureDir_Good(t *testing.T) { assert.True(t, m.IsDir("mydir")) } -func TestEnsureDir_Good_EmptyPath(t *testing.T) { +func TestSqlite_EnsureDir_EmptyPath_Good(t *testing.T) { m := newTestMedium(t) // Root always exists, no-op err := m.EnsureDir("") assert.NoError(t, err) } -func TestEnsureDir_Good_Idempotent(t *testing.T) { +func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -131,7 +131,7 @@ func TestEnsureDir_Good_Idempotent(t *testing.T) { // --- IsFile Tests --- -func TestIsFile_Good(t *testing.T) { +func TestSqlite_IsFile_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "content")) @@ -145,7 +145,7 @@ func TestIsFile_Good(t *testing.T) { // --- FileGet/FileSet Tests --- -func TestFileGetFileSet_Good(t *testing.T) { 
+func TestSqlite_FileGetFileSet_Good(t *testing.T) { m := newTestMedium(t) err := m.FileSet("key.txt", "value") @@ -158,7 +158,7 @@ func TestFileGetFileSet_Good(t *testing.T) { // --- Delete Tests --- -func TestDelete_Good(t *testing.T) { +func TestSqlite_Delete_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("to-delete.txt", "content")) @@ -169,7 +169,7 @@ func TestDelete_Good(t *testing.T) { assert.False(t, m.Exists("to-delete.txt")) } -func TestDelete_Good_EmptyDir(t *testing.T) { +func TestSqlite_Delete_EmptyDir_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("emptydir")) @@ -180,21 +180,21 @@ func TestDelete_Good_EmptyDir(t *testing.T) { assert.False(t, m.IsDir("emptydir")) } -func TestDelete_Bad_NotFound(t *testing.T) { +func TestSqlite_Delete_NotFound_Bad(t *testing.T) { m := newTestMedium(t) err := m.Delete("nonexistent") assert.Error(t, err) } -func TestDelete_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Delete_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) err := m.Delete("") assert.Error(t, err) } -func TestDelete_Bad_NotEmpty(t *testing.T) { +func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -206,7 +206,7 @@ func TestDelete_Bad_NotEmpty(t *testing.T) { // --- DeleteAll Tests --- -func TestDeleteAll_Good(t *testing.T) { +func TestSqlite_DeleteAll_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) @@ -221,7 +221,7 @@ func TestDeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("other.txt")) } -func TestDeleteAll_Good_SingleFile(t *testing.T) { +func TestSqlite_DeleteAll_SingleFile_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "content")) @@ -231,14 +231,14 @@ func TestDeleteAll_Good_SingleFile(t *testing.T) { assert.False(t, m.Exists("file.txt")) } -func TestDeleteAll_Bad_NotFound(t *testing.T) { +func TestSqlite_DeleteAll_NotFound_Bad(t *testing.T) { m 
:= newTestMedium(t) err := m.DeleteAll("nonexistent") assert.Error(t, err) } -func TestDeleteAll_Bad_EmptyPath(t *testing.T) { +func TestSqlite_DeleteAll_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) err := m.DeleteAll("") @@ -247,7 +247,7 @@ func TestDeleteAll_Bad_EmptyPath(t *testing.T) { // --- Rename Tests --- -func TestRename_Good(t *testing.T) { +func TestSqlite_Rename_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("old.txt", "content")) @@ -263,7 +263,7 @@ func TestRename_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestRename_Good_Directory(t *testing.T) { +func TestSqlite_Rename_Directory_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("olddir")) @@ -282,14 +282,14 @@ func TestRename_Good_Directory(t *testing.T) { assert.Equal(t, "content", content) } -func TestRename_Bad_SourceNotFound(t *testing.T) { +func TestSqlite_Rename_SourceNotFound_Bad(t *testing.T) { m := newTestMedium(t) err := m.Rename("nonexistent", "new") assert.Error(t, err) } -func TestRename_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) err := m.Rename("", "new") @@ -301,7 +301,7 @@ func TestRename_Bad_EmptyPath(t *testing.T) { // --- List Tests --- -func TestList_Good(t *testing.T) { +func TestSqlite_List_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) @@ -322,7 +322,7 @@ func TestList_Good(t *testing.T) { assert.Len(t, entries, 3) } -func TestList_Good_Root(t *testing.T) { +func TestSqlite_List_Root_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("root.txt", "content")) @@ -340,7 +340,7 @@ func TestList_Good_Root(t *testing.T) { assert.True(t, names["dir"]) } -func TestList_Good_DirectoryEntry(t *testing.T) { +func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("dir/sub/file.txt", "content")) @@ -359,7 +359,7 @@ func 
TestList_Good_DirectoryEntry(t *testing.T) { // --- Stat Tests --- -func TestStat_Good(t *testing.T) { +func TestSqlite_Stat_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "hello world")) @@ -371,7 +371,7 @@ func TestStat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestStat_Good_Directory(t *testing.T) { +func TestSqlite_Stat_Directory_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -382,14 +382,14 @@ func TestStat_Good_Directory(t *testing.T) { assert.True(t, info.IsDir()) } -func TestStat_Bad_NotFound(t *testing.T) { +func TestSqlite_Stat_NotFound_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Stat("nonexistent") assert.Error(t, err) } -func TestStat_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Stat_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Stat("") @@ -398,7 +398,7 @@ func TestStat_Bad_EmptyPath(t *testing.T) { // --- Open Tests --- -func TestOpen_Good(t *testing.T) { +func TestSqlite_Open_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "open me")) @@ -416,14 +416,14 @@ func TestOpen_Good(t *testing.T) { assert.Equal(t, "file.txt", stat.Name()) } -func TestOpen_Bad_NotFound(t *testing.T) { +func TestSqlite_Open_NotFound_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Open("nonexistent.txt") assert.Error(t, err) } -func TestOpen_Bad_IsDirectory(t *testing.T) { +func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -433,7 +433,7 @@ func TestOpen_Bad_IsDirectory(t *testing.T) { // --- Create Tests --- -func TestCreate_Good(t *testing.T) { +func TestSqlite_Create_Good(t *testing.T) { m := newTestMedium(t) w, err := m.Create("new.txt") @@ -451,7 +451,7 @@ func TestCreate_Good(t *testing.T) { assert.Equal(t, "created", content) } -func TestCreate_Good_Overwrite(t *testing.T) { +func TestSqlite_Create_Overwrite_Good(t *testing.T) { m := 
newTestMedium(t) require.NoError(t, m.Write("file.txt", "old content")) @@ -467,7 +467,7 @@ func TestCreate_Good_Overwrite(t *testing.T) { assert.Equal(t, "new", content) } -func TestCreate_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Create_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Create("") @@ -476,7 +476,7 @@ func TestCreate_Bad_EmptyPath(t *testing.T) { // --- Append Tests --- -func TestAppend_Good(t *testing.T) { +func TestSqlite_Append_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("append.txt", "hello")) @@ -493,7 +493,7 @@ func TestAppend_Good(t *testing.T) { assert.Equal(t, "hello world", content) } -func TestAppend_Good_NewFile(t *testing.T) { +func TestSqlite_Append_NewFile_Good(t *testing.T) { m := newTestMedium(t) w, err := m.Append("new.txt") @@ -508,7 +508,7 @@ func TestAppend_Good_NewFile(t *testing.T) { assert.Equal(t, "fresh", content) } -func TestAppend_Bad_EmptyPath(t *testing.T) { +func TestSqlite_Append_EmptyPath_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Append("") @@ -517,7 +517,7 @@ func TestAppend_Bad_EmptyPath(t *testing.T) { // --- ReadStream Tests --- -func TestReadStream_Good(t *testing.T) { +func TestSqlite_ReadStream_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("stream.txt", "streaming content")) @@ -531,14 +531,14 @@ func TestReadStream_Good(t *testing.T) { assert.Equal(t, "streaming content", string(data)) } -func TestReadStream_Bad_NotFound(t *testing.T) { +func TestSqlite_ReadStream_NotFound_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.ReadStream("nonexistent.txt") assert.Error(t, err) } -func TestReadStream_Bad_IsDirectory(t *testing.T) { +func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.EnsureDir("mydir")) @@ -548,13 +548,13 @@ func TestReadStream_Bad_IsDirectory(t *testing.T) { // --- WriteStream Tests --- -func TestWriteStream_Good(t *testing.T) { +func TestSqlite_WriteStream_Good(t 
*testing.T) { m := newTestMedium(t) writer, err := m.WriteStream("output.txt") require.NoError(t, err) - _, err = goio.Copy(writer, strings.NewReader("piped data")) + _, err = goio.Copy(writer, core.NewReader("piped data")) require.NoError(t, err) require.NoError(t, writer.Close()) @@ -565,7 +565,7 @@ func TestWriteStream_Good(t *testing.T) { // --- Exists Tests --- -func TestExists_Good(t *testing.T) { +func TestSqlite_Exists_Good(t *testing.T) { m := newTestMedium(t) assert.False(t, m.Exists("nonexistent")) @@ -577,7 +577,7 @@ func TestExists_Good(t *testing.T) { assert.True(t, m.Exists("mydir")) } -func TestExists_Good_EmptyPath(t *testing.T) { +func TestSqlite_Exists_EmptyPath_Good(t *testing.T) { m := newTestMedium(t) // Root always exists assert.True(t, m.Exists("")) @@ -585,7 +585,7 @@ func TestExists_Good_EmptyPath(t *testing.T) { // --- IsDir Tests --- -func TestIsDir_Good(t *testing.T) { +func TestSqlite_IsDir_Good(t *testing.T) { m := newTestMedium(t) require.NoError(t, m.Write("file.txt", "content")) @@ -599,7 +599,7 @@ func TestIsDir_Good(t *testing.T) { // --- cleanPath Tests --- -func TestCleanPath_Good(t *testing.T) { +func TestSqlite_CleanPath_Good(t *testing.T) { assert.Equal(t, "file.txt", cleanPath("file.txt")) assert.Equal(t, "dir/file.txt", cleanPath("dir/file.txt")) assert.Equal(t, "file.txt", cleanPath("/file.txt")) @@ -612,7 +612,7 @@ func TestCleanPath_Good(t *testing.T) { // --- Interface Compliance --- -func TestInterfaceCompliance_Ugly(t *testing.T) { +func TestSqlite_InterfaceCompliance_Ugly(t *testing.T) { m := newTestMedium(t) // Verify all methods exist by asserting the interface shape. 
@@ -640,7 +640,7 @@ func TestInterfaceCompliance_Ugly(t *testing.T) { // --- Custom Table --- -func TestCustomTable_Good(t *testing.T) { +func TestSqlite_CustomTable_Good(t *testing.T) { m, err := New(":memory:", WithTable("my_files")) require.NoError(t, err) defer m.Close() diff --git a/store/medium.go b/store/medium.go index c1f5b45..2380983 100644 --- a/store/medium.go +++ b/store/medium.go @@ -4,10 +4,9 @@ import ( goio "io" "io/fs" "path" - "strings" "time" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" ) // Medium wraps a Store to satisfy the io.Medium interface. @@ -33,16 +32,22 @@ func NewMedium(dbPath string) (*Medium, error) { } // AsMedium returns a Medium adapter for an existing Store. +// +// result := s.AsMedium(...) func (s *Store) AsMedium() *Medium { return &Medium{s: s} } // Store returns the underlying KV store for direct access. +// +// result := m.Store(...) func (m *Medium) Store() *Store { return m.s } // Close closes the underlying store. +// +// result := m.Close(...) func (m *Medium) Close() error { return m.s.Close() } @@ -51,11 +56,11 @@ func (m *Medium) Close() error { // First segment = group, remainder = key. func splitPath(p string) (group, key string) { clean := path.Clean(p) - clean = strings.TrimPrefix(clean, "/") + clean = core.TrimPrefix(clean, "/") if clean == "" || clean == "." { return "", "" } - parts := strings.SplitN(clean, "/", 2) + parts := core.SplitN(clean, "/", 2) if len(parts) == 1 { return parts[0], "" } @@ -63,29 +68,37 @@ func splitPath(p string) (group, key string) { } // Read retrieves the value at group/key. +// +// result := m.Read(...) func (m *Medium) Read(p string) (string, error) { group, key := splitPath(p) if key == "" { - return "", coreerr.E("store.Read", "path must include group/key", fs.ErrInvalid) + return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } return m.s.Get(group, key) } // Write stores a value at group/key. +// +// result := m.Write(...) 
func (m *Medium) Write(p, content string) error { group, key := splitPath(p) if key == "" { - return coreerr.E("store.Write", "path must include group/key", fs.ErrInvalid) + return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } return m.s.Set(group, key, content) } // EnsureDir is a no-op — groups are created implicitly on Set. +// +// result := m.EnsureDir(...) func (m *Medium) EnsureDir(_ string) error { return nil } // IsFile returns true if a group/key pair exists. +// +// result := m.IsFile(...) func (m *Medium) IsFile(p string) bool { group, key := splitPath(p) if key == "" { @@ -96,20 +109,26 @@ func (m *Medium) IsFile(p string) bool { } // FileGet is an alias for Read. +// +// result := m.FileGet(...) func (m *Medium) FileGet(p string) (string, error) { return m.Read(p) } // FileSet is an alias for Write. +// +// result := m.FileSet(...) func (m *Medium) FileSet(p, content string) error { return m.Write(p, content) } // Delete removes a key, or checks that a group is empty. +// +// result := m.Delete(...) func (m *Medium) Delete(p string) error { group, key := splitPath(p) if group == "" { - return coreerr.E("store.Delete", "path is required", fs.ErrInvalid) + return core.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { n, err := m.s.Count(group) @@ -117,7 +136,7 @@ func (m *Medium) Delete(p string) error { return err } if n > 0 { - return coreerr.E("store.Delete", "group not empty: "+group, fs.ErrExist) + return core.E("store.Delete", core.Concat("group not empty: ", group), fs.ErrExist) } return nil } @@ -125,10 +144,12 @@ func (m *Medium) Delete(p string) error { } // DeleteAll removes a key, or all keys in a group. +// +// result := m.DeleteAll(...) 
func (m *Medium) DeleteAll(p string) error { group, key := splitPath(p) if group == "" { - return coreerr.E("store.DeleteAll", "path is required", fs.ErrInvalid) + return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } if key == "" { return m.s.DeleteGroup(group) @@ -137,11 +158,13 @@ func (m *Medium) DeleteAll(p string) error { } // Rename moves a key from one path to another. +// +// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { og, ok := splitPath(oldPath) ng, nk := splitPath(newPath) if ok == "" || nk == "" { - return coreerr.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) + return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } val, err := m.s.Get(og, ok) if err != nil { @@ -155,13 +178,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns directory entries. Empty path returns groups. // A group path returns keys in that group. +// +// result := m.List(...) func (m *Medium) List(p string) ([]fs.DirEntry, error) { group, key := splitPath(p) if group == "" { rows, err := m.s.db.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") if err != nil { - return nil, coreerr.E("store.List", "query groups", err) + return nil, core.E("store.List", "query groups", err) } defer rows.Close() @@ -169,7 +194,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { for rows.Next() { var g string if err := rows.Scan(&g); err != nil { - return nil, coreerr.E("store.List", "scan", err) + return nil, core.E("store.List", "scan", err) } entries = append(entries, &kvDirEntry{name: g, isDir: true}) } @@ -192,10 +217,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { } // Stat returns file info for a group (dir) or key (file). +// +// result := m.Stat(...) 
func (m *Medium) Stat(p string) (fs.FileInfo, error) { group, key := splitPath(p) if group == "" { - return nil, coreerr.E("store.Stat", "path is required", fs.ErrInvalid) + return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { n, err := m.s.Count(group) @@ -203,7 +230,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return nil, err } if n == 0 { - return nil, coreerr.E("store.Stat", "group not found: "+group, fs.ErrNotExist) + return nil, core.E("store.Stat", core.Concat("group not found: ", group), fs.ErrNotExist) } return &kvFileInfo{name: group, isDir: true}, nil } @@ -215,10 +242,12 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { } // Open opens a key for reading. +// +// result := m.Open(...) func (m *Medium) Open(p string) (fs.File, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Open", "path must include group/key", fs.ErrInvalid) + return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } val, err := m.s.Get(group, key) if err != nil { @@ -228,43 +257,53 @@ func (m *Medium) Open(p string) (fs.File, error) { } // Create creates or truncates a key. Content is stored on Close. +// +// result := m.Create(...) func (m *Medium) Create(p string) (goio.WriteCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Create", "path must include group/key", fs.ErrInvalid) + return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } return &kvWriteCloser{s: m.s, group: group, key: key}, nil } // Append opens a key for appending. Content is stored on Close. +// +// result := m.Append(...) 
func (m *Medium) Append(p string) (goio.WriteCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.Append", "path must include group/key", fs.ErrInvalid) + return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } existing, _ := m.s.Get(group, key) return &kvWriteCloser{s: m.s, group: group, key: key, data: []byte(existing)}, nil } // ReadStream returns a reader for the value. +// +// result := m.ReadStream(...) func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { group, key := splitPath(p) if key == "" { - return nil, coreerr.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) + return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } val, err := m.s.Get(group, key) if err != nil { return nil, err } - return goio.NopCloser(strings.NewReader(val)), nil + return goio.NopCloser(core.NewReader(val)), nil } // WriteStream returns a writer. Content is stored on Close. +// +// result := m.WriteStream(...) func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { return m.Create(p) } // Exists returns true if a group or key exists. +// +// result := m.Exists(...) func (m *Medium) Exists(p string) bool { group, key := splitPath(p) if group == "" { @@ -279,6 +318,8 @@ func (m *Medium) Exists(p string) bool { } // IsDir returns true if the path is a group with entries. +// +// result := m.IsDir(...) func (m *Medium) IsDir(p string) bool { group, key := splitPath(p) if key != "" || group == "" { @@ -296,17 +337,40 @@ type kvFileInfo struct { isDir bool } +// Name documents the Name operation. +// +// result := fi.Name(...) func (fi *kvFileInfo) Name() string { return fi.name } -func (fi *kvFileInfo) Size() int64 { return fi.size } + +// Size documents the Size operation. +// +// result := fi.Size(...) +func (fi *kvFileInfo) Size() int64 { return fi.size } + +// Mode documents the Mode operation. +// +// result := fi.Mode(...) 
func (fi *kvFileInfo) Mode() fs.FileMode { if fi.isDir { return fs.ModeDir | 0755 } return 0644 } + +// ModTime documents the ModTime operation. +// +// result := fi.ModTime(...) func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} } -func (fi *kvFileInfo) IsDir() bool { return fi.isDir } -func (fi *kvFileInfo) Sys() any { return nil } + +// IsDir documents the IsDir operation. +// +// result := fi.IsDir(...) +func (fi *kvFileInfo) IsDir() bool { return fi.isDir } + +// Sys documents the Sys operation. +// +// result := fi.Sys(...) +func (fi *kvFileInfo) Sys() any { return nil } type kvDirEntry struct { name string @@ -314,14 +378,29 @@ type kvDirEntry struct { size int64 } +// Name documents the Name operation. +// +// result := de.Name(...) func (de *kvDirEntry) Name() string { return de.name } -func (de *kvDirEntry) IsDir() bool { return de.isDir } + +// IsDir documents the IsDir operation. +// +// result := de.IsDir(...) +func (de *kvDirEntry) IsDir() bool { return de.isDir } + +// Type documents the Type operation. +// +// result := de.Type(...) func (de *kvDirEntry) Type() fs.FileMode { if de.isDir { return fs.ModeDir } return 0 } + +// Info documents the Info operation. +// +// result := de.Info(...) func (de *kvDirEntry) Info() (fs.FileInfo, error) { return &kvFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil } @@ -332,10 +411,16 @@ type kvFile struct { offset int64 } +// Stat documents the Stat operation. +// +// result := f.Stat(...) func (f *kvFile) Stat() (fs.FileInfo, error) { return &kvFileInfo{name: f.name, size: int64(len(f.content))}, nil } +// Read documents the Read operation. +// +// result := f.Read(...) func (f *kvFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -345,6 +430,9 @@ func (f *kvFile) Read(b []byte) (int, error) { return n, nil } +// Close documents the Close operation. +// +// result := f.Close(...) 
func (f *kvFile) Close() error { return nil } type kvWriteCloser struct { @@ -354,11 +442,17 @@ type kvWriteCloser struct { data []byte } +// Write documents the Write operation. +// +// result := w.Write(...) func (w *kvWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } +// Close documents the Close operation. +// +// result := w.Close(...) func (w *kvWriteCloser) Close() error { return w.s.Set(w.group, w.key, string(w.data)) } diff --git a/store/medium_test.go b/store/medium_test.go index 19722e7..400ba61 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -16,7 +16,7 @@ func newTestMedium(t *testing.T) *Medium { return m } -func TestMedium_ReadWrite_Good(t *testing.T) { +func TestMedium_Medium_ReadWrite_Good(t *testing.T) { m := newTestMedium(t) err := m.Write("config/theme", "dark") @@ -27,19 +27,19 @@ func TestMedium_ReadWrite_Good(t *testing.T) { assert.Equal(t, "dark", val) } -func TestMedium_Read_Bad_NoKey(t *testing.T) { +func TestMedium_Medium_Read_NoKey_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Read("config") assert.Error(t, err) } -func TestMedium_Read_Bad_NotFound(t *testing.T) { +func TestMedium_Medium_Read_NotFound_Bad(t *testing.T) { m := newTestMedium(t) _, err := m.Read("config/missing") assert.Error(t, err) } -func TestMedium_IsFile_Good(t *testing.T) { +func TestMedium_Medium_IsFile_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "val") @@ -48,7 +48,7 @@ func TestMedium_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("grp")) } -func TestMedium_Delete_Good(t *testing.T) { +func TestMedium_Medium_Delete_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "val") @@ -57,7 +57,7 @@ func TestMedium_Delete_Good(t *testing.T) { assert.False(t, m.IsFile("grp/key")) } -func TestMedium_Delete_Bad_NonEmptyGroup(t *testing.T) { +func TestMedium_Medium_Delete_NonEmptyGroup_Bad(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "val") @@ -65,7 
+65,7 @@ func TestMedium_Delete_Bad_NonEmptyGroup(t *testing.T) { assert.Error(t, err) } -func TestMedium_DeleteAll_Good(t *testing.T) { +func TestMedium_Medium_DeleteAll_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/a", "1") _ = m.Write("grp/b", "2") @@ -75,7 +75,7 @@ func TestMedium_DeleteAll_Good(t *testing.T) { assert.False(t, m.Exists("grp")) } -func TestMedium_Rename_Good(t *testing.T) { +func TestMedium_Medium_Rename_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("old/key", "val") @@ -88,7 +88,7 @@ func TestMedium_Rename_Good(t *testing.T) { assert.False(t, m.IsFile("old/key")) } -func TestMedium_List_Good_Groups(t *testing.T) { +func TestMedium_Medium_List_Groups_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("alpha/a", "1") _ = m.Write("beta/b", "2") @@ -106,7 +106,7 @@ func TestMedium_List_Good_Groups(t *testing.T) { assert.True(t, names["beta"]) } -func TestMedium_List_Good_Keys(t *testing.T) { +func TestMedium_Medium_List_Keys_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/a", "1") _ = m.Write("grp/b", "22") @@ -116,7 +116,7 @@ func TestMedium_List_Good_Keys(t *testing.T) { assert.Len(t, entries, 2) } -func TestMedium_Stat_Good(t *testing.T) { +func TestMedium_Medium_Stat_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "hello") @@ -132,7 +132,7 @@ func TestMedium_Stat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestMedium_Exists_IsDir_Good(t *testing.T) { +func TestMedium_Medium_Exists_IsDir_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "val") @@ -143,7 +143,7 @@ func TestMedium_Exists_IsDir_Good(t *testing.T) { assert.False(t, m.Exists("nope")) } -func TestMedium_Open_Read_Good(t *testing.T) { +func TestMedium_Medium_Open_Read_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "hello world") @@ -156,7 +156,7 @@ func TestMedium_Open_Read_Good(t *testing.T) { assert.Equal(t, "hello world", string(data)) } -func TestMedium_CreateClose_Good(t 
*testing.T) { +func TestMedium_Medium_CreateClose_Good(t *testing.T) { m := newTestMedium(t) w, err := m.Create("grp/key") @@ -169,7 +169,7 @@ func TestMedium_CreateClose_Good(t *testing.T) { assert.Equal(t, "streamed", val) } -func TestMedium_Append_Good(t *testing.T) { +func TestMedium_Medium_Append_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "hello") @@ -183,7 +183,7 @@ func TestMedium_Append_Good(t *testing.T) { assert.Equal(t, "hello world", val) } -func TestMedium_AsMedium_Good(t *testing.T) { +func TestMedium_Medium_AsMedium_Good(t *testing.T) { s, err := New(":memory:") require.NoError(t, err) defer s.Close() diff --git a/store/store.go b/store/store.go index bc92a8f..df5d6a6 100644 --- a/store/store.go +++ b/store/store.go @@ -2,16 +2,14 @@ package store import ( "database/sql" - "errors" - "strings" "text/template" - coreerr "forge.lthn.ai/core/go-log" + core "dappco.re/go/core" _ "modernc.org/sqlite" ) // ErrNotFound is returned when a key does not exist in the store. -var ErrNotFound = errors.New("store: not found") +var ErrNotFound = core.E("store.ErrNotFound", "key not found", nil) // Store is a group-namespaced key-value store backed by SQLite. 
type Store struct { @@ -27,11 +25,11 @@ type Store struct { func New(dbPath string) (*Store, error) { db, err := sql.Open("sqlite", dbPath) if err != nil { - return nil, coreerr.E("store.New", "open db", err) + return nil, core.E("store.New", "open db", err) } if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { db.Close() - return nil, coreerr.E("store.New", "WAL mode", err) + return nil, core.E("store.New", "WAL mode", err) } if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS kv ( grp TEXT NOT NULL, @@ -40,30 +38,36 @@ func New(dbPath string) (*Store, error) { PRIMARY KEY (grp, key) )`); err != nil { db.Close() - return nil, coreerr.E("store.New", "create schema", err) + return nil, core.E("store.New", "create schema", err) } return &Store{db: db}, nil } // Close closes the underlying database. +// +// result := s.Close(...) func (s *Store) Close() error { return s.db.Close() } // Get retrieves a value by group and key. +// +// result := s.Get(...) func (s *Store) Get(group, key string) (string, error) { var val string err := s.db.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&val) if err == sql.ErrNoRows { - return "", coreerr.E("store.Get", "not found: "+group+"/"+key, ErrNotFound) + return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), ErrNotFound) } if err != nil { - return "", coreerr.E("store.Get", "query", err) + return "", core.E("store.Get", "query", err) } return val, nil } // Set stores a value by group and key, overwriting if exists. +// +// result := s.Set(...) func (s *Store) Set(group, key, value string) error { _, err := s.db.Exec( `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) @@ -71,44 +75,52 @@ func (s *Store) Set(group, key, value string) error { group, key, value, ) if err != nil { - return coreerr.E("store.Set", "exec", err) + return core.E("store.Set", "exec", err) } return nil } // Delete removes a single key from a group. +// +// result := s.Delete(...) 
func (s *Store) Delete(group, key string) error { _, err := s.db.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) if err != nil { - return coreerr.E("store.Delete", "exec", err) + return core.E("store.Delete", "exec", err) } return nil } // Count returns the number of keys in a group. +// +// result := s.Count(...) func (s *Store) Count(group string) (int, error) { var n int err := s.db.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&n) if err != nil { - return 0, coreerr.E("store.Count", "query", err) + return 0, core.E("store.Count", "query", err) } return n, nil } // DeleteGroup removes all keys in a group. +// +// result := s.DeleteGroup(...) func (s *Store) DeleteGroup(group string) error { _, err := s.db.Exec("DELETE FROM kv WHERE grp = ?", group) if err != nil { - return coreerr.E("store.DeleteGroup", "exec", err) + return core.E("store.DeleteGroup", "exec", err) } return nil } // GetAll returns all key-value pairs in a group. +// +// result := s.GetAll(...) 
func (s *Store) GetAll(group string) (map[string]string, error) { rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { - return nil, coreerr.E("store.GetAll", "query", err) + return nil, core.E("store.GetAll", "query", err) } defer rows.Close() @@ -116,12 +128,12 @@ func (s *Store) GetAll(group string) (map[string]string, error) { for rows.Next() { var k, v string if err := rows.Scan(&k, &v); err != nil { - return nil, coreerr.E("store.GetAll", "scan", err) + return nil, core.E("store.GetAll", "scan", err) } result[k] = v } if err := rows.Err(); err != nil { - return nil, coreerr.E("store.GetAll", "rows", err) + return nil, core.E("store.GetAll", "rows", err) } return result, nil } @@ -135,7 +147,7 @@ func (s *Store) GetAll(group string) (map[string]string, error) { func (s *Store) Render(tmplStr, group string) (string, error) { rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { - return "", coreerr.E("store.Render", "query", err) + return "", core.E("store.Render", "query", err) } defer rows.Close() @@ -143,21 +155,21 @@ func (s *Store) Render(tmplStr, group string) (string, error) { for rows.Next() { var k, v string if err := rows.Scan(&k, &v); err != nil { - return "", coreerr.E("store.Render", "scan", err) + return "", core.E("store.Render", "scan", err) } vars[k] = v } if err := rows.Err(); err != nil { - return "", coreerr.E("store.Render", "rows", err) + return "", core.E("store.Render", "rows", err) } tmpl, err := template.New("render").Parse(tmplStr) if err != nil { - return "", coreerr.E("store.Render", "parse template", err) + return "", core.E("store.Render", "parse template", err) } - var b strings.Builder - if err := tmpl.Execute(&b, vars); err != nil { - return "", coreerr.E("store.Render", "execute template", err) + b := core.NewBuilder() + if err := tmpl.Execute(b, vars); err != nil { + return "", core.E("store.Render", "execute template", err) } return b.String(), nil } 
diff --git a/store/store_test.go b/store/store_test.go index b62b88b..624ec07 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestSetGet_Good(t *testing.T) { +func TestStore_SetGet_Good(t *testing.T) { s, err := New(":memory:") require.NoError(t, err) defer s.Close() @@ -20,7 +20,7 @@ func TestSetGet_Good(t *testing.T) { assert.Equal(t, "dark", val) } -func TestGet_Bad_NotFound(t *testing.T) { +func TestStore_Get_NotFound_Bad(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -28,7 +28,7 @@ func TestGet_Bad_NotFound(t *testing.T) { assert.Error(t, err) } -func TestDelete_Good(t *testing.T) { +func TestStore_Delete_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -40,7 +40,7 @@ func TestDelete_Good(t *testing.T) { assert.Error(t, err) } -func TestCount_Good(t *testing.T) { +func TestStore_Count_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -53,7 +53,7 @@ func TestCount_Good(t *testing.T) { assert.Equal(t, 2, n) } -func TestDeleteGroup_Good(t *testing.T) { +func TestStore_DeleteGroup_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -66,7 +66,7 @@ func TestDeleteGroup_Good(t *testing.T) { assert.Equal(t, 0, n) } -func TestGetAll_Good(t *testing.T) { +func TestStore_GetAll_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -79,7 +79,7 @@ func TestGetAll_Good(t *testing.T) { assert.Equal(t, map[string]string{"a": "1", "b": "2"}, all) } -func TestGetAll_Good_Empty(t *testing.T) { +func TestStore_GetAll_Empty_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() @@ -88,7 +88,7 @@ func TestGetAll_Good_Empty(t *testing.T) { assert.Empty(t, all) } -func TestRender_Good(t *testing.T) { +func TestStore_Render_Good(t *testing.T) { s, _ := New(":memory:") defer s.Close() diff --git a/workspace/service.go b/workspace/service.go index b17567f..5599356 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -4,11 +4,9 
@@ import ( "crypto/sha256" "encoding/hex" "io/fs" - "strings" "sync" core "dappco.re/go/core" - coreerr "forge.lthn.ai/core/go-log" "dappco.re/go/core/io" ) @@ -46,7 +44,7 @@ type Service struct { func New(c *core.Core, crypt ...cryptProvider) (any, error) { home := workspaceHome() if home == "" { - return nil, coreerr.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) + return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) } rootPath := core.Path(home, ".core", "workspaces") @@ -61,7 +59,7 @@ func New(c *core.Core, crypt ...cryptProvider) (any, error) { } if err := s.medium.EnsureDir(rootPath); err != nil { - return nil, coreerr.E("workspace.New", "failed to ensure root directory", err) + return nil, core.E("workspace.New", "failed to ensure root directory", err) } return s, nil @@ -70,12 +68,14 @@ func New(c *core.Core, crypt ...cryptProvider) (any, error) { // CreateWorkspace creates a new encrypted workspace. // Identifier is hashed (SHA-256) to create the directory name. // A PGP keypair is generated using the password. +// +// result := s.CreateWorkspace(...) 
func (s *Service) CreateWorkspace(identifier, password string) (string, error) { s.mu.Lock() defer s.mu.Unlock() if s.crypt == nil { - return "", coreerr.E("workspace.CreateWorkspace", "crypt service not available", nil) + return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil) } hash := sha256.Sum256([]byte(identifier)) @@ -86,28 +86,30 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { } if s.medium.Exists(wsPath) { - return "", coreerr.E("workspace.CreateWorkspace", "workspace already exists", nil) + return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range []string{"config", "log", "data", "files", "keys"} { if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil { - return "", coreerr.E("workspace.CreateWorkspace", "failed to create directory: "+d, err) + return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", d), err) } } privKey, err := s.crypt.CreateKeyPair(identifier, password) if err != nil { - return "", coreerr.E("workspace.CreateWorkspace", "failed to generate keys", err) + return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil { - return "", coreerr.E("workspace.CreateWorkspace", "failed to save private key", err) + return "", core.E("workspace.CreateWorkspace", "failed to save private key", err) } return wsID, nil } // SwitchWorkspace changes the active workspace. +// +// result := s.SwitchWorkspace(...) 
func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() @@ -117,7 +119,7 @@ func (s *Service) SwitchWorkspace(name string) error { return err } if !s.medium.IsDir(wsPath) { - return coreerr.E("workspace.SwitchWorkspace", "workspace not found: "+name, nil) + return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", name), nil) } s.activeWorkspace = core.PathBase(wsPath) @@ -128,20 +130,22 @@ func (s *Service) SwitchWorkspace(name string) error { // or an error if no workspace is active. func (s *Service) activeFilePath(op, filename string) (string, error) { if s.activeWorkspace == "" { - return "", coreerr.E(op, "no active workspace", nil) + return "", core.E(op, "no active workspace", nil) } filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") path, err := joinWithinRoot(filesRoot, filename) if err != nil { - return "", coreerr.E(op, "file path escapes workspace files", fs.ErrPermission) + return "", core.E(op, "file path escapes workspace files", fs.ErrPermission) } if path == filesRoot { - return "", coreerr.E(op, "filename is required", fs.ErrInvalid) + return "", core.E(op, "filename is required", fs.ErrInvalid) } return path, nil } // WorkspaceFileGet retrieves the content of a file from the active workspace. +// +// result := s.WorkspaceFileGet(...) func (s *Service) WorkspaceFileGet(filename string) (string, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -154,6 +158,8 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) { } // WorkspaceFileSet saves content to a file in the active workspace. +// +// result := s.WorkspaceFileSet(...) func (s *Service) WorkspaceFileSet(filename, content string) error { s.mu.Lock() defer s.mu.Unlock() @@ -166,6 +172,8 @@ func (s *Service) WorkspaceFileSet(filename, content string) error { } // HandleIPCEvents handles workspace-related IPC messages. +// +// result := s.HandleIPCEvents(...) 
func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result { switch m := msg.(type) { case map[string]any: @@ -203,7 +211,7 @@ func workspaceHome() string { func joinWithinRoot(root string, parts ...string) (string, error) { candidate := core.Path(append([]string{root}, parts...)...) sep := core.Env("DS") - if candidate == root || strings.HasPrefix(candidate, root+sep) { + if candidate == root || core.HasPrefix(candidate, root+sep) { return candidate, nil } return "", fs.ErrPermission @@ -211,14 +219,14 @@ func joinWithinRoot(root string, parts ...string) (string, error) { func (s *Service) workspacePath(op, name string) (string, error) { if name == "" { - return "", coreerr.E(op, "workspace name is required", fs.ErrInvalid) + return "", core.E(op, "workspace name is required", fs.ErrInvalid) } path, err := joinWithinRoot(s.rootPath, name) if err != nil { - return "", coreerr.E(op, "workspace path escapes root", err) + return "", core.E(op, "workspace path escapes root", err) } if core.PathDir(path) != s.rootPath { - return "", coreerr.E(op, "invalid workspace name: "+name, fs.ErrPermission) + return "", core.E(op, core.Concat("invalid workspace name: ", name), fs.ErrPermission) } return path, nil } diff --git a/workspace/service_test.go b/workspace/service_test.go index de81b30..157cbcf 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -31,7 +31,7 @@ func newTestService(t *testing.T) (*Service, string) { return svc.(*Service), tempHome } -func TestWorkspace_Good_RoundTrip(t *testing.T) { +func TestService_Workspace_RoundTrip_Good(t *testing.T) { s, tempHome := newTestService(t) id, err := s.CreateWorkspace("test-user", "pass123") @@ -55,7 +55,7 @@ func TestWorkspace_Good_RoundTrip(t *testing.T) { assert.Equal(t, "top secret info", got) } -func TestSwitchWorkspace_Bad_TraversalBlocked(t *testing.T) { +func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { s, tempHome := newTestService(t) outside := 
core.Path(tempHome, ".core", "escaped") @@ -66,7 +66,7 @@ func TestSwitchWorkspace_Bad_TraversalBlocked(t *testing.T) { assert.Empty(t, s.activeWorkspace) } -func TestWorkspaceFileSet_Bad_TraversalBlocked(t *testing.T) { +func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { s, tempHome := newTestService(t) id, err := s.CreateWorkspace("test-user", "pass123") From 5e4bc3b0ac6bf6adea93e782dc9de30f0d1698a1 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 06:18:01 +0000 Subject: [PATCH 04/83] test(ax): cover wrapper APIs and add package docs --- client_test.go | 107 ++++++++++++++++++++++++++++++++++ doc.go | 6 ++ node/node_test.go | 118 ++++++++++++++++++++++++++++++++++++++ store/doc.go | 5 ++ store/medium_test.go | 60 +++++++++++++++++++ workspace/doc.go | 5 ++ workspace/service_test.go | 31 ++++++++++ 7 files changed, 332 insertions(+) create mode 100644 doc.go create mode 100644 store/doc.go create mode 100644 workspace/doc.go diff --git a/client_test.go b/client_test.go index 0383c5b..d59219e 100644 --- a/client_test.go +++ b/client_test.go @@ -1,9 +1,12 @@ package io import ( + goio "io" + "io/fs" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // --- MockMedium Tests --- @@ -43,6 +46,17 @@ func TestClient_MockMedium_Write_Good(t *testing.T) { assert.Equal(t, "new content", m.Files["test.txt"]) } +func TestClient_MockMedium_WriteMode_Good(t *testing.T) { + m := NewMockMedium() + + err := m.WriteMode("secure.txt", "secret", 0600) + require.NoError(t, err) + + content, err := m.Read("secure.txt") + require.NoError(t, err) + assert.Equal(t, "secret", content) +} + func TestClient_MockMedium_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := m.EnsureDir("/path/to/dir") @@ -194,6 +208,68 @@ func TestClient_MockMedium_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("nonexistent")) } +func TestClient_MockMedium_StreamAndFSHelpers_Good(t *testing.T) { + m := NewMockMedium() + 
require.NoError(t, m.EnsureDir("dir")) + require.NoError(t, m.Write("dir/file.txt", "alpha")) + + file, err := m.Open("dir/file.txt") + require.NoError(t, err) + + info, err := file.Stat() + require.NoError(t, err) + assert.Equal(t, "file.txt", info.Name()) + assert.Equal(t, int64(5), info.Size()) + assert.Equal(t, fs.FileMode(0), info.Mode()) + assert.True(t, info.ModTime().IsZero()) + assert.False(t, info.IsDir()) + assert.Nil(t, info.Sys()) + + data, err := goio.ReadAll(file) + require.NoError(t, err) + assert.Equal(t, "alpha", string(data)) + require.NoError(t, file.Close()) + + entries, err := m.List("dir") + require.NoError(t, err) + require.Len(t, entries, 1) + assert.Equal(t, "file.txt", entries[0].Name()) + assert.False(t, entries[0].IsDir()) + assert.Equal(t, fs.FileMode(0), entries[0].Type()) + + entryInfo, err := entries[0].Info() + require.NoError(t, err) + assert.Equal(t, "file.txt", entryInfo.Name()) + assert.Equal(t, int64(5), entryInfo.Size()) + + writer, err := m.Create("created.txt") + require.NoError(t, err) + _, err = writer.Write([]byte("created")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + appendWriter, err := m.Append("created.txt") + require.NoError(t, err) + _, err = appendWriter.Write([]byte(" later")) + require.NoError(t, err) + require.NoError(t, appendWriter.Close()) + + reader, err := m.ReadStream("created.txt") + require.NoError(t, err) + streamed, err := goio.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, "created later", string(streamed)) + require.NoError(t, reader.Close()) + + writeStream, err := m.WriteStream("streamed.txt") + require.NoError(t, err) + _, err = writeStream.Write([]byte("stream output")) + require.NoError(t, err) + require.NoError(t, writeStream.Close()) + + assert.Equal(t, "stream output", m.Files["streamed.txt"]) +} + // --- Wrapper Function Tests --- func TestClient_Read_Good(t *testing.T) { @@ -226,6 +302,37 @@ func TestClient_IsFile_Good(t *testing.T) { assert.False(t, 
IsFile(m, "nonexistent.txt")) } +func TestClient_NewSandboxed_Good(t *testing.T) { + root := t.TempDir() + + m, err := NewSandboxed(root) + require.NoError(t, err) + + require.NoError(t, m.Write("config/app.yaml", "port: 8080")) + + content, err := m.Read("config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) + assert.True(t, m.IsDir("config")) +} + +func TestClient_ReadWriteStream_Good(t *testing.T) { + m := NewMockMedium() + + writer, err := WriteStream(m, "logs/run.txt") + require.NoError(t, err) + _, err = writer.Write([]byte("started")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + reader, err := ReadStream(m, "logs/run.txt") + require.NoError(t, err) + data, err := goio.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, "started", string(data)) + require.NoError(t, reader.Close()) +} + func TestClient_Copy_Good(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() diff --git a/doc.go b/doc.go new file mode 100644 index 0000000..83e4627 --- /dev/null +++ b/doc.go @@ -0,0 +1,6 @@ +// Package io defines the storage abstraction used across CoreGO. +// +// Callers work against Medium so the same code can read and write state from +// sandboxed local paths, in-memory nodes, SQLite, S3, or other backends +// without changing application logic. 
+package io diff --git a/node/node_test.go b/node/node_test.go index 277c8a1..facca7f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -357,6 +357,23 @@ func TestNode_Walk_Options_Good(t *testing.T) { }) } +func TestNode_WalkNode_Good(t *testing.T) { + n := New() + n.AddData("alpha.txt", []byte("alpha")) + n.AddData("nested/beta.txt", []byte("beta")) + + var paths []string + err := n.WalkNode(".", func(p string, d fs.DirEntry, err error) error { + require.NoError(t, err) + paths = append(paths, p) + return nil + }) + require.NoError(t, err) + + sort.Strings(paths) + assert.Equal(t, []string{".", "alpha.txt", "nested", "nested/beta.txt"}, paths) +} + // --------------------------------------------------------------------------- // CopyFile // --------------------------------------------------------------------------- @@ -398,6 +415,107 @@ func TestNode_CopyFile_Ugly(t *testing.T) { assert.Error(t, err) } +func TestNode_CopyTo_Good(t *testing.T) { + n := New() + n.AddData("config/app.yaml", []byte("port: 8080")) + n.AddData("config/env/app.env", []byte("MODE=test")) + + fileTarget := coreio.NewMockMedium() + err := n.CopyTo(fileTarget, "config/app.yaml", "backup/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", fileTarget.Files["backup/app.yaml"]) + + dirTarget := coreio.NewMockMedium() + err = n.CopyTo(dirTarget, "config", "backup/config") + require.NoError(t, err) + assert.Equal(t, "port: 8080", dirTarget.Files["backup/config/app.yaml"]) + assert.Equal(t, "MODE=test", dirTarget.Files["backup/config/env/app.env"]) +} + +func TestNode_CopyTo_Bad(t *testing.T) { + n := New() + err := n.CopyTo(coreio.NewMockMedium(), "missing", "backup/missing") + assert.Error(t, err) +} + +func TestNode_MediumFacade_Good(t *testing.T) { + n := New() + + require.NoError(t, n.Write("docs/readme.txt", "hello")) + require.NoError(t, n.WriteMode("docs/mode.txt", "mode", 0600)) + require.NoError(t, n.FileSet("docs/guide.txt", "guide")) + require.NoError(t, 
n.EnsureDir("ignored")) + + value, err := n.Read("docs/readme.txt") + require.NoError(t, err) + assert.Equal(t, "hello", value) + + value, err = n.FileGet("docs/guide.txt") + require.NoError(t, err) + assert.Equal(t, "guide", value) + + assert.True(t, n.IsFile("docs/readme.txt")) + assert.True(t, n.IsDir("docs")) + + entries, err := n.List("docs") + require.NoError(t, err) + assert.Equal(t, []string{"guide.txt", "mode.txt", "readme.txt"}, sortedNames(entries)) + + file, err := n.Open("docs/readme.txt") + require.NoError(t, err) + info, err := file.Stat() + require.NoError(t, err) + assert.Equal(t, "readme.txt", info.Name()) + assert.Equal(t, fs.FileMode(0444), info.Mode()) + assert.False(t, info.IsDir()) + assert.Nil(t, info.Sys()) + require.NoError(t, file.Close()) + + dir, err := n.Open("docs") + require.NoError(t, err) + dirInfo, err := dir.Stat() + require.NoError(t, err) + assert.Equal(t, "docs", dirInfo.Name()) + assert.True(t, dirInfo.IsDir()) + assert.Equal(t, fs.ModeDir|0555, dirInfo.Mode()) + assert.Nil(t, dirInfo.Sys()) + require.NoError(t, dir.Close()) + + createWriter, err := n.Create("docs/generated.txt") + require.NoError(t, err) + _, err = createWriter.Write([]byte("generated")) + require.NoError(t, err) + require.NoError(t, createWriter.Close()) + + appendWriter, err := n.Append("docs/generated.txt") + require.NoError(t, err) + _, err = appendWriter.Write([]byte(" content")) + require.NoError(t, err) + require.NoError(t, appendWriter.Close()) + + streamReader, err := n.ReadStream("docs/generated.txt") + require.NoError(t, err) + streamData, err := io.ReadAll(streamReader) + require.NoError(t, err) + assert.Equal(t, "generated content", string(streamData)) + require.NoError(t, streamReader.Close()) + + writeStream, err := n.WriteStream("docs/stream.txt") + require.NoError(t, err) + _, err = writeStream.Write([]byte("stream")) + require.NoError(t, err) + require.NoError(t, writeStream.Close()) + + require.NoError(t, n.Rename("docs/stream.txt", 
"docs/stream-renamed.txt")) + assert.True(t, n.Exists("docs/stream-renamed.txt")) + + require.NoError(t, n.Delete("docs/stream-renamed.txt")) + assert.False(t, n.Exists("docs/stream-renamed.txt")) + + require.NoError(t, n.DeleteAll("docs")) + assert.False(t, n.Exists("docs")) +} + // --------------------------------------------------------------------------- // ToTar / FromTar // --------------------------------------------------------------------------- diff --git a/store/doc.go b/store/doc.go new file mode 100644 index 0000000..06e62aa --- /dev/null +++ b/store/doc.go @@ -0,0 +1,5 @@ +// Package store provides a group-namespaced key-value store backed by SQLite. +// +// It also exposes an io.Medium adapter so grouped values can participate in +// the same storage workflows as filesystem-backed mediums. +package store diff --git a/store/medium_test.go b/store/medium_test.go index 400ba61..31809b4 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -2,6 +2,7 @@ package store import ( "io" + "io/fs" "testing" "github.com/stretchr/testify/assert" @@ -200,3 +201,62 @@ func TestMedium_Medium_AsMedium_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, "val", val) } + +func TestMedium_Medium_Store_Good(t *testing.T) { + m := newTestMedium(t) + + assert.NotNil(t, m.Store()) + assert.Same(t, m.Store(), m.Store()) +} + +func TestMedium_Medium_EnsureDir_FileHelpers_Good(t *testing.T) { + m := newTestMedium(t) + + require.NoError(t, m.EnsureDir("ignored")) + require.NoError(t, m.FileSet("grp/key", "value")) + + value, err := m.FileGet("grp/key") + require.NoError(t, err) + assert.Equal(t, "value", value) +} + +func TestMedium_Medium_StreamHelpers_Good(t *testing.T) { + m := newTestMedium(t) + + writer, err := m.WriteStream("grp/key") + require.NoError(t, err) + _, err = writer.Write([]byte("streamed")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + reader, err := m.ReadStream("grp/key") + require.NoError(t, err) + data, err := 
io.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, "streamed", string(data)) + require.NoError(t, reader.Close()) + + file, err := m.Open("grp/key") + require.NoError(t, err) + info, err := file.Stat() + require.NoError(t, err) + assert.Equal(t, "key", info.Name()) + assert.Equal(t, int64(8), info.Size()) + assert.Equal(t, fs.FileMode(0644), info.Mode()) + assert.True(t, info.ModTime().IsZero()) + assert.False(t, info.IsDir()) + assert.Nil(t, info.Sys()) + require.NoError(t, file.Close()) + + entries, err := m.List("grp") + require.NoError(t, err) + require.Len(t, entries, 1) + assert.Equal(t, "key", entries[0].Name()) + assert.False(t, entries[0].IsDir()) + assert.Equal(t, fs.FileMode(0), entries[0].Type()) + + entryInfo, err := entries[0].Info() + require.NoError(t, err) + assert.Equal(t, "key", entryInfo.Name()) + assert.Equal(t, int64(8), entryInfo.Size()) +} diff --git a/workspace/doc.go b/workspace/doc.go new file mode 100644 index 0000000..9da3c51 --- /dev/null +++ b/workspace/doc.go @@ -0,0 +1,5 @@ +// Package workspace provides encrypted user workspaces backed by io.Medium. +// +// Workspaces are rooted under the caller's configured home directory and keep +// file access constrained to the active workspace. 
+package workspace diff --git a/workspace/service_test.go b/workspace/service_test.go index 157cbcf..d2bc99d 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -87,3 +87,34 @@ func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { _, err = s.WorkspaceFileGet("../keys/private.key") require.Error(t, err) } + +func TestService_HandleIPCEvents_Good(t *testing.T) { + s, _ := newTestService(t) + + create := s.HandleIPCEvents(core.New(), map[string]any{ + "action": "workspace.create", + "identifier": "ipc-user", + "password": "pass123", + }) + assert.True(t, create.OK) + + id, ok := create.Value.(string) + require.True(t, ok) + require.NotEmpty(t, id) + + switchResult := s.HandleIPCEvents(core.New(), map[string]any{ + "action": "workspace.switch", + "name": id, + }) + assert.True(t, switchResult.OK) + assert.Equal(t, id, s.activeWorkspace) + + failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ + "action": "workspace.switch", + "name": "missing", + }) + assert.False(t, failedSwitch.OK) + + unknown := s.HandleIPCEvents(core.New(), "noop") + assert.True(t, unknown.OK) +} From bdd925e77174ecde0826c288be5febbf97972bd2 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 09:14:45 +0000 Subject: [PATCH 05/83] Add complete API reference --- docs/RFC.md | 2521 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2521 insertions(+) create mode 100644 docs/RFC.md diff --git a/docs/RFC.md b/docs/RFC.md new file mode 100644 index 0000000..85eef8f --- /dev/null +++ b/docs/RFC.md @@ -0,0 +1,2521 @@ +--- +title: API Reference +description: Complete API reference for go-io. +--- + +# API Reference + +This document enumerates every exported type, function, method, and variable in go-io, with short usage examples. + +Examples use the import paths from `docs/index.md` (`forge.lthn.ai/core/go-io`). Adjust paths if your module path differs. 
+ +## Package io (`forge.lthn.ai/core/go-io`) + +Defines the `Medium` interface, helper functions, and in-memory mock implementations. + +### Medium (interface) + +The common storage abstraction implemented by every backend. + +Example: +```go +var m io.Medium = io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +``` + +**Read(path string) (string, error)** +Reads a file as a string. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") +``` + +**Write(path, content string) error** +Writes content to a file, creating it if needed. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +``` + +**WriteMode(path, content string, mode fs.FileMode) error** +Writes content with explicit permissions. +Example: +```go +m := io.NewMockMedium() +_ = m.WriteMode("secret.txt", "secret", 0600) +``` + +**EnsureDir(path string) error** +Ensures a directory exists. +Example: +```go +m := io.NewMockMedium() +_ = m.EnsureDir("config") +``` + +**IsFile(path string) bool** +Reports whether a path is a regular file. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +ok := m.IsFile("notes.txt") +``` + +**FileGet(path string) (string, error)** +Alias for `Read`. +Example: +```go +m := io.NewMockMedium() +_ = m.FileSet("notes.txt", "hello") +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(path, content string) error** +Alias for `Write`. +Example: +```go +m := io.NewMockMedium() +_ = m.FileSet("notes.txt", "hello") +``` + +**Delete(path string) error** +Deletes a file or empty directory. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("old.txt", "data") +_ = m.Delete("old.txt") +``` + +**DeleteAll(path string) error** +Deletes a file or directory tree recursively. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("logs/run.txt", "started") +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Moves or renames a file or directory. 
+Example: +```go +m := io.NewMockMedium() +_ = m.Write("old.txt", "data") +_ = m.Rename("old.txt", "new.txt") +``` + +**List(path string) ([]fs.DirEntry, error)** +Lists immediate directory entries. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +``` + +**Stat(path string) (fs.FileInfo, error)** +Returns file metadata. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +info, _ := m.Stat("notes.txt") +``` + +**Open(path string) (fs.File, error)** +Opens a file for reading. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +defer f.Close() +``` + +**Create(path string) (io.WriteCloser, error)** +Creates or truncates a file and returns a writer. +Example: +```go +m := io.NewMockMedium() +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(path string) (io.WriteCloser, error)** +Opens a file for appending, creating it if needed. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(path string) (io.ReadCloser, error)** +Opens a streaming reader for a file. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(path string) (io.WriteCloser, error)** +Opens a streaming writer for a file. +Example: +```go +m := io.NewMockMedium() +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Exists(path string) bool** +Reports whether a path exists. +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +ok := m.Exists("notes.txt") +``` + +**IsDir(path string) bool** +Reports whether a path is a directory. 
+Example: +```go +m := io.NewMockMedium() +_ = m.EnsureDir("config") +ok := m.IsDir("config") +``` + +### FileInfo + +Lightweight `fs.FileInfo` implementation used by `MockMedium`. + +**Name() string** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.Name() +``` + +**Size() int64** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.Size() +``` + +**Mode() fs.FileMode** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.Mode() +``` + +**ModTime() time.Time** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.ModTime() +``` + +**IsDir() bool** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.IsDir() +``` + +**Sys() any** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("file.txt", "data") +info, _ := m.Stat("file.txt") +_ = info.Sys() +``` + +### DirEntry + +Lightweight `fs.DirEntry` implementation used by `MockMedium` listings. + +**Name() string** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +_ = entries[0].Name() +``` + +**IsDir() bool** +Example: +```go +m := io.NewMockMedium() +_ = m.EnsureDir("dir") +entries, _ := m.List("") +_ = entries[0].IsDir() +``` + +**Type() fs.FileMode** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +_ = entries[0].Type() +``` + +**Info() (fs.FileInfo, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +info, _ := entries[0].Info() +_ = info.Name() +``` + +### Local + +Pre-initialised local filesystem medium rooted at `/`. 
+ +Example: +```go +content, _ := io.Local.Read("/etc/hostname") +``` + +### NewSandboxed(root string) (Medium, error) + +Creates a local filesystem medium sandboxed to `root`. + +Example: +```go +m, _ := io.NewSandboxed("/srv/app") +_ = m.Write("config/app.yaml", "port: 8080") +``` + +### Read(m Medium, path string) (string, error) + +Helper that calls `Medium.Read` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +value, _ := io.Read(m, "notes.txt") +``` + +### Write(m Medium, path, content string) error + +Helper that calls `Medium.Write` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +_ = io.Write(m, "notes.txt", "hello") +``` + +### ReadStream(m Medium, path string) (io.ReadCloser, error) + +Helper that calls `Medium.ReadStream` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +r, _ := io.ReadStream(m, "notes.txt") +defer r.Close() +``` + +### WriteStream(m Medium, path string) (io.WriteCloser, error) + +Helper that calls `Medium.WriteStream` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +w, _ := io.WriteStream(m, "notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +### EnsureDir(m Medium, path string) error + +Helper that calls `Medium.EnsureDir` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +_ = io.EnsureDir(m, "config") +``` + +### IsFile(m Medium, path string) bool + +Helper that calls `Medium.IsFile` on a supplied backend. + +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +ok := io.IsFile(m, "notes.txt") +``` + +### Copy(src Medium, srcPath string, dst Medium, dstPath string) error + +Copies a file between two mediums. + +Example: +```go +src := io.NewMockMedium() +dst := io.NewMockMedium() +_ = src.Write("source.txt", "data") +_ = io.Copy(src, "source.txt", dst, "dest.txt") +``` + +### MockMedium + +In-memory `Medium` implementation for tests. 
Exposes `Files`, `Dirs`, and `ModTimes` maps for seeding state. + +Example: +```go +m := io.NewMockMedium() +m.Files["seed.txt"] = "seeded" +``` + +**Read(path string) (string, error)** +Example: +```go +m := io.NewMockMedium() +m.Files["notes.txt"] = "hello" +value, _ := m.Read("notes.txt") +``` + +**Write(path, content string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +``` + +**WriteMode(path, content string, mode fs.FileMode) error** +Example: +```go +m := io.NewMockMedium() +_ = m.WriteMode("secret.txt", "secret", 0600) +``` + +**EnsureDir(path string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.EnsureDir("config") +``` + +**IsFile(path string) bool** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +ok := m.IsFile("notes.txt") +``` + +**FileGet(path string) (string, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.FileSet("notes.txt", "hello") +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(path, content string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.FileSet("notes.txt", "hello") +``` + +**Delete(path string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("old.txt", "data") +_ = m.Delete("old.txt") +``` + +**DeleteAll(path string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("logs/run.txt", "started") +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("old.txt", "data") +_ = m.Rename("old.txt", "new.txt") +``` + +**List(path string) ([]fs.DirEntry, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +``` + +**Stat(path string) (fs.FileInfo, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +info, _ := m.Stat("notes.txt") +``` + +**Open(path string) (fs.File, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", 
"hello") +f, _ := m.Open("notes.txt") +defer f.Close() +``` + +**Create(path string) (io.WriteCloser, error)** +Example: +```go +m := io.NewMockMedium() +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(path string) (io.WriteCloser, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(path string) (io.ReadCloser, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(path string) (io.WriteCloser, error)** +Example: +```go +m := io.NewMockMedium() +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Exists(path string) bool** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +ok := m.Exists("notes.txt") +``` + +**IsDir(path string) bool** +Example: +```go +m := io.NewMockMedium() +_ = m.EnsureDir("config") +ok := m.IsDir("config") +``` + +### NewMockMedium() *MockMedium + +Creates a new empty in-memory medium. + +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +``` + +### MockFile + +`fs.File` implementation returned by `MockMedium.Open`. + +**Stat() (fs.FileInfo, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +info, _ := f.Stat() +_ = info.Name() +``` + +**Read(b []byte) (int, error)** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +buf := make([]byte, 5) +_, _ = f.Read(buf) +``` + +**Close() error** +Example: +```go +m := io.NewMockMedium() +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +_ = f.Close() +``` + +### MockWriteCloser + +`io.WriteCloser` implementation returned by `MockMedium.Create` and `MockMedium.Append`. 
+ +**Write(p []byte) (int, error)** +Example: +```go +m := io.NewMockMedium() +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +``` + +**Close() error** +Example: +```go +m := io.NewMockMedium() +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +## Package local (`forge.lthn.ai/core/go-io/local`) + +Local filesystem backend with sandboxed roots and symlink-escape protection. + +### New(root string) (*Medium, error) + +Creates a new local filesystem medium rooted at `root`. + +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("config/app.yaml", "port: 8080") +``` + +### Medium + +Local filesystem implementation of `io.Medium`. + +Example: +```go +m, _ := local.New("/srv/app") +_ = m.EnsureDir("config") +``` + +**Read(path string) (string, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") +``` + +**Write(path, content string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +``` + +**WriteMode(path, content string, mode fs.FileMode) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.WriteMode("secret.txt", "secret", 0600) +``` + +**EnsureDir(path string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.EnsureDir("config") +``` + +**IsDir(path string) bool** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.EnsureDir("config") +ok := m.IsDir("config") +``` + +**IsFile(path string) bool** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +ok := m.IsFile("notes.txt") +``` + +**Exists(path string) bool** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +ok := m.Exists("notes.txt") +``` + +**List(path string) ([]fs.DirEntry, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +``` + +**Stat(path string) (fs.FileInfo, error)** 
+Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +info, _ := m.Stat("notes.txt") +``` + +**Open(path string) (fs.File, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +defer f.Close() +``` + +**Create(path string) (io.WriteCloser, error)** +Example: +```go +m, _ := local.New("/srv/app") +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(path string) (io.WriteCloser, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(path string) (io.ReadCloser, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("notes.txt", "hello") +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(path string) (io.WriteCloser, error)** +Example: +```go +m, _ := local.New("/srv/app") +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Delete(path string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("old.txt", "data") +_ = m.Delete("old.txt") +``` + +**DeleteAll(path string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("logs/run.txt", "started") +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.Write("old.txt", "data") +_ = m.Rename("old.txt", "new.txt") +``` + +**FileGet(path string) (string, error)** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.FileSet("notes.txt", "hello") +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(path, content string) error** +Example: +```go +m, _ := local.New("/srv/app") +_ = m.FileSet("notes.txt", "hello") +``` + +## Package node (`forge.lthn.ai/core/go-io/node`) + +In-memory filesystem implementing `io.Medium` and `fs.FS`, with tar serialisation. 
+
+### New() *Node
+
+Creates a new empty in-memory filesystem.
+
+Example:
+```go
+n := node.New()
+```
+
+### FromTar(data []byte) (*Node, error)
+
+Creates a new `Node` by loading a tar archive.
+
+Example:
+```go
+src := node.New()
+_ = src.Write("a.txt", "alpha")
+tarball, _ := src.ToTar()
+n, _ := node.FromTar(tarball)
+```
+
+### WalkOptions
+
+Options for `Node.Walk`.
+
+Example:
+```go
+opts := node.WalkOptions{MaxDepth: 1, SkipErrors: true}
+_ = opts.MaxDepth
+```
+
+### Node
+
+In-memory filesystem with implicit directories and tar support.
+
+Example:
+```go
+n := node.New()
+n.AddData("config/app.yaml", []byte("port: 8080"))
+```
+
+**AddData(name string, content []byte)**
+Stages content in the in-memory filesystem.
+Example:
+```go
+n := node.New()
+n.AddData("config/app.yaml", []byte("port: 8080"))
+```
+
+**ToTar() ([]byte, error)**
+Serialises the tree to a tar archive.
+Example:
+```go
+n := node.New()
+_ = n.Write("a.txt", "alpha")
+blob, _ := n.ToTar()
+```
+
+**LoadTar(data []byte) error**
+Replaces the tree with a tar archive.
+Example:
+```go
+src := node.New()
+_ = src.Write("a.txt", "alpha")
+blob, _ := src.ToTar()
+n := node.New()
+_ = n.LoadTar(blob)
+```
+
+**WalkNode(root string, fn fs.WalkDirFunc) error**
+Walks the tree using `fs.WalkDir`.
+Example:
+```go
+n := node.New()
+_ = n.WalkNode(".", func(path string, d fs.DirEntry, err error) error {
+	return nil
+})
+```
+
+**Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error**
+Walks the tree with optional depth or filter controls.
+Example:
+```go
+n := node.New()
+_ = n.Walk(".", func(path string, d fs.DirEntry, err error) error {
+	return nil
+}, node.WalkOptions{MaxDepth: 1})
+```
+
+**ReadFile(name string) ([]byte, error)**
+Reads file content as bytes.
+Example:
+```go
+n := node.New()
+_ = n.Write("file.txt", "data")
+b, _ := n.ReadFile("file.txt")
+```
+
+**CopyFile(src, dst string, perm fs.FileMode) error**
+Copies a file to the local filesystem.
+Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +_ = n.CopyFile("file.txt", "/tmp/file.txt", 0644) +``` + +**CopyTo(target io.Medium, sourcePath, destPath string) error** +Copies a file or directory tree to another medium. +Example: +```go +n := node.New() +_ = n.Write("config/app.yaml", "port: 8080") +copyTarget := io.NewMockMedium() +_ = n.CopyTo(copyTarget, "config", "backup/config") +``` + +**Open(name string) (fs.File, error)** +Opens a file for reading. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +f, _ := n.Open("file.txt") +defer f.Close() +``` + +**Stat(name string) (fs.FileInfo, error)** +Returns metadata for a file or directory. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +info, _ := n.Stat("file.txt") +``` + +**ReadDir(name string) ([]fs.DirEntry, error)** +Lists directory entries. +Example: +```go +n := node.New() +_ = n.Write("dir/file.txt", "data") +entries, _ := n.ReadDir("dir") +``` + +**Read(p string) (string, error)** +Reads content as a string. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +value, _ := n.Read("file.txt") +``` + +**Write(p, content string) error** +Writes content to a file. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +``` + +**WriteMode(p, content string, mode fs.FileMode) error** +Writes content with explicit permissions (no-op in memory). +Example: +```go +n := node.New() +_ = n.WriteMode("file.txt", "data", 0600) +``` + +**FileGet(p string) (string, error)** +Alias for `Read`. +Example: +```go +n := node.New() +_ = n.FileSet("file.txt", "data") +value, _ := n.FileGet("file.txt") +``` + +**FileSet(p, content string) error** +Alias for `Write`. +Example: +```go +n := node.New() +_ = n.FileSet("file.txt", "data") +``` + +**EnsureDir(path string) error** +No-op (directories are implicit). +Example: +```go +n := node.New() +_ = n.EnsureDir("dir") +``` + +**Exists(p string) bool** +Reports whether a path exists. 
+Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +ok := n.Exists("file.txt") +``` + +**IsFile(p string) bool** +Reports whether a path is a file. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +ok := n.IsFile("file.txt") +``` + +**IsDir(p string) bool** +Reports whether a path is a directory. +Example: +```go +n := node.New() +_ = n.Write("dir/file.txt", "data") +ok := n.IsDir("dir") +``` + +**Delete(p string) error** +Deletes a file. +Example: +```go +n := node.New() +_ = n.Write("old.txt", "data") +_ = n.Delete("old.txt") +``` + +**DeleteAll(p string) error** +Deletes a file or directory tree. +Example: +```go +n := node.New() +_ = n.Write("logs/run.txt", "started") +_ = n.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Moves a file within the node. +Example: +```go +n := node.New() +_ = n.Write("old.txt", "data") +_ = n.Rename("old.txt", "new.txt") +``` + +**List(p string) ([]fs.DirEntry, error)** +Lists directory entries. +Example: +```go +n := node.New() +_ = n.Write("dir/file.txt", "data") +entries, _ := n.List("dir") +``` + +**Create(p string) (io.WriteCloser, error)** +Creates or truncates a file and returns a writer. +Example: +```go +n := node.New() +w, _ := n.Create("file.txt") +_, _ = w.Write([]byte("data")) +_ = w.Close() +``` + +**Append(p string) (io.WriteCloser, error)** +Appends to a file and returns a writer. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +w, _ := n.Append("file.txt") +_, _ = w.Write([]byte(" more")) +_ = w.Close() +``` + +**ReadStream(p string) (io.ReadCloser, error)** +Opens a streaming reader. +Example: +```go +n := node.New() +_ = n.Write("file.txt", "data") +r, _ := n.ReadStream("file.txt") +defer r.Close() +``` + +**WriteStream(p string) (io.WriteCloser, error)** +Opens a streaming writer. 
+Example: +```go +n := node.New() +w, _ := n.WriteStream("file.txt") +_, _ = w.Write([]byte("data")) +_ = w.Close() +``` + +## Package store (`forge.lthn.ai/core/go-io/store`) + +Group-namespaced key-value store backed by SQLite, plus a `Medium` adapter. + +### ErrNotFound + +Returned when a key does not exist. + +Example: +```go +s, _ := store.New(":memory:") +_, err := s.Get("config", "missing") +if core.Is(err, store.ErrNotFound) { + // handle missing key +} +``` + +### New(dbPath string) (*Store, error) + +Creates a new `Store` at the SQLite path. + +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +``` + +### Store + +Group-namespaced key-value store. + +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +``` + +**Close() error** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Close() +``` + +**Get(group, key string) (string, error)** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +value, _ := s.Get("config", "theme") +``` + +**Set(group, key, value string) error** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +``` + +**Delete(group, key string) error** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +_ = s.Delete("config", "theme") +``` + +**Count(group string) (int, error)** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +count, _ := s.Count("config") +``` + +**DeleteGroup(group string) error** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +_ = s.DeleteGroup("config") +``` + +**GetAll(group string) (map[string]string, error)** +Example: +```go +s, _ := store.New(":memory:") +_ = s.Set("config", "theme", "midnight") +all, _ := s.GetAll("config") +``` + +**Render(tmplStr, group string) (string, error)** +Example: +```go +s, _ := store.New(":memory:") +_ = 
s.Set("user", "name", "alice") +out, _ := s.Render("hello {{ .name }}", "user") +``` + +**AsMedium() *Medium** +Example: +```go +s, _ := store.New(":memory:") +m := s.AsMedium() +_ = m.Write("config/theme", "midnight") +``` + +### NewMedium(dbPath string) (*Medium, error) + +Creates an `io.Medium` backed by a SQLite key-value store. + +Example: +```go +m, _ := store.NewMedium("config.db") +_ = m.Write("config/theme", "midnight") +``` + +### Medium + +Adapter that maps `group/key` paths onto a `Store`. + +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +``` + +**Store() *Store** +Example: +```go +m, _ := store.NewMedium(":memory:") +s := m.Store() +_ = s.Set("config", "theme", "midnight") +``` + +**Close() error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Close() +``` + +**Read(p string) (string, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +value, _ := m.Read("config/theme") +``` + +**Write(p, content string) error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +``` + +**EnsureDir(path string) error** +No-op (groups are implicit). 
+Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.EnsureDir("config") +``` + +**IsFile(p string) bool** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +ok := m.IsFile("config/theme") +``` + +**FileGet(p string) (string, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.FileSet("config/theme", "midnight") +value, _ := m.FileGet("config/theme") +``` + +**FileSet(p, content string) error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.FileSet("config/theme", "midnight") +``` + +**Delete(p string) error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +_ = m.Delete("config/theme") +``` + +**DeleteAll(p string) error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +_ = m.DeleteAll("config") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("old/theme", "midnight") +_ = m.Rename("old/theme", "new/theme") +``` + +**List(p string) ([]fs.DirEntry, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +entries, _ := m.List("") +``` + +**Stat(p string) (fs.FileInfo, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +info, _ := m.Stat("config/theme") +``` + +**Open(p string) (fs.File, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +f, _ := m.Open("config/theme") +defer f.Close() +``` + +**Create(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +w, _ := m.Create("config/theme") +_, _ = w.Write([]byte("midnight")) +_ = w.Close() +``` + +**Append(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +w, _ := m.Append("config/theme") +_, _ = 
w.Write([]byte(" plus")) +_ = w.Close() +``` + +**ReadStream(p string) (io.ReadCloser, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +r, _ := m.ReadStream("config/theme") +defer r.Close() +``` + +**WriteStream(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := store.NewMedium(":memory:") +w, _ := m.WriteStream("config/theme") +_, _ = w.Write([]byte("midnight")) +_ = w.Close() +``` + +**Exists(p string) bool** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +ok := m.Exists("config") +``` + +**IsDir(p string) bool** +Example: +```go +m, _ := store.NewMedium(":memory:") +_ = m.Write("config/theme", "midnight") +ok := m.IsDir("config") +``` + +## Package sqlite (`forge.lthn.ai/core/go-io/sqlite`) + +SQLite-backed `io.Medium` implementation using the pure-Go driver. + +### Option + +Functional option for configuring `Medium`. + +Example: +```go +opt := sqlite.WithTable("files") +_ = opt +``` + +### WithTable(table string) Option + +Sets the table name used for storage (default: `files`). + +Example: +```go +m, _ := sqlite.New(":memory:", sqlite.WithTable("files")) +``` + +### New(dbPath string, opts ...Option) (*Medium, error) + +Creates a new SQLite-backed medium. + +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +``` + +### Medium + +SQLite-backed storage backend. 
+ +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +``` + +**Close() error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Close() +``` + +**Read(p string) (string, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") +``` + +**Write(p, content string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +``` + +**EnsureDir(p string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.EnsureDir("config") +``` + +**IsFile(p string) bool** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +ok := m.IsFile("notes.txt") +``` + +**FileGet(p string) (string, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.FileSet("notes.txt", "hello") +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(p, content string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.FileSet("notes.txt", "hello") +``` + +**Delete(p string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("old.txt", "data") +_ = m.Delete("old.txt") +``` + +**DeleteAll(p string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("logs/run.txt", "started") +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("old.txt", "data") +_ = m.Rename("old.txt", "new.txt") +``` + +**List(p string) ([]fs.DirEntry, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +``` + +**Stat(p string) (fs.FileInfo, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +info, _ := m.Stat("notes.txt") +``` + +**Open(p string) (fs.File, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +defer f.Close() +``` + 
+**Create(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(p string) (io.ReadCloser, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(p string) (io.WriteCloser, error)** +Example: +```go +m, _ := sqlite.New(":memory:") +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Exists(p string) bool** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.Write("notes.txt", "hello") +ok := m.Exists("notes.txt") +``` + +**IsDir(p string) bool** +Example: +```go +m, _ := sqlite.New(":memory:") +_ = m.EnsureDir("config") +ok := m.IsDir("config") +``` + +## Package s3 (`forge.lthn.ai/core/go-io/s3`) + +Amazon S3-backed `io.Medium` implementation. + +### Option + +Functional option for configuring `Medium`. + +Example: +```go +opt := s3.WithPrefix("daily/") +_ = opt +``` + +### WithPrefix(prefix string) Option + +Sets a key prefix for all operations. + +Example: +```go +m, _ := s3.New("bucket", s3.WithClient(awsClient), s3.WithPrefix("daily/")) +``` + +### WithClient(client *awss3.Client) Option + +Supplies an AWS SDK S3 client. + +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +``` + +### New(bucket string, opts ...Option) (*Medium, error) + +Creates a new S3-backed medium. + +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +``` + +### Medium + +S3-backed storage backend. 
+ +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +``` + +**Read(p string) (string, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +value, _ := m.Read("notes.txt") +``` + +**Write(p, content string) error** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.Write("notes.txt", "hello") +``` + +**EnsureDir(path string) error** +No-op (S3 has no directories). +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.EnsureDir("config") +``` + +**IsFile(p string) bool** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +ok := m.IsFile("notes.txt") +``` + +**FileGet(p string) (string, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(p, content string) error** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.FileSet("notes.txt", "hello") +``` + +**Delete(p string) error** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.Delete("old.txt") +``` + +**DeleteAll(p string) error** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +_ = m.Rename("old.txt", "new.txt") +``` + +**List(p string) ([]fs.DirEntry, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +entries, _ := m.List("dir") +``` + +**Stat(p string) (fs.FileInfo, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ 
:= s3.New("bucket", s3.WithClient(client)) +info, _ := m.Stat("notes.txt") +``` + +**Open(p string) (fs.File, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +f, _ := m.Open("notes.txt") +defer f.Close() +``` + +**Create(p string) (io.WriteCloser, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(p string) (io.WriteCloser, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(p string) (io.ReadCloser, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(p string) (io.WriteCloser, error)** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Exists(p string) bool** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +ok := m.Exists("notes.txt") +``` + +**IsDir(p string) bool** +Example: +```go +client := awss3.NewFromConfig(cfg) +m, _ := s3.New("bucket", s3.WithClient(client)) +ok := m.IsDir("logs") +``` + +## Package datanode (`forge.lthn.ai/core/go-io/datanode`) + +In-memory `io.Medium` backed by Borg's DataNode, with tar snapshot/restore support. + +### New() *Medium + +Creates a new empty DataNode-backed medium. + +Example: +```go +m := datanode.New() +_ = m.Write("jobs/run.log", "started") +``` + +### FromTar(data []byte) (*Medium, error) + +Restores a medium from a tar archive. 
+ +Example: +```go +m, _ := datanode.FromTar([]byte{}) +``` + +### Medium + +Thread-safe in-memory medium using Borg DataNode. + +Example: +```go +m := datanode.New() +_ = m.Write("jobs/run.log", "started") +``` + +**Snapshot() ([]byte, error)** +Serialises the filesystem to a tarball. +Example: +```go +m := datanode.New() +_ = m.Write("jobs/run.log", "started") +snap, _ := m.Snapshot() +``` + +**Restore(data []byte) error** +Replaces the filesystem from a tarball. +Example: +```go +m := datanode.New() +_ = m.Restore([]byte{}) +``` + +**DataNode() *datanode.DataNode** +Returns the underlying Borg DataNode. +Example: +```go +m := datanode.New() +dn := m.DataNode() +_ = dn +``` + +**Read(p string) (string, error)** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") +``` + +**Write(p, content string) error** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +``` + +**WriteMode(p, content string, mode fs.FileMode) error** +Example: +```go +m := datanode.New() +_ = m.WriteMode("notes.txt", "hello", 0600) +``` + +**EnsureDir(p string) error** +Example: +```go +m := datanode.New() +_ = m.EnsureDir("config") +``` + +**IsFile(p string) bool** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +ok := m.IsFile("notes.txt") +``` + +**FileGet(p string) (string, error)** +Example: +```go +m := datanode.New() +_ = m.FileSet("notes.txt", "hello") +value, _ := m.FileGet("notes.txt") +``` + +**FileSet(p, content string) error** +Example: +```go +m := datanode.New() +_ = m.FileSet("notes.txt", "hello") +``` + +**Delete(p string) error** +Example: +```go +m := datanode.New() +_ = m.Write("old.txt", "data") +_ = m.Delete("old.txt") +``` + +**DeleteAll(p string) error** +Example: +```go +m := datanode.New() +_ = m.Write("logs/run.txt", "started") +_ = m.DeleteAll("logs") +``` + +**Rename(oldPath, newPath string) error** +Example: +```go +m := datanode.New() +_ = m.Write("old.txt", "data") 
+_ = m.Rename("old.txt", "new.txt") +``` + +**List(p string) ([]fs.DirEntry, error)** +Example: +```go +m := datanode.New() +_ = m.Write("dir/file.txt", "data") +entries, _ := m.List("dir") +``` + +**Stat(p string) (fs.FileInfo, error)** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +info, _ := m.Stat("notes.txt") +``` + +**Open(p string) (fs.File, error)** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +f, _ := m.Open("notes.txt") +defer f.Close() +``` + +**Create(p string) (io.WriteCloser, error)** +Example: +```go +m := datanode.New() +w, _ := m.Create("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Append(p string) (io.WriteCloser, error)** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +w, _ := m.Append("notes.txt") +_, _ = w.Write([]byte(" world")) +_ = w.Close() +``` + +**ReadStream(p string) (io.ReadCloser, error)** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +r, _ := m.ReadStream("notes.txt") +defer r.Close() +``` + +**WriteStream(p string) (io.WriteCloser, error)** +Example: +```go +m := datanode.New() +w, _ := m.WriteStream("notes.txt") +_, _ = w.Write([]byte("hello")) +_ = w.Close() +``` + +**Exists(p string) bool** +Example: +```go +m := datanode.New() +_ = m.Write("notes.txt", "hello") +ok := m.Exists("notes.txt") +``` + +**IsDir(p string) bool** +Example: +```go +m := datanode.New() +_ = m.EnsureDir("config") +ok := m.IsDir("config") +``` + +## Package workspace (`forge.lthn.ai/core/go-io/workspace`) + +Encrypted user workspace management. + +### Workspace (interface) + +Defines the workspace operations exposed by the service. 
+ +Example: +```go +var ws workspace.Workspace = &workspace.Service{} +_ = ws +``` + +**CreateWorkspace(identifier, password string) (string, error)** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +wsID, _ := svc.CreateWorkspace("user", "pass") +``` + +**SwitchWorkspace(name string) error** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc.SwitchWorkspace("workspace-id") +``` + +**WorkspaceFileGet(filename string) (string, error)** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +value, _ := svc.WorkspaceFileGet("notes.txt") +``` + +**WorkspaceFileSet(filename, content string) error** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc.WorkspaceFileSet("notes.txt", "hello") +``` + +### New(c *core.Core, crypt ...cryptProvider) (any, error) + +Creates a new workspace service. Returns `*Service` as `any`. + +Example: +```go +type stubCrypt struct{} +func (stubCrypt) CreateKeyPair(name, passphrase string) (string, error) { return "key", nil } + +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc +``` + +### Service + +Implements `Workspace` and handles IPC messages. 
+ +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc +``` + +**CreateWorkspace(identifier, password string) (string, error)** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +wsID, _ := svc.CreateWorkspace("user", "pass") +``` + +**SwitchWorkspace(name string) error** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc.SwitchWorkspace("workspace-id") +``` + +**WorkspaceFileGet(filename string) (string, error)** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +value, _ := svc.WorkspaceFileGet("notes.txt") +``` + +**WorkspaceFileSet(filename, content string) error** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +_ = svc.WorkspaceFileSet("notes.txt", "hello") +``` + +**HandleIPCEvents(c *core.Core, msg core.Message) core.Result** +Example: +```go +svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svc := svcAny.(*workspace.Service) +result := svc.HandleIPCEvents(core.New(), map[string]any{ + "action": "workspace.create", + "identifier": "user", + "password": "pass", +}) +_ = result.OK +``` + +## Package sigil (`forge.lthn.ai/core/go-io/sigil`) + +Composable data-transformation sigils for encoding, compression, hashing, and encryption. + +### Sigil (interface) + +Defines the transformation contract. + +Example: +```go +var s sigil.Sigil = &sigil.HexSigil{} +out, _ := s.In([]byte("hello")) +_ = out +``` + +**In(data []byte) ([]byte, error)** +Applies the forward transformation. +Example: +```go +s := &sigil.HexSigil{} +encoded, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Applies the reverse transformation. 
+Example: +```go +s := &sigil.HexSigil{} +encoded, _ := s.In([]byte("hello")) +decoded, _ := s.Out(encoded) +``` + +### Transmute(data []byte, sigils []Sigil) ([]byte, error) + +Applies `In` across a chain of sigils. + +Example: +```go +hexSigil, _ := sigil.NewSigil("hex") +base64Sigil, _ := sigil.NewSigil("base64") +encoded, _ := sigil.Transmute([]byte("hello"), []sigil.Sigil{hexSigil, base64Sigil}) +``` + +### Untransmute(data []byte, sigils []Sigil) ([]byte, error) + +Reverses a transmutation by applying `Out` in reverse order. + +Example: +```go +hexSigil, _ := sigil.NewSigil("hex") +base64Sigil, _ := sigil.NewSigil("base64") +encoded, _ := sigil.Transmute([]byte("hello"), []sigil.Sigil{hexSigil, base64Sigil}) +plain, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, base64Sigil}) +``` + +### ReverseSigil + +Reverses byte order (symmetric). + +Example: +```go +s := &sigil.ReverseSigil{} +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.ReverseSigil{} +reversed, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.ReverseSigil{} +reversed, _ := s.In([]byte("hello")) +restored, _ := s.Out(reversed) +``` + +### HexSigil + +Encodes/decodes hexadecimal. + +Example: +```go +s := &sigil.HexSigil{} +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.HexSigil{} +encoded, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.HexSigil{} +encoded, _ := s.In([]byte("hello")) +decoded, _ := s.Out(encoded) +``` + +### Base64Sigil + +Encodes/decodes base64. 
+ +Example: +```go +s := &sigil.Base64Sigil{} +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.Base64Sigil{} +encoded, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.Base64Sigil{} +encoded, _ := s.In([]byte("hello")) +decoded, _ := s.Out(encoded) +``` + +### GzipSigil + +Compresses/decompresses gzip payloads. + +Example: +```go +s := &sigil.GzipSigil{} +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.GzipSigil{} +compressed, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.GzipSigil{} +compressed, _ := s.In([]byte("hello")) +plain, _ := s.Out(compressed) +``` + +### JSONSigil + +Compacts or indents JSON (depending on `Indent`). + +Example: +```go +s := &sigil.JSONSigil{Indent: true} +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := &sigil.JSONSigil{Indent: false} +compacted, _ := s.In([]byte(`{"key":"value"}`)) +``` + +**Out(data []byte) ([]byte, error)** +No-op for `JSONSigil`. +Example: +```go +s := &sigil.JSONSigil{Indent: false} +pass, _ := s.Out([]byte(`{"key":"value"}`)) +``` + +### HashSigil + +Hashes input using the configured `crypto.Hash`. + +Example: +```go +s := sigil.NewHashSigil(crypto.SHA256) +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +s := sigil.NewHashSigil(crypto.SHA256) +digest, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +No-op for hash sigils. +Example: +```go +s := sigil.NewHashSigil(crypto.SHA256) +digest, _ := s.In([]byte("hello")) +pass, _ := s.Out(digest) +``` + +### NewHashSigil(h crypto.Hash) *HashSigil + +Creates a new `HashSigil`. + +Example: +```go +s := sigil.NewHashSigil(crypto.SHA256) +``` + +### NewSigil(name string) (Sigil, error) + +Factory for built-in sigils. + +Example: +```go +s, _ := sigil.NewSigil("hex") +``` + +### ErrInvalidKey + +Returned when an encryption key is not 32 bytes. 
+ +Example: +```go +_, err := sigil.NewChaChaPolySigil([]byte("short")) +if errors.Is(err, sigil.ErrInvalidKey) { + // handle invalid key +} +``` + +### ErrCiphertextTooShort + +Returned when ciphertext is too short to decrypt. + +Example: +```go +_, err := sigil.GetNonceFromCiphertext([]byte("short")) +if errors.Is(err, sigil.ErrCiphertextTooShort) { + // handle truncated payload +} +``` + +### ErrDecryptionFailed + +Returned when decryption or authentication fails. + +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +_, err := s.Out([]byte("tampered")) +if errors.Is(err, sigil.ErrDecryptionFailed) { + // handle failed decrypt +} +``` + +### ErrNoKeyConfigured + +Returned when a `ChaChaPolySigil` has no key. + +Example: +```go +s := &sigil.ChaChaPolySigil{} +_, err := s.In([]byte("data")) +if errors.Is(err, sigil.ErrNoKeyConfigured) { + // handle missing key +} +``` + +### PreObfuscator (interface) + +Defines pre-obfuscation hooks for encryption sigils. + +Example: +```go +var ob sigil.PreObfuscator = &sigil.XORObfuscator{} +_ = ob +``` + +**Obfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.XORObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +``` + +**Deobfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.XORObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +plain := ob.Deobfuscate(masked, []byte("nonce")) +``` + +### XORObfuscator + +XOR-based pre-obfuscator. 
+ +Example: +```go +ob := &sigil.XORObfuscator{} +``` + +**Obfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.XORObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +``` + +**Deobfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.XORObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +plain := ob.Deobfuscate(masked, []byte("nonce")) +``` + +### ShuffleMaskObfuscator + +Shuffle + mask pre-obfuscator. + +Example: +```go +ob := &sigil.ShuffleMaskObfuscator{} +``` + +**Obfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.ShuffleMaskObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +``` + +**Deobfuscate(data []byte, entropy []byte) []byte** +Example: +```go +ob := &sigil.ShuffleMaskObfuscator{} +masked := ob.Obfuscate([]byte("hello"), []byte("nonce")) +plain := ob.Deobfuscate(masked, []byte("nonce")) +``` + +### ChaChaPolySigil + +XChaCha20-Poly1305 encryption sigil with optional pre-obfuscation. + +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +``` + +**In(data []byte) ([]byte, error)** +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +ciphertext, _ := s.In([]byte("hello")) +``` + +**Out(data []byte) ([]byte, error)** +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +ciphertext, _ := s.In([]byte("hello")) +plain, _ := s.Out(ciphertext) +``` + +### NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) + +Creates an encryption sigil with the default XOR obfuscator. + +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +``` + +### NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) + +Creates an encryption sigil with a custom obfuscator. 
+ +Example: +```go +key := make([]byte, 32) +ob := &sigil.ShuffleMaskObfuscator{} +s, _ := sigil.NewChaChaPolySigilWithObfuscator(key, ob) +``` + +### GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) + +Extracts the XChaCha20 nonce from encrypted output. + +Example: +```go +key := make([]byte, 32) +s, _ := sigil.NewChaChaPolySigil(key) +ciphertext, _ := s.In([]byte("hello")) +nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) +``` From 61193c0b2f29128f085e330f966e77eba463c3fc Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 14:04:36 +0000 Subject: [PATCH 06/83] fix: use UK English spelling throughout Co-Authored-By: Virgil --- sigil/crypto_sigil.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index b04e0b2..16d3c49 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -1,6 +1,6 @@ // This file implements the Pre-Obfuscation Layer Protocol with // XChaCha20-Poly1305 encryption. The protocol applies a reversible transformation -// to plaintext BEFORE it reaches CPU encryption routines, providing defense-in-depth +// to plaintext BEFORE it reaches CPU encryption routines, providing defence-in-depth // against side-channel attacks. // // The encryption flow is: @@ -35,7 +35,7 @@ var ( // PreObfuscator applies a reversible transformation to data before encryption. // This ensures that raw plaintext patterns are never sent directly to CPU -// encryption routines, providing defense against side-channel attacks. +// encryption routines, providing defence against side-channel attacks. 
// // Implementations must be deterministic: given the same entropy, the transformation // must be perfectly reversible: Deobfuscate(Obfuscate(x, e), e) == x From aaf0aca661ae55c10f54a8e97fbcfe42fcb9db45 Mon Sep 17 00:00:00 2001 From: Snider Date: Mon, 30 Mar 2026 20:27:13 +0100 Subject: [PATCH 07/83] docs: add AX design principles RFC for agent dispatch Co-Authored-By: Virgil --- docs/RFC-CORE-008-AGENT-EXPERIENCE.md | 440 ++++++++++++++++++++++++++ 1 file changed, 440 insertions(+) create mode 100644 docs/RFC-CORE-008-AGENT-EXPERIENCE.md diff --git a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md new file mode 100644 index 0000000..3763521 --- /dev/null +++ b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md @@ -0,0 +1,440 @@ +# RFC-CORE-008: Agent Experience (AX) Design Principles + +- **Status:** Draft +- **Authors:** Snider, Cladius +- **Date:** 2026-03-19 +- **Applies to:** All Core ecosystem packages (CoreGO, CorePHP, CoreTS, core-agent) + +## Abstract + +Agent Experience (AX) is a design paradigm for software systems where the primary code consumer is an AI agent, not a human developer. AX sits alongside User Experience (UX) and Developer Experience (DX) as the third era of interface design. + +This RFC establishes AX as a formal design principle for the Core ecosystem and defines the conventions that follow from it. + +## Motivation + +As of early 2026, AI agents write, review, and maintain the majority of code in the Core ecosystem. The original author has not manually edited code (outside of Core struct design) since October 2025. Code is processed semantically — agents reason about intent, not characters. 
+ +Design patterns inherited from the human-developer era optimise for the wrong consumer: + +- **Short names** save keystrokes but increase semantic ambiguity +- **Functional option chains** are fluent for humans but opaque for agents tracing configuration +- **Error-at-every-call-site** produces 50% boilerplate that obscures intent +- **Generic type parameters** force agents to carry type context that the runtime already has +- **Panic-hiding conventions** (`Must*`) create implicit control flow that agents must special-case + +AX acknowledges this shift and provides principles for designing code, APIs, file structures, and conventions that serve AI agents as first-class consumers. + +## The Three Eras + +| Era | Primary Consumer | Optimises For | Key Metric | +|-----|-----------------|---------------|------------| +| UX | End users | Discoverability, forgiveness, visual clarity | Task completion time | +| DX | Developers | Typing speed, IDE support, convention familiarity | Time to first commit | +| AX | AI agents | Predictability, composability, semantic navigation | Correct-on-first-pass rate | + +AX does not replace UX or DX. End users still need good UX. Developers still need good DX. But when the primary code author and maintainer is an AI agent, the codebase should be designed for that consumer first. + +## Principles + +### 1. Predictable Names Over Short Names + +Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead. + +``` +Config not Cfg +Service not Srv +Embed not Emb +Error not Err (as a subsystem name; err for local variables is fine) +Options not Opts +``` + +**Rule:** If a name would require a comment to explain, it is too short. + +**Exception:** Industry-standard abbreviations that are universally understood (`HTTP`, `URL`, `ID`, `IPC`, `I18n`) are acceptable. The test: would an agent trained on any mainstream language recognise it without context? + +### 2. 
Comments as Usage Examples + +The function signature tells WHAT. The comment shows HOW with real values. + +```go +// Detect the project type from files present +setup.Detect("/path/to/project") + +// Set up a workspace with auto-detected template +setup.Run(setup.Options{Path: ".", Template: "auto"}) + +// Scaffold a PHP module workspace +setup.Run(setup.Options{Path: "./my-module", Template: "php"}) +``` + +**Rule:** If a comment restates what the type signature already says, delete it. If a comment shows a concrete usage with realistic values, keep it. + +**Rationale:** Agents learn from examples more effectively than from descriptions. A comment like "Run executes the setup process" adds zero information. A comment like `setup.Run(setup.Options{Path: ".", Template: "auto"})` teaches an agent exactly how to call the function. + +### 3. Path Is Documentation + +File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README. + +``` +flow/deploy/to/homelab.yaml — deploy TO the homelab +flow/deploy/from/github.yaml — deploy FROM GitHub +flow/code/review.yaml — code review flow +template/file/go/struct.go.tmpl — Go struct file template +template/dir/workspace/php/ — PHP workspace scaffold +``` + +**Rule:** If an agent needs to read a file to understand what a directory contains, the directory naming has failed. + +**Corollary:** The unified path convention (folder structure = HTTP route = CLI command = test path) is AX-native. One path, every surface. + +### 4. Templates Over Freeform + +When an agent generates code from a template, the output is constrained to known-good shapes. When an agent writes freeform, the output varies. + +```go +// Template-driven — consistent output +lib.RenderFile("php/action", data) +lib.ExtractDir("php", targetDir, data) + +// Freeform — variance in output +"write a PHP action class that..." 
+``` + +**Rule:** For any code pattern that recurs, provide a template. Templates are guardrails for agents. + +**Scope:** Templates apply to file generation, workspace scaffolding, config generation, and commit messages. They do NOT apply to novel logic — agents should write business logic freeform with the domain knowledge available. + +### 5. Declarative Over Imperative + +Agents reason better about declarations of intent than sequences of operations. + +```yaml +# Declarative — agent sees what should happen +steps: + - name: build + flow: tools/docker-build + with: + context: "{{ .app_dir }}" + image_name: "{{ .image_name }}" + + - name: deploy + flow: deploy/with/docker + with: + host: "{{ .host }}" +``` + +```go +// Imperative — agent must trace execution +cmd := exec.Command("docker", "build", "--platform", "linux/amd64", "-t", imageName, ".") +cmd.Dir = appDir +if err := cmd.Run(); err != nil { + return fmt.Errorf("docker build: %w", err) +} +``` + +**Rule:** Orchestration, configuration, and pipeline logic should be declarative (YAML/JSON). Implementation logic should be imperative (Go/PHP/TS). The boundary is: if an agent needs to compose or modify the logic, make it declarative. + +### 6. Universal Types (Core Primitives) + +Every component in the ecosystem accepts and returns the same primitive types. An agent processing any level of the tree sees identical shapes. 
+ +```go +// Universal contract +setup.Run(core.Options{Path: ".", Template: "auto"}) +brain.New(core.Options{Name: "openbrain"}) +deploy.Run(core.Options{Flow: "deploy/to/homelab"}) + +// Fractal — Core itself is a Service +core.New(core.Options{ + Services: []core.Service{ + process.New(core.Options{Name: "process"}), + brain.New(core.Options{Name: "brain"}), + }, +}) +``` + +**Core primitive types:** + +| Type | Purpose | +|------|---------| +| `core.Options` | Input configuration (what you want) | +| `core.Config` | Runtime settings (what is active) | +| `core.Data` | Embedded or stored content | +| `core.Service` | A managed component with lifecycle | +| `core.Result[T]` | Return value with OK/fail state | + +**What this replaces:** + +| Go Convention | Core AX | Why | +|--------------|---------|-----| +| `func With*(v) Option` | `core.Options{Field: v}` | Struct literal is parseable; option chain requires tracing | +| `func Must*(v) T` | `core.Result[T]` | No hidden panics; errors flow through Core | +| `func *For[T](c) T` | `c.Service("name")` | String lookup is greppable; generics require type context | +| `val, err :=` everywhere | Single return via `core.Result` | Intent not obscured by error handling | +| `_ = err` | Never needed | Core handles all errors internally | + +### 7. Directory as Semantics + +The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins. + +``` +plans/ +├── code/ # Pure primitives — read for WHAT exists +├── project/ # Products — read for WHAT we're building and WHY +└── rfc/ # Contracts — read for constraints and rules +``` + +**Rule:** An agent should know what kind of document it's reading from the path alone. `code/core/go/io/RFC.md` = a lib primitive spec. `project/ofm/RFC.md` = a product spec that cross-references code/. `rfc/snider/borg/RFC-BORG-006-SMSG-FORMAT.md` = an immutable contract for the Borg SMSG protocol. 
+ +**Corollary:** The three-way split (code/project/rfc) extends principle 3 (Path Is Documentation) from files to entire subtrees. The path IS the metadata. + +### 8. Lib Never Imports Consumer + +Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library. + +``` +code/core/go/* → lib tier (stable foundation) +code/core/agent/ → consumer tier (composes from go/*) +code/core/cli/ → consumer tier (composes from go/*) +code/core/gui/ → consumer tier (composes from go/*) +``` + +**Rule:** If package A is in `go/` and package B is in the consumer tier, B may import A but A must never import B. The repo naming convention enforces this: `go-{name}` = lib, bare `{name}` = consumer. + +**Why this matters for agents:** When an agent is dispatched to implement a feature in `core/agent`, it can freely import from `go-io`, `go-scm`, `go-process`. But if an agent is dispatched to `go-io`, it knows its changes are foundational — every consumer depends on it, so the contract must not break. + +### 9. Issues Are N+(rounds) Deep + +Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process. + +``` +Pass 1: Find 16 issues (surface — naming, imports, obvious errors) +Pass 2: Find 11 issues (structural — contradictions, missing types) +Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps) +Pass 4: Find 4 issues (contract — cross-spec API mismatches) +Pass 5: Find 2 issues (mechanical — path format, nil safety) +Pass N: Findings are trivial → spec/code is complete +``` + +**Rule:** Iteration is required, not a failure. Each pass sees what the previous pass could not, because the context changed. An agent dispatched with the same task on the same repo will find different things each time — this is correct behaviour. 
+ +**Corollary:** The cheapest model should do the most passes (surface work). The frontier model should arrive last, when only deep issues remain. Tiered iteration: grunt model grinds → mid model pre-warms → frontier model polishes. + +**Anti-pattern:** One-shot generation expecting valid output. No model, no human, produces correct-on-first-pass for non-trivial work. Expecting it wastes the first pass on surface issues that a cheaper pass would have caught. + +### 10. CLI Tests as Artefact Validation + +Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test. + +``` +tests/cli/ +├── core/ +│ └── lint/ +│ ├── Taskfile.yaml ← test `core-lint` (root) +│ ├── run/ +│ │ ├── Taskfile.yaml ← test `core-lint run` +│ │ └── fixtures/ +│ ├── go/ +│ │ ├── Taskfile.yaml ← test `core-lint go` +│ │ └── fixtures/ +│ └── security/ +│ ├── Taskfile.yaml ← test `core-lint security` +│ └── fixtures/ +``` + +**Rule:** Every CLI command has a matching `tests/cli/{path}/Taskfile.yaml`. The Taskfile runs the compiled binary against fixtures with known inputs and validates the output. If the CLI test passes, the underlying actions work — because CLI commands call actions, MCP tools call actions, API endpoints call actions. Test the CLI, trust the rest. + +**Pattern:** + +```yaml +# tests/cli/core/lint/go/Taskfile.yaml +version: '3' +tasks: + test: + cmds: + - core-lint go --output json fixtures/ > /tmp/result.json + - jq -e '.findings | length > 0' /tmp/result.json + - jq -e '.summary.passed == false' /tmp/result.json +``` + +**Why this matters for agents:** An agent can validate its own work by running `task test` in the matching `tests/cli/` directory. No test framework, no mocking, no setup — just the binary, fixtures, and `jq` assertions. The agent builds the binary, runs the test, sees the result. If it fails, the agent can read the fixture, read the output, and fix the code. 
+ +**Corollary:** Fixtures are planted bugs. Each fixture file has a known issue that the linter must find. If the linter doesn't find it, the test fails. Fixtures are the spec for what the tool must detect — they ARE the test cases, not descriptions of test cases. + +## Applying AX to Existing Patterns + +### File Structure + +``` +# AX-native: path describes content +core/agent/ +├── go/ # Go source +├── php/ # PHP source +├── ui/ # Frontend source +├── claude/ # Claude Code plugin +└── codex/ # Codex plugin + +# Not AX: generic names requiring README +src/ +├── lib/ +├── utils/ +└── helpers/ +``` + +### Error Handling + +```go +// AX-native: errors are infrastructure, not application logic +svc := c.Service("brain") +cfg := c.Config().Get("database.host") +// Errors logged by Core. Code reads like a spec. + +// Not AX: errors dominate the code +svc, err := c.ServiceFor[brain.Service]() +if err != nil { + return fmt.Errorf("get brain service: %w", err) +} +cfg, err := c.Config().Get("database.host") +if err != nil { + _ = err // silenced because "it'll be fine" +} +``` + +### API Design + +```go +// AX-native: one shape, every surface +core.New(core.Options{ + Name: "my-app", + Services: []core.Service{...}, + Config: core.Config{...}, +}) + +// Not AX: multiple patterns for the same thing +core.New( + core.WithName("my-app"), + core.WithService(factory1), + core.WithService(factory2), + core.WithConfig(cfg), +) +``` + +## The Plans Convention — AX Development Lifecycle + +The `plans/` directory structure encodes a development methodology designed for how generative AI actually works: iterative refinement across structured phases, not one-shot generation. + +### The Three-Way Split + +``` +plans/ +├── project/ # 1. WHAT and WHY — start here +├── rfc/ # 2. CONSTRAINTS — immutable contracts +└── code/ # 3. HOW — implementation specs +``` + +Each directory is a phase. Work flows from project → rfc → code. 
Each transition forces a refinement pass — you cannot write a code spec without discovering gaps in the project spec, and you cannot write an RFC without discovering assumptions in both. + +**Three places for data that can't be written simultaneously = three guaranteed iterations of "actually, this needs changing."** Refinement is baked into the structure, not bolted on as a review step. + +### Phase 1: Project (Vision) + +Start with `project/`. No code exists yet. Define: +- What the product IS and who it serves +- What existing primitives it consumes (cross-ref to `code/`) +- What constraints it operates under (cross-ref to `rfc/`) + +This is where creativity lives. Map features to building blocks. Connect systems. The project spec is integrative — it references everything else. + +### Phase 2: RFC (Contracts) + +Extract the immutable rules into `rfc/`. These are constraints that don't change with implementation: +- Wire formats, protocols, hash algorithms +- Security properties that must hold +- Compatibility guarantees + +RFCs are numbered per component (`RFC-BORG-006-SMSG-FORMAT.md`) and never modified after acceptance. If the contract changes, write a new RFC. + +### Phase 3: Code (Implementation Specs) + +Define the implementation in `code/`. Each component gets an RFC.md that an agent can implement from: +- Struct definitions (the DTOs — see principle 6) +- Method signatures and behaviour +- Error conditions and edge cases +- Cross-references to other code/ specs + +The code spec IS the product. Write the spec → dispatch to an agent → review output → iterate. + +### Pre-Launch: Alignment Protocol + +Before dispatching for implementation, verify spec-model alignment: + +``` +1. REVIEW — The implementation model (Codex/Jules) reads the spec + and reports missing elements. This surfaces the delta between + the model's training and the spec's assumptions. 
+ + "I need X, Y, Z to implement this" is the model saying + "I hear you but I'm missing context" — without asking. + +2. ADJUST — Update the spec to close the gaps. Add examples, + clarify ambiguities, provide the context the model needs. + This is shared alignment, not compromise. + +3. VERIFY — A different model (or sub-agent) reviews the adjusted + spec without the planner's bias. Fresh eyes on the contract. + "Does this make sense to someone who wasn't in the room?" + +4. READY — When the review findings are trivial or deployment- + related (not architectural), the spec is ready to dispatch. +``` + +### Implementation: Iterative Dispatch + +Same prompt, multiple runs. Each pass sees deeper because the context evolved: + +``` +Round 1: Build features (the obvious gaps) +Round 2: Write tests (verify what was built) +Round 3: Harden security (what can go wrong?) +Round 4: Next RFC section (what's still missing?) +Round N: Findings are trivial → implementation is complete +``` + +Re-running is not failure. It is the process. Each pass changes the codebase, which changes what the next pass can see. The iteration IS the refinement. + +### Post-Implementation: Auto-Documentation + +The QA/verify chain produces artefacts that feed forward: +- Test results document the contract (what works, what doesn't) +- Coverage reports surface untested paths +- Diff summaries prep the changelog for the next release +- Doc site updates from the spec (the spec IS the documentation) + +The output of one cycle is the input to the next. The plans repo stays current because the specs drive the code, not the other way round. + +## Compatibility + +AX conventions are valid, idiomatic Go/PHP/TS. They do not require language extensions, code generation, or non-standard tooling. An AX-designed codebase compiles, tests, and deploys with standard toolchains. + +The conventions diverge from community patterns (functional options, Must/For, etc.) but do not violate language specifications. 
This is a style choice, not a fork. + +## Adoption + +AX applies to all new code in the Core ecosystem. Existing code migrates incrementally as it is touched — no big-bang rewrite. + +Priority order: +1. **Public APIs** (package-level functions, struct constructors) +2. **File structure** (path naming, template locations) +3. **Internal fields** (struct field names, local variables) + +## References + +- dAppServer unified path convention (2024) +- CoreGO DTO pattern refactor (2026-03-18) +- Core primitives design (2026-03-19) +- Go Proverbs, Rob Pike (2015) — AX provides an updated lens + +## Changelog + +- 2026-03-19: Initial draft From d9f5b7101bb2b32af40197483181f9f27cce3c20 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 19:36:30 +0000 Subject: [PATCH 08/83] refactor(ax): replace option chains with config structs --- docs/RFC.md | 122 +++++++++++++++++----------------- docs/architecture.md | 4 +- docs/development.md | 2 +- docs/index.md | 4 +- io.go | 4 ++ s3/s3.go | 134 ++++++++++++++++++++------------------ s3/s3_test.go | 58 ++++++++--------- sqlite/sqlite.go | 52 +++++++++------ sqlite/sqlite_test.go | 12 ++-- store/medium.go | 22 +++++-- workspace/service.go | 35 ++++++---- workspace/service_test.go | 4 +- 12 files changed, 245 insertions(+), 208 deletions(-) diff --git a/docs/RFC.md b/docs/RFC.md index 85eef8f..112fd4c 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -1396,17 +1396,17 @@ Functional option for configuring `Medium`. Example: ```go -opt := sqlite.WithTable("files") +opt := sqlite.Options{Path: ":memory:", Table: "files"} _ = opt ``` -### WithTable(table string) Option +### Options Sets the table name used for storage (default: `files`). Example: ```go -m, _ := sqlite.New(":memory:", sqlite.WithTable("files")) +m, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) ``` ### New(dbPath string, opts ...Option) (*Medium, error) @@ -1415,7 +1415,7 @@ Creates a new SQLite-backed medium. 
Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") ``` @@ -1425,21 +1425,21 @@ SQLite-backed storage backend. Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") ``` **Close() error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Close() ``` **Read(p string) (string, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") value, _ := m.Read("notes.txt") ``` @@ -1447,21 +1447,21 @@ value, _ := m.Read("notes.txt") **Write(p, content string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") ``` **EnsureDir(p string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.EnsureDir("config") ``` **IsFile(p string) bool** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` @@ -1469,7 +1469,7 @@ ok := m.IsFile("notes.txt") **FileGet(p string) (string, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.FileSet("notes.txt", "hello") value, _ := m.FileGet("notes.txt") ``` @@ -1477,14 +1477,14 @@ value, _ := m.FileGet("notes.txt") **FileSet(p, content string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.FileSet("notes.txt", "hello") ``` **Delete(p string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("old.txt", "data") _ = m.Delete("old.txt") ``` @@ -1492,7 +1492,7 @@ _ = m.Delete("old.txt") 
**DeleteAll(p string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("logs/run.txt", "started") _ = m.DeleteAll("logs") ``` @@ -1500,7 +1500,7 @@ _ = m.DeleteAll("logs") **Rename(oldPath, newPath string) error** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("old.txt", "data") _ = m.Rename("old.txt", "new.txt") ``` @@ -1508,7 +1508,7 @@ _ = m.Rename("old.txt", "new.txt") **List(p string) ([]fs.DirEntry, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") ``` @@ -1516,7 +1516,7 @@ entries, _ := m.List("dir") **Stat(p string) (fs.FileInfo, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") info, _ := m.Stat("notes.txt") ``` @@ -1524,7 +1524,7 @@ info, _ := m.Stat("notes.txt") **Open(p string) (fs.File, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") defer f.Close() @@ -1533,7 +1533,7 @@ defer f.Close() **Create(p string) (io.WriteCloser, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -1542,7 +1542,7 @@ _ = w.Close() **Append(p string) (io.WriteCloser, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") w, _ := m.Append("notes.txt") _, _ = w.Write([]byte(" world")) @@ -1552,7 +1552,7 @@ _ = w.Close() **ReadStream(p string) (io.ReadCloser, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = 
m.Write("notes.txt", "hello") r, _ := m.ReadStream("notes.txt") defer r.Close() @@ -1561,7 +1561,7 @@ defer r.Close() **WriteStream(p string) (io.WriteCloser, error)** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) w, _ := m.WriteStream("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -1570,7 +1570,7 @@ _ = w.Close() **Exists(p string) bool** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.Write("notes.txt", "hello") ok := m.Exists("notes.txt") ``` @@ -1578,7 +1578,7 @@ ok := m.Exists("notes.txt") **IsDir(p string) bool** Example: ```go -m, _ := sqlite.New(":memory:") +m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) _ = m.EnsureDir("config") ok := m.IsDir("config") ``` @@ -1593,27 +1593,27 @@ Functional option for configuring `Medium`. Example: ```go -opt := s3.WithPrefix("daily/") +opt := s3.Options{Prefix: "daily/"} _ = opt ``` -### WithPrefix(prefix string) Option +### Options Sets a key prefix for all operations. Example: ```go -m, _ := s3.New("bucket", s3.WithClient(awsClient), s3.WithPrefix("daily/")) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: awsClient, Prefix: "daily/"}) ``` -### WithClient(client *awss3.Client) Option +### Client Supplies an AWS SDK S3 client. Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` ### New(bucket string, opts ...Option) (*Medium, error) @@ -1623,7 +1623,7 @@ Creates a new S3-backed medium. Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` ### Medium @@ -1633,14 +1633,14 @@ S3-backed storage backend. 
Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` **Read(p string) (string, error)** Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) value, _ := m.Read("notes.txt") ``` @@ -1648,7 +1648,7 @@ value, _ := m.Read("notes.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Write("notes.txt", "hello") ``` @@ -1657,7 +1657,7 @@ No-op (S3 has no directories). Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.EnsureDir("config") ``` @@ -1665,7 +1665,7 @@ _ = m.EnsureDir("config") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsFile("notes.txt") ``` @@ -1673,7 +1673,7 @@ ok := m.IsFile("notes.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) value, _ := m.FileGet("notes.txt") ``` @@ -1681,7 +1681,7 @@ value, _ := m.FileGet("notes.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.FileSet("notes.txt", "hello") ``` @@ -1689,7 +1689,7 @@ _ = m.FileSet("notes.txt", "hello") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Delete("old.txt") ``` @@ -1697,7 +1697,7 @@ _ = m.Delete("old.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := 
s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.DeleteAll("logs") ``` @@ -1705,7 +1705,7 @@ _ = m.DeleteAll("logs") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Rename("old.txt", "new.txt") ``` @@ -1713,7 +1713,7 @@ _ = m.Rename("old.txt", "new.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) entries, _ := m.List("dir") ``` @@ -1721,7 +1721,7 @@ entries, _ := m.List("dir") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) info, _ := m.Stat("notes.txt") ``` @@ -1729,7 +1729,7 @@ info, _ := m.Stat("notes.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) f, _ := m.Open("notes.txt") defer f.Close() ``` @@ -1738,7 +1738,7 @@ defer f.Close() Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -1748,7 +1748,7 @@ _ = w.Close() Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.Append("notes.txt") _, _ = w.Write([]byte(" world")) _ = w.Close() @@ -1758,7 +1758,7 @@ _ = w.Close() Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) r, _ := m.ReadStream("notes.txt") defer r.Close() ``` @@ -1767,7 +1767,7 @@ defer r.Close() Example: ```go client := 
awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.WriteStream("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -1777,7 +1777,7 @@ _ = w.Close() Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.Exists("notes.txt") ``` @@ -1785,7 +1785,7 @@ ok := m.Exists("notes.txt") Example: ```go client := awss3.NewFromConfig(cfg) -m, _ := s3.New("bucket", s3.WithClient(client)) +m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsDir("logs") ``` @@ -2019,7 +2019,7 @@ _ = ws **CreateWorkspace(identifier, password string) (string, error)** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) wsID, _ := svc.CreateWorkspace("user", "pass") ``` @@ -2027,7 +2027,7 @@ wsID, _ := svc.CreateWorkspace("user", "pass") **SwitchWorkspace(name string) error** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) _ = svc.SwitchWorkspace("workspace-id") ``` @@ -2035,7 +2035,7 @@ _ = svc.SwitchWorkspace("workspace-id") **WorkspaceFileGet(filename string) (string, error)** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) value, _ := svc.WorkspaceFileGet("notes.txt") ``` @@ -2043,12 +2043,12 @@ value, _ := svc.WorkspaceFileGet("notes.txt") **WorkspaceFileSet(filename, content string) error** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := 
svcAny.(*workspace.Service) _ = svc.WorkspaceFileSet("notes.txt", "hello") ``` -### New(c *core.Core, crypt ...cryptProvider) (any, error) +### New(options Options) (any, error) Creates a new workspace service. Returns `*Service` as `any`. @@ -2057,7 +2057,7 @@ Example: type stubCrypt struct{} func (stubCrypt) CreateKeyPair(name, passphrase string) (string, error) { return "key", nil } -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) _ = svc ``` @@ -2068,7 +2068,7 @@ Implements `Workspace` and handles IPC messages. Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) _ = svc ``` @@ -2076,7 +2076,7 @@ _ = svc **CreateWorkspace(identifier, password string) (string, error)** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) wsID, _ := svc.CreateWorkspace("user", "pass") ``` @@ -2084,7 +2084,7 @@ wsID, _ := svc.CreateWorkspace("user", "pass") **SwitchWorkspace(name string) error** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) _ = svc.SwitchWorkspace("workspace-id") ``` @@ -2092,7 +2092,7 @@ _ = svc.SwitchWorkspace("workspace-id") **WorkspaceFileGet(filename string) (string, error)** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) value, _ := svc.WorkspaceFileGet("notes.txt") ``` @@ -2100,7 +2100,7 @@ value, _ := svc.WorkspaceFileGet("notes.txt") **WorkspaceFileSet(filename, content string) error** 
Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) _ = svc.WorkspaceFileSet("notes.txt", "hello") ``` @@ -2108,7 +2108,7 @@ _ = svc.WorkspaceFileSet("notes.txt", "hello") **HandleIPCEvents(c *core.Core, msg core.Message) core.Result** Example: ```go -svcAny, _ := workspace.New(core.New(), stubCrypt{}) +svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) svc := svcAny.(*workspace.Service) result := svc.HandleIPCEvents(core.New(), map[string]any{ "action": "workspace.create", diff --git a/docs/architecture.md b/docs/architecture.md index 7145304..801121a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -60,7 +60,7 @@ The S3 backend translates `Medium` operations into AWS SDK calls. Key design dec - **Directory semantics:** S3 has no real directories. `EnsureDir` is a no-op. `IsDir` and `Exists` for directory-like paths use `ListObjectsV2` with `MaxKeys: 1` to check for objects under the prefix. - **Rename:** Implemented as copy-then-delete, since S3 has no atomic rename. - **Append:** Downloads existing content, appends in memory, re-uploads on `Close()`. This is the only viable approach given S3's immutable-object model. -- **Testability:** The `s3API` interface (unexported) abstracts the six SDK methods used. Tests inject a `mockS3` that stores objects in a `map[string][]byte` with a `sync.RWMutex`. +- **Testability:** The `Client` interface abstracts the six SDK methods used. Tests inject a `mockS3` that stores objects in a `map[string][]byte` with a `sync.RWMutex`. ### sqlite.Medium @@ -81,7 +81,7 @@ CREATE TABLE IF NOT EXISTS files ( - **WAL mode** is enabled at connection time for better concurrent read performance. - **Path cleaning** uses the same `path.Clean("/" + p)` pattern as other backends. 
- **Rename** is transactional: it reads the source row, inserts at the destination, deletes the source, and moves all children (if it is a directory) within a single transaction. -- **Custom tables** are supported via `WithTable("name")` to allow multiple logical filesystems in one database. +- **Custom tables** are supported via `sqlite.Options{Path: ":memory:", Table: "name"}` to allow multiple logical filesystems in one database. - **`:memory:`** databases work out of the box for tests. ### node.Node diff --git a/docs/development.md b/docs/development.md index 5c63913..2e95ad7 100644 --- a/docs/development.md +++ b/docs/development.md @@ -120,7 +120,7 @@ For SQLite-backed tests, use `:memory:`: ```go func TestWithSQLite(t *testing.T) { - m, err := sqlite.New(":memory:") + m, err := sqlite.New(sqlite.Options{Path: ":memory:"}) require.NoError(t, err) defer m.Close() diff --git a/docs/index.md b/docs/index.md index bd33262..2ac0992 100644 --- a/docs/index.md +++ b/docs/index.md @@ -31,8 +31,8 @@ mem := node.New() mem.AddData("hello.txt", []byte("world")) tarball, _ := mem.ToTar() -// S3 backend (requires an *s3.Client from the AWS SDK). -bucket, _ := s3.New("my-bucket", s3.WithClient(awsClient), s3.WithPrefix("uploads/")) +// S3 backend (requires an *awss3.Client from the AWS SDK). +bucket, _ := s3.New(s3.Options{Bucket: "my-bucket", Client: awsClient, Prefix: "uploads/"}) _ = bucket.Write("photo.jpg", rawData) ``` diff --git a/io.go b/io.go index 3b20131..95055b3 100644 --- a/io.go +++ b/io.go @@ -148,6 +148,8 @@ func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // For sandboxed access, use NewSandboxed with a specific root path. var Local Medium +var _ Medium = (*local.Medium)(nil) + func init() { var err error Local, err = local.New("/") @@ -235,6 +237,8 @@ type MockMedium struct { ModTimes map[string]time.Time } +var _ Medium = (*MockMedium)(nil) + // NewMockMedium creates a new MockMedium instance. // // result := io.NewMockMedium(...) 
diff --git a/s3/s3.go b/s3/s3.go index a0e4074..89d6446 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -10,31 +10,44 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" ) -// s3API is the subset of the S3 client API used by this package. -// This allows for interface-based mocking in tests. -type s3API interface { - GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) - PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) - DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) - DeleteObjects(ctx context.Context, params *s3.DeleteObjectsInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) - HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) - ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) - CopyObject(ctx context.Context, params *s3.CopyObjectInput, optFns ...func(*s3.Options)) (*s3.CopyObjectOutput, error) +// Client is the subset of the AWS S3 client API used by this package. +// Tests can provide any mock that satisfies the same method set. 
+type Client interface { + GetObject(ctx context.Context, params *awss3.GetObjectInput, optFns ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) + PutObject(ctx context.Context, params *awss3.PutObjectInput, optFns ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) + DeleteObject(ctx context.Context, params *awss3.DeleteObjectInput, optFns ...func(*awss3.Options)) (*awss3.DeleteObjectOutput, error) + DeleteObjects(ctx context.Context, params *awss3.DeleteObjectsInput, optFns ...func(*awss3.Options)) (*awss3.DeleteObjectsOutput, error) + HeadObject(ctx context.Context, params *awss3.HeadObjectInput, optFns ...func(*awss3.Options)) (*awss3.HeadObjectOutput, error) + ListObjectsV2(ctx context.Context, params *awss3.ListObjectsV2Input, optFns ...func(*awss3.Options)) (*awss3.ListObjectsV2Output, error) + CopyObject(ctx context.Context, params *awss3.CopyObjectInput, optFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) } // Medium is an S3-backed storage backend implementing the io.Medium interface. type Medium struct { - client s3API + client Client bucket string prefix string } +var _ coreio.Medium = (*Medium)(nil) + +// Options configures a Medium. +type Options struct { + // Bucket is the target S3 bucket name. + Bucket string + // Client is the AWS S3 client or test double used for requests. + Client Client + // Prefix is prepended to every object key. + Prefix string +} + func deleteObjectsError(prefix string, errs []types.Error) error { if len(errs) == 0 { return nil @@ -58,36 +71,19 @@ func deleteObjectsError(prefix string, errs []types.Error) error { return core.E("s3.DeleteAll", core.Concat("partial delete failed under ", prefix, ": ", core.Join("; ", details...)), nil) } -// Option configures a Medium. -type Option func(*Medium) - -// WithPrefix sets an optional key prefix for all operations. -// -// result := s3.WithPrefix(...) 
-func WithPrefix(prefix string) Option { - return func(m *Medium) { - // Ensure prefix ends with "/" if non-empty - if prefix != "" && !core.HasSuffix(prefix, "/") { - prefix += "/" - } - m.prefix = prefix +func normalisePrefix(prefix string) string { + if prefix == "" { + return "" } -} - -// WithClient sets the S3 client for dependency injection. -// -// result := s3.WithClient(...) -func WithClient(client *s3.Client) Option { - return func(m *Medium) { - m.client = client + clean := path.Clean("/" + prefix) + if clean == "/" { + return "" } -} - -// withAPI sets the s3API interface directly (for testing with mocks). -func withAPI(api s3API) Option { - return func(m *Medium) { - m.client = api + clean = core.TrimPrefix(clean, "/") + if clean != "" && !core.HasSuffix(clean, "/") { + clean += "/" } + return clean } // New creates a new S3 Medium for the given bucket. @@ -95,17 +91,18 @@ func withAPI(api s3API) Option { // Example usage: // // awsClient := awss3.NewFromConfig(cfg) -// m, _ := s3.New("backups", s3.WithClient(awsClient), s3.WithPrefix("daily")) -func New(bucket string, opts ...Option) (*Medium, error) { - if bucket == "" { +// m, _ := s3.New(s3.Options{Bucket: "backups", Client: awsClient, Prefix: "daily/"}) +func New(options Options) (*Medium, error) { + if options.Bucket == "" { return nil, core.E("s3.New", "bucket name is required", nil) } - m := &Medium{bucket: bucket} - for _, opt := range opts { - opt(m) + if options.Client == nil { + return nil, core.E("s3.New", "client is required", nil) } - if m.client == nil { - return nil, core.E("s3.New", "S3 client is required (use WithClient option)", nil) + m := &Medium{ + client: options.Client, + bucket: options.Bucket, + prefix: normalisePrefix(options.Prefix), } return m, nil } @@ -138,7 +135,7 @@ func (m *Medium) Read(p string) (string, error) { return "", core.E("s3.Read", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ + out, err := 
m.client.GetObject(context.Background(), &awss3.GetObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -163,7 +160,7 @@ func (m *Medium) Write(p, content string) error { return core.E("s3.Write", "path is required", fs.ErrInvalid) } - _, err := m.client.PutObject(context.Background(), &s3.PutObjectInput{ + _, err := m.client.PutObject(context.Background(), &awss3.PutObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), Body: core.NewReader(content), @@ -174,6 +171,13 @@ func (m *Medium) Write(p, content string) error { return nil } +// WriteMode ignores the requested mode because S3 objects do not store POSIX permissions. +// +// result := m.WriteMode(...) +func (m *Medium) WriteMode(p, content string, _ fs.FileMode) error { + return m.Write(p, content) +} + // EnsureDir is a no-op for S3 (S3 has no real directories). // // result := m.EnsureDir(...) @@ -193,7 +197,7 @@ func (m *Medium) IsFile(p string) bool { if core.HasSuffix(key, "/") { return false } - _, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ + _, err := m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -223,7 +227,7 @@ func (m *Medium) Delete(p string) error { return core.E("s3.Delete", "path is required", fs.ErrInvalid) } - _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -243,7 +247,7 @@ func (m *Medium) DeleteAll(p string) error { } // First, try deleting the exact key - _, err := m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err := m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -261,7 +265,7 @@ func (m *Medium) DeleteAll(p string) error { var continuationToken *string for paginator { - listOut, err := 
m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(m.bucket), Prefix: aws.String(prefix), ContinuationToken: continuationToken, @@ -279,7 +283,7 @@ func (m *Medium) DeleteAll(p string) error { objects[i] = types.ObjectIdentifier{Key: obj.Key} } - deleteOut, err := m.client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + deleteOut, err := m.client.DeleteObjects(context.Background(), &awss3.DeleteObjectsInput{ Bucket: aws.String(m.bucket), Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) @@ -312,7 +316,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { copySource := m.bucket + "/" + oldKey - _, err := m.client.CopyObject(context.Background(), &s3.CopyObjectInput{ + _, err := m.client.CopyObject(context.Background(), &awss3.CopyObjectInput{ Bucket: aws.String(m.bucket), CopySource: aws.String(copySource), Key: aws.String(newKey), @@ -321,7 +325,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { return core.E("s3.Rename", core.Concat("failed to copy object: ", oldKey, " -> ", newKey), err) } - _, err = m.client.DeleteObject(context.Background(), &s3.DeleteObjectInput{ + _, err = m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(oldKey), }) @@ -343,7 +347,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { var entries []fs.DirEntry - listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(m.bucket), Prefix: aws.String(prefix), Delimiter: aws.String("/"), @@ -416,7 +420,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid) } - out, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ + out, err := 
m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -451,7 +455,7 @@ func (m *Medium) Open(p string) (fs.File, error) { return nil, core.E("s3.Open", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ + out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -508,7 +512,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { } var existing []byte - out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ + out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -533,7 +537,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &s3.GetObjectInput{ + out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -560,7 +564,7 @@ func (m *Medium) Exists(p string) bool { } // Check as an exact object - _, err := m.client.HeadObject(context.Background(), &s3.HeadObjectInput{ + _, err := m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ Bucket: aws.String(m.bucket), Key: aws.String(key), }) @@ -573,7 +577,7 @@ func (m *Medium) Exists(p string) bool { if !core.HasSuffix(prefix, "/") { prefix += "/" } - listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(m.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), @@ -598,7 +602,7 @@ func (m *Medium) IsDir(p string) bool { prefix += "/" } - listOut, err := m.client.ListObjectsV2(context.Background(), &s3.ListObjectsV2Input{ + listOut, err := 
m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(m.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), @@ -737,7 +741,7 @@ func (w *s3WriteCloser) Write(p []byte) (int, error) { // // result := w.Close(...) func (w *s3WriteCloser) Close() error { - _, err := w.medium.client.PutObject(context.Background(), &s3.PutObjectInput{ + _, err := w.medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ Bucket: aws.String(w.medium.bucket), Key: aws.String(w.key), Body: bytes.NewReader(w.data), diff --git a/s3/s3_test.go b/s3/s3_test.go index b46e47b..90cc35e 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -12,13 +12,13 @@ import ( core "dappco.re/go/core" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// mockS3 is an in-memory mock implementing the s3API interface. +// mockS3 is an in-memory mock implementing the Client interface. 
type mockS3 struct { mu sync.RWMutex objects map[string][]byte @@ -36,7 +36,7 @@ func newMockS3() *mockS3 { } } -func (m *mockS3) GetObject(_ context.Context, params *s3.GetObjectInput, _ ...func(*s3.Options)) (*s3.GetObjectOutput, error) { +func (m *mockS3) GetObject(_ context.Context, params *awss3.GetObjectInput, _ ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -46,14 +46,14 @@ func (m *mockS3) GetObject(_ context.Context, params *s3.GetObjectInput, _ ...fu return nil, core.E("s3test.mockS3.GetObject", core.Sprintf("NoSuchKey: key %q not found", key), fs.ErrNotExist) } mtime := m.mtimes[key] - return &s3.GetObjectOutput{ + return &awss3.GetObjectOutput{ Body: goio.NopCloser(bytes.NewReader(data)), ContentLength: aws.Int64(int64(len(data))), LastModified: &mtime, }, nil } -func (m *mockS3) PutObject(_ context.Context, params *s3.PutObjectInput, _ ...func(*s3.Options)) (*s3.PutObjectOutput, error) { +func (m *mockS3) PutObject(_ context.Context, params *awss3.PutObjectInput, _ ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) { m.mu.Lock() defer m.mu.Unlock() @@ -64,10 +64,10 @@ func (m *mockS3) PutObject(_ context.Context, params *s3.PutObjectInput, _ ...fu } m.objects[key] = data m.mtimes[key] = time.Now() - return &s3.PutObjectOutput{}, nil + return &awss3.PutObjectOutput{}, nil } -func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) { +func (m *mockS3) DeleteObject(_ context.Context, params *awss3.DeleteObjectInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectOutput, error) { m.mu.Lock() defer m.mu.Unlock() @@ -77,10 +77,10 @@ func (m *mockS3) DeleteObject(_ context.Context, params *s3.DeleteObjectInput, _ } delete(m.objects, key) delete(m.mtimes, key) - return &s3.DeleteObjectOutput{}, nil + return &awss3.DeleteObjectOutput{}, nil } -func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, _ 
...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) { +func (m *mockS3) DeleteObjects(_ context.Context, params *awss3.DeleteObjectsInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectsOutput, error) { m.mu.Lock() defer m.mu.Unlock() @@ -94,10 +94,10 @@ func (m *mockS3) DeleteObjects(_ context.Context, params *s3.DeleteObjectsInput, delete(m.objects, key) delete(m.mtimes, key) } - return &s3.DeleteObjectsOutput{Errors: outErrs}, nil + return &awss3.DeleteObjectsOutput{Errors: outErrs}, nil } -func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { +func (m *mockS3) HeadObject(_ context.Context, params *awss3.HeadObjectInput, _ ...func(*awss3.Options)) (*awss3.HeadObjectOutput, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -107,13 +107,13 @@ func (m *mockS3) HeadObject(_ context.Context, params *s3.HeadObjectInput, _ ... return nil, core.E("s3test.mockS3.HeadObject", core.Sprintf("NotFound: key %q not found", key), fs.ErrNotExist) } mtime := m.mtimes[key] - return &s3.HeadObjectOutput{ + return &awss3.HeadObjectOutput{ ContentLength: aws.Int64(int64(len(data))), LastModified: &mtime, }, nil } -func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input, _ ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { +func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Input, _ ...func(*awss3.Options)) (*awss3.ListObjectsV2Output, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -173,14 +173,14 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *s3.ListObjectsV2Input, cpSlice = append(cpSlice, types.CommonPrefix{Prefix: aws.String(cp)}) } - return &s3.ListObjectsV2Output{ + return &awss3.ListObjectsV2Output{ Contents: contents, CommonPrefixes: cpSlice, IsTruncated: aws.Bool(false), }, nil } -func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ...func(*s3.Options)) (*s3.CopyObjectOutput, error) { +func (m *mockS3) 
CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) { m.mu.Lock() defer m.mu.Unlock() @@ -201,7 +201,7 @@ func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ... m.objects[destKey] = append([]byte{}, data...) m.mtimes[destKey] = time.Now() - return &s3.CopyObjectOutput{}, nil + return &awss3.CopyObjectOutput{}, nil } // --- Helper --- @@ -209,7 +209,7 @@ func (m *mockS3) CopyObject(_ context.Context, params *s3.CopyObjectInput, _ ... func newTestMedium(t *testing.T) (*Medium, *mockS3) { t.Helper() mock := newMockS3() - m, err := New("test-bucket", withAPI(mock)) + m, err := New(Options{Bucket: "test-bucket", Client: mock}) require.NoError(t, err) return m, mock } @@ -218,32 +218,32 @@ func newTestMedium(t *testing.T) (*Medium, *mockS3) { func TestS3_New_Good(t *testing.T) { mock := newMockS3() - m, err := New("my-bucket", withAPI(mock)) + m, err := New(Options{Bucket: "my-bucket", Client: mock}) require.NoError(t, err) assert.Equal(t, "my-bucket", m.bucket) assert.Equal(t, "", m.prefix) } func TestS3_New_NoBucket_Bad(t *testing.T) { - _, err := New("") + _, err := New(Options{Client: newMockS3()}) assert.Error(t, err) assert.Contains(t, err.Error(), "bucket name is required") } func TestS3_New_NoClient_Bad(t *testing.T) { - _, err := New("bucket") + _, err := New(Options{Bucket: "bucket"}) assert.Error(t, err) - assert.Contains(t, err.Error(), "S3 client is required") + assert.Contains(t, err.Error(), "client is required") } -func TestS3_WithPrefix_Good(t *testing.T) { +func TestS3_New_Options_Good(t *testing.T) { mock := newMockS3() - m, err := New("bucket", withAPI(mock), WithPrefix("data/")) + m, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "data/"}) require.NoError(t, err) assert.Equal(t, "data/", m.prefix) // Prefix without trailing slash gets one added - m2, err := New("bucket", withAPI(mock), WithPrefix("data")) + m2, err := New(Options{Bucket: 
"bucket", Client: mock, Prefix: "data"}) require.NoError(t, err) assert.Equal(t, "data/", m2.prefix) } @@ -276,9 +276,9 @@ func TestS3_ReadWrite_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -func TestS3_ReadWrite_WithPrefix_Good(t *testing.T) { +func TestS3_ReadWrite_Prefix_Good(t *testing.T) { mock := newMockS3() - m, err := New("bucket", withAPI(mock), WithPrefix("pfx")) + m, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) require.NoError(t, err) err = m.Write("file.txt", "data") @@ -641,7 +641,7 @@ func TestS3_Key_Good(t *testing.T) { mock := newMockS3() // No prefix - m, _ := New("bucket", withAPI(mock)) + m, _ := New(Options{Bucket: "bucket", Client: mock}) assert.Equal(t, "file.txt", m.key("file.txt")) assert.Equal(t, "dir/file.txt", m.key("dir/file.txt")) assert.Equal(t, "", m.key("")) @@ -649,7 +649,7 @@ func TestS3_Key_Good(t *testing.T) { assert.Equal(t, "file.txt", m.key("../file.txt")) // With prefix - m2, _ := New("bucket", withAPI(mock), WithPrefix("pfx")) + m2, _ := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) assert.Equal(t, "pfx/file.txt", m2.key("file.txt")) assert.Equal(t, "pfx/dir/file.txt", m2.key("dir/file.txt")) assert.Equal(t, "pfx/", m2.key("")) @@ -658,7 +658,7 @@ func TestS3_Key_Good(t *testing.T) { // Ugly: verify the Medium interface is satisfied at compile time. func TestS3_InterfaceCompliance_Ugly(t *testing.T) { mock := newMockS3() - m, err := New("bucket", withAPI(mock)) + m, err := New(Options{Bucket: "bucket", Client: mock}) require.NoError(t, err) // Verify all methods exist by calling them in a way that diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index e948fa0..00d09c0 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -10,6 +10,7 @@ import ( "time" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" _ "modernc.org/sqlite" // Pure Go SQLite driver ) @@ -20,16 +21,21 @@ type Medium struct { table string } -// Option configures a Medium. 
-type Option func(*Medium) +var _ coreio.Medium = (*Medium)(nil) -// WithTable sets the table name (default: "files"). -// -// result := sqlite.WithTable(...) -func WithTable(table string) Option { - return func(m *Medium) { - m.table = table +// Options configures a Medium. +type Options struct { + // Path is the SQLite database path. Use ":memory:" for tests. + Path string + // Table is the table name used for file storage. Empty defaults to "files". + Table string +} + +func normaliseTableName(table string) string { + if table == "" { + return "files" } + return table } // New creates a new SQLite Medium at the given database path. @@ -37,19 +43,16 @@ func WithTable(table string) Option { // // Example usage: // -// m, _ := sqlite.New(":memory:", sqlite.WithTable("files")) +// m, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) // _ = m.Write("config/app.yaml", "port: 8080") -func New(dbPath string, opts ...Option) (*Medium, error) { - if dbPath == "" { +func New(options Options) (*Medium, error) { + if options.Path == "" { return nil, core.E("sqlite.New", "database path is required", nil) } - m := &Medium{table: "files"} - for _, opt := range opts { - opt(m) - } + m := &Medium{table: normaliseTableName(options.Table)} - db, err := sql.Open("sqlite", dbPath) + db, err := sql.Open("sqlite", options.Path) if err != nil { return nil, core.E("sqlite.New", "failed to open database", err) } @@ -127,18 +130,25 @@ func (m *Medium) Read(p string) (string, error) { // // result := m.Write(...) func (m *Medium) Write(p, content string) error { + return m.WriteMode(p, content, 0644) +} + +// WriteMode saves the given content with explicit permissions. +// +// result := m.WriteMode(...) 
+func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { key := cleanPath(p) if key == "" { - return core.E("sqlite.Write", "path is required", fs.ErrInvalid) + return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid) } _, err := m.db.Exec( - `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) - ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`, - key, []byte(content), time.Now().UTC(), + `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?) + ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = FALSE, mtime = excluded.mtime`, + key, []byte(content), int(mode), time.Now().UTC(), ) if err != nil { - return core.E("sqlite.Write", core.Concat("insert failed: ", key), err) + return core.E("sqlite.WriteMode", core.Concat("insert failed: ", key), err) } return nil } diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 6a1e592..3d157ad 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -12,7 +12,7 @@ import ( func newTestMedium(t *testing.T) *Medium { t.Helper() - m, err := New(":memory:") + m, err := New(Options{Path: ":memory:"}) require.NoError(t, err) t.Cleanup(func() { m.Close() }) return m @@ -21,21 +21,21 @@ func newTestMedium(t *testing.T) *Medium { // --- Constructor Tests --- func TestSqlite_New_Good(t *testing.T) { - m, err := New(":memory:") + m, err := New(Options{Path: ":memory:"}) require.NoError(t, err) defer m.Close() assert.Equal(t, "files", m.table) } -func TestSqlite_New_WithTable_Good(t *testing.T) { - m, err := New(":memory:", WithTable("custom")) +func TestSqlite_New_Options_Good(t *testing.T) { + m, err := New(Options{Path: ":memory:", Table: "custom"}) require.NoError(t, err) defer m.Close() assert.Equal(t, "custom", m.table) } func TestSqlite_New_EmptyPath_Bad(t *testing.T) { - _, err := New("") + _, err := New(Options{}) 
assert.Error(t, err) assert.Contains(t, err.Error(), "database path is required") } @@ -641,7 +641,7 @@ func TestSqlite_InterfaceCompliance_Ugly(t *testing.T) { // --- Custom Table --- func TestSqlite_CustomTable_Good(t *testing.T) { - m, err := New(":memory:", WithTable("my_files")) + m, err := New(Options{Path: ":memory:", Table: "my_files"}) require.NoError(t, err) defer m.Close() diff --git a/store/medium.go b/store/medium.go index 2380983..df377d5 100644 --- a/store/medium.go +++ b/store/medium.go @@ -7,6 +7,7 @@ import ( "time" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" ) // Medium wraps a Store to satisfy the io.Medium interface. @@ -17,6 +18,8 @@ type Medium struct { s *Store } +var _ coreio.Medium = (*Medium)(nil) + // NewMedium creates an io.Medium backed by a KV store at the given SQLite path. // // Example usage: @@ -89,6 +92,13 @@ func (m *Medium) Write(p, content string) error { return m.s.Set(group, key, content) } +// WriteMode ignores the requested mode because key-value entries do not store POSIX permissions. +// +// result := m.WriteMode(...) +func (m *Medium) WriteMode(p, content string, _ fs.FileMode) error { + return m.Write(p, content) +} + // EnsureDir is a no-op — groups are created implicitly on Set. // // result := m.EnsureDir(...) @@ -161,19 +171,19 @@ func (m *Medium) DeleteAll(p string) error { // // result := m.Rename(...) 
func (m *Medium) Rename(oldPath, newPath string) error { - og, ok := splitPath(oldPath) - ng, nk := splitPath(newPath) - if ok == "" || nk == "" { + oldGroup, oldKey := splitPath(oldPath) + newGroup, newKey := splitPath(newPath) + if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } - val, err := m.s.Get(og, ok) + val, err := m.s.Get(oldGroup, oldKey) if err != nil { return err } - if err := m.s.Set(ng, nk, val); err != nil { + if err := m.s.Set(newGroup, newKey, val); err != nil { return err } - return m.s.Delete(og, ok) + return m.s.Delete(oldGroup, oldKey) } // List returns directory entries. Empty path returns groups. diff --git a/workspace/service.go b/workspace/service.go index 5599356..b53a7ba 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -19,43 +19,55 @@ type Workspace interface { WorkspaceFileSet(filename, content string) error } -// cryptProvider is the interface for PGP key generation. -type cryptProvider interface { +// CryptProvider is the interface for PGP key generation. +type CryptProvider interface { CreateKeyPair(name, passphrase string) (string, error) } +// Options configures the workspace service. +type Options struct { + // Core is the Core runtime used by the service. + Core *core.Core + // Crypt is the PGP key generation dependency. + Crypt CryptProvider +} + // Service implements the Workspace interface. type Service struct { core *core.Core - crypt cryptProvider + crypt CryptProvider activeWorkspace string rootPath string medium io.Medium mu sync.RWMutex } +var _ Workspace = (*Service)(nil) + // New creates a new Workspace service instance. -// An optional cryptProvider can be passed to supply PGP key generation. 
// // Example usage: // -// svcAny, _ := workspace.New(core.New(), myCryptProvider) -// svc := svcAny.(*workspace.Service) -func New(c *core.Core, crypt ...cryptProvider) (any, error) { +// svc, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) +func New(options Options) (*Service, error) { home := workspaceHome() if home == "" { return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) } rootPath := core.Path(home, ".core", "workspaces") + if options.Core == nil { + return nil, core.E("workspace.New", "core is required", fs.ErrInvalid) + } + s := &Service{ - core: c, + core: options.Core, rootPath: rootPath, medium: io.Local, } - if len(crypt) > 0 && crypt[0] != nil { - s.crypt = crypt[0] + if options.Crypt != nil { + s.crypt = options.Crypt } if err := s.medium.EnsureDir(rootPath); err != nil { @@ -230,6 +242,3 @@ func (s *Service) workspacePath(op, name string) (string, error) { } return path, nil } - -// Ensure Service implements Workspace. 
-var _ Workspace = (*Service)(nil) diff --git a/workspace/service_test.go b/workspace/service_test.go index d2bc99d..8fcb435 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -26,9 +26,9 @@ func newTestService(t *testing.T) (*Service, string) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(core.New(), stubCrypt{key: "private-key"}) + svc, err := New(Options{Core: core.New(), Crypt: stubCrypt{key: "private-key"}}) require.NoError(t, err) - return svc.(*Service), tempHome + return svc, tempHome } func TestService_Workspace_RoundTrip_Good(t *testing.T) { From 977218cdfeae90fb40a0f8d5cca2b8f6c5191213 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 19:36:39 +0000 Subject: [PATCH 09/83] docs: align CLAUDE with s3 client rename --- CLAUDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CLAUDE.md b/CLAUDE.md index 9a27f7a..5b03b0b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -132,4 +132,4 @@ Sentinel errors (`var ErrNotFound`, `var ErrInvalidKey`, etc.) use standard `err ## Testing -Use `io.MockMedium` or `io.NewSandboxed(t.TempDir())` in tests — never hit real S3/SQLite unless integration testing. S3 tests use an interface-based mock (`s3API`). +Use `io.MockMedium` or `io.NewSandboxed(t.TempDir())` in tests — never hit real S3/SQLite unless integration testing. S3 tests use an interface-based mock (`s3.Client`). 
From 5f780e626189be9a8eb0dd6c3dd94d422aeba054 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:04:09 +0000 Subject: [PATCH 10/83] refactor(ax): normalize remaining agent-facing names --- datanode/client.go | 83 ++++++++++++++++++++++--------------------- io.go | 6 ++-- local/client.go | 4 +-- node/node.go | 4 +-- s3/s3.go | 5 +-- sigil/crypto_sigil.go | 4 +-- sigil/sigils.go | 3 +- sqlite/sqlite.go | 64 ++++++++++++++++----------------- store/medium.go | 64 ++++++++++++++++----------------- store/store.go | 43 +++++++++++----------- workspace/service.go | 68 +++++++++++++++++++---------------- 11 files changed, 179 insertions(+), 169 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 0504cbb..83d5aad 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -34,21 +34,21 @@ var ( // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dn *borgdatanode.DataNode - dirs map[string]bool // explicit directory tracking - mu sync.RWMutex + dataNode *borgdatanode.DataNode + dirs map[string]bool // explicit directory tracking + mu sync.RWMutex } // New creates a new empty DataNode Medium. 
// // Example usage: // -// m := datanode.New() -// _ = m.Write("jobs/run.log", "started") +// medium := datanode.New() +// _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ - dn: borgdatanode.New(), - dirs: make(map[string]bool), + dataNode: borgdatanode.New(), + dirs: make(map[string]bool), } } @@ -56,16 +56,17 @@ func New() *Medium { // // Example usage: // -// snapshot, _ := m.Snapshot() +// sourceMedium := datanode.New() +// snapshot, _ := sourceMedium.Snapshot() // restored, _ := datanode.FromTar(snapshot) func FromTar(data []byte) (*Medium, error) { - dn, err := borgdatanode.FromTar(data) + dataNode, err := borgdatanode.FromTar(data) if err != nil { return nil, core.E("datanode.FromTar", "failed to restore", err) } return &Medium{ - dn: dn, - dirs: make(map[string]bool), + dataNode: dataNode, + dirs: make(map[string]bool), }, nil } @@ -76,7 +77,7 @@ func FromTar(data []byte) (*Medium, error) { func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() defer m.mu.RUnlock() - data, err := m.dn.ToTar() + data, err := m.dataNode.ToTar() if err != nil { return nil, core.E("datanode.Snapshot", "tar failed", err) } @@ -87,13 +88,13 @@ func (m *Medium) Snapshot() ([]byte, error) { // // result := m.Restore(...) func (m *Medium) Restore(data []byte) error { - dn, err := borgdatanode.FromTar(data) + dataNode, err := borgdatanode.FromTar(data) if err != nil { return core.E("datanode.Restore", "tar failed", err) } m.mu.Lock() defer m.mu.Unlock() - m.dn = dn + m.dataNode = dataNode m.dirs = make(map[string]bool) return nil } @@ -105,7 +106,7 @@ func (m *Medium) Restore(data []byte) error { func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() - return m.dn + return m.dataNode } // clean normalises a path: strips leading slash, cleans traversal. 
@@ -128,7 +129,7 @@ func (m *Medium) Read(p string) (string, error) { defer m.mu.RUnlock() p = clean(p) - f, err := m.dn.Open(p) + f, err := m.dataNode.Open(p) if err != nil { return "", core.E("datanode.Read", core.Concat("not found: ", p), fs.ErrNotExist) } @@ -160,7 +161,7 @@ func (m *Medium) Write(p, content string) error { if p == "" { return core.E("datanode.Write", "empty path", fs.ErrInvalid) } - m.dn.AddData(p, []byte(content)) + m.dataNode.AddData(p, []byte(content)) // ensure parent dirs are tracked m.ensureDirsLocked(path.Dir(p)) @@ -209,7 +210,7 @@ func (m *Medium) IsFile(p string) bool { defer m.mu.RUnlock() p = clean(p) - info, err := m.dn.Stat(p) + info, err := m.dataNode.Stat(p) return err == nil && !info.IsDir() } @@ -240,7 +241,7 @@ func (m *Medium) Delete(p string) error { } // Check if it's a file in the DataNode - info, err := m.dn.Stat(p) + info, err := m.dataNode.Stat(p) if err != nil { // Check explicit dirs if m.dirs[p] { @@ -293,7 +294,7 @@ func (m *Medium) DeleteAll(p string) error { found := false // Check if p itself is a file - info, err := m.dn.Stat(p) + info, err := m.dataNode.Stat(p) if err == nil && !info.IsDir() { if err := m.removeFileLocked(p); err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", p), err) @@ -340,7 +341,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { newPath = clean(newPath) // Check if source is a file - info, err := m.dn.Stat(oldPath) + info, err := m.dataNode.Stat(oldPath) if err != nil { return core.E("datanode.Rename", core.Concat("not found: ", oldPath), fs.ErrNotExist) } @@ -351,7 +352,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { if err != nil { return core.E("datanode.Rename", core.Concat("failed to read source file: ", oldPath), err) } - m.dn.AddData(newPath, data) + m.dataNode.AddData(newPath, data) m.ensureDirsLocked(path.Dir(newPath)) if err := m.removeFileLocked(oldPath); err != nil { return core.E("datanode.Rename", 
core.Concat("failed to remove source file: ", oldPath), err) @@ -374,7 +375,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { if err != nil { return core.E("datanode.Rename", core.Concat("failed to read source file: ", name), err) } - m.dn.AddData(newName, data) + m.dataNode.AddData(newName, data) if err := m.removeFileLocked(name); err != nil { return core.E("datanode.Rename", core.Concat("failed to remove source file: ", name), err) } @@ -406,7 +407,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { p = clean(p) - entries, err := m.dn.ReadDir(p) + entries, err := m.dataNode.ReadDir(p) if err != nil { // Check explicit dirs if p == "" || m.dirs[p] { @@ -459,7 +460,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil } - info, err := m.dn.Stat(p) + info, err := m.dataNode.Stat(p) if err == nil { return info, nil } @@ -478,7 +479,7 @@ func (m *Medium) Open(p string) (fs.File, error) { defer m.mu.RUnlock() p = clean(p) - return m.dn.Open(p) + return m.dataNode.Open(p) } // Create documents the Create operation. @@ -489,7 +490,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { if p == "" { return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } - return &writeCloser{m: m, path: p}, nil + return &writeCloser{medium: m, path: p}, nil } // Append documents the Append operation. @@ -514,7 +515,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { } m.mu.RUnlock() - return &writeCloser{m: m, path: p, buf: existing}, nil + return &writeCloser{medium: m, path: p, buf: existing}, nil } // ReadStream documents the ReadStream operation. 
@@ -525,7 +526,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { defer m.mu.RUnlock() p = clean(p) - f, err := m.dn.Open(p) + f, err := m.dataNode.Open(p) if err != nil { return nil, core.E("datanode.ReadStream", core.Concat("not found: ", p), fs.ErrNotExist) } @@ -550,7 +551,7 @@ func (m *Medium) Exists(p string) bool { if p == "" { return true // root always exists } - _, err := m.dn.Stat(p) + _, err := m.dataNode.Stat(p) if err == nil { return true } @@ -568,7 +569,7 @@ func (m *Medium) IsDir(p string) bool { if p == "" { return true } - info, err := m.dn.Stat(p) + info, err := m.dataNode.Stat(p) if err == nil { return info.IsDir() } @@ -599,7 +600,7 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { // collectAllLocked returns all file paths in the DataNode. Caller holds lock. func (m *Medium) collectAllLocked() ([]string, error) { var names []string - err := dataNodeWalkDir(m.dn, ".", func(p string, d fs.DirEntry, err error) error { + err := dataNodeWalkDir(m.dataNode, ".", func(p string, d fs.DirEntry, err error) error { if err != nil { return err } @@ -612,7 +613,7 @@ func (m *Medium) collectAllLocked() ([]string, error) { } func (m *Medium) readFileLocked(name string) ([]byte, error) { - f, err := dataNodeOpen(m.dn, name) + f, err := dataNodeOpen(m.dataNode, name) if err != nil { return nil, err } @@ -646,16 +647,16 @@ func (m *Medium) removeFileLocked(target string) error { } newDN.AddData(name, data) } - m.dn = newDN + m.dataNode = newDN return nil } // --- writeCloser buffers writes and flushes to DataNode on Close --- type writeCloser struct { - m *Medium - path string - buf []byte + medium *Medium + path string + buf []byte } // Write documents the Write operation. @@ -670,11 +671,11 @@ func (w *writeCloser) Write(p []byte) (int, error) { // // result := w.Close(...) 
func (w *writeCloser) Close() error { - w.m.mu.Lock() - defer w.m.mu.Unlock() + w.medium.mu.Lock() + defer w.medium.mu.Unlock() - w.m.dn.AddData(w.path, w.buf) - w.m.ensureDirsLocked(path.Dir(w.path)) + w.medium.dataNode.AddData(w.path, w.buf) + w.medium.ensureDirsLocked(path.Dir(w.path)) return nil } diff --git a/io.go b/io.go index 95055b3..e40b5d5 100644 --- a/io.go +++ b/io.go @@ -164,8 +164,8 @@ func init() { // // Example usage: // -// m, _ := io.NewSandboxed("/srv/app") -// _ = m.Write("config/app.yaml", "port: 8080") +// medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") func NewSandboxed(root string) (Medium, error) { return local.New(root) } @@ -241,7 +241,7 @@ var _ Medium = (*MockMedium)(nil) // NewMockMedium creates a new MockMedium instance. // -// result := io.NewMockMedium(...) +// medium := io.NewMockMedium() func NewMockMedium() *MockMedium { return &MockMedium{ Files: make(map[string]string), diff --git a/local/client.go b/local/client.go index 5481079..0da6458 100644 --- a/local/client.go +++ b/local/client.go @@ -21,8 +21,8 @@ var rawFS = (&core.Fs{}).NewUnrestricted() // // Example usage: // -// m, _ := local.New("/srv/app") -// _ = m.Write("config/app.yaml", "port: 8080") +// medium, _ := local.New("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { abs := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. 
diff --git a/node/node.go b/node/node.go index d59964c..8ae1423 100644 --- a/node/node.go +++ b/node/node.go @@ -32,8 +32,8 @@ var _ fs.ReadFileFS = (*Node)(nil) // // Example usage: // -// n := node.New() -// n.AddData("config/app.yaml", []byte("port: 8080")) +// nodeTree := node.New() +// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func New() *Node { return &Node{files: make(map[string]*dataFile)} } diff --git a/s3/s3.go b/s3/s3.go index 89d6446..7a0df2e 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -90,8 +90,9 @@ func normalisePrefix(prefix string) string { // // Example usage: // -// awsClient := awss3.NewFromConfig(cfg) -// m, _ := s3.New(s3.Options{Bucket: "backups", Client: awsClient, Prefix: "daily/"}) +// config := aws.Config{} +// awsClient := awss3.NewFromConfig(config) +// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: awsClient, Prefix: "daily/"}) func New(options Options) (*Medium, error) { if options.Bucket == "" { return nil, core.E("s3.New", "bucket name is required", nil) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 16d3c49..3b7a39f 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -256,7 +256,7 @@ type ChaChaPolySigil struct { // Example usage: // // key := []byte("0123456789abcdef0123456789abcdef") -// s, _ := sigil.NewChaChaPolySigil(key) +// cipherSigil, _ := sigil.NewChaChaPolySigil(key) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, ErrInvalidKey @@ -277,7 +277,7 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { // Example usage: // // key := []byte("0123456789abcdef0123456789abcdef") -// s, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) +// cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { sigil, err := NewChaChaPolySigil(key) if err != nil { 
diff --git a/sigil/sigils.go b/sigil/sigils.go index d5a66ab..504dcaf 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -192,7 +192,8 @@ type HashSigil struct { // NewHashSigil creates a new HashSigil. // -// result := sigil.NewHashSigil(...) +// hashSigil := sigil.NewHashSigil(crypto.SHA256) +// _ = hashSigil func NewHashSigil(h crypto.Hash) *HashSigil { return &HashSigil{Hash: h} } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 00d09c0..bff84ec 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -17,8 +17,8 @@ import ( // Medium is a SQLite-backed storage backend implementing the io.Medium interface. type Medium struct { - db *sql.DB - table string + database *sql.DB + table string } var _ coreio.Medium = (*Medium)(nil) @@ -43,49 +43,49 @@ func normaliseTableName(table string) string { // // Example usage: // -// m, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) -// _ = m.Write("config/app.yaml", "port: 8080") +// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) +// _ = medium.Write("config/app.yaml", "port: 8080") func New(options Options) (*Medium, error) { if options.Path == "" { return nil, core.E("sqlite.New", "database path is required", nil) } - m := &Medium{table: normaliseTableName(options.Table)} + medium := &Medium{table: normaliseTableName(options.Table)} - db, err := sql.Open("sqlite", options.Path) + database, err := sql.Open("sqlite", options.Path) if err != nil { return nil, core.E("sqlite.New", "failed to open database", err) } // Enable WAL mode for better concurrency - if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { - db.Close() + if _, err := database.Exec("PRAGMA journal_mode=WAL"); err != nil { + database.Close() return nil, core.E("sqlite.New", "failed to set WAL mode", err) } // Create the schema - createSQL := `CREATE TABLE IF NOT EXISTS ` + m.table + ` ( + createSQL := `CREATE TABLE IF NOT EXISTS ` + medium.table + ` ( path TEXT PRIMARY KEY, content BLOB NOT NULL, 
mode INTEGER DEFAULT 420, is_dir BOOLEAN DEFAULT FALSE, mtime DATETIME DEFAULT CURRENT_TIMESTAMP )` - if _, err := db.Exec(createSQL); err != nil { - db.Close() + if _, err := database.Exec(createSQL); err != nil { + database.Close() return nil, core.E("sqlite.New", "failed to create table", err) } - m.db = db - return m, nil + medium.database = database + return medium, nil } // Close closes the underlying database connection. // // result := m.Close(...) func (m *Medium) Close() error { - if m.db != nil { - return m.db.Close() + if m.database != nil { + return m.database.Close() } return nil } @@ -111,7 +111,7 @@ func (m *Medium) Read(p string) (string, error) { var content []byte var isDir bool - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { @@ -142,7 +142,7 @@ func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid) } - _, err := m.db.Exec( + _, err := m.database.Exec( `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?) ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = FALSE, mtime = excluded.mtime`, key, []byte(content), int(mode), time.Now().UTC(), @@ -163,7 +163,7 @@ func (m *Medium) EnsureDir(p string) error { return nil } - _, err := m.db.Exec( + _, err := m.database.Exec( `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, '', 493, TRUE, ?) 
ON CONFLICT(path) DO NOTHING`, key, time.Now().UTC(), @@ -184,7 +184,7 @@ func (m *Medium) IsFile(p string) bool { } var isDir bool - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&isDir) if err != nil { @@ -218,7 +218,7 @@ func (m *Medium) Delete(p string) error { // Check if it's a directory with children var isDir bool - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&isDir) if err == sql.ErrNoRows { @@ -232,7 +232,7 @@ func (m *Medium) Delete(p string) error { // Check for children prefix := key + "/" var count int - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT COUNT(*) FROM `+m.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key, ).Scan(&count) if err != nil { @@ -243,7 +243,7 @@ func (m *Medium) Delete(p string) error { } } - res, err := m.db.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key) + res, err := m.database.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key) if err != nil { return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err) } @@ -266,7 +266,7 @@ func (m *Medium) DeleteAll(p string) error { prefix := key + "/" // Delete the exact path and all children - res, err := m.db.Exec( + res, err := m.database.Exec( `DELETE FROM `+m.table+` WHERE path = ? OR path LIKE ?`, key, prefix+"%", ) @@ -290,7 +290,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) } - tx, err := m.db.Begin() + tx, err := m.database.Begin() if err != nil { return core.E("sqlite.Rename", "begin tx failed", err) } @@ -390,7 +390,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { } // Query all paths under the prefix - rows, err := m.db.Query( + rows, err := m.database.Query( `SELECT path, content, mode, is_dir, mtime FROM `+m.table+` WHERE path LIKE ? 
OR path LIKE ?`, prefix+"%", prefix+"%", ) @@ -471,7 +471,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { var mode int var isDir bool var mtime time.Time - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { @@ -504,7 +504,7 @@ func (m *Medium) Open(p string) (fs.File, error) { var mode int var isDir bool var mtime time.Time - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { @@ -549,7 +549,7 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { } var existing []byte - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT content FROM `+m.table+` WHERE path = ? AND is_dir = FALSE`, key, ).Scan(&existing) if err != nil && err != sql.ErrNoRows { @@ -574,7 +574,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { var content []byte var isDir bool - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { @@ -608,7 +608,7 @@ func (m *Medium) Exists(p string) bool { } var count int - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT COUNT(*) FROM `+m.table+` WHERE path = ?`, key, ).Scan(&count) if err != nil { @@ -627,7 +627,7 @@ func (m *Medium) IsDir(p string) bool { } var isDir bool - err := m.db.QueryRow( + err := m.database.QueryRow( `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, ).Scan(&isDir) if err != nil { @@ -764,7 +764,7 @@ func (w *sqliteWriteCloser) Write(p []byte) (int, error) { // // result := w.Close(...) 
func (w *sqliteWriteCloser) Close() error { - _, err := w.medium.db.Exec( + _, err := w.medium.database.Exec( `INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`, w.path, w.data, time.Now().UTC(), diff --git a/store/medium.go b/store/medium.go index df377d5..2e3fc4f 100644 --- a/store/medium.go +++ b/store/medium.go @@ -15,7 +15,7 @@ import ( // the rest is the key. List("") returns groups as directories, // List("group") returns keys as files. type Medium struct { - s *Store + store *Store } var _ coreio.Medium = (*Medium)(nil) @@ -24,35 +24,35 @@ var _ coreio.Medium = (*Medium)(nil) // // Example usage: // -// m, _ := store.NewMedium("config.db") -// _ = m.Write("app/theme", "midnight") +// medium, _ := store.NewMedium("config.db") +// _ = medium.Write("app/theme", "midnight") func NewMedium(dbPath string) (*Medium, error) { - s, err := New(dbPath) + store, err := New(dbPath) if err != nil { return nil, err } - return &Medium{s: s}, nil + return &Medium{store: store}, nil } // AsMedium returns a Medium adapter for an existing Store. // // result := s.AsMedium(...) func (s *Store) AsMedium() *Medium { - return &Medium{s: s} + return &Medium{store: s} } // Store returns the underlying KV store for direct access. // // result := m.Store(...) func (m *Medium) Store() *Store { - return m.s + return m.store } // Close closes the underlying store. // // result := m.Close(...) func (m *Medium) Close() error { - return m.s.Close() + return m.store.Close() } // splitPath splits a medium-style path into group and key. @@ -78,7 +78,7 @@ func (m *Medium) Read(p string) (string, error) { if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } - return m.s.Get(group, key) + return m.store.Get(group, key) } // Write stores a value at group/key. 
@@ -89,7 +89,7 @@ func (m *Medium) Write(p, content string) error { if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } - return m.s.Set(group, key, content) + return m.store.Set(group, key, content) } // WriteMode ignores the requested mode because key-value entries do not store POSIX permissions. @@ -114,7 +114,7 @@ func (m *Medium) IsFile(p string) bool { if key == "" { return false } - _, err := m.s.Get(group, key) + _, err := m.store.Get(group, key) return err == nil } @@ -141,7 +141,7 @@ func (m *Medium) Delete(p string) error { return core.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { - n, err := m.s.Count(group) + n, err := m.store.Count(group) if err != nil { return err } @@ -150,7 +150,7 @@ func (m *Medium) Delete(p string) error { } return nil } - return m.s.Delete(group, key) + return m.store.Delete(group, key) } // DeleteAll removes a key, or all keys in a group. @@ -162,9 +162,9 @@ func (m *Medium) DeleteAll(p string) error { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } if key == "" { - return m.s.DeleteGroup(group) + return m.store.DeleteGroup(group) } - return m.s.Delete(group, key) + return m.store.Delete(group, key) } // Rename moves a key from one path to another. @@ -176,14 +176,14 @@ func (m *Medium) Rename(oldPath, newPath string) error { if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } - val, err := m.s.Get(oldGroup, oldKey) + val, err := m.store.Get(oldGroup, oldKey) if err != nil { return err } - if err := m.s.Set(newGroup, newKey, val); err != nil { + if err := m.store.Set(newGroup, newKey, val); err != nil { return err } - return m.s.Delete(oldGroup, oldKey) + return m.store.Delete(oldGroup, oldKey) } // List returns directory entries. Empty path returns groups. 
@@ -194,7 +194,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { group, key := splitPath(p) if group == "" { - rows, err := m.s.db.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") + rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") if err != nil { return nil, core.E("store.List", "query groups", err) } @@ -215,7 +215,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { return nil, nil // leaf node, nothing beneath } - all, err := m.s.GetAll(group) + all, err := m.store.GetAll(group) if err != nil { return nil, err } @@ -235,7 +235,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { - n, err := m.s.Count(group) + n, err := m.store.Count(group) if err != nil { return nil, err } @@ -244,7 +244,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { } return &kvFileInfo{name: group, isDir: true}, nil } - val, err := m.s.Get(group, key) + val, err := m.store.Get(group, key) if err != nil { return nil, err } @@ -259,7 +259,7 @@ func (m *Medium) Open(p string) (fs.File, error) { if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } - val, err := m.s.Get(group, key) + val, err := m.store.Get(group, key) if err != nil { return nil, err } @@ -274,7 +274,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } - return &kvWriteCloser{s: m.s, group: group, key: key}, nil + return &kvWriteCloser{store: m.store, group: group, key: key}, nil } // Append opens a key for appending. Content is stored on Close. 
@@ -285,8 +285,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } - existing, _ := m.s.Get(group, key) - return &kvWriteCloser{s: m.s, group: group, key: key, data: []byte(existing)}, nil + existing, _ := m.store.Get(group, key) + return &kvWriteCloser{store: m.store, group: group, key: key, data: []byte(existing)}, nil } // ReadStream returns a reader for the value. @@ -297,7 +297,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } - val, err := m.s.Get(group, key) + val, err := m.store.Get(group, key) if err != nil { return nil, err } @@ -320,10 +320,10 @@ func (m *Medium) Exists(p string) bool { return false } if key == "" { - n, err := m.s.Count(group) + n, err := m.store.Count(group) return err == nil && n > 0 } - _, err := m.s.Get(group, key) + _, err := m.store.Get(group, key) return err == nil } @@ -335,7 +335,7 @@ func (m *Medium) IsDir(p string) bool { if key != "" || group == "" { return false } - n, err := m.s.Count(group) + n, err := m.store.Count(group) return err == nil && n > 0 } @@ -446,7 +446,7 @@ func (f *kvFile) Read(b []byte) (int, error) { func (f *kvFile) Close() error { return nil } type kvWriteCloser struct { - s *Store + store *Store group string key string data []byte @@ -464,5 +464,5 @@ func (w *kvWriteCloser) Write(p []byte) (int, error) { // // result := w.Close(...) func (w *kvWriteCloser) Close() error { - return w.s.Set(w.group, w.key, string(w.data)) + return w.store.Set(w.group, w.key, string(w.data)) } diff --git a/store/store.go b/store/store.go index df5d6a6..5d8880f 100644 --- a/store/store.go +++ b/store/store.go @@ -13,63 +13,63 @@ var ErrNotFound = core.E("store.ErrNotFound", "key not found", nil) // Store is a group-namespaced key-value store backed by SQLite. 
type Store struct { - db *sql.DB + database *sql.DB } // New creates a Store at the given SQLite path. Use ":memory:" for tests. // // Example usage: // -// s, _ := store.New(":memory:") -// _ = s.Set("app", "theme", "midnight") +// kvStore, _ := store.New(":memory:") +// _ = kvStore.Set("app", "theme", "midnight") func New(dbPath string) (*Store, error) { - db, err := sql.Open("sqlite", dbPath) + database, err := sql.Open("sqlite", dbPath) if err != nil { return nil, core.E("store.New", "open db", err) } - if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { - db.Close() + if _, err := database.Exec("PRAGMA journal_mode=WAL"); err != nil { + database.Close() return nil, core.E("store.New", "WAL mode", err) } - if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS kv ( + if _, err := database.Exec(`CREATE TABLE IF NOT EXISTS kv ( grp TEXT NOT NULL, key TEXT NOT NULL, value TEXT NOT NULL, PRIMARY KEY (grp, key) )`); err != nil { - db.Close() + database.Close() return nil, core.E("store.New", "create schema", err) } - return &Store{db: db}, nil + return &Store{database: database}, nil } // Close closes the underlying database. // // result := s.Close(...) func (s *Store) Close() error { - return s.db.Close() + return s.database.Close() } // Get retrieves a value by group and key. // // result := s.Get(...) func (s *Store) Get(group, key string) (string, error) { - var val string - err := s.db.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&val) + var value string + err := s.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) if err == sql.ErrNoRows { return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), ErrNotFound) } if err != nil { return "", core.E("store.Get", "query", err) } - return val, nil + return value, nil } // Set stores a value by group and key, overwriting if exists. // // result := s.Set(...) 
func (s *Store) Set(group, key, value string) error { - _, err := s.db.Exec( + _, err := s.database.Exec( `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) ON CONFLICT(grp, key) DO UPDATE SET value = excluded.value`, group, key, value, @@ -84,7 +84,7 @@ func (s *Store) Set(group, key, value string) error { // // result := s.Delete(...) func (s *Store) Delete(group, key string) error { - _, err := s.db.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) + _, err := s.database.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) if err != nil { return core.E("store.Delete", "exec", err) } @@ -96,7 +96,7 @@ func (s *Store) Delete(group, key string) error { // result := s.Count(...) func (s *Store) Count(group string) (int, error) { var n int - err := s.db.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&n) + err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&n) if err != nil { return 0, core.E("store.Count", "query", err) } @@ -107,7 +107,7 @@ func (s *Store) Count(group string) (int, error) { // // result := s.DeleteGroup(...) func (s *Store) DeleteGroup(group string) error { - _, err := s.db.Exec("DELETE FROM kv WHERE grp = ?", group) + _, err := s.database.Exec("DELETE FROM kv WHERE grp = ?", group) if err != nil { return core.E("store.DeleteGroup", "exec", err) } @@ -118,7 +118,7 @@ func (s *Store) DeleteGroup(group string) error { // // result := s.GetAll(...) 
func (s *Store) GetAll(group string) (map[string]string, error) { - rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) + rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { return nil, core.E("store.GetAll", "query", err) } @@ -142,10 +142,11 @@ func (s *Store) GetAll(group string) (map[string]string, error) { // // Example usage: // -// _ = s.Set("user", "name", "alice") -// out, _ := s.Render("hello {{ .name }}", "user") +// kvStore, _ := store.New(":memory:") +// _ = kvStore.Set("user", "name", "alice") +// out, _ := kvStore.Render("hello {{ .name }}", "user") func (s *Store) Render(tmplStr, group string) (string, error) { - rows, err := s.db.Query("SELECT key, value FROM kv WHERE grp = ?", group) + rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { return "", core.E("store.Render", "query", err) } diff --git a/workspace/service.go b/workspace/service.go index b53a7ba..0ccf0fe 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -48,7 +48,7 @@ var _ Workspace = (*Service)(nil) // // Example usage: // -// svc, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) func New(options Options) (*Service, error) { home := workspaceHome() if home == "" { @@ -91,18 +91,18 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { } hash := sha256.Sum256([]byte(identifier)) - wsID := hex.EncodeToString(hash[:]) - wsPath, err := s.workspacePath("workspace.CreateWorkspace", wsID) + workspaceID := hex.EncodeToString(hash[:]) + workspaceDirectory, err := s.workspacePath("workspace.CreateWorkspace", workspaceID) if err != nil { return "", err } - if s.medium.Exists(wsPath) { + if s.medium.Exists(workspaceDirectory) { return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range 
[]string{"config", "log", "data", "files", "keys"} { - if err := s.medium.EnsureDir(core.Path(wsPath, d)); err != nil { + if err := s.medium.EnsureDir(core.Path(workspaceDirectory, d)); err != nil { return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", d), err) } } @@ -112,11 +112,11 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := s.medium.WriteMode(core.Path(wsPath, "keys", "private.key"), privKey, 0600); err != nil { + if err := s.medium.WriteMode(core.Path(workspaceDirectory, "keys", "private.key"), privKey, 0600); err != nil { return "", core.E("workspace.CreateWorkspace", "failed to save private key", err) } - return wsID, nil + return workspaceID, nil } // SwitchWorkspace changes the active workspace. @@ -126,15 +126,15 @@ func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() - wsPath, err := s.workspacePath("workspace.SwitchWorkspace", name) + workspaceDirectory, err := s.workspacePath("workspace.SwitchWorkspace", name) if err != nil { return err } - if !s.medium.IsDir(wsPath) { + if !s.medium.IsDir(workspaceDirectory) { return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", name), nil) } - s.activeWorkspace = core.PathBase(wsPath) + s.activeWorkspace = core.PathBase(workspaceDirectory) return nil } @@ -145,14 +145,14 @@ func (s *Service) activeFilePath(op, filename string) (string, error) { return "", core.E(op, "no active workspace", nil) } filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") - path, err := joinWithinRoot(filesRoot, filename) + filePath, err := joinWithinRoot(filesRoot, filename) if err != nil { return "", core.E(op, "file path escapes workspace files", fs.ErrPermission) } - if path == filesRoot { + if filePath == filesRoot { return "", core.E(op, "filename is required", fs.ErrInvalid) } - return path, nil + 
return filePath, nil } // WorkspaceFileGet retrieves the content of a file from the active workspace. @@ -162,11 +162,11 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) { s.mu.RLock() defer s.mu.RUnlock() - path, err := s.activeFilePath("workspace.WorkspaceFileGet", filename) + filePath, err := s.activeFilePath("workspace.WorkspaceFileGet", filename) if err != nil { return "", err } - return s.medium.Read(path) + return s.medium.Read(filePath) } // WorkspaceFileSet saves content to a file in the active workspace. @@ -176,33 +176,39 @@ func (s *Service) WorkspaceFileSet(filename, content string) error { s.mu.Lock() defer s.mu.Unlock() - path, err := s.activeFilePath("workspace.WorkspaceFileSet", filename) + filePath, err := s.activeFilePath("workspace.WorkspaceFileSet", filename) if err != nil { return err } - return s.medium.Write(path, content) + return s.medium.Write(filePath, content) } // HandleIPCEvents handles workspace-related IPC messages. // -// result := s.HandleIPCEvents(...) 
-func (s *Service) HandleIPCEvents(c *core.Core, msg core.Message) core.Result { - switch m := msg.(type) { +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) +// result := service.HandleIPCEvents(core.New(), map[string]any{ +// "action": "workspace.create", +// "identifier": "alice", +// "password": "pass123", +// }) +// _ = result.OK +func (s *Service) HandleIPCEvents(_ *core.Core, msg core.Message) core.Result { + switch message := msg.(type) { case map[string]any: - action, _ := m["action"].(string) + action, _ := message["action"].(string) switch action { case "workspace.create": - id, _ := m["identifier"].(string) - pass, _ := m["password"].(string) - wsID, err := s.CreateWorkspace(id, pass) + identifier, _ := message["identifier"].(string) + password, _ := message["password"].(string) + workspaceID, err := s.CreateWorkspace(identifier, password) if err != nil { - return core.Result{} + return core.Result{}.New(err) } - return core.Result{Value: wsID, OK: true} + return core.Result{Value: workspaceID, OK: true} case "workspace.switch": - name, _ := m["name"].(string) + name, _ := message["name"].(string) if err := s.SwitchWorkspace(name); err != nil { - return core.Result{} + return core.Result{}.New(err) } return core.Result{OK: true} } @@ -233,12 +239,12 @@ func (s *Service) workspacePath(op, name string) (string, error) { if name == "" { return "", core.E(op, "workspace name is required", fs.ErrInvalid) } - path, err := joinWithinRoot(s.rootPath, name) + workspaceDirectory, err := joinWithinRoot(s.rootPath, name) if err != nil { return "", core.E(op, "workspace path escapes root", err) } - if core.PathDir(path) != s.rootPath { + if core.PathDir(workspaceDirectory) != s.rootPath { return "", core.E(op, core.Concat("invalid workspace name: ", name), fs.ErrPermission) } - return path, nil + return workspaceDirectory, nil } From 1743b9810e81b3e24426d06b911c70aa126187db Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 
30 Mar 2026 20:10:24 +0000 Subject: [PATCH 11/83] refactor(ax): remove remaining short names --- datanode/client.go | 92 ++++++++++++------------- local/client.go | 159 +++++++++++++++++++++++++++---------------- local/client_test.go | 38 +++++------ 3 files changed, 166 insertions(+), 123 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 83d5aad..6da1bdf 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -34,9 +34,9 @@ var ( // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dataNode *borgdatanode.DataNode - dirs map[string]bool // explicit directory tracking - mu sync.RWMutex + dataNode *borgdatanode.DataNode + directories map[string]bool // explicit directory tracking + mu sync.RWMutex } // New creates a new empty DataNode Medium. @@ -47,8 +47,8 @@ type Medium struct { // _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ - dataNode: borgdatanode.New(), - dirs: make(map[string]bool), + dataNode: borgdatanode.New(), + directories: make(map[string]bool), } } @@ -65,8 +65,8 @@ func FromTar(data []byte) (*Medium, error) { return nil, core.E("datanode.FromTar", "failed to restore", err) } return &Medium{ - dataNode: dataNode, - dirs: make(map[string]bool), + dataNode: dataNode, + directories: make(map[string]bool), }, nil } @@ -95,7 +95,7 @@ func (m *Medium) Restore(data []byte) error { m.mu.Lock() defer m.mu.Unlock() m.dataNode = dataNode - m.dirs = make(map[string]bool) + m.directories = make(map[string]bool) return nil } @@ -109,8 +109,8 @@ func (m *Medium) DataNode() *borgdatanode.DataNode { return m.dataNode } -// clean normalises a path: strips leading slash, cleans traversal. -func clean(p string) string { +// cleanPath normalises a path: strips leading slash, cleans traversal. +func cleanPath(p string) string { p = core.TrimPrefix(p, "/") p = path.Clean(p) if p == "." 
{ @@ -128,7 +128,7 @@ func (m *Medium) Read(p string) (string, error) { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) f, err := m.dataNode.Open(p) if err != nil { return "", core.E("datanode.Read", core.Concat("not found: ", p), fs.ErrNotExist) @@ -157,13 +157,13 @@ func (m *Medium) Write(p, content string) error { m.mu.Lock() defer m.mu.Unlock() - p = clean(p) + p = cleanPath(p) if p == "" { return core.E("datanode.Write", "empty path", fs.ErrInvalid) } m.dataNode.AddData(p, []byte(content)) - // ensure parent dirs are tracked + // ensure parent directories are tracked m.ensureDirsLocked(path.Dir(p)) return nil } @@ -182,7 +182,7 @@ func (m *Medium) EnsureDir(p string) error { m.mu.Lock() defer m.mu.Unlock() - p = clean(p) + p = cleanPath(p) if p == "" { return nil } @@ -194,7 +194,7 @@ func (m *Medium) EnsureDir(p string) error { // Caller must hold m.mu. func (m *Medium) ensureDirsLocked(p string) { for p != "" && p != "." { - m.dirs[p] = true + m.directories[p] = true p = path.Dir(p) if p == "." 
{ break @@ -209,7 +209,7 @@ func (m *Medium) IsFile(p string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) info, err := m.dataNode.Stat(p) return err == nil && !info.IsDir() } @@ -235,7 +235,7 @@ func (m *Medium) Delete(p string) error { m.mu.Lock() defer m.mu.Unlock() - p = clean(p) + p = cleanPath(p) if p == "" { return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } @@ -243,8 +243,8 @@ func (m *Medium) Delete(p string) error { // Check if it's a file in the DataNode info, err := m.dataNode.Stat(p) if err != nil { - // Check explicit dirs - if m.dirs[p] { + // Check explicit directories + if m.directories[p] { // Check if dir is empty hasChildren, err := m.hasPrefixLocked(p + "/") if err != nil { @@ -253,7 +253,7 @@ func (m *Medium) Delete(p string) error { if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) } - delete(m.dirs, p) + delete(m.directories, p) return nil } return core.E("datanode.Delete", core.Concat("not found: ", p), fs.ErrNotExist) @@ -267,7 +267,7 @@ func (m *Medium) Delete(p string) error { if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) } - delete(m.dirs, p) + delete(m.directories, p) return nil } @@ -285,7 +285,7 @@ func (m *Medium) DeleteAll(p string) error { m.mu.Lock() defer m.mu.Unlock() - p = clean(p) + p = cleanPath(p) if p == "" { return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) } @@ -316,10 +316,10 @@ func (m *Medium) DeleteAll(p string) error { } } - // Remove explicit dirs under prefix - for d := range m.dirs { + // Remove explicit directories under prefix + for d := range m.directories { if d == p || core.HasPrefix(d, prefix) { - delete(m.dirs, d) + delete(m.directories, d) found = true } } @@ -337,8 +337,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { m.mu.Lock() defer m.mu.Unlock() - oldPath = clean(oldPath) - newPath = clean(newPath) 
+ oldPath = cleanPath(oldPath) + newPath = cleanPath(newPath) // Check if source is a file info, err := m.dataNode.Stat(oldPath) @@ -382,17 +382,17 @@ func (m *Medium) Rename(oldPath, newPath string) error { } } - // Move explicit dirs + // Move explicit directories dirsToMove := make(map[string]string) - for d := range m.dirs { + for d := range m.directories { if d == oldPath || core.HasPrefix(d, oldPrefix) { newD := core.Concat(newPath, core.TrimPrefix(d, oldPath)) dirsToMove[d] = newD } } for old, nw := range dirsToMove { - delete(m.dirs, old) - m.dirs[nw] = true + delete(m.directories, old) + m.directories[nw] = true } return nil @@ -405,12 +405,12 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) entries, err := m.dataNode.ReadDir(p) if err != nil { - // Check explicit dirs - if p == "" || m.dirs[p] { + // Check explicit directories + if p == "" || m.directories[p] { return []fs.DirEntry{}, nil } return nil, core.E("datanode.List", core.Concat("not found: ", p), fs.ErrNotExist) @@ -426,7 +426,7 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { seen[e.Name()] = true } - for d := range m.dirs { + for d := range m.directories { if !core.HasPrefix(d, prefix) { continue } @@ -455,7 +455,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) if p == "" { return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil } @@ -465,7 +465,7 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { return info, nil } - if m.dirs[p] { + if m.directories[p] { return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil } return nil, core.E("datanode.Stat", core.Concat("not found: ", p), fs.ErrNotExist) @@ -478,7 +478,7 @@ func (m *Medium) Open(p string) (fs.File, error) { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) return m.dataNode.Open(p) } @@ -486,7 +486,7 @@ func 
(m *Medium) Open(p string) (fs.File, error) { // // result := m.Create(...) func (m *Medium) Create(p string) (goio.WriteCloser, error) { - p = clean(p) + p = cleanPath(p) if p == "" { return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } @@ -497,7 +497,7 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { // // result := m.Append(...) func (m *Medium) Append(p string) (goio.WriteCloser, error) { - p = clean(p) + p = cleanPath(p) if p == "" { return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) } @@ -525,7 +525,7 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) f, err := m.dataNode.Open(p) if err != nil { return nil, core.E("datanode.ReadStream", core.Concat("not found: ", p), fs.ErrNotExist) @@ -547,7 +547,7 @@ func (m *Medium) Exists(p string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) if p == "" { return true // root always exists } @@ -555,7 +555,7 @@ func (m *Medium) Exists(p string) bool { if err == nil { return true } - return m.dirs[p] + return m.directories[p] } // IsDir documents the IsDir operation. @@ -565,7 +565,7 @@ func (m *Medium) IsDir(p string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = clean(p) + p = cleanPath(p) if p == "" { return true } @@ -573,7 +573,7 @@ func (m *Medium) IsDir(p string) bool { if err == nil { return info.IsDir() } - return m.dirs[p] + return m.directories[p] } // --- internal helpers --- @@ -589,7 +589,7 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { return true, nil } } - for d := range m.dirs { + for d := range m.directories { if core.HasPrefix(d, prefix) { return true, nil } diff --git a/local/client.go b/local/client.go index 0da6458..942e12f 100644 --- a/local/client.go +++ b/local/client.go @@ -11,10 +11,10 @@ import ( // Medium is a local filesystem storage backend. 
type Medium struct { - root string + filesystemRoot string } -var rawFS = (&core.Fs{}).NewUnrestricted() +var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() // New creates a new local Medium rooted at the given directory. // Pass "/" for full filesystem access, or a specific path to sandbox. @@ -24,15 +24,15 @@ var rawFS = (&core.Fs{}).NewUnrestricted() // medium, _ := local.New("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { - abs := absolutePath(root) + absoluteRoot := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. // On macOS, /var is a symlink to /private/var — without this, // resolving child paths resolves to /private/var/... while // root stays /var/..., causing false sandbox escape detections. - if resolved, err := resolveSymlinksPath(abs); err == nil { - abs = resolved + if resolvedRoot, err := resolveSymlinksPath(absoluteRoot); err == nil { + absoluteRoot = resolvedRoot } - return &Medium{root: abs}, nil + return &Medium{filesystemRoot: absoluteRoot}, nil } func dirSeparator() string { @@ -154,8 +154,8 @@ func canonicalPath(p string) string { return absolutePath(p) } -func isProtectedPath(full string) bool { - full = canonicalPath(full) +func isProtectedPath(fullPath string) bool { + fullPath = canonicalPath(fullPath) protected := map[string]struct{}{ canonicalPath(dirSeparator()): {}, } @@ -165,7 +165,7 @@ func isProtectedPath(full string) bool { } protected[canonicalPath(home)] = struct{}{} } - _, ok := protected[full] + _, ok := protected[fullPath] return ok } @@ -177,17 +177,17 @@ func logSandboxEscape(root, path, attempted string) { core.Security("sandbox escape detected", "root", root, "path", path, "attempted", attempted, "user", username) } -// path sanitises and returns the full path. +// sandboxedPath sanitises and returns the full filesystem path. // Absolute paths are sandboxed under root (unless root is "/"). 
-func (m *Medium) path(p string) string { +func (m *Medium) sandboxedPath(p string) string { if p == "" { - return m.root + return m.filesystemRoot } // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. - if m.root == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { + if m.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { return core.Path(currentWorkingDir(), normalisePath(p)) } @@ -196,23 +196,23 @@ func (m *Medium) path(p string) string { clean := cleanSandboxPath(p) // If root is "/", allow absolute paths through - if m.root == dirSeparator() { + if m.filesystemRoot == dirSeparator() { return clean } // Join cleaned relative path with root - return core.Path(m.root, core.TrimPrefix(clean, dirSeparator())) + return core.Path(m.filesystemRoot, core.TrimPrefix(clean, dirSeparator())) } // validatePath ensures the path is within the sandbox, following symlinks if they exist. func (m *Medium) validatePath(p string) (string, error) { - if m.root == dirSeparator() { - return m.path(p), nil + if m.filesystemRoot == dirSeparator() { + return m.sandboxedPath(p), nil } // Split the cleaned path into components parts := splitPathParts(cleanSandboxPath(p)) - current := m.root + current := m.filesystemRoot for _, part := range parts { next := core.Path(current, part) @@ -229,9 +229,9 @@ func (m *Medium) validatePath(p string) (string, error) { } // Verify the resolved part is still within the root - if !isWithinRoot(m.root, realNext) { + if !isWithinRoot(m.filesystemRoot, realNext) { // Security event: sandbox escape attempt - logSandboxEscape(m.root, p, realNext) + logSandboxEscape(m.filesystemRoot, p, realNext) return "", fs.ErrPermission } current = realNext @@ -244,11 +244,11 @@ func (m *Medium) validatePath(p string) (string, error) { // // result := m.Read(...) 
func (m *Medium) Read(p string) (string, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return "", err } - return resultValue[string]("local.Read", core.Concat("read failed: ", p), rawFS.Read(full)) + return resultString("local.Read", core.Concat("read failed: ", p), unrestrictedFileSystem.Read(resolvedPath)) } // Write saves content to file, creating parent directories as needed. @@ -265,22 +265,22 @@ func (m *Medium) Write(p, content string) error { // // result := m.WriteMode(...) func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return err } - return resultErr("local.WriteMode", core.Concat("write failed: ", p), rawFS.WriteMode(full, content, mode)) + return resultErr("local.WriteMode", core.Concat("write failed: ", p), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } // EnsureDir creates directory if it doesn't exist. // // result := m.EnsureDir(...) func (m *Medium) EnsureDir(p string) error { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return err } - return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", p), rawFS.EnsureDir(full)) + return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", p), unrestrictedFileSystem.EnsureDir(resolvedPath)) } // IsDir returns true if path is a directory. @@ -290,11 +290,11 @@ func (m *Medium) IsDir(p string) bool { if p == "" { return false } - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return false } - return rawFS.IsDir(full) + return unrestrictedFileSystem.IsDir(resolvedPath) } // IsFile returns true if path is a regular file. 
@@ -304,77 +304,77 @@ func (m *Medium) IsFile(p string) bool { if p == "" { return false } - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return false } - return rawFS.IsFile(full) + return unrestrictedFileSystem.IsFile(resolvedPath) } // Exists returns true if path exists. // // result := m.Exists(...) func (m *Medium) Exists(p string) bool { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return false } - return rawFS.Exists(full) + return unrestrictedFileSystem.Exists(resolvedPath) } // List returns directory entries. // // result := m.List(...) func (m *Medium) List(p string) ([]fs.DirEntry, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return nil, err } - return resultValue[[]fs.DirEntry]("local.List", core.Concat("list failed: ", p), rawFS.List(full)) + return resultDirEntries("local.List", core.Concat("list failed: ", p), unrestrictedFileSystem.List(resolvedPath)) } // Stat returns file info. // // result := m.Stat(...) func (m *Medium) Stat(p string) (fs.FileInfo, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return nil, err } - return resultValue[fs.FileInfo]("local.Stat", core.Concat("stat failed: ", p), rawFS.Stat(full)) + return resultFileInfo("local.Stat", core.Concat("stat failed: ", p), unrestrictedFileSystem.Stat(resolvedPath)) } // Open opens the named file for reading. // // result := m.Open(...) func (m *Medium) Open(p string) (fs.File, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return nil, err } - return resultValue[fs.File]("local.Open", core.Concat("open failed: ", p), rawFS.Open(full)) + return resultFile("local.Open", core.Concat("open failed: ", p), unrestrictedFileSystem.Open(resolvedPath)) } // Create creates or truncates the named file. // // result := m.Create(...) 
func (m *Medium) Create(p string) (goio.WriteCloser, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return nil, err } - return resultValue[goio.WriteCloser]("local.Create", core.Concat("create failed: ", p), rawFS.Create(full)) + return resultWriteCloser("local.Create", core.Concat("create failed: ", p), unrestrictedFileSystem.Create(resolvedPath)) } // Append opens the named file for appending, creating it if it doesn't exist. // // result := m.Append(...) func (m *Medium) Append(p string) (goio.WriteCloser, error) { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return nil, err } - return resultValue[goio.WriteCloser]("local.Append", core.Concat("append failed: ", p), rawFS.Append(full)) + return resultWriteCloser("local.Append", core.Concat("append failed: ", p), unrestrictedFileSystem.Append(resolvedPath)) } // ReadStream returns a reader for the file content. @@ -405,43 +405,43 @@ func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { // // result := m.Delete(...) func (m *Medium) Delete(p string) error { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return err } - if isProtectedPath(full) { - return core.E("local.Delete", core.Concat("refusing to delete protected path: ", full), nil) + if isProtectedPath(resolvedPath) { + return core.E("local.Delete", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.Delete", core.Concat("delete failed: ", p), rawFS.Delete(full)) + return resultErr("local.Delete", core.Concat("delete failed: ", p), unrestrictedFileSystem.Delete(resolvedPath)) } // DeleteAll removes a file or directory recursively. // // result := m.DeleteAll(...) 
func (m *Medium) DeleteAll(p string) error { - full, err := m.validatePath(p) + resolvedPath, err := m.validatePath(p) if err != nil { return err } - if isProtectedPath(full) { - return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", full), nil) + if isProtectedPath(resolvedPath) { + return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.DeleteAll", core.Concat("delete all failed: ", p), rawFS.DeleteAll(full)) + return resultErr("local.DeleteAll", core.Concat("delete all failed: ", p), unrestrictedFileSystem.DeleteAll(resolvedPath)) } // Rename moves a file or directory. // // result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { - oldFull, err := m.validatePath(oldPath) + oldResolvedPath, err := m.validatePath(oldPath) if err != nil { return err } - newFull, err := m.validatePath(newPath) + newResolvedPath, err := m.validatePath(newPath) if err != nil { return err } - return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), rawFS.Rename(oldFull, newFull)) + return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) } // FileGet is an alias for Read. 
@@ -495,14 +495,57 @@ func resultErr(op, msg string, result core.Result) error { return core.E(op, msg, nil) } -func resultValue[T any](op, msg string, result core.Result) (T, error) { - var zero T +func resultString(op, msg string, result core.Result) (string, error) { if !result.OK { - return zero, resultErr(op, msg, result) + return "", resultErr(op, msg, result) } - value, ok := result.Value.(T) + value, ok := result.Value.(string) if !ok { - return zero, core.E(op, "unexpected result type", nil) + return "", core.E(op, "unexpected result type", nil) } return value, nil } + +func resultDirEntries(op, msg string, result core.Result) ([]fs.DirEntry, error) { + if !result.OK { + return nil, resultErr(op, msg, result) + } + entries, ok := result.Value.([]fs.DirEntry) + if !ok { + return nil, core.E(op, "unexpected result type", nil) + } + return entries, nil +} + +func resultFileInfo(op, msg string, result core.Result) (fs.FileInfo, error) { + if !result.OK { + return nil, resultErr(op, msg, result) + } + fileInfo, ok := result.Value.(fs.FileInfo) + if !ok { + return nil, core.E(op, "unexpected result type", nil) + } + return fileInfo, nil +} + +func resultFile(op, msg string, result core.Result) (fs.File, error) { + if !result.OK { + return nil, resultErr(op, msg, result) + } + file, ok := result.Value.(fs.File) + if !ok { + return nil, core.E(op, "unexpected result type", nil) + } + return file, nil +} + +func resultWriteCloser(op, msg string, result core.Result) (goio.WriteCloser, error) { + if !result.OK { + return nil, resultErr(op, msg, result) + } + writer, ok := result.Value.(goio.WriteCloser) + if !ok { + return nil, core.E(op, "unexpected result type", nil) + } + return writer, nil +} diff --git a/local/client_test.go b/local/client_test.go index 9acd09c..50e70e3 100644 --- a/local/client_test.go +++ b/local/client_test.go @@ -18,37 +18,37 @@ func TestClient_New_ResolvesRoot_Good(t *testing.T) { // New() resolves symlinks (macOS /var → /private/var), so 
compare resolved paths. resolved, err := resolveSymlinksPath(root) require.NoError(t, err) - assert.Equal(t, resolved, m.root) + assert.Equal(t, resolved, m.filesystemRoot) } func TestClient_Path_Sandboxed_Good(t *testing.T) { - m := &Medium{root: "/home/user"} + m := &Medium{filesystemRoot: "/home/user"} // Normal paths - assert.Equal(t, "/home/user/file.txt", m.path("file.txt")) - assert.Equal(t, "/home/user/dir/file.txt", m.path("dir/file.txt")) + assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("file.txt")) + assert.Equal(t, "/home/user/dir/file.txt", m.sandboxedPath("dir/file.txt")) // Empty returns root - assert.Equal(t, "/home/user", m.path("")) + assert.Equal(t, "/home/user", m.sandboxedPath("")) // Traversal attempts get sanitised - assert.Equal(t, "/home/user/file.txt", m.path("../file.txt")) - assert.Equal(t, "/home/user/file.txt", m.path("dir/../file.txt")) + assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("../file.txt")) + assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("dir/../file.txt")) // Absolute paths are constrained to sandbox (no escape) - assert.Equal(t, "/home/user/etc/passwd", m.path("/etc/passwd")) + assert.Equal(t, "/home/user/etc/passwd", m.sandboxedPath("/etc/passwd")) } func TestClient_Path_RootFilesystem_Good(t *testing.T) { - m := &Medium{root: "/"} + m := &Medium{filesystemRoot: "/"} // When root is "/", absolute paths pass through - assert.Equal(t, "/etc/passwd", m.path("/etc/passwd")) - assert.Equal(t, "/home/user/file.txt", m.path("/home/user/file.txt")) + assert.Equal(t, "/etc/passwd", m.sandboxedPath("/etc/passwd")) + assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("/home/user/file.txt")) // Relative paths are relative to CWD when root is "/" cwd := currentWorkingDir() - assert.Equal(t, core.Path(cwd, "file.txt"), m.path("file.txt")) + assert.Equal(t, core.Path(cwd, "file.txt"), m.sandboxedPath("file.txt")) } func TestClient_ReadWrite_Basic_Good(t *testing.T) { @@ -439,18 +439,18 @@ func 
TestClient_WriteStream_Basic_Good(t *testing.T) { } func TestClient_Path_TraversalAdvanced_Ugly(t *testing.T) { - m := &Medium{root: "/sandbox"} + m := &Medium{filesystemRoot: "/sandbox"} // Multiple levels of traversal - assert.Equal(t, "/sandbox/file.txt", m.path("../../../file.txt")) - assert.Equal(t, "/sandbox/target", m.path("dir/../../target")) + assert.Equal(t, "/sandbox/file.txt", m.sandboxedPath("../../../file.txt")) + assert.Equal(t, "/sandbox/target", m.sandboxedPath("dir/../../target")) // Traversal with hidden files - assert.Equal(t, "/sandbox/.ssh/id_rsa", m.path(".ssh/id_rsa")) - assert.Equal(t, "/sandbox/id_rsa", m.path(".ssh/../id_rsa")) + assert.Equal(t, "/sandbox/.ssh/id_rsa", m.sandboxedPath(".ssh/id_rsa")) + assert.Equal(t, "/sandbox/id_rsa", m.sandboxedPath(".ssh/../id_rsa")) // Null bytes (Go's filepath.Clean handles them, but good to check) - assert.Equal(t, "/sandbox/file\x00.txt", m.path("file\x00.txt")) + assert.Equal(t, "/sandbox/file\x00.txt", m.sandboxedPath("file\x00.txt")) } func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { @@ -468,7 +468,7 @@ func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { // Test 1: Simple traversal _, err = m.validatePath("../outside.txt") - assert.NoError(t, err) // path() sanitises to root, so this shouldn't escape + assert.NoError(t, err) // sandboxedPath sanitises to root, so this shouldn't escape // Test 2: Symlink escape // Create a symlink inside the sandbox pointing outside From 0cb59850f5bde8c847a1aeef21d9e326762d1095 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:18:30 +0000 Subject: [PATCH 12/83] refactor(ax): expand remaining API names --- datanode/client.go | 315 +++++++++++++++---------------------------- io.go | 58 +------- local/client.go | 194 +++++++++++++------------- node/node.go | 229 +++++++++++-------------------- s3/s3.go | 127 ++++++----------- sqlite/sqlite.go | 125 ++++++----------- store/medium.go | 149 +++++++------------- store/store.go | 
22 +-- workspace/service.go | 32 ++--- 9 files changed, 444 insertions(+), 807 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 6da1bdf..5d83ee7 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -110,194 +110,167 @@ func (m *Medium) DataNode() *borgdatanode.DataNode { } // cleanPath normalises a path: strips leading slash, cleans traversal. -func cleanPath(p string) string { - p = core.TrimPrefix(p, "/") - p = path.Clean(p) - if p == "." { +func cleanPath(filePath string) string { + filePath = core.TrimPrefix(filePath, "/") + filePath = path.Clean(filePath) + if filePath == "." { return "" } - return p + return filePath } // --- io.Medium interface --- -// Read documents the Read operation. -// -// result := m.Read(...) -func (m *Medium) Read(p string) (string, error) { +func (m *Medium) Read(filePath string) (string, error) { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - f, err := m.dataNode.Open(p) + filePath = cleanPath(filePath) + f, err := m.dataNode.Open(filePath) if err != nil { - return "", core.E("datanode.Read", core.Concat("not found: ", p), fs.ErrNotExist) + return "", core.E("datanode.Read", core.Concat("not found: ", filePath), fs.ErrNotExist) } defer f.Close() info, err := f.Stat() if err != nil { - return "", core.E("datanode.Read", core.Concat("stat failed: ", p), err) + return "", core.E("datanode.Read", core.Concat("stat failed: ", filePath), err) } if info.IsDir() { - return "", core.E("datanode.Read", core.Concat("is a directory: ", p), fs.ErrInvalid) + return "", core.E("datanode.Read", core.Concat("is a directory: ", filePath), fs.ErrInvalid) } data, err := goio.ReadAll(f) if err != nil { - return "", core.E("datanode.Read", core.Concat("read failed: ", p), err) + return "", core.E("datanode.Read", core.Concat("read failed: ", filePath), err) } return string(data), nil } -// Write documents the Write operation. -// -// result := m.Write(...) 
-func (m *Medium) Write(p, content string) error { +func (m *Medium) Write(filePath, content string) error { m.mu.Lock() defer m.mu.Unlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return core.E("datanode.Write", "empty path", fs.ErrInvalid) } - m.dataNode.AddData(p, []byte(content)) + m.dataNode.AddData(filePath, []byte(content)) // ensure parent directories are tracked - m.ensureDirsLocked(path.Dir(p)) + m.ensureDirsLocked(path.Dir(filePath)) return nil } -// WriteMode documents the WriteMode operation. -// -// result := m.WriteMode(...) -func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { - return m.Write(p, content) +func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + return m.Write(filePath, content) } -// EnsureDir documents the EnsureDir operation. -// -// result := m.EnsureDir(...) -func (m *Medium) EnsureDir(p string) error { +func (m *Medium) EnsureDir(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return nil } - m.ensureDirsLocked(p) + m.ensureDirsLocked(filePath) return nil } // ensureDirsLocked marks a directory and all ancestors as existing. // Caller must hold m.mu. -func (m *Medium) ensureDirsLocked(p string) { - for p != "" && p != "." { - m.directories[p] = true - p = path.Dir(p) - if p == "." { +func (m *Medium) ensureDirsLocked(directoryPath string) { + for directoryPath != "" && directoryPath != "." { + m.directories[directoryPath] = true + directoryPath = path.Dir(directoryPath) + if directoryPath == "." { break } } } -// IsFile documents the IsFile operation. -// -// result := m.IsFile(...) 
-func (m *Medium) IsFile(p string) bool { +func (m *Medium) IsFile(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - info, err := m.dataNode.Stat(p) + filePath = cleanPath(filePath) + info, err := m.dataNode.Stat(filePath) return err == nil && !info.IsDir() } -// FileGet documents the FileGet operation. -// -// result := m.FileGet(...) -func (m *Medium) FileGet(p string) (string, error) { - return m.Read(p) +func (m *Medium) FileGet(filePath string) (string, error) { + return m.Read(filePath) } -// FileSet documents the FileSet operation. -// -// result := m.FileSet(...) -func (m *Medium) FileSet(p, content string) error { - return m.Write(p, content) +func (m *Medium) FileSet(filePath, content string) error { + return m.Write(filePath, content) } -// Delete documents the Delete operation. -// -// result := m.Delete(...) -func (m *Medium) Delete(p string) error { +func (m *Medium) Delete(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } // Check if it's a file in the DataNode - info, err := m.dataNode.Stat(p) + info, err := m.dataNode.Stat(filePath) if err != nil { // Check explicit directories - if m.directories[p] { + if m.directories[filePath] { // Check if dir is empty - hasChildren, err := m.hasPrefixLocked(p + "/") + hasChildren, err := m.hasPrefixLocked(filePath + "/") if err != nil { - return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", p), err) + return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", filePath), err) } if hasChildren { - return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) + return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directories, p) + delete(m.directories, filePath) return nil } - return 
core.E("datanode.Delete", core.Concat("not found: ", p), fs.ErrNotExist) + return core.E("datanode.Delete", core.Concat("not found: ", filePath), fs.ErrNotExist) } if info.IsDir() { - hasChildren, err := m.hasPrefixLocked(p + "/") + hasChildren, err := m.hasPrefixLocked(filePath + "/") if err != nil { - return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", p), err) + return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", filePath), err) } if hasChildren { - return core.E("datanode.Delete", core.Concat("directory not empty: ", p), fs.ErrExist) + return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directories, p) + delete(m.directories, filePath) return nil } // Remove the file by creating a new DataNode without it - if err := m.removeFileLocked(p); err != nil { - return core.E("datanode.Delete", core.Concat("failed to delete file: ", p), err) + if err := m.removeFileLocked(filePath); err != nil { + return core.E("datanode.Delete", core.Concat("failed to delete file: ", filePath), err) } return nil } -// DeleteAll documents the DeleteAll operation. -// -// result := m.DeleteAll(...) 
-func (m *Medium) DeleteAll(p string) error { +func (m *Medium) DeleteAll(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) } - prefix := p + "/" + prefix := filePath + "/" found := false - // Check if p itself is a file - info, err := m.dataNode.Stat(p) + // Check if filePath itself is a file + info, err := m.dataNode.Stat(filePath) if err == nil && !info.IsDir() { - if err := m.removeFileLocked(p); err != nil { - return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", p), err) + if err := m.removeFileLocked(filePath); err != nil { + return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", filePath), err) } found = true } @@ -305,10 +278,10 @@ func (m *Medium) DeleteAll(p string) error { // Remove all files under prefix entries, err := m.collectAllLocked() if err != nil { - return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", p), err) + return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", filePath), err) } for _, name := range entries { - if name == p || core.HasPrefix(name, prefix) { + if name == filePath || core.HasPrefix(name, prefix) { if err := m.removeFileLocked(name); err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", name), err) } @@ -317,22 +290,19 @@ func (m *Medium) DeleteAll(p string) error { } // Remove explicit directories under prefix - for d := range m.directories { - if d == p || core.HasPrefix(d, prefix) { - delete(m.directories, d) + for directoryPath := range m.directories { + if directoryPath == filePath || core.HasPrefix(directoryPath, prefix) { + delete(m.directories, directoryPath) found = true } } if !found { - return core.E("datanode.DeleteAll", core.Concat("not found: ", p), fs.ErrNotExist) + return core.E("datanode.DeleteAll", core.Concat("not found: 
", filePath), fs.ErrNotExist) } return nil } -// Rename documents the Rename operation. -// -// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { m.mu.Lock() defer m.mu.Unlock() @@ -398,26 +368,23 @@ func (m *Medium) Rename(oldPath, newPath string) error { return nil } -// List documents the List operation. -// -// result := m.List(...) -func (m *Medium) List(p string) ([]fs.DirEntry, error) { +func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) + filePath = cleanPath(filePath) - entries, err := m.dataNode.ReadDir(p) + entries, err := m.dataNode.ReadDir(filePath) if err != nil { // Check explicit directories - if p == "" || m.directories[p] { + if filePath == "" || m.directories[filePath] { return []fs.DirEntry{}, nil } - return nil, core.E("datanode.List", core.Concat("not found: ", p), fs.ErrNotExist) + return nil, core.E("datanode.List", core.Concat("not found: ", filePath), fs.ErrNotExist) } // Also include explicit subdirectories not discovered via files - prefix := p + prefix := filePath if prefix != "" { prefix += "/" } @@ -448,132 +415,108 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { return entries, nil } -// Stat documents the Stat operation. -// -// result := m.Stat(...) 
-func (m *Medium) Stat(p string) (fs.FileInfo, error) { +func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil } - info, err := m.dataNode.Stat(p) + info, err := m.dataNode.Stat(filePath) if err == nil { return info, nil } - if m.directories[p] { - return &fileInfo{name: path.Base(p), isDir: true, mode: fs.ModeDir | 0755}, nil + if m.directories[filePath] { + return &fileInfo{name: path.Base(filePath), isDir: true, mode: fs.ModeDir | 0755}, nil } - return nil, core.E("datanode.Stat", core.Concat("not found: ", p), fs.ErrNotExist) + return nil, core.E("datanode.Stat", core.Concat("not found: ", filePath), fs.ErrNotExist) } -// Open documents the Open operation. -// -// result := m.Open(...) -func (m *Medium) Open(p string) (fs.File, error) { +func (m *Medium) Open(filePath string) (fs.File, error) { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - return m.dataNode.Open(p) + filePath = cleanPath(filePath) + return m.dataNode.Open(filePath) } -// Create documents the Create operation. -// -// result := m.Create(...) -func (m *Medium) Create(p string) (goio.WriteCloser, error) { - p = cleanPath(p) - if p == "" { +func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { + filePath = cleanPath(filePath) + if filePath == "" { return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } - return &writeCloser{medium: m, path: p}, nil + return &writeCloser{medium: m, path: filePath}, nil } -// Append documents the Append operation. -// -// result := m.Append(...) 
-func (m *Medium) Append(p string) (goio.WriteCloser, error) { - p = cleanPath(p) - if p == "" { +func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { + filePath = cleanPath(filePath) + if filePath == "" { return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) } // Read existing content var existing []byte m.mu.RLock() - if m.IsFile(p) { - data, err := m.readFileLocked(p) + if m.IsFile(filePath) { + data, err := m.readFileLocked(filePath) if err != nil { m.mu.RUnlock() - return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", p), err) + return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", filePath), err) } existing = data } m.mu.RUnlock() - return &writeCloser{medium: m, path: p, buf: existing}, nil + return &writeCloser{medium: m, path: filePath, buf: existing}, nil } -// ReadStream documents the ReadStream operation. -// -// result := m.ReadStream(...) -func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { +func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - f, err := m.dataNode.Open(p) + filePath = cleanPath(filePath) + f, err := m.dataNode.Open(filePath) if err != nil { - return nil, core.E("datanode.ReadStream", core.Concat("not found: ", p), fs.ErrNotExist) + return nil, core.E("datanode.ReadStream", core.Concat("not found: ", filePath), fs.ErrNotExist) } return f.(goio.ReadCloser), nil } -// WriteStream documents the WriteStream operation. -// -// result := m.WriteStream(...) -func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { - return m.Create(p) +func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return m.Create(filePath) } -// Exists documents the Exists operation. -// -// result := m.Exists(...) 
-func (m *Medium) Exists(p string) bool { +func (m *Medium) Exists(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return true // root always exists } - _, err := m.dataNode.Stat(p) + _, err := m.dataNode.Stat(filePath) if err == nil { return true } - return m.directories[p] + return m.directories[filePath] } -// IsDir documents the IsDir operation. -// -// result := m.IsDir(...) -func (m *Medium) IsDir(p string) bool { +func (m *Medium) IsDir(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - p = cleanPath(p) - if p == "" { + filePath = cleanPath(filePath) + if filePath == "" { return true } - info, err := m.dataNode.Stat(p) + info, err := m.dataNode.Stat(filePath) if err == nil { return info.IsDir() } - return m.directories[p] + return m.directories[filePath] } // --- internal helpers --- @@ -600,12 +543,12 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { // collectAllLocked returns all file paths in the DataNode. Caller holds lock. func (m *Medium) collectAllLocked() ([]string, error) { var names []string - err := dataNodeWalkDir(m.dataNode, ".", func(p string, d fs.DirEntry, err error) error { + err := dataNodeWalkDir(m.dataNode, ".", func(filePath string, entry fs.DirEntry, err error) error { if err != nil { return err } - if !d.IsDir() { - names = append(names, p) + if !entry.IsDir() { + names = append(names, filePath) } return nil }) @@ -659,17 +602,11 @@ type writeCloser struct { buf []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *writeCloser) Write(p []byte) (int, error) { w.buf = append(w.buf, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) func (w *writeCloser) Close() error { w.medium.mu.Lock() defer w.medium.mu.Unlock() @@ -685,24 +622,12 @@ type dirEntry struct { name string } -// Name documents the Name operation. 
-// -// result := d.Name(...) func (d *dirEntry) Name() string { return d.name } -// IsDir documents the IsDir operation. -// -// result := d.IsDir(...) func (d *dirEntry) IsDir() bool { return true } -// Type documents the Type operation. -// -// result := d.Type(...) func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir } -// Info documents the Info operation. -// -// result := d.Info(...) func (d *dirEntry) Info() (fs.FileInfo, error) { return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil } @@ -715,32 +640,14 @@ type fileInfo struct { isDir bool } -// Name documents the Name operation. -// -// result := fi.Name(...) func (fi *fileInfo) Name() string { return fi.name } -// Size documents the Size operation. -// -// result := fi.Size(...) func (fi *fileInfo) Size() int64 { return fi.size } -// Mode documents the Mode operation. -// -// result := fi.Mode(...) func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } -// ModTime documents the ModTime operation. -// -// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -// IsDir documents the IsDir operation. -// -// result := fi.IsDir(...) func (fi *fileInfo) IsDir() bool { return fi.isDir } -// Sys documents the Sys operation. -// -// result := fi.Sys(...) func (fi *fileInfo) Sys() any { return nil } diff --git a/io.go b/io.go index e40b5d5..c53f165 100644 --- a/io.go +++ b/io.go @@ -85,34 +85,16 @@ type FileInfo struct { isDir bool } -// Name documents the Name operation. -// -// result := fi.Name(...) func (fi FileInfo) Name() string { return fi.name } -// Size documents the Size operation. -// -// result := fi.Size(...) func (fi FileInfo) Size() int64 { return fi.size } -// Mode documents the Mode operation. -// -// result := fi.Mode(...) func (fi FileInfo) Mode() fs.FileMode { return fi.mode } -// ModTime documents the ModTime operation. -// -// result := fi.ModTime(...) 
func (fi FileInfo) ModTime() time.Time { return fi.modTime } -// IsDir documents the IsDir operation. -// -// result := fi.IsDir(...) func (fi FileInfo) IsDir() bool { return fi.isDir } -// Sys documents the Sys operation. -// -// result := fi.Sys(...) func (fi FileInfo) Sys() any { return nil } // DirEntry provides a simple implementation of fs.DirEntry for mock testing. @@ -123,24 +105,12 @@ type DirEntry struct { info fs.FileInfo } -// Name documents the Name operation. -// -// result := de.Name(...) func (de DirEntry) Name() string { return de.name } -// IsDir documents the IsDir operation. -// -// result := de.IsDir(...) func (de DirEntry) IsDir() bool { return de.isDir } -// Type documents the Type operation. -// -// result := de.Type(...) func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } -// Info documents the Info operation. -// -// result := de.Info(...) func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // Local is a pre-initialised medium for the local filesystem. @@ -217,13 +187,13 @@ func IsFile(m Medium, path string) bool { // Copy copies a file from one medium to another. // // result := io.Copy(...) 
-func Copy(src Medium, srcPath string, dst Medium, dstPath string) error { - content, err := src.Read(srcPath) +func Copy(source Medium, sourcePath string, destination Medium, destinationPath string) error { + content, err := source.Read(sourcePath) if err != nil { - return core.E("io.Copy", core.Concat("read failed: ", srcPath), err) + return core.E("io.Copy", core.Concat("read failed: ", sourcePath), err) } - if err := dst.Write(dstPath, content); err != nil { - return core.E("io.Copy", core.Concat("write failed: ", dstPath), err) + if err := destination.Write(destinationPath, content); err != nil { + return core.E("io.Copy", core.Concat("write failed: ", destinationPath), err) } return nil } @@ -270,9 +240,6 @@ func (m *MockMedium) Write(path, content string) error { return nil } -// WriteMode documents the WriteMode operation. -// -// result := m.WriteMode(...) func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { return m.Write(path, content) } @@ -493,9 +460,6 @@ type MockFile struct { offset int64 } -// Stat documents the Stat operation. -// -// result := f.Stat(...) func (f *MockFile) Stat() (fs.FileInfo, error) { return FileInfo{ name: f.name, @@ -503,9 +467,6 @@ func (f *MockFile) Stat() (fs.FileInfo, error) { }, nil } -// Read documents the Read operation. -// -// result := f.Read(...) func (f *MockFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -515,9 +476,6 @@ func (f *MockFile) Read(b []byte) (int, error) { return n, nil } -// Close documents the Close operation. -// -// result := f.Close(...) func (f *MockFile) Close() error { return nil } @@ -529,17 +487,11 @@ type MockWriteCloser struct { data []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *MockWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) 
func (w *MockWriteCloser) Close() error { w.medium.Files[w.path] = string(w.data) w.medium.ModTimes[w.path] = time.Now() diff --git a/local/client.go b/local/client.go index 942e12f..2039564 100644 --- a/local/client.go +++ b/local/client.go @@ -42,12 +42,12 @@ func dirSeparator() string { return "/" } -func normalisePath(p string) string { +func normalisePath(path string) string { sep := dirSeparator() if sep == "/" { - return core.Replace(p, "\\", sep) + return core.Replace(path, "\\", sep) } - return core.Replace(p, "/", sep) + return core.Replace(path, "/", sep) } func currentWorkingDir() string { @@ -57,20 +57,20 @@ func currentWorkingDir() string { return "." } -func absolutePath(p string) string { - p = normalisePath(p) - if core.PathIsAbs(p) { - return core.Path(p) +func absolutePath(path string) string { + path = normalisePath(path) + if core.PathIsAbs(path) { + return core.Path(path) } - return core.Path(currentWorkingDir(), p) + return core.Path(currentWorkingDir(), path) } -func cleanSandboxPath(p string) string { - return core.Path(dirSeparator() + normalisePath(p)) +func cleanSandboxPath(path string) string { + return core.Path(dirSeparator() + normalisePath(path)) } -func splitPathParts(p string) []string { - trimmed := core.TrimPrefix(p, dirSeparator()) +func splitPathParts(path string) []string { + trimmed := core.TrimPrefix(path, dirSeparator()) if trimmed == "" { return nil } @@ -84,18 +84,18 @@ func splitPathParts(p string) []string { return parts } -func resolveSymlinksPath(p string) (string, error) { - return resolveSymlinksRecursive(absolutePath(p), map[string]struct{}{}) +func resolveSymlinksPath(path string) (string, error) { + return resolveSymlinksRecursive(absolutePath(path), map[string]struct{}{}) } -func resolveSymlinksRecursive(p string, seen map[string]struct{}) (string, error) { - p = core.Path(p) - if p == dirSeparator() { - return p, nil +func resolveSymlinksRecursive(path string, seen map[string]struct{}) (string, error) { + path 
= core.Path(path) + if path == dirSeparator() { + return path, nil } current := dirSeparator() - for _, part := range splitPathParts(p) { + for _, part := range splitPathParts(path) { next := core.Path(current, part) info, err := lstat(next) if err != nil { @@ -144,14 +144,14 @@ func isWithinRoot(root, target string) bool { return target == root || core.HasPrefix(target, root+dirSeparator()) } -func canonicalPath(p string) string { - if p == "" { +func canonicalPath(path string) string { + if path == "" { return "" } - if resolved, err := resolveSymlinksPath(p); err == nil { + if resolved, err := resolveSymlinksPath(path); err == nil { return resolved } - return absolutePath(p) + return absolutePath(path) } func isProtectedPath(fullPath string) bool { @@ -179,21 +179,21 @@ func logSandboxEscape(root, path, attempted string) { // sandboxedPath sanitises and returns the full filesystem path. // Absolute paths are sandboxed under root (unless root is "/"). -func (m *Medium) sandboxedPath(p string) string { - if p == "" { +func (m *Medium) sandboxedPath(path string) string { + if path == "" { return m.filesystemRoot } // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. - if m.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(p)) { - return core.Path(currentWorkingDir(), normalisePath(p)) + if m.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(path)) { + return core.Path(currentWorkingDir(), normalisePath(path)) } // Use a cleaned absolute path to resolve all .. and . internally // before joining with the root. This is a standard way to sandbox paths. 
- clean := cleanSandboxPath(p) + clean := cleanSandboxPath(path) // If root is "/", allow absolute paths through if m.filesystemRoot == dirSeparator() { @@ -205,13 +205,13 @@ func (m *Medium) sandboxedPath(p string) string { } // validatePath ensures the path is within the sandbox, following symlinks if they exist. -func (m *Medium) validatePath(p string) (string, error) { +func (m *Medium) validatePath(path string) (string, error) { if m.filesystemRoot == dirSeparator() { - return m.sandboxedPath(p), nil + return m.sandboxedPath(path), nil } // Split the cleaned path into components - parts := splitPathParts(cleanSandboxPath(p)) + parts := splitPathParts(cleanSandboxPath(path)) current := m.filesystemRoot for _, part := range parts { @@ -231,7 +231,7 @@ func (m *Medium) validatePath(p string) (string, error) { // Verify the resolved part is still within the root if !isWithinRoot(m.filesystemRoot, realNext) { // Security event: sandbox escape attempt - logSandboxEscape(m.filesystemRoot, p, realNext) + logSandboxEscape(m.filesystemRoot, path, realNext) return "", fs.ErrPermission } current = realNext @@ -243,12 +243,12 @@ func (m *Medium) validatePath(p string) (string, error) { // Read returns file contents as string. // // result := m.Read(...) -func (m *Medium) Read(p string) (string, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Read(path string) (string, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return "", err } - return resultString("local.Read", core.Concat("read failed: ", p), unrestrictedFileSystem.Read(resolvedPath)) + return resultString("local.Read", core.Concat("read failed: ", path), unrestrictedFileSystem.Read(resolvedPath)) } // Write saves content to file, creating parent directories as needed. @@ -256,41 +256,41 @@ func (m *Medium) Read(p string) (string, error) { // use WriteMode with 0600. // // result := m.Write(...) 
-func (m *Medium) Write(p, content string) error { - return m.WriteMode(p, content, 0644) +func (m *Medium) Write(path, content string) error { + return m.WriteMode(path, content, 0644) } // WriteMode saves content to file with explicit permissions. // Use 0600 for sensitive files (encryption output, private keys, auth hashes). // // result := m.WriteMode(...) -func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { - resolvedPath, err := m.validatePath(p) +func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { + resolvedPath, err := m.validatePath(path) if err != nil { return err } - return resultErr("local.WriteMode", core.Concat("write failed: ", p), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) + return resultErr("local.WriteMode", core.Concat("write failed: ", path), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } // EnsureDir creates directory if it doesn't exist. // // result := m.EnsureDir(...) -func (m *Medium) EnsureDir(p string) error { - resolvedPath, err := m.validatePath(p) +func (m *Medium) EnsureDir(path string) error { + resolvedPath, err := m.validatePath(path) if err != nil { return err } - return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", p), unrestrictedFileSystem.EnsureDir(resolvedPath)) + return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) } // IsDir returns true if path is a directory. // // result := m.IsDir(...) -func (m *Medium) IsDir(p string) bool { - if p == "" { +func (m *Medium) IsDir(path string) bool { + if path == "" { return false } - resolvedPath, err := m.validatePath(p) + resolvedPath, err := m.validatePath(path) if err != nil { return false } @@ -300,11 +300,11 @@ func (m *Medium) IsDir(p string) bool { // IsFile returns true if path is a regular file. // // result := m.IsFile(...) 
-func (m *Medium) IsFile(p string) bool { - if p == "" { +func (m *Medium) IsFile(path string) bool { + if path == "" { return false } - resolvedPath, err := m.validatePath(p) + resolvedPath, err := m.validatePath(path) if err != nil { return false } @@ -314,8 +314,8 @@ func (m *Medium) IsFile(p string) bool { // Exists returns true if path exists. // // result := m.Exists(...) -func (m *Medium) Exists(p string) bool { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Exists(path string) bool { + resolvedPath, err := m.validatePath(path) if err != nil { return false } @@ -325,56 +325,56 @@ func (m *Medium) Exists(p string) bool { // List returns directory entries. // // result := m.List(...) -func (m *Medium) List(p string) ([]fs.DirEntry, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) List(path string) ([]fs.DirEntry, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return nil, err } - return resultDirEntries("local.List", core.Concat("list failed: ", p), unrestrictedFileSystem.List(resolvedPath)) + return resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) } // Stat returns file info. // // result := m.Stat(...) -func (m *Medium) Stat(p string) (fs.FileInfo, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Stat(path string) (fs.FileInfo, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return nil, err } - return resultFileInfo("local.Stat", core.Concat("stat failed: ", p), unrestrictedFileSystem.Stat(resolvedPath)) + return resultFileInfo("local.Stat", core.Concat("stat failed: ", path), unrestrictedFileSystem.Stat(resolvedPath)) } // Open opens the named file for reading. // // result := m.Open(...) 
-func (m *Medium) Open(p string) (fs.File, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Open(path string) (fs.File, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return nil, err } - return resultFile("local.Open", core.Concat("open failed: ", p), unrestrictedFileSystem.Open(resolvedPath)) + return resultFile("local.Open", core.Concat("open failed: ", path), unrestrictedFileSystem.Open(resolvedPath)) } // Create creates or truncates the named file. // // result := m.Create(...) -func (m *Medium) Create(p string) (goio.WriteCloser, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Create(path string) (goio.WriteCloser, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return nil, err } - return resultWriteCloser("local.Create", core.Concat("create failed: ", p), unrestrictedFileSystem.Create(resolvedPath)) + return resultWriteCloser("local.Create", core.Concat("create failed: ", path), unrestrictedFileSystem.Create(resolvedPath)) } // Append opens the named file for appending, creating it if it doesn't exist. // // result := m.Append(...) -func (m *Medium) Append(p string) (goio.WriteCloser, error) { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Append(path string) (goio.WriteCloser, error) { + resolvedPath, err := m.validatePath(path) if err != nil { return nil, err } - return resultWriteCloser("local.Append", core.Concat("append failed: ", p), unrestrictedFileSystem.Append(resolvedPath)) + return resultWriteCloser("local.Append", core.Concat("append failed: ", path), unrestrictedFileSystem.Append(resolvedPath)) } // ReadStream returns a reader for the file content. @@ -404,29 +404,29 @@ func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { // Delete removes a file or empty directory. // // result := m.Delete(...) 
-func (m *Medium) Delete(p string) error { - resolvedPath, err := m.validatePath(p) +func (m *Medium) Delete(path string) error { + resolvedPath, err := m.validatePath(path) if err != nil { return err } if isProtectedPath(resolvedPath) { return core.E("local.Delete", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.Delete", core.Concat("delete failed: ", p), unrestrictedFileSystem.Delete(resolvedPath)) + return resultErr("local.Delete", core.Concat("delete failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) } // DeleteAll removes a file or directory recursively. // // result := m.DeleteAll(...) -func (m *Medium) DeleteAll(p string) error { - resolvedPath, err := m.validatePath(p) +func (m *Medium) DeleteAll(path string) error { + resolvedPath, err := m.validatePath(path) if err != nil { return err } if isProtectedPath(resolvedPath) { return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.DeleteAll", core.Concat("delete all failed: ", p), unrestrictedFileSystem.DeleteAll(resolvedPath)) + return resultErr("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) } // Rename moves a file or directory. @@ -444,18 +444,12 @@ func (m *Medium) Rename(oldPath, newPath string) error { return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) } -// FileGet is an alias for Read. -// -// result := m.FileGet(...) -func (m *Medium) FileGet(p string) (string, error) { - return m.Read(p) +func (m *Medium) FileGet(path string) (string, error) { + return m.Read(path) } -// FileSet is an alias for Write. -// -// result := m.FileSet(...) 
-func (m *Medium) FileSet(p, content string) error { - return m.Write(p, content) +func (m *Medium) FileSet(path, content string) error { + return m.Write(path, content) } func lstat(path string) (*syscall.Stat_t, error) { @@ -485,67 +479,67 @@ func readlink(path string) (string, error) { } } -func resultErr(op, msg string, result core.Result) error { +func resultErr(operation, message string, result core.Result) error { if result.OK { return nil } if err, ok := result.Value.(error); ok { - return core.E(op, msg, err) + return core.E(operation, message, err) } - return core.E(op, msg, nil) + return core.E(operation, message, nil) } -func resultString(op, msg string, result core.Result) (string, error) { +func resultString(operation, message string, result core.Result) (string, error) { if !result.OK { - return "", resultErr(op, msg, result) + return "", resultErr(operation, message, result) } value, ok := result.Value.(string) if !ok { - return "", core.E(op, "unexpected result type", nil) + return "", core.E(operation, "unexpected result type", nil) } return value, nil } -func resultDirEntries(op, msg string, result core.Result) ([]fs.DirEntry, error) { +func resultDirEntries(operation, message string, result core.Result) ([]fs.DirEntry, error) { if !result.OK { - return nil, resultErr(op, msg, result) + return nil, resultErr(operation, message, result) } entries, ok := result.Value.([]fs.DirEntry) if !ok { - return nil, core.E(op, "unexpected result type", nil) + return nil, core.E(operation, "unexpected result type", nil) } return entries, nil } -func resultFileInfo(op, msg string, result core.Result) (fs.FileInfo, error) { +func resultFileInfo(operation, message string, result core.Result) (fs.FileInfo, error) { if !result.OK { - return nil, resultErr(op, msg, result) + return nil, resultErr(operation, message, result) } fileInfo, ok := result.Value.(fs.FileInfo) if !ok { - return nil, core.E(op, "unexpected result type", nil) + return nil, core.E(operation, 
"unexpected result type", nil) } return fileInfo, nil } -func resultFile(op, msg string, result core.Result) (fs.File, error) { +func resultFile(operation, message string, result core.Result) (fs.File, error) { if !result.OK { - return nil, resultErr(op, msg, result) + return nil, resultErr(operation, message, result) } file, ok := result.Value.(fs.File) if !ok { - return nil, core.E(op, "unexpected result type", nil) + return nil, core.E(operation, "unexpected result type", nil) } return file, nil } -func resultWriteCloser(op, msg string, result core.Result) (goio.WriteCloser, error) { +func resultWriteCloser(operation, message string, result core.Result) (goio.WriteCloser, error) { if !result.OK { - return nil, resultErr(op, msg, result) + return nil, resultErr(operation, message, result) } writer, ok := result.Value.(goio.WriteCloser) if !ok { - return nil, core.E(op, "unexpected result type", nil) + return nil, core.E(operation, "unexpected result type", nil) } return writer, nil } diff --git a/node/node.go b/node/node.go index 8ae1423..12efee3 100644 --- a/node/node.go +++ b/node/node.go @@ -149,7 +149,7 @@ type WalkOptions struct { MaxDepth int // Filter, if set, is called for each entry. Return true to include the // entry (and descend into it if it is a directory). - Filter func(path string, d fs.DirEntry) bool + Filter func(entryPath string, entry fs.DirEntry) bool // SkipErrors suppresses errors (e.g. nonexistent root) instead of // propagating them through the callback. 
SkipErrors bool @@ -171,10 +171,10 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { } } - return fs.WalkDir(n, root, func(p string, d fs.DirEntry, err error) error { + return fs.WalkDir(n, root, func(entryPath string, entry fs.DirEntry, err error) error { if opt.Filter != nil && err == nil { - if !opt.Filter(p, d) { - if d != nil && d.IsDir() { + if !opt.Filter(entryPath, entry) { + if entry != nil && entry.IsDir() { return fs.SkipDir } return nil @@ -182,11 +182,11 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { } // Call the user's function first so the entry is visited. - result := fn(p, d, err) + result := fn(entryPath, entry, err) // After visiting a directory at MaxDepth, prevent descending further. - if result == nil && opt.MaxDepth > 0 && d != nil && d.IsDir() && p != root { - rel := core.TrimPrefix(p, root) + if result == nil && opt.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { + rel := core.TrimPrefix(entryPath, root) rel = core.TrimPrefix(rel, "/") depth := len(core.Split(rel, "/")) if depth >= opt.MaxDepth { @@ -217,25 +217,25 @@ func (n *Node) ReadFile(name string) ([]byte, error) { // CopyFile copies a file from the in-memory tree to the local filesystem. // // result := n.CopyFile(...) -func (n *Node) CopyFile(src, dst string, perm fs.FileMode) error { - src = core.TrimPrefix(src, "/") - f, ok := n.files[src] +func (n *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { + sourcePath = core.TrimPrefix(sourcePath, "/") + f, ok := n.files[sourcePath] if !ok { // Check if it's a directory — can't copy directories this way. 
- info, err := n.Stat(src) + info, err := n.Stat(sourcePath) if err != nil { - return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist) + return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) } if info.IsDir() { - return core.E("node.CopyFile", core.Concat("source is a directory: ", src), fs.ErrInvalid) + return core.E("node.CopyFile", core.Concat("source is a directory: ", sourcePath), fs.ErrInvalid) } - return core.E("node.CopyFile", core.Concat("source not found: ", src), fs.ErrNotExist) + return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) } - parent := core.PathDir(dst) - if parent != "." && parent != "" && parent != dst && !coreio.Local.IsDir(parent) { - return &fs.PathError{Op: "copyfile", Path: dst, Err: fs.ErrNotExist} + parent := core.PathDir(destinationPath) + if parent != "." && parent != "" && parent != destinationPath && !coreio.Local.IsDir(parent) { + return &fs.PathError{Op: "copyfile", Path: destinationPath, Err: fs.ErrNotExist} } - return coreio.Local.WriteMode(dst, string(f.content), perm) + return coreio.Local.WriteMode(destinationPath, string(f.content), perm) } // CopyTo copies a file (or directory tree) from the node to any Medium. @@ -266,11 +266,11 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { prefix += "/" } - for p, f := range n.files { - if !core.HasPrefix(p, prefix) && p != sourcePath { + for filePath, f := range n.files { + if !core.HasPrefix(filePath, prefix) && filePath != sourcePath { continue } - rel := core.TrimPrefix(p, prefix) + rel := core.TrimPrefix(filePath, prefix) dest := destPath if rel != "" { dest = core.Concat(destPath, "/", rel) @@ -297,8 +297,8 @@ func (n *Node) Open(name string) (fs.File, error) { if name == "." 
|| name == "" { prefix = "" } - for p := range n.files { - if core.HasPrefix(p, prefix) { + for filePath := range n.files { + if core.HasPrefix(filePath, prefix) { return &dirFile{path: name, modTime: time.Now()}, nil } } @@ -318,8 +318,8 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { if name == "." || name == "" { prefix = "" } - for p := range n.files { - if core.HasPrefix(p, prefix) { + for filePath := range n.files { + if core.HasPrefix(filePath, prefix) { return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil } } @@ -348,12 +348,12 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { prefix = name + "/" } - for p := range n.files { - if !core.HasPrefix(p, prefix) { + for filePath := range n.files { + if !core.HasPrefix(filePath, prefix) { continue } - relPath := core.TrimPrefix(p, prefix) + relPath := core.TrimPrefix(filePath, prefix) firstComponent := core.SplitN(relPath, "/", 2)[0] if seen[firstComponent] { @@ -365,7 +365,7 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { dir := &dirInfo{name: firstComponent, modTime: time.Now()} entries = append(entries, fs.FileInfoToDirEntry(dir)) } else { - file := n.files[p] + file := n.files[filePath] info, _ := file.Stat() entries = append(entries, fs.FileInfoToDirEntry(info)) } @@ -383,11 +383,11 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // Read retrieves the content of a file as a string. // // result := n.Read(...) 
-func (n *Node) Read(p string) (string, error) { - p = core.TrimPrefix(p, "/") - f, ok := n.files[p] +func (n *Node) Read(filePath string) (string, error) { + filePath = core.TrimPrefix(filePath, "/") + f, ok := n.files[filePath] if !ok { - return "", core.E("node.Read", core.Concat("path not found: ", p), fs.ErrNotExist) + return "", core.E("node.Read", core.Concat("path not found: ", filePath), fs.ErrNotExist) } return string(f.content), nil } @@ -395,30 +395,24 @@ func (n *Node) Read(p string) (string, error) { // Write saves the given content to a file, overwriting it if it exists. // // result := n.Write(...) -func (n *Node) Write(p, content string) error { - n.AddData(p, []byte(content)) +func (n *Node) Write(filePath, content string) error { + n.AddData(filePath, []byte(content)) return nil } // WriteMode saves content with explicit permissions (no-op for in-memory node). // // result := n.WriteMode(...) -func (n *Node) WriteMode(p, content string, mode fs.FileMode) error { - return n.Write(p, content) +func (n *Node) WriteMode(filePath, content string, mode fs.FileMode) error { + return n.Write(filePath, content) } -// FileGet is an alias for Read. -// -// result := n.FileGet(...) -func (n *Node) FileGet(p string) (string, error) { - return n.Read(p) +func (n *Node) FileGet(filePath string) (string, error) { + return n.Read(filePath) } -// FileSet is an alias for Write. -// -// result := n.FileSet(...) -func (n *Node) FileSet(p, content string) error { - return n.Write(p, content) +func (n *Node) FileSet(filePath, content string) error { + return n.Write(filePath, content) } // EnsureDir is a no-op because directories are implicit in Node. @@ -433,25 +427,25 @@ func (n *Node) EnsureDir(_ string) error { // Exists checks if a path exists (file or directory). // // result := n.Exists(...) 
-func (n *Node) Exists(p string) bool { - _, err := n.Stat(p) +func (n *Node) Exists(filePath string) bool { + _, err := n.Stat(filePath) return err == nil } // IsFile checks if a path exists and is a regular file. // // result := n.IsFile(...) -func (n *Node) IsFile(p string) bool { - p = core.TrimPrefix(p, "/") - _, ok := n.files[p] +func (n *Node) IsFile(filePath string) bool { + filePath = core.TrimPrefix(filePath, "/") + _, ok := n.files[filePath] return ok } // IsDir checks if a path exists and is a directory. // // result := n.IsDir(...) -func (n *Node) IsDir(p string) bool { - info, err := n.Stat(p) +func (n *Node) IsDir(filePath string) bool { + info, err := n.Stat(filePath) if err != nil { return false } @@ -463,37 +457,37 @@ func (n *Node) IsDir(p string) bool { // Delete removes a single file. // // result := n.Delete(...) -func (n *Node) Delete(p string) error { - p = core.TrimPrefix(p, "/") - if _, ok := n.files[p]; ok { - delete(n.files, p) +func (n *Node) Delete(filePath string) error { + filePath = core.TrimPrefix(filePath, "/") + if _, ok := n.files[filePath]; ok { + delete(n.files, filePath) return nil } - return core.E("node.Delete", core.Concat("path not found: ", p), fs.ErrNotExist) + return core.E("node.Delete", core.Concat("path not found: ", filePath), fs.ErrNotExist) } // DeleteAll removes a file or directory and all children. // // result := n.DeleteAll(...) 
-func (n *Node) DeleteAll(p string) error { - p = core.TrimPrefix(p, "/") +func (n *Node) DeleteAll(filePath string) error { + filePath = core.TrimPrefix(filePath, "/") found := false - if _, ok := n.files[p]; ok { - delete(n.files, p) + if _, ok := n.files[filePath]; ok { + delete(n.files, filePath) found = true } - prefix := p + "/" - for k := range n.files { - if core.HasPrefix(k, prefix) { - delete(n.files, k) + prefix := filePath + "/" + for entryPath := range n.files { + if core.HasPrefix(entryPath, prefix) { + delete(n.files, entryPath) found = true } } if !found { - return core.E("node.DeleteAll", core.Concat("path not found: ", p), fs.ErrNotExist) + return core.E("node.DeleteAll", core.Concat("path not found: ", filePath), fs.ErrNotExist) } return nil } @@ -519,12 +513,12 @@ func (n *Node) Rename(oldPath, newPath string) error { // List returns directory entries for the given path. // // result := n.List(...) -func (n *Node) List(p string) ([]fs.DirEntry, error) { - p = core.TrimPrefix(p, "/") - if p == "" || p == "." { +func (n *Node) List(filePath string) ([]fs.DirEntry, error) { + filePath = core.TrimPrefix(filePath, "/") + if filePath == "" || filePath == "." { return n.ReadDir(".") } - return n.ReadDir(p) + return n.ReadDir(filePath) } // ---------- Medium interface: streams ---------- @@ -533,30 +527,30 @@ func (n *Node) List(p string) ([]fs.DirEntry, error) { // Content is committed to the Node on Close. // // result := n.Create(...) -func (n *Node) Create(p string) (goio.WriteCloser, error) { - p = core.TrimPrefix(p, "/") - return &nodeWriter{node: n, path: p}, nil +func (n *Node) Create(filePath string) (goio.WriteCloser, error) { + filePath = core.TrimPrefix(filePath, "/") + return &nodeWriter{node: n, path: filePath}, nil } // Append opens the named file for appending, creating it if needed. // Content is committed to the Node on Close. // // result := n.Append(...) 
-func (n *Node) Append(p string) (goio.WriteCloser, error) { - p = core.TrimPrefix(p, "/") +func (n *Node) Append(filePath string) (goio.WriteCloser, error) { + filePath = core.TrimPrefix(filePath, "/") var existing []byte - if f, ok := n.files[p]; ok { + if f, ok := n.files[filePath]; ok { existing = make([]byte, len(f.content)) copy(existing, f.content) } - return &nodeWriter{node: n, path: p, buf: existing}, nil + return &nodeWriter{node: n, path: filePath, buf: existing}, nil } // ReadStream returns a ReadCloser for the file content. // // result := n.ReadStream(...) -func (n *Node) ReadStream(p string) (goio.ReadCloser, error) { - f, err := n.Open(p) +func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { + f, err := n.Open(filePath) if err != nil { return nil, err } @@ -566,8 +560,8 @@ func (n *Node) ReadStream(p string) (goio.ReadCloser, error) { // WriteStream returns a WriteCloser for the file content. // // result := n.WriteStream(...) -func (n *Node) WriteStream(p string) (goio.WriteCloser, error) { - return n.Create(p) +func (n *Node) WriteStream(filePath string) (goio.WriteCloser, error) { + return n.Create(filePath) } // ---------- Internal types ---------- @@ -579,17 +573,11 @@ type nodeWriter struct { buf []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *nodeWriter) Write(p []byte) (int, error) { w.buf = append(w.buf, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) func (w *nodeWriter) Close() error { w.node.files[w.path] = &dataFile{ name: w.path, @@ -606,52 +594,25 @@ type dataFile struct { modTime time.Time } -// Stat documents the Stat operation. -// -// result := d.Stat(...) func (d *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: d}, nil } -// Read documents the Read operation. -// -// result := d.Read(...) func (d *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } -// Close documents the Close operation. 
-// -// result := d.Close(...) func (d *dataFile) Close() error { return nil } // dataFileInfo implements fs.FileInfo for a dataFile. type dataFileInfo struct{ file *dataFile } -// Name documents the Name operation. -// -// result := d.Name(...) func (d *dataFileInfo) Name() string { return path.Base(d.file.name) } -// Size documents the Size operation. -// -// result := d.Size(...) func (d *dataFileInfo) Size() int64 { return int64(len(d.file.content)) } -// Mode documents the Mode operation. -// -// result := d.Mode(...) func (d *dataFileInfo) Mode() fs.FileMode { return 0444 } -// ModTime documents the ModTime operation. -// -// result := d.ModTime(...) func (d *dataFileInfo) ModTime() time.Time { return d.file.modTime } -// IsDir documents the IsDir operation. -// -// result := d.IsDir(...) func (d *dataFileInfo) IsDir() bool { return false } -// Sys documents the Sys operation. -// -// result := d.Sys(...) func (d *dataFileInfo) Sys() any { return nil } // dataFileReader implements fs.File for reading a dataFile. @@ -660,14 +621,8 @@ type dataFileReader struct { reader *bytes.Reader } -// Stat documents the Stat operation. -// -// result := d.Stat(...) func (d *dataFileReader) Stat() (fs.FileInfo, error) { return d.file.Stat() } -// Read documents the Read operation. -// -// result := d.Read(...) func (d *dataFileReader) Read(p []byte) (int, error) { if d.reader == nil { d.reader = bytes.NewReader(d.file.content) @@ -675,9 +630,6 @@ func (d *dataFileReader) Read(p []byte) (int, error) { return d.reader.Read(p) } -// Close documents the Close operation. -// -// result := d.Close(...) func (d *dataFileReader) Close() error { return nil } // dirInfo implements fs.FileInfo for an implicit directory. @@ -686,34 +638,16 @@ type dirInfo struct { modTime time.Time } -// Name documents the Name operation. -// -// result := d.Name(...) func (d *dirInfo) Name() string { return d.name } -// Size documents the Size operation. -// -// result := d.Size(...) 
func (d *dirInfo) Size() int64 { return 0 } -// Mode documents the Mode operation. -// -// result := d.Mode(...) func (d *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } -// ModTime documents the ModTime operation. -// -// result := d.ModTime(...) func (d *dirInfo) ModTime() time.Time { return d.modTime } -// IsDir documents the IsDir operation. -// -// result := d.IsDir(...) func (d *dirInfo) IsDir() bool { return true } -// Sys documents the Sys operation. -// -// result := d.Sys(...) func (d *dirInfo) Sys() any { return nil } // dirFile implements fs.File for a directory. @@ -722,23 +656,14 @@ type dirFile struct { modTime time.Time } -// Stat documents the Stat operation. -// -// result := d.Stat(...) func (d *dirFile) Stat() (fs.FileInfo, error) { return &dirInfo{name: path.Base(d.path), modTime: d.modTime}, nil } -// Read documents the Read operation. -// -// result := d.Read(...) func (d *dirFile) Read([]byte) (int, error) { return 0, core.E("node.dirFile.Read", core.Concat("cannot read directory: ", d.path), &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}) } -// Close documents the Close operation. -// -// result := d.Close(...) func (d *dirFile) Close() error { return nil } // Ensure Node implements fs.FS so WalkDir works. 
diff --git a/s3/s3.go b/s3/s3.go index 7a0df2e..a074c56 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -56,14 +56,14 @@ func deleteObjectsError(prefix string, errs []types.Error) error { for _, item := range errs { key := aws.ToString(item.Key) code := aws.ToString(item.Code) - msg := aws.ToString(item.Message) + message := aws.ToString(item.Message) switch { - case code != "" && msg != "": - details = append(details, core.Concat(key, ": ", code, " ", msg)) + case code != "" && message != "": + details = append(details, core.Concat(key, ": ", code, " ", message)) case code != "": details = append(details, core.Concat(key, ": ", code)) - case msg != "": - details = append(details, core.Concat(key, ": ", msg)) + case message != "": + details = append(details, core.Concat(key, ": ", message)) default: details = append(details, key) } @@ -109,10 +109,10 @@ func New(options Options) (*Medium, error) { } // key returns the full S3 object key for a given path. -func (m *Medium) key(p string) string { +func (m *Medium) key(filePath string) string { // Clean the path using a leading "/" to sandbox traversal attempts, // then strip the "/" prefix. This ensures ".." can't escape. - clean := path.Clean("/" + p) + clean := path.Clean("/" + filePath) if clean == "/" { clean = "" } @@ -130,8 +130,8 @@ func (m *Medium) key(p string) string { // Read retrieves the content of a file as a string. // // result := m.Read(...) -func (m *Medium) Read(p string) (string, error) { - key := m.key(p) +func (m *Medium) Read(filePath string) (string, error) { + key := m.key(filePath) if key == "" { return "", core.E("s3.Read", "path is required", fs.ErrInvalid) } @@ -155,8 +155,8 @@ func (m *Medium) Read(p string) (string, error) { // Write saves the given content to a file, overwriting it if it exists. // // result := m.Write(...) 
-func (m *Medium) Write(p, content string) error { - key := m.key(p) +func (m *Medium) Write(filePath, content string) error { + key := m.key(filePath) if key == "" { return core.E("s3.Write", "path is required", fs.ErrInvalid) } @@ -175,8 +175,8 @@ func (m *Medium) Write(p, content string) error { // WriteMode ignores the requested mode because S3 objects do not store POSIX permissions. // // result := m.WriteMode(...) -func (m *Medium) WriteMode(p, content string, _ fs.FileMode) error { - return m.Write(p, content) +func (m *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { + return m.Write(filePath, content) } // EnsureDir is a no-op for S3 (S3 has no real directories). @@ -189,8 +189,8 @@ func (m *Medium) EnsureDir(_ string) error { // IsFile checks if a path exists and is a regular file (not a "directory" prefix). // // result := m.IsFile(...) -func (m *Medium) IsFile(p string) bool { - key := m.key(p) +func (m *Medium) IsFile(filePath string) bool { + key := m.key(filePath) if key == "" { return false } @@ -208,22 +208,22 @@ func (m *Medium) IsFile(p string) bool { // FileGet is a convenience function that reads a file from the medium. // // result := m.FileGet(...) -func (m *Medium) FileGet(p string) (string, error) { - return m.Read(p) +func (m *Medium) FileGet(filePath string) (string, error) { + return m.Read(filePath) } // FileSet is a convenience function that writes a file to the medium. // // result := m.FileSet(...) -func (m *Medium) FileSet(p, content string) error { - return m.Write(p, content) +func (m *Medium) FileSet(filePath, content string) error { + return m.Write(filePath, content) } // Delete removes a single object. // // result := m.Delete(...) 
-func (m *Medium) Delete(p string) error { - key := m.key(p) +func (m *Medium) Delete(filePath string) error { + key := m.key(filePath) if key == "" { return core.E("s3.Delete", "path is required", fs.ErrInvalid) } @@ -241,8 +241,8 @@ func (m *Medium) Delete(p string) error { // DeleteAll removes all objects under the given prefix. // // result := m.DeleteAll(...) -func (m *Medium) DeleteAll(p string) error { - key := m.key(p) +func (m *Medium) DeleteAll(filePath string) error { + key := m.key(filePath) if key == "" { return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } @@ -340,8 +340,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns directory entries for the given path using ListObjectsV2 with delimiter. // // result := m.List(...) -func (m *Medium) List(p string) ([]fs.DirEntry, error) { - prefix := m.key(p) +func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { + prefix := m.key(filePath) if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -415,8 +415,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { // Stat returns file information for the given path using HeadObject. // // result := m.Stat(...) -func (m *Medium) Stat(p string) (fs.FileInfo, error) { - key := m.key(p) +func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { + key := m.key(filePath) if key == "" { return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid) } @@ -450,8 +450,8 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { // Open opens the named file for reading. // // result := m.Open(...) -func (m *Medium) Open(p string) (fs.File, error) { - key := m.key(p) +func (m *Medium) Open(filePath string) (fs.File, error) { + key := m.key(filePath) if key == "" { return nil, core.E("s3.Open", "path is required", fs.ErrInvalid) } @@ -491,8 +491,8 @@ func (m *Medium) Open(p string) (fs.File, error) { // uploads the content on Close. // // result := m.Create(...) 
-func (m *Medium) Create(p string) (goio.WriteCloser, error) { - key := m.key(p) +func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { + key := m.key(filePath) if key == "" { return nil, core.E("s3.Create", "path is required", fs.ErrInvalid) } @@ -506,8 +506,8 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { // content (if any) and re-uploads the combined content on Close. // // result := m.Append(...) -func (m *Medium) Append(p string) (goio.WriteCloser, error) { - key := m.key(p) +func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { + key := m.key(filePath) if key == "" { return nil, core.E("s3.Append", "path is required", fs.ErrInvalid) } @@ -532,8 +532,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // ReadStream returns a reader for the file content. // // result := m.ReadStream(...) -func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { - key := m.key(p) +func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + key := m.key(filePath) if key == "" { return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid) } @@ -551,15 +551,15 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { // WriteStream returns a writer for the file content. Content is uploaded on Close. // // result := m.WriteStream(...) -func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { - return m.Create(p) +func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return m.Create(filePath) } // Exists checks if a path exists (file or directory prefix). // // result := m.Exists(...) -func (m *Medium) Exists(p string) bool { - key := m.key(p) +func (m *Medium) Exists(filePath string) bool { + key := m.key(filePath) if key == "" { return false } @@ -592,8 +592,8 @@ func (m *Medium) Exists(p string) bool { // IsDir checks if a path exists and is a directory (has objects under it as a prefix). // // result := m.IsDir(...) 
-func (m *Medium) IsDir(p string) bool { - key := m.key(p) +func (m *Medium) IsDir(filePath string) bool { + key := m.key(filePath) if key == "" { return false } @@ -625,34 +625,16 @@ type fileInfo struct { isDir bool } -// Name documents the Name operation. -// -// result := fi.Name(...) func (fi *fileInfo) Name() string { return fi.name } -// Size documents the Size operation. -// -// result := fi.Size(...) func (fi *fileInfo) Size() int64 { return fi.size } -// Mode documents the Mode operation. -// -// result := fi.Mode(...) func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } -// ModTime documents the ModTime operation. -// -// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -// IsDir documents the IsDir operation. -// -// result := fi.IsDir(...) func (fi *fileInfo) IsDir() bool { return fi.isDir } -// Sys documents the Sys operation. -// -// result := fi.Sys(...) func (fi *fileInfo) Sys() any { return nil } // dirEntry implements fs.DirEntry for S3 listings. @@ -663,24 +645,12 @@ type dirEntry struct { info fs.FileInfo } -// Name documents the Name operation. -// -// result := de.Name(...) func (de *dirEntry) Name() string { return de.name } -// IsDir documents the IsDir operation. -// -// result := de.IsDir(...) func (de *dirEntry) IsDir() bool { return de.isDir } -// Type documents the Type operation. -// -// result := de.Type(...) func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } -// Info documents the Info operation. -// -// result := de.Info(...) func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // s3File implements fs.File for S3 objects. @@ -692,9 +662,6 @@ type s3File struct { modTime time.Time } -// Stat documents the Stat operation. -// -// result := f.Stat(...) func (f *s3File) Stat() (fs.FileInfo, error) { return &fileInfo{ name: f.name, @@ -704,9 +671,6 @@ func (f *s3File) Stat() (fs.FileInfo, error) { }, nil } -// Read documents the Read operation. 
-// -// result := f.Read(...) func (f *s3File) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -716,9 +680,6 @@ func (f *s3File) Read(b []byte) (int, error) { return n, nil } -// Close documents the Close operation. -// -// result := f.Close(...) func (f *s3File) Close() error { return nil } @@ -730,17 +691,11 @@ type s3WriteCloser struct { data []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *s3WriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) func (w *s3WriteCloser) Close() error { _, err := w.medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ Bucket: aws.String(w.medium.bucket), diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index bff84ec..bc630d1 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -92,8 +92,8 @@ func (m *Medium) Close() error { // cleanPath normalises a path for consistent storage. // Uses a leading "/" before Clean to sandbox traversal attempts. -func cleanPath(p string) string { - clean := path.Clean("/" + p) +func cleanPath(filePath string) string { + clean := path.Clean("/" + filePath) if clean == "/" { return "" } @@ -103,8 +103,8 @@ func cleanPath(p string) string { // Read retrieves the content of a file as a string. // // result := m.Read(...) -func (m *Medium) Read(p string) (string, error) { - key := cleanPath(p) +func (m *Medium) Read(filePath string) (string, error) { + key := cleanPath(filePath) if key == "" { return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid) } @@ -129,15 +129,15 @@ func (m *Medium) Read(p string) (string, error) { // Write saves the given content to a file, overwriting it if it exists. // // result := m.Write(...) 
-func (m *Medium) Write(p, content string) error { - return m.WriteMode(p, content, 0644) +func (m *Medium) Write(filePath, content string) error { + return m.WriteMode(filePath, content, 0644) } // WriteMode saves the given content with explicit permissions. // // result := m.WriteMode(...) -func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { - key := cleanPath(p) +func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + key := cleanPath(filePath) if key == "" { return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid) } @@ -156,8 +156,8 @@ func (m *Medium) WriteMode(p, content string, mode fs.FileMode) error { // EnsureDir makes sure a directory exists, creating it if necessary. // // result := m.EnsureDir(...) -func (m *Medium) EnsureDir(p string) error { - key := cleanPath(p) +func (m *Medium) EnsureDir(filePath string) error { + key := cleanPath(filePath) if key == "" { // Root always "exists" return nil @@ -177,8 +177,8 @@ func (m *Medium) EnsureDir(p string) error { // IsFile checks if a path exists and is a regular file. // // result := m.IsFile(...) -func (m *Medium) IsFile(p string) bool { - key := cleanPath(p) +func (m *Medium) IsFile(filePath string) bool { + key := cleanPath(filePath) if key == "" { return false } @@ -196,22 +196,22 @@ func (m *Medium) IsFile(p string) bool { // FileGet is a convenience function that reads a file from the medium. // // result := m.FileGet(...) -func (m *Medium) FileGet(p string) (string, error) { - return m.Read(p) +func (m *Medium) FileGet(filePath string) (string, error) { + return m.Read(filePath) } // FileSet is a convenience function that writes a file to the medium. // // result := m.FileSet(...) -func (m *Medium) FileSet(p, content string) error { - return m.Write(p, content) +func (m *Medium) FileSet(filePath, content string) error { + return m.Write(filePath, content) } // Delete removes a file or empty directory. // // result := m.Delete(...) 
-func (m *Medium) Delete(p string) error { - key := cleanPath(p) +func (m *Medium) Delete(filePath string) error { + key := cleanPath(filePath) if key == "" { return core.E("sqlite.Delete", "path is required", fs.ErrInvalid) } @@ -247,8 +247,8 @@ func (m *Medium) Delete(p string) error { if err != nil { return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err) } - n, _ := res.RowsAffected() - if n == 0 { + rowsAffected, _ := res.RowsAffected() + if rowsAffected == 0 { return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist) } return nil @@ -257,8 +257,8 @@ func (m *Medium) Delete(p string) error { // DeleteAll removes a file or directory and all its contents recursively. // // result := m.DeleteAll(...) -func (m *Medium) DeleteAll(p string) error { - key := cleanPath(p) +func (m *Medium) DeleteAll(filePath string) error { + key := cleanPath(filePath) if key == "" { return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) } @@ -383,8 +383,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns the directory entries for the given path. // // result := m.List(...) -func (m *Medium) List(p string) ([]fs.DirEntry, error) { - prefix := cleanPath(p) +func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { + prefix := cleanPath(filePath) if prefix != "" { prefix += "/" } @@ -461,8 +461,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { // Stat returns file information for the given path. // // result := m.Stat(...) -func (m *Medium) Stat(p string) (fs.FileInfo, error) { - key := cleanPath(p) +func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { + key := cleanPath(filePath) if key == "" { return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid) } @@ -494,8 +494,8 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { // Open opens the named file for reading. // // result := m.Open(...) 
-func (m *Medium) Open(p string) (fs.File, error) { - key := cleanPath(p) +func (m *Medium) Open(filePath string) (fs.File, error) { + key := cleanPath(filePath) if key == "" { return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid) } @@ -528,8 +528,8 @@ func (m *Medium) Open(p string) (fs.File, error) { // Create creates or truncates the named file. // // result := m.Create(...) -func (m *Medium) Create(p string) (goio.WriteCloser, error) { - key := cleanPath(p) +func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { + key := cleanPath(filePath) if key == "" { return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid) } @@ -542,8 +542,8 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { // Append opens the named file for appending, creating it if it doesn't exist. // // result := m.Append(...) -func (m *Medium) Append(p string) (goio.WriteCloser, error) { - key := cleanPath(p) +func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { + key := cleanPath(filePath) if key == "" { return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid) } @@ -566,8 +566,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // ReadStream returns a reader for the file content. // // result := m.ReadStream(...) -func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { - key := cleanPath(p) +func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + key := cleanPath(filePath) if key == "" { return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) } @@ -593,15 +593,15 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { // WriteStream returns a writer for the file content. Content is stored on Close. // // result := m.WriteStream(...) 
-func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { - return m.Create(p) +func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return m.Create(filePath) } // Exists checks if a path exists (file or directory). // // result := m.Exists(...) -func (m *Medium) Exists(p string) bool { - key := cleanPath(p) +func (m *Medium) Exists(filePath string) bool { + key := cleanPath(filePath) if key == "" { // Root always exists return true @@ -620,8 +620,8 @@ func (m *Medium) Exists(p string) bool { // IsDir checks if a path exists and is a directory. // // result := m.IsDir(...) -func (m *Medium) IsDir(p string) bool { - key := cleanPath(p) +func (m *Medium) IsDir(filePath string) bool { + key := cleanPath(filePath) if key == "" { return false } @@ -647,34 +647,16 @@ type fileInfo struct { isDir bool } -// Name documents the Name operation. -// -// result := fi.Name(...) func (fi *fileInfo) Name() string { return fi.name } -// Size documents the Size operation. -// -// result := fi.Size(...) func (fi *fileInfo) Size() int64 { return fi.size } -// Mode documents the Mode operation. -// -// result := fi.Mode(...) func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } -// ModTime documents the ModTime operation. -// -// result := fi.ModTime(...) func (fi *fileInfo) ModTime() time.Time { return fi.modTime } -// IsDir documents the IsDir operation. -// -// result := fi.IsDir(...) func (fi *fileInfo) IsDir() bool { return fi.isDir } -// Sys documents the Sys operation. -// -// result := fi.Sys(...) func (fi *fileInfo) Sys() any { return nil } // dirEntry implements fs.DirEntry for SQLite listings. @@ -685,24 +667,12 @@ type dirEntry struct { info fs.FileInfo } -// Name documents the Name operation. -// -// result := de.Name(...) func (de *dirEntry) Name() string { return de.name } -// IsDir documents the IsDir operation. -// -// result := de.IsDir(...) 
func (de *dirEntry) IsDir() bool { return de.isDir } -// Type documents the Type operation. -// -// result := de.Type(...) func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } -// Info documents the Info operation. -// -// result := de.Info(...) func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } // sqliteFile implements fs.File for SQLite entries. @@ -714,9 +684,6 @@ type sqliteFile struct { modTime time.Time } -// Stat documents the Stat operation. -// -// result := f.Stat(...) func (f *sqliteFile) Stat() (fs.FileInfo, error) { return &fileInfo{ name: f.name, @@ -726,9 +693,6 @@ func (f *sqliteFile) Stat() (fs.FileInfo, error) { }, nil } -// Read documents the Read operation. -// -// result := f.Read(...) func (f *sqliteFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -738,9 +702,6 @@ func (f *sqliteFile) Read(b []byte) (int, error) { return n, nil } -// Close documents the Close operation. -// -// result := f.Close(...) func (f *sqliteFile) Close() error { return nil } @@ -752,17 +713,11 @@ type sqliteWriteCloser struct { data []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *sqliteWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) func (w *sqliteWriteCloser) Close() error { _, err := w.medium.database.Exec( `INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) diff --git a/store/medium.go b/store/medium.go index 2e3fc4f..eb99da0 100644 --- a/store/medium.go +++ b/store/medium.go @@ -57,8 +57,8 @@ func (m *Medium) Close() error { // splitPath splits a medium-style path into group and key. // First segment = group, remainder = key. 
-func splitPath(p string) (group, key string) { - clean := path.Clean(p) +func splitPath(entryPath string) (group, key string) { + clean := path.Clean(entryPath) clean = core.TrimPrefix(clean, "/") if clean == "" || clean == "." { return "", "" @@ -73,8 +73,8 @@ func splitPath(p string) (group, key string) { // Read retrieves the value at group/key. // // result := m.Read(...) -func (m *Medium) Read(p string) (string, error) { - group, key := splitPath(p) +func (m *Medium) Read(entryPath string) (string, error) { + group, key := splitPath(entryPath) if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } @@ -84,8 +84,8 @@ func (m *Medium) Read(p string) (string, error) { // Write stores a value at group/key. // // result := m.Write(...) -func (m *Medium) Write(p, content string) error { - group, key := splitPath(p) +func (m *Medium) Write(entryPath, content string) error { + group, key := splitPath(entryPath) if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } @@ -95,8 +95,8 @@ func (m *Medium) Write(p, content string) error { // WriteMode ignores the requested mode because key-value entries do not store POSIX permissions. // // result := m.WriteMode(...) -func (m *Medium) WriteMode(p, content string, _ fs.FileMode) error { - return m.Write(p, content) +func (m *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { + return m.Write(entryPath, content) } // EnsureDir is a no-op — groups are created implicitly on Set. @@ -109,8 +109,8 @@ func (m *Medium) EnsureDir(_ string) error { // IsFile returns true if a group/key pair exists. // // result := m.IsFile(...) -func (m *Medium) IsFile(p string) bool { - group, key := splitPath(p) +func (m *Medium) IsFile(entryPath string) bool { + group, key := splitPath(entryPath) if key == "" { return false } @@ -118,34 +118,28 @@ func (m *Medium) IsFile(p string) bool { return err == nil } -// FileGet is an alias for Read. 
-// -// result := m.FileGet(...) -func (m *Medium) FileGet(p string) (string, error) { - return m.Read(p) +func (m *Medium) FileGet(entryPath string) (string, error) { + return m.Read(entryPath) } -// FileSet is an alias for Write. -// -// result := m.FileSet(...) -func (m *Medium) FileSet(p, content string) error { - return m.Write(p, content) +func (m *Medium) FileSet(entryPath, content string) error { + return m.Write(entryPath, content) } // Delete removes a key, or checks that a group is empty. // // result := m.Delete(...) -func (m *Medium) Delete(p string) error { - group, key := splitPath(p) +func (m *Medium) Delete(entryPath string) error { + group, key := splitPath(entryPath) if group == "" { return core.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { - n, err := m.store.Count(group) + entryCount, err := m.store.Count(group) if err != nil { return err } - if n > 0 { + if entryCount > 0 { return core.E("store.Delete", core.Concat("group not empty: ", group), fs.ErrExist) } return nil @@ -156,8 +150,8 @@ func (m *Medium) Delete(p string) error { // DeleteAll removes a key, or all keys in a group. // // result := m.DeleteAll(...) -func (m *Medium) DeleteAll(p string) error { - group, key := splitPath(p) +func (m *Medium) DeleteAll(entryPath string) error { + group, key := splitPath(entryPath) if group == "" { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } @@ -190,8 +184,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { // A group path returns keys in that group. // // result := m.List(...) 
-func (m *Medium) List(p string) ([]fs.DirEntry, error) { - group, key := splitPath(p) +func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { + group, key := splitPath(entryPath) if group == "" { rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") @@ -202,11 +196,11 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { var entries []fs.DirEntry for rows.Next() { - var g string - if err := rows.Scan(&g); err != nil { + var groupName string + if err := rows.Scan(&groupName); err != nil { return nil, core.E("store.List", "scan", err) } - entries = append(entries, &kvDirEntry{name: g, isDir: true}) + entries = append(entries, &kvDirEntry{name: groupName, isDir: true}) } return entries, rows.Err() } @@ -220,8 +214,8 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { return nil, err } var entries []fs.DirEntry - for k, v := range all { - entries = append(entries, &kvDirEntry{name: k, size: int64(len(v))}) + for key, value := range all { + entries = append(entries, &kvDirEntry{name: key, size: int64(len(value))}) } return entries, nil } @@ -229,17 +223,17 @@ func (m *Medium) List(p string) ([]fs.DirEntry, error) { // Stat returns file info for a group (dir) or key (file). // // result := m.Stat(...) -func (m *Medium) Stat(p string) (fs.FileInfo, error) { - group, key := splitPath(p) +func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { + group, key := splitPath(entryPath) if group == "" { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { - n, err := m.store.Count(group) + entryCount, err := m.store.Count(group) if err != nil { return nil, err } - if n == 0 { + if entryCount == 0 { return nil, core.E("store.Stat", core.Concat("group not found: ", group), fs.ErrNotExist) } return &kvFileInfo{name: group, isDir: true}, nil @@ -254,8 +248,8 @@ func (m *Medium) Stat(p string) (fs.FileInfo, error) { // Open opens a key for reading. // // result := m.Open(...) 
-func (m *Medium) Open(p string) (fs.File, error) { - group, key := splitPath(p) +func (m *Medium) Open(entryPath string) (fs.File, error) { + group, key := splitPath(entryPath) if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } @@ -269,8 +263,8 @@ func (m *Medium) Open(p string) (fs.File, error) { // Create creates or truncates a key. Content is stored on Close. // // result := m.Create(...) -func (m *Medium) Create(p string) (goio.WriteCloser, error) { - group, key := splitPath(p) +func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { + group, key := splitPath(entryPath) if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } @@ -280,8 +274,8 @@ func (m *Medium) Create(p string) (goio.WriteCloser, error) { // Append opens a key for appending. Content is stored on Close. // // result := m.Append(...) -func (m *Medium) Append(p string) (goio.WriteCloser, error) { - group, key := splitPath(p) +func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { + group, key := splitPath(entryPath) if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } @@ -292,8 +286,8 @@ func (m *Medium) Append(p string) (goio.WriteCloser, error) { // ReadStream returns a reader for the value. // // result := m.ReadStream(...) -func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { - group, key := splitPath(p) +func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { + group, key := splitPath(entryPath) if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } @@ -307,21 +301,21 @@ func (m *Medium) ReadStream(p string) (goio.ReadCloser, error) { // WriteStream returns a writer. Content is stored on Close. // // result := m.WriteStream(...) 
-func (m *Medium) WriteStream(p string) (goio.WriteCloser, error) { - return m.Create(p) +func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { + return m.Create(entryPath) } // Exists returns true if a group or key exists. // // result := m.Exists(...) -func (m *Medium) Exists(p string) bool { - group, key := splitPath(p) +func (m *Medium) Exists(entryPath string) bool { + group, key := splitPath(entryPath) if group == "" { return false } if key == "" { - n, err := m.store.Count(group) - return err == nil && n > 0 + entryCount, err := m.store.Count(group) + return err == nil && entryCount > 0 } _, err := m.store.Get(group, key) return err == nil @@ -330,13 +324,13 @@ func (m *Medium) Exists(p string) bool { // IsDir returns true if the path is a group with entries. // // result := m.IsDir(...) -func (m *Medium) IsDir(p string) bool { - group, key := splitPath(p) +func (m *Medium) IsDir(entryPath string) bool { + group, key := splitPath(entryPath) if key != "" || group == "" { return false } - n, err := m.store.Count(group) - return err == nil && n > 0 + entryCount, err := m.store.Count(group) + return err == nil && entryCount > 0 } // --- fs helper types --- @@ -347,19 +341,10 @@ type kvFileInfo struct { isDir bool } -// Name documents the Name operation. -// -// result := fi.Name(...) func (fi *kvFileInfo) Name() string { return fi.name } -// Size documents the Size operation. -// -// result := fi.Size(...) func (fi *kvFileInfo) Size() int64 { return fi.size } -// Mode documents the Mode operation. -// -// result := fi.Mode(...) func (fi *kvFileInfo) Mode() fs.FileMode { if fi.isDir { return fs.ModeDir | 0755 @@ -367,19 +352,10 @@ func (fi *kvFileInfo) Mode() fs.FileMode { return 0644 } -// ModTime documents the ModTime operation. -// -// result := fi.ModTime(...) func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} } -// IsDir documents the IsDir operation. -// -// result := fi.IsDir(...) 
func (fi *kvFileInfo) IsDir() bool { return fi.isDir } -// Sys documents the Sys operation. -// -// result := fi.Sys(...) func (fi *kvFileInfo) Sys() any { return nil } type kvDirEntry struct { @@ -388,19 +364,10 @@ type kvDirEntry struct { size int64 } -// Name documents the Name operation. -// -// result := de.Name(...) func (de *kvDirEntry) Name() string { return de.name } -// IsDir documents the IsDir operation. -// -// result := de.IsDir(...) func (de *kvDirEntry) IsDir() bool { return de.isDir } -// Type documents the Type operation. -// -// result := de.Type(...) func (de *kvDirEntry) Type() fs.FileMode { if de.isDir { return fs.ModeDir @@ -408,9 +375,6 @@ func (de *kvDirEntry) Type() fs.FileMode { return 0 } -// Info documents the Info operation. -// -// result := de.Info(...) func (de *kvDirEntry) Info() (fs.FileInfo, error) { return &kvFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil } @@ -421,16 +385,10 @@ type kvFile struct { offset int64 } -// Stat documents the Stat operation. -// -// result := f.Stat(...) func (f *kvFile) Stat() (fs.FileInfo, error) { return &kvFileInfo{name: f.name, size: int64(len(f.content))}, nil } -// Read documents the Read operation. -// -// result := f.Read(...) func (f *kvFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF @@ -440,9 +398,6 @@ func (f *kvFile) Read(b []byte) (int, error) { return n, nil } -// Close documents the Close operation. -// -// result := f.Close(...) func (f *kvFile) Close() error { return nil } type kvWriteCloser struct { @@ -452,17 +407,11 @@ type kvWriteCloser struct { data []byte } -// Write documents the Write operation. -// -// result := w.Write(...) func (w *kvWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } -// Close documents the Close operation. -// -// result := w.Close(...) 
func (w *kvWriteCloser) Close() error { return w.store.Set(w.group, w.key, string(w.data)) } diff --git a/store/store.go b/store/store.go index 5d8880f..5368667 100644 --- a/store/store.go +++ b/store/store.go @@ -95,12 +95,12 @@ func (s *Store) Delete(group, key string) error { // // result := s.Count(...) func (s *Store) Count(group string) (int, error) { - var n int - err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&n) + var count int + err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) if err != nil { return 0, core.E("store.Count", "query", err) } - return n, nil + return count, nil } // DeleteGroup removes all keys in a group. @@ -126,11 +126,11 @@ func (s *Store) GetAll(group string) (map[string]string, error) { result := make(map[string]string) for rows.Next() { - var k, v string - if err := rows.Scan(&k, &v); err != nil { + var key, value string + if err := rows.Scan(&key, &value); err != nil { return nil, core.E("store.GetAll", "scan", err) } - result[k] = v + result[key] = value } if err := rows.Err(); err != nil { return nil, core.E("store.GetAll", "rows", err) @@ -145,7 +145,7 @@ func (s *Store) GetAll(group string) (map[string]string, error) { // kvStore, _ := store.New(":memory:") // _ = kvStore.Set("user", "name", "alice") // out, _ := kvStore.Render("hello {{ .name }}", "user") -func (s *Store) Render(tmplStr, group string) (string, error) { +func (s *Store) Render(templateText, group string) (string, error) { rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { return "", core.E("store.Render", "query", err) @@ -154,17 +154,17 @@ func (s *Store) Render(tmplStr, group string) (string, error) { vars := make(map[string]string) for rows.Next() { - var k, v string - if err := rows.Scan(&k, &v); err != nil { + var key, value string + if err := rows.Scan(&key, &value); err != nil { return "", core.E("store.Render", "scan", err) } - vars[k] = v 
+ vars[key] = value } if err := rows.Err(); err != nil { return "", core.E("store.Render", "rows", err) } - tmpl, err := template.New("render").Parse(tmplStr) + tmpl, err := template.New("render").Parse(templateText) if err != nil { return "", core.E("store.Render", "parse template", err) } diff --git a/workspace/service.go b/workspace/service.go index 0ccf0fe..2b425ff 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -140,17 +140,17 @@ func (s *Service) SwitchWorkspace(name string) error { // activeFilePath returns the full path to a file in the active workspace, // or an error if no workspace is active. -func (s *Service) activeFilePath(op, filename string) (string, error) { +func (s *Service) activeFilePath(operation, filename string) (string, error) { if s.activeWorkspace == "" { - return "", core.E(op, "no active workspace", nil) + return "", core.E(operation, "no active workspace", nil) } filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") filePath, err := joinWithinRoot(filesRoot, filename) if err != nil { - return "", core.E(op, "file path escapes workspace files", fs.ErrPermission) + return "", core.E(operation, "file path escapes workspace files", fs.ErrPermission) } if filePath == filesRoot { - return "", core.E(op, "filename is required", fs.ErrInvalid) + return "", core.E(operation, "filename is required", fs.ErrInvalid) } return filePath, nil } @@ -192,21 +192,21 @@ func (s *Service) WorkspaceFileSet(filename, content string) error { // "password": "pass123", // }) // _ = result.OK -func (s *Service) HandleIPCEvents(_ *core.Core, msg core.Message) core.Result { - switch message := msg.(type) { +func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { + switch payload := message.(type) { case map[string]any: - action, _ := message["action"].(string) + action, _ := payload["action"].(string) switch action { case "workspace.create": - identifier, _ := message["identifier"].(string) - password, _ := 
message["password"].(string) + identifier, _ := payload["identifier"].(string) + password, _ := payload["password"].(string) workspaceID, err := s.CreateWorkspace(identifier, password) if err != nil { return core.Result{}.New(err) } return core.Result{Value: workspaceID, OK: true} case "workspace.switch": - name, _ := message["name"].(string) + name, _ := payload["name"].(string) if err := s.SwitchWorkspace(name); err != nil { return core.Result{}.New(err) } @@ -235,16 +235,16 @@ func joinWithinRoot(root string, parts ...string) (string, error) { return "", fs.ErrPermission } -func (s *Service) workspacePath(op, name string) (string, error) { - if name == "" { - return "", core.E(op, "workspace name is required", fs.ErrInvalid) +func (s *Service) workspacePath(operation, workspaceName string) (string, error) { + if workspaceName == "" { + return "", core.E(operation, "workspace name is required", fs.ErrInvalid) } - workspaceDirectory, err := joinWithinRoot(s.rootPath, name) + workspaceDirectory, err := joinWithinRoot(s.rootPath, workspaceName) if err != nil { - return "", core.E(op, "workspace path escapes root", err) + return "", core.E(operation, "workspace path escapes root", err) } if core.PathDir(workspaceDirectory) != s.rootPath { - return "", core.E(op, core.Concat("invalid workspace name: ", name), fs.ErrPermission) + return "", core.E(operation, core.Concat("invalid workspace name: ", workspaceName), fs.ErrPermission) } return workspaceDirectory, nil } From d900a785e7ed601f7ff68fabf9e345c91c8f086b Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:31:12 +0000 Subject: [PATCH 13/83] refactor(ax): replace placeholder doc comments --- datanode/client.go | 12 ++------ io.go | 64 +++++-------------------------------------- local/client.go | 34 ----------------------- node/node.go | 52 ----------------------------------- s3/s3.go | 38 ------------------------- sigil/crypto_sigil.go | 14 ---------- sigil/sigil.go | 4 --- sigil/sigils.go | 26 
------------------ sqlite/sqlite.go | 40 --------------------------- store/medium.go | 46 ++----------------------------- store/store.go | 28 +++++-------------- workspace/service.go | 20 ++++---------- 12 files changed, 26 insertions(+), 352 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 5d83ee7..1cfad95 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -70,10 +70,8 @@ func FromTar(data []byte) (*Medium, error) { }, nil } -// Snapshot serialises the entire filesystem to a tarball. +// Example: snapshot, _ := medium.Snapshot() // Use this for crash reports, workspace packaging, or TIM creation. -// -// result := m.Snapshot(...) func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -84,9 +82,7 @@ func (m *Medium) Snapshot() ([]byte, error) { return data, nil } -// Restore replaces the filesystem contents from a tarball. -// -// result := m.Restore(...) +// Example: _ = medium.Restore(snapshot) func (m *Medium) Restore(data []byte) error { dataNode, err := borgdatanode.FromTar(data) if err != nil { @@ -99,10 +95,8 @@ func (m *Medium) Restore(data []byte) error { return nil } -// DataNode returns the underlying Borg DataNode. +// Example: dataNode := medium.DataNode() // Use this to wrap the filesystem in a TIM container. -// -// result := m.DataNode(...) func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() diff --git a/io.go b/io.go index c53f165..1c44992 100644 --- a/io.go +++ b/io.go @@ -142,51 +142,37 @@ func NewSandboxed(root string) (Medium, error) { // --- Helper Functions --- -// Read retrieves the content of a file from the given medium. -// -// result := io.Read(...) +// Example: content, _ := io.Read(medium, "config/app.yaml") func Read(m Medium, path string) (string, error) { return m.Read(path) } -// Write saves the given content to a file in the given medium. -// -// result := io.Write(...) 
+// Example: _ = io.Write(medium, "config/app.yaml", "port: 8080") func Write(m Medium, path, content string) error { return m.Write(path, content) } -// ReadStream returns a reader for the file content from the given medium. -// -// result := io.ReadStream(...) +// Example: reader, _ := io.ReadStream(medium, "logs/app.log") func ReadStream(m Medium, path string) (goio.ReadCloser, error) { return m.ReadStream(path) } -// WriteStream returns a writer for the file content in the given medium. -// -// result := io.WriteStream(...) +// Example: writer, _ := io.WriteStream(medium, "logs/app.log") func WriteStream(m Medium, path string) (goio.WriteCloser, error) { return m.WriteStream(path) } -// EnsureDir makes sure a directory exists in the given medium. -// -// result := io.EnsureDir(...) +// Example: _ = io.EnsureDir(medium, "config") func EnsureDir(m Medium, path string) error { return m.EnsureDir(path) } -// IsFile checks if a path exists and is a regular file in the given medium. -// -// result := io.IsFile(...) +// Example: ok := io.IsFile(medium, "config/app.yaml") func IsFile(m Medium, path string) bool { return m.IsFile(path) } -// Copy copies a file from one medium to another. -// -// result := io.Copy(...) +// Example: _ = io.Copy(source, "input.txt", destination, "backup/input.txt") func Copy(source Medium, sourcePath string, destination Medium, destinationPath string) error { content, err := source.Read(sourcePath) if err != nil { @@ -221,8 +207,6 @@ func NewMockMedium() *MockMedium { } // Read retrieves the content of a file from the mock filesystem. -// -// result := m.Read(...) func (m *MockMedium) Read(path string) (string, error) { content, ok := m.Files[path] if !ok { @@ -232,8 +216,6 @@ func (m *MockMedium) Read(path string) (string, error) { } // Write saves the given content to a file in the mock filesystem. -// -// result := m.Write(...) 
func (m *MockMedium) Write(path, content string) error { m.Files[path] = content m.ModTimes[path] = time.Now() @@ -245,38 +227,28 @@ func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { } // EnsureDir records that a directory exists in the mock filesystem. -// -// result := m.EnsureDir(...) func (m *MockMedium) EnsureDir(path string) error { m.Dirs[path] = true return nil } // IsFile checks if a path exists as a file in the mock filesystem. -// -// result := m.IsFile(...) func (m *MockMedium) IsFile(path string) bool { _, ok := m.Files[path] return ok } // FileGet is a convenience function that reads a file from the mock filesystem. -// -// result := m.FileGet(...) func (m *MockMedium) FileGet(path string) (string, error) { return m.Read(path) } // FileSet is a convenience function that writes a file to the mock filesystem. -// -// result := m.FileSet(...) func (m *MockMedium) FileSet(path, content string) error { return m.Write(path, content) } // Delete removes a file or empty directory from the mock filesystem. -// -// result := m.Delete(...) func (m *MockMedium) Delete(path string) error { if _, ok := m.Files[path]; ok { delete(m.Files, path) @@ -305,8 +277,6 @@ func (m *MockMedium) Delete(path string) error { } // DeleteAll removes a file or directory and all contents from the mock filesystem. -// -// result := m.DeleteAll(...) func (m *MockMedium) DeleteAll(path string) error { found := false if _, ok := m.Files[path]; ok { @@ -343,8 +313,6 @@ func (m *MockMedium) DeleteAll(path string) error { } // Rename moves a file or directory in the mock filesystem. -// -// result := m.Rename(...) func (m *MockMedium) Rename(oldPath, newPath string) error { if content, ok := m.Files[oldPath]; ok { m.Files[newPath] = content @@ -404,8 +372,6 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { } // Open opens a file from the mock filesystem. -// -// result := m.Open(...) 
func (m *MockMedium) Open(path string) (fs.File, error) { content, ok := m.Files[path] if !ok { @@ -418,8 +384,6 @@ func (m *MockMedium) Open(path string) (fs.File, error) { } // Create creates a file in the mock filesystem. -// -// result := m.Create(...) func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { return &MockWriteCloser{ medium: m, @@ -428,8 +392,6 @@ func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { } // Append opens a file for appending in the mock filesystem. -// -// result := m.Append(...) func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { content := m.Files[path] return &MockWriteCloser{ @@ -440,15 +402,11 @@ func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content in the mock filesystem. -// -// result := m.ReadStream(...) func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } // WriteStream returns a writer for the file content in the mock filesystem. -// -// result := m.WriteStream(...) func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } @@ -499,8 +457,6 @@ func (w *MockWriteCloser) Close() error { } // List returns directory entries for the mock filesystem. -// -// result := m.List(...) func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := m.Dirs[path]; !ok { // Check if it's the root or has children @@ -610,8 +566,6 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { } // Stat returns file information for the mock filesystem. -// -// result := m.Stat(...) func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { if content, ok := m.Files[path]; ok { modTime, ok := m.ModTimes[path] @@ -636,8 +590,6 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { } // Exists checks if a path exists in the mock filesystem. -// -// result := m.Exists(...) 
func (m *MockMedium) Exists(path string) bool { if _, ok := m.Files[path]; ok { return true @@ -649,8 +601,6 @@ func (m *MockMedium) Exists(path string) bool { } // IsDir checks if a path is a directory in the mock filesystem. -// -// result := m.IsDir(...) func (m *MockMedium) IsDir(path string) bool { _, ok := m.Dirs[path] return ok diff --git a/local/client.go b/local/client.go index 2039564..5446f19 100644 --- a/local/client.go +++ b/local/client.go @@ -241,8 +241,6 @@ func (m *Medium) validatePath(path string) (string, error) { } // Read returns file contents as string. -// -// result := m.Read(...) func (m *Medium) Read(path string) (string, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -254,16 +252,12 @@ func (m *Medium) Read(path string) (string, error) { // Write saves content to file, creating parent directories as needed. // Files are created with mode 0644. For sensitive files (keys, secrets), // use WriteMode with 0600. -// -// result := m.Write(...) func (m *Medium) Write(path, content string) error { return m.WriteMode(path, content, 0644) } // WriteMode saves content to file with explicit permissions. // Use 0600 for sensitive files (encryption output, private keys, auth hashes). -// -// result := m.WriteMode(...) func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -273,8 +267,6 @@ func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { } // EnsureDir creates directory if it doesn't exist. -// -// result := m.EnsureDir(...) func (m *Medium) EnsureDir(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -284,8 +276,6 @@ func (m *Medium) EnsureDir(path string) error { } // IsDir returns true if path is a directory. -// -// result := m.IsDir(...) 
func (m *Medium) IsDir(path string) bool { if path == "" { return false @@ -298,8 +288,6 @@ func (m *Medium) IsDir(path string) bool { } // IsFile returns true if path is a regular file. -// -// result := m.IsFile(...) func (m *Medium) IsFile(path string) bool { if path == "" { return false @@ -312,8 +300,6 @@ func (m *Medium) IsFile(path string) bool { } // Exists returns true if path exists. -// -// result := m.Exists(...) func (m *Medium) Exists(path string) bool { resolvedPath, err := m.validatePath(path) if err != nil { @@ -323,8 +309,6 @@ func (m *Medium) Exists(path string) bool { } // List returns directory entries. -// -// result := m.List(...) func (m *Medium) List(path string) ([]fs.DirEntry, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -334,8 +318,6 @@ func (m *Medium) List(path string) ([]fs.DirEntry, error) { } // Stat returns file info. -// -// result := m.Stat(...) func (m *Medium) Stat(path string) (fs.FileInfo, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -345,8 +327,6 @@ func (m *Medium) Stat(path string) (fs.FileInfo, error) { } // Open opens the named file for reading. -// -// result := m.Open(...) func (m *Medium) Open(path string) (fs.File, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -356,8 +336,6 @@ func (m *Medium) Open(path string) (fs.File, error) { } // Create creates or truncates the named file. -// -// result := m.Create(...) func (m *Medium) Create(path string) (goio.WriteCloser, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -367,8 +345,6 @@ func (m *Medium) Create(path string) (goio.WriteCloser, error) { } // Append opens the named file for appending, creating it if it doesn't exist. -// -// result := m.Append(...) 
func (m *Medium) Append(path string) (goio.WriteCloser, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -383,8 +359,6 @@ func (m *Medium) Append(path string) (goio.WriteCloser, error) { // API, as required by the io.Medium interface, while Open provides the more // general filesystem-level operation. Both methods are kept for semantic // clarity and backward compatibility. -// -// result := m.ReadStream(...) func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } @@ -395,15 +369,11 @@ func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { // API, as required by the io.Medium interface, while Create provides the more // general filesystem-level operation. Both methods are kept for semantic // clarity and backward compatibility. -// -// result := m.WriteStream(...) func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } // Delete removes a file or empty directory. -// -// result := m.Delete(...) func (m *Medium) Delete(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -416,8 +386,6 @@ func (m *Medium) Delete(path string) error { } // DeleteAll removes a file or directory recursively. -// -// result := m.DeleteAll(...) func (m *Medium) DeleteAll(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -430,8 +398,6 @@ func (m *Medium) DeleteAll(path string) error { } // Rename moves a file or directory. -// -// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldResolvedPath, err := m.validatePath(oldPath) if err != nil { diff --git a/node/node.go b/node/node.go index 12efee3..f7a01c7 100644 --- a/node/node.go +++ b/node/node.go @@ -41,8 +41,6 @@ func New() *Node { // ---------- Node-specific methods ---------- // AddData stages content in the in-memory filesystem. -// -// result := n.AddData(...) 
func (n *Node) AddData(name string, content []byte) { name = core.TrimPrefix(name, "/") if name == "" { @@ -60,8 +58,6 @@ func (n *Node) AddData(name string, content []byte) { } // ToTar serialises the entire in-memory tree to a tar archive. -// -// result := n.ToTar(...) func (n *Node) ToTar() ([]byte, error) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -89,8 +85,6 @@ func (n *Node) ToTar() ([]byte, error) { } // FromTar creates a new Node from a tar archive. -// -// result := node.FromTar(...) func FromTar(data []byte) (*Node, error) { n := New() if err := n.LoadTar(data); err != nil { @@ -100,8 +94,6 @@ func FromTar(data []byte) (*Node, error) { } // LoadTar replaces the in-memory tree with the contents of a tar archive. -// -// result := n.LoadTar(...) func (n *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) tr := tar.NewReader(bytes.NewReader(data)) @@ -137,8 +129,6 @@ func (n *Node) LoadTar(data []byte) error { } // WalkNode walks the in-memory tree, calling fn for each entry. -// -// result := n.WalkNode(...) func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(n, root, fn) } @@ -156,8 +146,6 @@ type WalkOptions struct { } // Walk walks the in-memory tree with optional WalkOptions. -// -// result := n.Walk(...) func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { var opt WalkOptions if len(opts) > 0 { @@ -200,8 +188,6 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { // ReadFile returns the content of the named file as a byte slice. // Implements fs.ReadFileFS. -// -// result := n.ReadFile(...) func (n *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") f, ok := n.files[name] @@ -215,8 +201,6 @@ func (n *Node) ReadFile(name string) ([]byte, error) { } // CopyFile copies a file from the in-memory tree to the local filesystem. -// -// result := n.CopyFile(...) 
func (n *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { sourcePath = core.TrimPrefix(sourcePath, "/") f, ok := n.files[sourcePath] @@ -285,8 +269,6 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // ---------- Medium interface: fs.FS methods ---------- // Open opens a file from the Node. Implements fs.FS. -// -// result := n.Open(...) func (n *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { @@ -306,8 +288,6 @@ func (n *Node) Open(name string) (fs.File, error) { } // Stat returns file information for the given path. -// -// result := n.Stat(...) func (n *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { @@ -327,8 +307,6 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { } // ReadDir reads and returns all directory entries for the named directory. -// -// result := n.ReadDir(...) func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = core.TrimPrefix(name, "/") if name == "." { @@ -381,8 +359,6 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // ---------- Medium interface: read/write ---------- // Read retrieves the content of a file as a string. -// -// result := n.Read(...) func (n *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") f, ok := n.files[filePath] @@ -393,16 +369,12 @@ func (n *Node) Read(filePath string) (string, error) { } // Write saves the given content to a file, overwriting it if it exists. -// -// result := n.Write(...) func (n *Node) Write(filePath, content string) error { n.AddData(filePath, []byte(content)) return nil } // WriteMode saves content with explicit permissions (no-op for in-memory node). -// -// result := n.WriteMode(...) 
func (n *Node) WriteMode(filePath, content string, mode fs.FileMode) error { return n.Write(filePath, content) } @@ -416,8 +388,6 @@ func (n *Node) FileSet(filePath, content string) error { } // EnsureDir is a no-op because directories are implicit in Node. -// -// result := n.EnsureDir(...) func (n *Node) EnsureDir(_ string) error { return nil } @@ -425,16 +395,12 @@ func (n *Node) EnsureDir(_ string) error { // ---------- Medium interface: existence checks ---------- // Exists checks if a path exists (file or directory). -// -// result := n.Exists(...) func (n *Node) Exists(filePath string) bool { _, err := n.Stat(filePath) return err == nil } // IsFile checks if a path exists and is a regular file. -// -// result := n.IsFile(...) func (n *Node) IsFile(filePath string) bool { filePath = core.TrimPrefix(filePath, "/") _, ok := n.files[filePath] @@ -442,8 +408,6 @@ func (n *Node) IsFile(filePath string) bool { } // IsDir checks if a path exists and is a directory. -// -// result := n.IsDir(...) func (n *Node) IsDir(filePath string) bool { info, err := n.Stat(filePath) if err != nil { @@ -455,8 +419,6 @@ func (n *Node) IsDir(filePath string) bool { // ---------- Medium interface: mutations ---------- // Delete removes a single file. -// -// result := n.Delete(...) func (n *Node) Delete(filePath string) error { filePath = core.TrimPrefix(filePath, "/") if _, ok := n.files[filePath]; ok { @@ -467,8 +429,6 @@ func (n *Node) Delete(filePath string) error { } // DeleteAll removes a file or directory and all children. -// -// result := n.DeleteAll(...) func (n *Node) DeleteAll(filePath string) error { filePath = core.TrimPrefix(filePath, "/") @@ -493,8 +453,6 @@ func (n *Node) DeleteAll(filePath string) error { } // Rename moves a file from oldPath to newPath. -// -// result := n.Rename(...) 
func (n *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") @@ -511,8 +469,6 @@ func (n *Node) Rename(oldPath, newPath string) error { } // List returns directory entries for the given path. -// -// result := n.List(...) func (n *Node) List(filePath string) ([]fs.DirEntry, error) { filePath = core.TrimPrefix(filePath, "/") if filePath == "" || filePath == "." { @@ -525,8 +481,6 @@ func (n *Node) List(filePath string) ([]fs.DirEntry, error) { // Create creates or truncates the named file, returning a WriteCloser. // Content is committed to the Node on Close. -// -// result := n.Create(...) func (n *Node) Create(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") return &nodeWriter{node: n, path: filePath}, nil @@ -534,8 +488,6 @@ func (n *Node) Create(filePath string) (goio.WriteCloser, error) { // Append opens the named file for appending, creating it if needed. // Content is committed to the Node on Close. -// -// result := n.Append(...) func (n *Node) Append(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") var existing []byte @@ -547,8 +499,6 @@ func (n *Node) Append(filePath string) (goio.WriteCloser, error) { } // ReadStream returns a ReadCloser for the file content. -// -// result := n.ReadStream(...) func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { f, err := n.Open(filePath) if err != nil { @@ -558,8 +508,6 @@ func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { } // WriteStream returns a WriteCloser for the file content. -// -// result := n.WriteStream(...) func (n *Node) WriteStream(filePath string) (goio.WriteCloser, error) { return n.Create(filePath) } diff --git a/s3/s3.go b/s3/s3.go index a074c56..571c653 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -128,8 +128,6 @@ func (m *Medium) key(filePath string) string { } // Read retrieves the content of a file as a string. 
-// -// result := m.Read(...) func (m *Medium) Read(filePath string) (string, error) { key := m.key(filePath) if key == "" { @@ -153,8 +151,6 @@ func (m *Medium) Read(filePath string) (string, error) { } // Write saves the given content to a file, overwriting it if it exists. -// -// result := m.Write(...) func (m *Medium) Write(filePath, content string) error { key := m.key(filePath) if key == "" { @@ -173,22 +169,16 @@ func (m *Medium) Write(filePath, content string) error { } // WriteMode ignores the requested mode because S3 objects do not store POSIX permissions. -// -// result := m.WriteMode(...) func (m *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { return m.Write(filePath, content) } // EnsureDir is a no-op for S3 (S3 has no real directories). -// -// result := m.EnsureDir(...) func (m *Medium) EnsureDir(_ string) error { return nil } // IsFile checks if a path exists and is a regular file (not a "directory" prefix). -// -// result := m.IsFile(...) func (m *Medium) IsFile(filePath string) bool { key := m.key(filePath) if key == "" { @@ -206,22 +196,16 @@ func (m *Medium) IsFile(filePath string) bool { } // FileGet is a convenience function that reads a file from the medium. -// -// result := m.FileGet(...) func (m *Medium) FileGet(filePath string) (string, error) { return m.Read(filePath) } // FileSet is a convenience function that writes a file to the medium. -// -// result := m.FileSet(...) func (m *Medium) FileSet(filePath, content string) error { return m.Write(filePath, content) } // Delete removes a single object. -// -// result := m.Delete(...) func (m *Medium) Delete(filePath string) error { key := m.key(filePath) if key == "" { @@ -239,8 +223,6 @@ func (m *Medium) Delete(filePath string) error { } // DeleteAll removes all objects under the given prefix. -// -// result := m.DeleteAll(...) 
func (m *Medium) DeleteAll(filePath string) error { key := m.key(filePath) if key == "" { @@ -306,8 +288,6 @@ func (m *Medium) DeleteAll(filePath string) error { } // Rename moves an object by copying then deleting the original. -// -// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldKey := m.key(oldPath) newKey := m.key(newPath) @@ -338,8 +318,6 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // List returns directory entries for the given path using ListObjectsV2 with delimiter. -// -// result := m.List(...) func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix := m.key(filePath) if prefix != "" && !core.HasSuffix(prefix, "/") { @@ -413,8 +391,6 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Stat returns file information for the given path using HeadObject. -// -// result := m.Stat(...) func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { key := m.key(filePath) if key == "" { @@ -448,8 +424,6 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { } // Open opens the named file for reading. -// -// result := m.Open(...) func (m *Medium) Open(filePath string) (fs.File, error) { key := m.key(filePath) if key == "" { @@ -489,8 +463,6 @@ func (m *Medium) Open(filePath string) (fs.File, error) { // Create creates or truncates the named file. Returns a writer that // uploads the content on Close. -// -// result := m.Create(...) func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { key := m.key(filePath) if key == "" { @@ -504,8 +476,6 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { // Append opens the named file for appending. It downloads the existing // content (if any) and re-uploads the combined content on Close. -// -// result := m.Append(...) 
func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { key := m.key(filePath) if key == "" { @@ -530,8 +500,6 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content. -// -// result := m.ReadStream(...) func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := m.key(filePath) if key == "" { @@ -549,15 +517,11 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { } // WriteStream returns a writer for the file content. Content is uploaded on Close. -// -// result := m.WriteStream(...) func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return m.Create(filePath) } // Exists checks if a path exists (file or directory prefix). -// -// result := m.Exists(...) func (m *Medium) Exists(filePath string) bool { key := m.key(filePath) if key == "" { @@ -590,8 +554,6 @@ func (m *Medium) Exists(filePath string) bool { } // IsDir checks if a path exists and is a directory (has objects under it as a prefix). -// -// result := m.IsDir(...) func (m *Medium) IsDir(filePath string) bool { key := m.key(filePath) if key == "" { diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 3b7a39f..c50b289 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -62,8 +62,6 @@ type PreObfuscator interface { type XORObfuscator struct{} // Obfuscate XORs the data with a key stream derived from the entropy. -// -// result := x.Obfuscate(...) func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -72,8 +70,6 @@ func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { } // Deobfuscate reverses the XOR transformation (XOR is symmetric). -// -// result := x.Deobfuscate(...) 
func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -128,8 +124,6 @@ func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { type ShuffleMaskObfuscator struct{} // Obfuscate shuffles bytes and applies a mask derived from entropy. -// -// result := s.Obfuscate(...) func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -157,8 +151,6 @@ func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { } // Deobfuscate reverses the shuffle and mask operations. -// -// result := s.Deobfuscate(...) func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -291,8 +283,6 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch // In encrypts the data with pre-obfuscation. // The flow is: plaintext -> obfuscate -> encrypt -// -// result := s.In(...) func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { if s.Key == nil { return nil, ErrNoKeyConfigured @@ -332,8 +322,6 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { // Out decrypts the data and reverses obfuscation. // The flow is: decrypt -> deobfuscate -> plaintext -// -// result := s.Out(...) func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { if s.Key == nil { return nil, ErrNoKeyConfigured @@ -378,8 +366,6 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { // GetNonceFromCiphertext extracts the nonce from encrypted output. // This is provided for debugging/logging purposes only. // The nonce should NOT be stored separately in headers. -// -// result := sigil.GetNonceFromCiphertext(...) 
func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { diff --git a/sigil/sigil.go b/sigil/sigil.go index 46f7990..3bd035c 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -45,8 +45,6 @@ type Sigil interface { // stops immediately and returns nil with that error. // // To reverse a transmutation, call each sigil's Out method in reverse order. -// -// result := sigil.Transmute(...) func Transmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for _, s := range sigils { @@ -63,8 +61,6 @@ func Transmute(data []byte, sigils []Sigil) ([]byte, error) { // Each sigil's Out method is called in reverse order, with the output of one sigil // becoming the input of the next. If any sigil returns an error, Untransmute // stops immediately and returns nil with that error. -// -// result := sigil.Untransmute(...) func Untransmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for i := len(sigils) - 1; i >= 0; i-- { diff --git a/sigil/sigils.go b/sigil/sigils.go index 504dcaf..5410de4 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -25,8 +25,6 @@ import ( type ReverseSigil struct{} // In reverses the bytes of the data. -// -// result := s.In(...) func (s *ReverseSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -39,8 +37,6 @@ func (s *ReverseSigil) In(data []byte) ([]byte, error) { } // Out reverses the bytes of the data. -// -// result := s.Out(...) func (s *ReverseSigil) Out(data []byte) ([]byte, error) { return s.In(data) } @@ -50,8 +46,6 @@ func (s *ReverseSigil) Out(data []byte) ([]byte, error) { type HexSigil struct{} // In encodes the data to hexadecimal. -// -// result := s.In(...) func (s *HexSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -62,8 +56,6 @@ func (s *HexSigil) In(data []byte) ([]byte, error) { } // Out decodes the data from hexadecimal. -// -// result := s.Out(...) 
func (s *HexSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -78,8 +70,6 @@ func (s *HexSigil) Out(data []byte) ([]byte, error) { type Base64Sigil struct{} // In encodes the data to base64. -// -// result := s.In(...) func (s *Base64Sigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -90,8 +80,6 @@ func (s *Base64Sigil) In(data []byte) ([]byte, error) { } // Out decodes the data from base64. -// -// result := s.Out(...) func (s *Base64Sigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -108,8 +96,6 @@ type GzipSigil struct { } // In compresses the data using gzip. -// -// result := s.In(...) func (s *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -130,8 +116,6 @@ func (s *GzipSigil) In(data []byte) ([]byte, error) { } // Out decompresses the data using gzip. -// -// result := s.Out(...) func (s *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -153,8 +137,6 @@ func (s *GzipSigil) Out(data []byte) ([]byte, error) { type JSONSigil struct{ Indent bool } // In compacts or indents the JSON data. -// -// result := s.In(...) func (s *JSONSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -177,8 +159,6 @@ func (s *JSONSigil) In(data []byte) ([]byte, error) { } // Out is a no-op for JSONSigil. -// -// result := s.Out(...) func (s *JSONSigil) Out(data []byte) ([]byte, error) { // For simplicity, Out is a no-op. The primary use is formatting. return data, nil @@ -199,8 +179,6 @@ func NewHashSigil(h crypto.Hash) *HashSigil { } // In hashes the data. -// -// result := s.In(...) func (s *HashSigil) In(data []byte) ([]byte, error) { var h io.Writer switch s.Hash { @@ -250,16 +228,12 @@ func (s *HashSigil) In(data []byte) ([]byte, error) { } // Out is a no-op for HashSigil. -// -// result := s.Out(...) 
func (s *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } // NewSigil is a factory function that returns a Sigil based on a string name. // It is the primary way to create Sigil instances. -// -// result := sigil.NewSigil(...) func NewSigil(name string) (Sigil, error) { switch name { case "reverse": diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index bc630d1..4e5194f 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -81,8 +81,6 @@ func New(options Options) (*Medium, error) { } // Close closes the underlying database connection. -// -// result := m.Close(...) func (m *Medium) Close() error { if m.database != nil { return m.database.Close() @@ -101,8 +99,6 @@ func cleanPath(filePath string) string { } // Read retrieves the content of a file as a string. -// -// result := m.Read(...) func (m *Medium) Read(filePath string) (string, error) { key := cleanPath(filePath) if key == "" { @@ -127,15 +123,11 @@ func (m *Medium) Read(filePath string) (string, error) { } // Write saves the given content to a file, overwriting it if it exists. -// -// result := m.Write(...) func (m *Medium) Write(filePath, content string) error { return m.WriteMode(filePath, content, 0644) } // WriteMode saves the given content with explicit permissions. -// -// result := m.WriteMode(...) func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { key := cleanPath(filePath) if key == "" { @@ -154,8 +146,6 @@ func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { } // EnsureDir makes sure a directory exists, creating it if necessary. -// -// result := m.EnsureDir(...) func (m *Medium) EnsureDir(filePath string) error { key := cleanPath(filePath) if key == "" { @@ -175,8 +165,6 @@ func (m *Medium) EnsureDir(filePath string) error { } // IsFile checks if a path exists and is a regular file. -// -// result := m.IsFile(...) 
func (m *Medium) IsFile(filePath string) bool { key := cleanPath(filePath) if key == "" { @@ -194,22 +182,16 @@ func (m *Medium) IsFile(filePath string) bool { } // FileGet is a convenience function that reads a file from the medium. -// -// result := m.FileGet(...) func (m *Medium) FileGet(filePath string) (string, error) { return m.Read(filePath) } // FileSet is a convenience function that writes a file to the medium. -// -// result := m.FileSet(...) func (m *Medium) FileSet(filePath, content string) error { return m.Write(filePath, content) } // Delete removes a file or empty directory. -// -// result := m.Delete(...) func (m *Medium) Delete(filePath string) error { key := cleanPath(filePath) if key == "" { @@ -255,8 +237,6 @@ func (m *Medium) Delete(filePath string) error { } // DeleteAll removes a file or directory and all its contents recursively. -// -// result := m.DeleteAll(...) func (m *Medium) DeleteAll(filePath string) error { key := cleanPath(filePath) if key == "" { @@ -281,8 +261,6 @@ func (m *Medium) DeleteAll(filePath string) error { } // Rename moves a file or directory from oldPath to newPath. -// -// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldKey := cleanPath(oldPath) newKey := cleanPath(newPath) @@ -381,8 +359,6 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // List returns the directory entries for the given path. -// -// result := m.List(...) func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix := cleanPath(filePath) if prefix != "" { @@ -459,8 +435,6 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Stat returns file information for the given path. -// -// result := m.Stat(...) func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { key := cleanPath(filePath) if key == "" { @@ -492,8 +466,6 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { } // Open opens the named file for reading. -// -// result := m.Open(...) 
func (m *Medium) Open(filePath string) (fs.File, error) { key := cleanPath(filePath) if key == "" { @@ -526,8 +498,6 @@ func (m *Medium) Open(filePath string) (fs.File, error) { } // Create creates or truncates the named file. -// -// result := m.Create(...) func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { key := cleanPath(filePath) if key == "" { @@ -540,8 +510,6 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { } // Append opens the named file for appending, creating it if it doesn't exist. -// -// result := m.Append(...) func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { key := cleanPath(filePath) if key == "" { @@ -564,8 +532,6 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the file content. -// -// result := m.ReadStream(...) func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := cleanPath(filePath) if key == "" { @@ -591,15 +557,11 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { } // WriteStream returns a writer for the file content. Content is stored on Close. -// -// result := m.WriteStream(...) func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return m.Create(filePath) } // Exists checks if a path exists (file or directory). -// -// result := m.Exists(...) func (m *Medium) Exists(filePath string) bool { key := cleanPath(filePath) if key == "" { @@ -618,8 +580,6 @@ func (m *Medium) Exists(filePath string) bool { } // IsDir checks if a path exists and is a directory. -// -// result := m.IsDir(...) 
func (m *Medium) IsDir(filePath string) bool { key := cleanPath(filePath) if key == "" { diff --git a/store/medium.go b/store/medium.go index eb99da0..c9bde7d 100644 --- a/store/medium.go +++ b/store/medium.go @@ -34,23 +34,17 @@ func NewMedium(dbPath string) (*Medium, error) { return &Medium{store: store}, nil } -// AsMedium returns a Medium adapter for an existing Store. -// -// result := s.AsMedium(...) +// Example: medium := kvStore.AsMedium() func (s *Store) AsMedium() *Medium { return &Medium{store: s} } -// Store returns the underlying KV store for direct access. -// -// result := m.Store(...) +// Example: kvStore := medium.Store() func (m *Medium) Store() *Store { return m.store } -// Close closes the underlying store. -// -// result := m.Close(...) +// Example: _ = medium.Close() func (m *Medium) Close() error { return m.store.Close() } @@ -71,8 +65,6 @@ func splitPath(entryPath string) (group, key string) { } // Read retrieves the value at group/key. -// -// result := m.Read(...) func (m *Medium) Read(entryPath string) (string, error) { group, key := splitPath(entryPath) if key == "" { @@ -82,8 +74,6 @@ func (m *Medium) Read(entryPath string) (string, error) { } // Write stores a value at group/key. -// -// result := m.Write(...) func (m *Medium) Write(entryPath, content string) error { group, key := splitPath(entryPath) if key == "" { @@ -93,22 +83,16 @@ func (m *Medium) Write(entryPath, content string) error { } // WriteMode ignores the requested mode because key-value entries do not store POSIX permissions. -// -// result := m.WriteMode(...) func (m *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { return m.Write(entryPath, content) } // EnsureDir is a no-op — groups are created implicitly on Set. -// -// result := m.EnsureDir(...) func (m *Medium) EnsureDir(_ string) error { return nil } // IsFile returns true if a group/key pair exists. -// -// result := m.IsFile(...) 
func (m *Medium) IsFile(entryPath string) bool { group, key := splitPath(entryPath) if key == "" { @@ -127,8 +111,6 @@ func (m *Medium) FileSet(entryPath, content string) error { } // Delete removes a key, or checks that a group is empty. -// -// result := m.Delete(...) func (m *Medium) Delete(entryPath string) error { group, key := splitPath(entryPath) if group == "" { @@ -148,8 +130,6 @@ func (m *Medium) Delete(entryPath string) error { } // DeleteAll removes a key, or all keys in a group. -// -// result := m.DeleteAll(...) func (m *Medium) DeleteAll(entryPath string) error { group, key := splitPath(entryPath) if group == "" { @@ -162,8 +142,6 @@ func (m *Medium) DeleteAll(entryPath string) error { } // Rename moves a key from one path to another. -// -// result := m.Rename(...) func (m *Medium) Rename(oldPath, newPath string) error { oldGroup, oldKey := splitPath(oldPath) newGroup, newKey := splitPath(newPath) @@ -182,8 +160,6 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns directory entries. Empty path returns groups. // A group path returns keys in that group. -// -// result := m.List(...) func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitPath(entryPath) @@ -221,8 +197,6 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { } // Stat returns file info for a group (dir) or key (file). -// -// result := m.Stat(...) func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { group, key := splitPath(entryPath) if group == "" { @@ -246,8 +220,6 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { } // Open opens a key for reading. -// -// result := m.Open(...) func (m *Medium) Open(entryPath string) (fs.File, error) { group, key := splitPath(entryPath) if key == "" { @@ -261,8 +233,6 @@ func (m *Medium) Open(entryPath string) (fs.File, error) { } // Create creates or truncates a key. Content is stored on Close. -// -// result := m.Create(...) 
func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -272,8 +242,6 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { } // Append opens a key for appending. Content is stored on Close. -// -// result := m.Append(...) func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -284,8 +252,6 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { } // ReadStream returns a reader for the value. -// -// result := m.ReadStream(...) func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -299,15 +265,11 @@ func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { } // WriteStream returns a writer. Content is stored on Close. -// -// result := m.WriteStream(...) func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { return m.Create(entryPath) } // Exists returns true if a group or key exists. -// -// result := m.Exists(...) func (m *Medium) Exists(entryPath string) bool { group, key := splitPath(entryPath) if group == "" { @@ -322,8 +284,6 @@ func (m *Medium) Exists(entryPath string) bool { } // IsDir returns true if the path is a group with entries. -// -// result := m.IsDir(...) func (m *Medium) IsDir(entryPath string) bool { group, key := splitPath(entryPath) if key != "" || group == "" { diff --git a/store/store.go b/store/store.go index 5368667..d5eac68 100644 --- a/store/store.go +++ b/store/store.go @@ -43,16 +43,12 @@ func New(dbPath string) (*Store, error) { return &Store{database: database}, nil } -// Close closes the underlying database. -// -// result := s.Close(...) +// Example: _ = kvStore.Close() func (s *Store) Close() error { return s.database.Close() } -// Get retrieves a value by group and key. -// -// result := s.Get(...) 
+// Example: theme, _ := kvStore.Get("app", "theme") func (s *Store) Get(group, key string) (string, error) { var value string err := s.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) @@ -65,9 +61,7 @@ func (s *Store) Get(group, key string) (string, error) { return value, nil } -// Set stores a value by group and key, overwriting if exists. -// -// result := s.Set(...) +// Example: _ = kvStore.Set("app", "theme", "midnight") func (s *Store) Set(group, key, value string) error { _, err := s.database.Exec( `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) @@ -80,9 +74,7 @@ func (s *Store) Set(group, key, value string) error { return nil } -// Delete removes a single key from a group. -// -// result := s.Delete(...) +// Example: _ = kvStore.Delete("app", "theme") func (s *Store) Delete(group, key string) error { _, err := s.database.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) if err != nil { @@ -91,9 +83,7 @@ func (s *Store) Delete(group, key string) error { return nil } -// Count returns the number of keys in a group. -// -// result := s.Count(...) +// Example: count, _ := kvStore.Count("app") func (s *Store) Count(group string) (int, error) { var count int err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) @@ -103,9 +93,7 @@ func (s *Store) Count(group string) (int, error) { return count, nil } -// DeleteGroup removes all keys in a group. -// -// result := s.DeleteGroup(...) +// Example: _ = kvStore.DeleteGroup("app") func (s *Store) DeleteGroup(group string) error { _, err := s.database.Exec("DELETE FROM kv WHERE grp = ?", group) if err != nil { @@ -114,9 +102,7 @@ func (s *Store) DeleteGroup(group string) error { return nil } -// GetAll returns all key-value pairs in a group. -// -// result := s.GetAll(...) 
+// Example: values, _ := kvStore.GetAll("app") func (s *Store) GetAll(group string) (map[string]string, error) { rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/service.go b/workspace/service.go index 2b425ff..b51d29a 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -77,11 +77,9 @@ func New(options Options) (*Service, error) { return s, nil } -// CreateWorkspace creates a new encrypted workspace. +// Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") // Identifier is hashed (SHA-256) to create the directory name. // A PGP keypair is generated using the password. -// -// result := s.CreateWorkspace(...) func (s *Service) CreateWorkspace(identifier, password string) (string, error) { s.mu.Lock() defer s.mu.Unlock() @@ -119,9 +117,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { return workspaceID, nil } -// SwitchWorkspace changes the active workspace. -// -// result := s.SwitchWorkspace(...) +// Example: _ = service.SwitchWorkspace(workspaceID) func (s *Service) SwitchWorkspace(name string) error { s.mu.Lock() defer s.mu.Unlock() @@ -155,9 +151,7 @@ func (s *Service) activeFilePath(operation, filename string) (string, error) { return filePath, nil } -// WorkspaceFileGet retrieves the content of a file from the active workspace. -// -// result := s.WorkspaceFileGet(...) +// Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") func (s *Service) WorkspaceFileGet(filename string) (string, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -169,9 +163,7 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) { return s.medium.Read(filePath) } -// WorkspaceFileSet saves content to a file in the active workspace. -// -// result := s.WorkspaceFileSet(...) 
+// Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") func (s *Service) WorkspaceFileSet(filename, content string) error { s.mu.Lock() defer s.mu.Unlock() @@ -186,12 +178,12 @@ func (s *Service) WorkspaceFileSet(filename, content string) error { // HandleIPCEvents handles workspace-related IPC messages. // // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) -// result := service.HandleIPCEvents(core.New(), map[string]any{ +// ipcResult := service.HandleIPCEvents(core.New(), map[string]any{ // "action": "workspace.create", // "identifier": "alice", // "password": "pass123", // }) -// _ = result.OK +// _ = ipcResult.OK func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { case map[string]any: From 518309a022eb2b47da0931def03d9af5180e069e Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:37:40 +0000 Subject: [PATCH 14/83] refactor(ax): add explicit node traversal options Co-Authored-By: Virgil --- datanode/client.go | 4 ++-- io.go | 9 ++++---- local/client.go | 2 +- node/node.go | 49 ++++++++++++++++++++++++------------------- node/node_test.go | 8 +++---- s3/s3.go | 3 ++- sigil/crypto_sigil.go | 8 +++++-- sigil/sigils.go | 11 ++++++---- sqlite/sqlite.go | 2 +- store/medium.go | 2 +- store/store.go | 3 ++- workspace/service.go | 3 ++- 12 files changed, 61 insertions(+), 43 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 1cfad95..16d89bc 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -39,7 +39,7 @@ type Medium struct { mu sync.RWMutex } -// New creates a new empty DataNode Medium. +// Use New when you need an in-memory Medium that snapshots to tar. // // Example usage: // @@ -52,7 +52,7 @@ func New() *Medium { } } -// FromTar creates a Medium from a tarball, restoring all files. +// Use FromTar(snapshot) to restore a Medium from tar bytes. 
// // Example usage: // diff --git a/io.go b/io.go index 1c44992..eb803e6 100644 --- a/io.go +++ b/io.go @@ -128,9 +128,9 @@ func init() { } } -// NewSandboxed creates a new Medium sandboxed to the given root directory. -// All file operations are restricted to paths within the root. -// The root directory will be created if it doesn't exist. +// Use NewSandboxed to confine file operations to a root directory. +// All file operations are restricted to paths within the root, and the root +// directory will be created if it does not exist. // // Example usage: // @@ -195,9 +195,10 @@ type MockMedium struct { var _ Medium = (*MockMedium)(nil) -// NewMockMedium creates a new MockMedium instance. +// Use NewMockMedium when tests need an in-memory Medium. // // medium := io.NewMockMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") func NewMockMedium() *MockMedium { return &MockMedium{ Files: make(map[string]string), diff --git a/local/client.go b/local/client.go index 5446f19..46b3635 100644 --- a/local/client.go +++ b/local/client.go @@ -16,7 +16,7 @@ type Medium struct { var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() -// New creates a new local Medium rooted at the given directory. +// Use New to sandbox filesystem access under a root directory. // Pass "/" for full filesystem access, or a specific path to sandbox. // // Example usage: diff --git a/node/node.go b/node/node.go index f7a01c7..e563533 100644 --- a/node/node.go +++ b/node/node.go @@ -17,9 +17,8 @@ import ( coreio "dappco.re/go/core/io" ) -// Node is an in-memory filesystem that implements coreio.Node (and therefore -// coreio.Medium). Directories are implicit -- they exist whenever a file path -// contains a "/". +// Node is an in-memory filesystem that satisfies coreio.Medium and fs.FS. +// Directories are implicit: they exist whenever a file path contains a "/". 
type Node struct { files map[string]*dataFile } @@ -28,11 +27,9 @@ type Node struct { var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) -// New creates a new, empty Node. +// Use New when you need an in-memory filesystem that can be snapshotted. // -// Example usage: -// -// nodeTree := node.New() +// nodeTree := New() // nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func New() *Node { return &Node{files: make(map[string]*dataFile)} @@ -84,7 +81,7 @@ func (n *Node) ToTar() ([]byte, error) { return buf.Bytes(), nil } -// FromTar creates a new Node from a tar archive. +// Use FromTar(data) to restore an in-memory tree from tar bytes. func FromTar(data []byte) (*Node, error) { n := New() if err := n.LoadTar(data); err != nil { @@ -133,7 +130,7 @@ func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(n, root, fn) } -// WalkOptions configures the behaviour of Walk. +// WalkOptions configures WalkWithOptions. type WalkOptions struct { // MaxDepth limits how many directory levels to descend. 0 means unlimited. MaxDepth int @@ -145,14 +142,13 @@ type WalkOptions struct { SkipErrors bool } -// Walk walks the in-memory tree with optional WalkOptions. -func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { - var opt WalkOptions - if len(opts) > 0 { - opt = opts[0] - } - - if opt.SkipErrors { +// WalkWithOptions walks the in-memory tree with an explicit configuration. +// +// nodeTree := New() +// options := WalkOptions{MaxDepth: 1, SkipErrors: true} +// _ = nodeTree.WalkWithOptions(".", func(path string, entry fs.DirEntry, err error) error { return nil }, options) +func (n *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { + if options.SkipErrors { // If root doesn't exist, silently return nil. 
if _, err := n.Stat(root); err != nil { return nil @@ -160,8 +156,8 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { } return fs.WalkDir(n, root, func(entryPath string, entry fs.DirEntry, err error) error { - if opt.Filter != nil && err == nil { - if !opt.Filter(entryPath, entry) { + if options.Filter != nil && err == nil { + if !options.Filter(entryPath, entry) { if entry != nil && entry.IsDir() { return fs.SkipDir } @@ -173,11 +169,11 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { result := fn(entryPath, entry, err) // After visiting a directory at MaxDepth, prevent descending further. - if result == nil && opt.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { + if result == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { rel := core.TrimPrefix(entryPath, root) rel = core.TrimPrefix(rel, "/") depth := len(core.Split(rel, "/")) - if depth >= opt.MaxDepth { + if depth >= options.MaxDepth { return fs.SkipDir } } @@ -186,6 +182,17 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { }) } +// Walk preserves the historic varargs call shape for compatibility. +// +// For new code, prefer WalkWithOptions so the configuration stays explicit. +func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { + var opt WalkOptions + if len(opts) > 0 { + opt = opts[0] + } + return n.WalkWithOptions(root, fn, opt) +} + // ReadFile returns the content of the named file as a byte slice. // Implements fs.ReadFileFS. 
func (n *Node) ReadFile(name string) ([]byte, error) { diff --git a/node/node_test.go b/node/node_test.go index facca7f..5e030ac 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -312,7 +312,7 @@ func TestNode_Walk_Ugly(t *testing.T) { assert.Equal(t, walkErr, err, "Walk must propagate the callback error") } -func TestNode_Walk_Options_Good(t *testing.T) { +func TestNode_WalkWithOptions_Good(t *testing.T) { n := New() n.AddData("root.txt", []byte("root")) n.AddData("a/a1.txt", []byte("a1")) @@ -321,7 +321,7 @@ func TestNode_Walk_Options_Good(t *testing.T) { t.Run("MaxDepth", func(t *testing.T) { var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{MaxDepth: 1}) @@ -333,7 +333,7 @@ func TestNode_Walk_Options_Good(t *testing.T) { t.Run("Filter", func(t *testing.T) { var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{Filter: func(p string, d fs.DirEntry) bool { @@ -347,7 +347,7 @@ func TestNode_Walk_Options_Good(t *testing.T) { t.Run("SkipErrors", func(t *testing.T) { var called bool - err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true return err }, WalkOptions{SkipErrors: true}) diff --git a/s3/s3.go b/s3/s3.go index 571c653..1424f89 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -86,13 +86,14 @@ func normalisePrefix(prefix string) string { return clean } -// New creates a new S3 Medium for the given bucket. +// Use New to scope writes to a bucket and optional prefix. 
// // Example usage: // // config := aws.Config{} // awsClient := awss3.NewFromConfig(config) // medium, _ := s3.New(s3.Options{Bucket: "backups", Client: awsClient, Prefix: "daily/"}) +// _ = medium.Write("reports/daily.txt", "done") func New(options Options) (*Medium, error) { if options.Bucket == "" { return nil, core.E("s3.New", "bucket name is required", nil) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index c50b289..9117f59 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -242,13 +242,15 @@ type ChaChaPolySigil struct { randReader io.Reader // for testing injection } -// NewChaChaPolySigil creates a new encryption sigil with the given key. +// Use NewChaChaPolySigil with a 32-byte key to encrypt payloads. // The key must be exactly 32 bytes. // // Example usage: // // key := []byte("0123456789abcdef0123456789abcdef") // cipherSigil, _ := sigil.NewChaChaPolySigil(key) +// ciphertext, _ := cipherSigil.In([]byte("payload")) +// plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, ErrInvalidKey @@ -264,12 +266,14 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { }, nil } -// NewChaChaPolySigilWithObfuscator creates a new encryption sigil with custom obfuscator. +// Use NewChaChaPolySigilWithObfuscator when you want a custom pre-obfuscator. 
// // Example usage: // // key := []byte("0123456789abcdef0123456789abcdef") // cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) +// ciphertext, _ := cipherSigil.In([]byte("payload")) +// plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { sigil, err := NewChaChaPolySigil(key) if err != nil { diff --git a/sigil/sigils.go b/sigil/sigils.go index 5410de4..e75d0fc 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -170,10 +170,10 @@ type HashSigil struct { Hash crypto.Hash } -// NewHashSigil creates a new HashSigil. +// Use NewHashSigil to hash payloads with a specific crypto.Hash. // // hashSigil := sigil.NewHashSigil(crypto.SHA256) -// _ = hashSigil +// digest, _ := hashSigil.In([]byte("payload")) func NewHashSigil(h crypto.Hash) *HashSigil { return &HashSigil{Hash: h} } @@ -232,8 +232,11 @@ func (s *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } -// NewSigil is a factory function that returns a Sigil based on a string name. -// It is the primary way to create Sigil instances. +// Use NewSigil("hex") or NewSigil("gzip") to construct a sigil by name. +// +// hexSigil, _ := sigil.NewSigil("hex") +// gzipSigil, _ := sigil.NewSigil("gzip") +// transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func NewSigil(name string) (Sigil, error) { switch name { case "reverse": diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 4e5194f..f0b0b04 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -38,7 +38,7 @@ func normaliseTableName(table string) string { return table } -// New creates a new SQLite Medium at the given database path. +// Use New to point the medium at a SQLite database path. // Use ":memory:" for an in-memory database. 
// // Example usage: diff --git a/store/medium.go b/store/medium.go index c9bde7d..38594cb 100644 --- a/store/medium.go +++ b/store/medium.go @@ -20,7 +20,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// NewMedium creates an io.Medium backed by a KV store at the given SQLite path. +// Use NewMedium to expose a Store as an io.Medium. // // Example usage: // diff --git a/store/store.go b/store/store.go index d5eac68..e886a1c 100644 --- a/store/store.go +++ b/store/store.go @@ -16,7 +16,8 @@ type Store struct { database *sql.DB } -// New creates a Store at the given SQLite path. Use ":memory:" for tests. +// Use New to open a SQLite-backed key-value store. +// Use ":memory:" for tests. // // Example usage: // diff --git a/workspace/service.go b/workspace/service.go index b51d29a..a64b40c 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -44,11 +44,12 @@ type Service struct { var _ Workspace = (*Service)(nil) -// New creates a new Workspace service instance. +// Use New to manage encrypted user workspaces from a Core runtime. 
// // Example usage: // // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) +// workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := workspaceHome() if home == "" { From b19617c3714daf9570b79826c1eda0fd04aff737 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:42:44 +0000 Subject: [PATCH 15/83] refactor(ax): prune redundant api comments Co-Authored-By: Virgil --- io.go | 33 --------------------------------- local/client.go | 32 ++------------------------------ node/node.go | 22 ---------------------- s3/s3.go | 8 -------- sqlite/sqlite.go | 13 ------------- store/medium.go | 13 ------------- workspace/service.go | 6 +++--- 7 files changed, 5 insertions(+), 122 deletions(-) diff --git a/io.go b/io.go index eb803e6..83b85cb 100644 --- a/io.go +++ b/io.go @@ -14,51 +14,36 @@ import ( // This allows for different implementations (e.g., local disk, S3, SFTP) // to be used interchangeably. type Medium interface { - // Read retrieves the content of a file as a string. Read(path string) (string, error) - // Write saves the given content to a file, overwriting it if it exists. - // Default permissions: 0644. For sensitive files, use WriteMode. Write(path, content string) error // WriteMode saves content with explicit file permissions. // Use 0600 for sensitive files (keys, secrets, encrypted output). WriteMode(path, content string, mode fs.FileMode) error - // EnsureDir makes sure a directory exists, creating it if necessary. EnsureDir(path string) error - // IsFile checks if a path exists and is a regular file. IsFile(path string) bool - // FileGet is a convenience function that reads a file from the medium. FileGet(path string) (string, error) - // FileSet is a convenience function that writes a file to the medium. FileSet(path, content string) error - // Delete removes a file or empty directory. 
Delete(path string) error - // DeleteAll removes a file or directory and all its contents recursively. DeleteAll(path string) error - // Rename moves a file or directory from oldPath to newPath. Rename(oldPath, newPath string) error - // List returns the directory entries for the given path. List(path string) ([]fs.DirEntry, error) - // Stat returns file information for the given path. Stat(path string) (fs.FileInfo, error) - // Open opens the named file for reading. Open(path string) (fs.File, error) - // Create creates or truncates the named file. Create(path string) (goio.WriteCloser, error) - // Append opens the named file for appending, creating it if it doesn't exist. Append(path string) (goio.WriteCloser, error) // ReadStream returns a reader for the file content. @@ -207,7 +192,6 @@ func NewMockMedium() *MockMedium { } } -// Read retrieves the content of a file from the mock filesystem. func (m *MockMedium) Read(path string) (string, error) { content, ok := m.Files[path] if !ok { @@ -216,7 +200,6 @@ func (m *MockMedium) Read(path string) (string, error) { return content, nil } -// Write saves the given content to a file in the mock filesystem. func (m *MockMedium) Write(path, content string) error { m.Files[path] = content m.ModTimes[path] = time.Now() @@ -227,29 +210,24 @@ func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { return m.Write(path, content) } -// EnsureDir records that a directory exists in the mock filesystem. func (m *MockMedium) EnsureDir(path string) error { m.Dirs[path] = true return nil } -// IsFile checks if a path exists as a file in the mock filesystem. func (m *MockMedium) IsFile(path string) bool { _, ok := m.Files[path] return ok } -// FileGet is a convenience function that reads a file from the mock filesystem. func (m *MockMedium) FileGet(path string) (string, error) { return m.Read(path) } -// FileSet is a convenience function that writes a file to the mock filesystem. 
func (m *MockMedium) FileSet(path, content string) error { return m.Write(path, content) } -// Delete removes a file or empty directory from the mock filesystem. func (m *MockMedium) Delete(path string) error { if _, ok := m.Files[path]; ok { delete(m.Files, path) @@ -277,7 +255,6 @@ func (m *MockMedium) Delete(path string) error { return core.E("io.MockMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) } -// DeleteAll removes a file or directory and all contents from the mock filesystem. func (m *MockMedium) DeleteAll(path string) error { found := false if _, ok := m.Files[path]; ok { @@ -313,7 +290,6 @@ func (m *MockMedium) DeleteAll(path string) error { return nil } -// Rename moves a file or directory in the mock filesystem. func (m *MockMedium) Rename(oldPath, newPath string) error { if content, ok := m.Files[oldPath]; ok { m.Files[newPath] = content @@ -372,7 +348,6 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { return core.E("io.MockMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } -// Open opens a file from the mock filesystem. func (m *MockMedium) Open(path string) (fs.File, error) { content, ok := m.Files[path] if !ok { @@ -384,7 +359,6 @@ func (m *MockMedium) Open(path string) (fs.File, error) { }, nil } -// Create creates a file in the mock filesystem. func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { return &MockWriteCloser{ medium: m, @@ -392,7 +366,6 @@ func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { }, nil } -// Append opens a file for appending in the mock filesystem. func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { content := m.Files[path] return &MockWriteCloser{ @@ -402,12 +375,10 @@ func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { }, nil } -// ReadStream returns a reader for the file content in the mock filesystem. 
func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } -// WriteStream returns a writer for the file content in the mock filesystem. func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } @@ -457,7 +428,6 @@ func (w *MockWriteCloser) Close() error { return nil } -// List returns directory entries for the mock filesystem. func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := m.Dirs[path]; !ok { // Check if it's the root or has children @@ -566,7 +536,6 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { return entries, nil } -// Stat returns file information for the mock filesystem. func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { if content, ok := m.Files[path]; ok { modTime, ok := m.ModTimes[path] @@ -590,7 +559,6 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { return nil, core.E("io.MockMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } -// Exists checks if a path exists in the mock filesystem. func (m *MockMedium) Exists(path string) bool { if _, ok := m.Files[path]; ok { return true @@ -601,7 +569,6 @@ func (m *MockMedium) Exists(path string) bool { return false } -// IsDir checks if a path is a directory in the mock filesystem. func (m *MockMedium) IsDir(path string) bool { _, ok := m.Dirs[path] return ok diff --git a/local/client.go b/local/client.go index 46b3635..a01410b 100644 --- a/local/client.go +++ b/local/client.go @@ -240,7 +240,6 @@ func (m *Medium) validatePath(path string) (string, error) { return current, nil } -// Read returns file contents as string. 
func (m *Medium) Read(path string) (string, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -249,15 +248,10 @@ func (m *Medium) Read(path string) (string, error) { return resultString("local.Read", core.Concat("read failed: ", path), unrestrictedFileSystem.Read(resolvedPath)) } -// Write saves content to file, creating parent directories as needed. -// Files are created with mode 0644. For sensitive files (keys, secrets), -// use WriteMode with 0600. func (m *Medium) Write(path, content string) error { return m.WriteMode(path, content, 0644) } -// WriteMode saves content to file with explicit permissions. -// Use 0600 for sensitive files (encryption output, private keys, auth hashes). func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -266,7 +260,6 @@ func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { return resultErr("local.WriteMode", core.Concat("write failed: ", path), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } -// EnsureDir creates directory if it doesn't exist. func (m *Medium) EnsureDir(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -275,7 +268,6 @@ func (m *Medium) EnsureDir(path string) error { return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) } -// IsDir returns true if path is a directory. func (m *Medium) IsDir(path string) bool { if path == "" { return false @@ -287,7 +279,6 @@ func (m *Medium) IsDir(path string) bool { return unrestrictedFileSystem.IsDir(resolvedPath) } -// IsFile returns true if path is a regular file. func (m *Medium) IsFile(path string) bool { if path == "" { return false @@ -299,7 +290,6 @@ func (m *Medium) IsFile(path string) bool { return unrestrictedFileSystem.IsFile(resolvedPath) } -// Exists returns true if path exists. 
func (m *Medium) Exists(path string) bool { resolvedPath, err := m.validatePath(path) if err != nil { @@ -308,7 +298,6 @@ func (m *Medium) Exists(path string) bool { return unrestrictedFileSystem.Exists(resolvedPath) } -// List returns directory entries. func (m *Medium) List(path string) ([]fs.DirEntry, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -317,7 +306,6 @@ func (m *Medium) List(path string) ([]fs.DirEntry, error) { return resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) } -// Stat returns file info. func (m *Medium) Stat(path string) (fs.FileInfo, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -326,7 +314,6 @@ func (m *Medium) Stat(path string) (fs.FileInfo, error) { return resultFileInfo("local.Stat", core.Concat("stat failed: ", path), unrestrictedFileSystem.Stat(resolvedPath)) } -// Open opens the named file for reading. func (m *Medium) Open(path string) (fs.File, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -335,7 +322,6 @@ func (m *Medium) Open(path string) (fs.File, error) { return resultFile("local.Open", core.Concat("open failed: ", path), unrestrictedFileSystem.Open(resolvedPath)) } -// Create creates or truncates the named file. func (m *Medium) Create(path string) (goio.WriteCloser, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -344,7 +330,6 @@ func (m *Medium) Create(path string) (goio.WriteCloser, error) { return resultWriteCloser("local.Create", core.Concat("create failed: ", path), unrestrictedFileSystem.Create(resolvedPath)) } -// Append opens the named file for appending, creating it if it doesn't exist. 
func (m *Medium) Append(path string) (goio.WriteCloser, error) { resolvedPath, err := m.validatePath(path) if err != nil { @@ -353,27 +338,16 @@ func (m *Medium) Append(path string) (goio.WriteCloser, error) { return resultWriteCloser("local.Append", core.Concat("append failed: ", path), unrestrictedFileSystem.Append(resolvedPath)) } -// ReadStream returns a reader for the file content. -// -// This is a convenience wrapper around Open that exposes a streaming-oriented -// API, as required by the io.Medium interface, while Open provides the more -// general filesystem-level operation. Both methods are kept for semantic -// clarity and backward compatibility. +// Example: reader, _ := medium.ReadStream("logs/app.log") func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { return m.Open(path) } -// WriteStream returns a writer for the file content. -// -// This is a convenience wrapper around Create that exposes a streaming-oriented -// API, as required by the io.Medium interface, while Create provides the more -// general filesystem-level operation. Both methods are kept for semantic -// clarity and backward compatibility. +// Example: writer, _ := medium.WriteStream("logs/app.log") func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { return m.Create(path) } -// Delete removes a file or empty directory. func (m *Medium) Delete(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -385,7 +359,6 @@ func (m *Medium) Delete(path string) error { return resultErr("local.Delete", core.Concat("delete failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) } -// DeleteAll removes a file or directory recursively. 
func (m *Medium) DeleteAll(path string) error { resolvedPath, err := m.validatePath(path) if err != nil { @@ -397,7 +370,6 @@ func (m *Medium) DeleteAll(path string) error { return resultErr("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) } -// Rename moves a file or directory. func (m *Medium) Rename(oldPath, newPath string) error { oldResolvedPath, err := m.validatePath(oldPath) if err != nil { diff --git a/node/node.go b/node/node.go index e563533..70f18c3 100644 --- a/node/node.go +++ b/node/node.go @@ -125,7 +125,6 @@ func (n *Node) LoadTar(data []byte) error { return nil } -// WalkNode walks the in-memory tree, calling fn for each entry. func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(n, root, fn) } @@ -193,8 +192,6 @@ func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { return n.WalkWithOptions(root, fn, opt) } -// ReadFile returns the content of the named file as a byte slice. -// Implements fs.ReadFileFS. func (n *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") f, ok := n.files[name] @@ -275,7 +272,6 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // ---------- Medium interface: fs.FS methods ---------- -// Open opens a file from the Node. Implements fs.FS. func (n *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { @@ -294,7 +290,6 @@ func (n *Node) Open(name string) (fs.File, error) { return nil, core.E("node.Open", core.Concat("path not found: ", name), fs.ErrNotExist) } -// Stat returns file information for the given path. 
func (n *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") if file, ok := n.files[name]; ok { @@ -313,7 +308,6 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist) } -// ReadDir reads and returns all directory entries for the named directory. func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = core.TrimPrefix(name, "/") if name == "." { @@ -365,7 +359,6 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // ---------- Medium interface: read/write ---------- -// Read retrieves the content of a file as a string. func (n *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") f, ok := n.files[filePath] @@ -375,13 +368,11 @@ func (n *Node) Read(filePath string) (string, error) { return string(f.content), nil } -// Write saves the given content to a file, overwriting it if it exists. func (n *Node) Write(filePath, content string) error { n.AddData(filePath, []byte(content)) return nil } -// WriteMode saves content with explicit permissions (no-op for in-memory node). func (n *Node) WriteMode(filePath, content string, mode fs.FileMode) error { return n.Write(filePath, content) } @@ -401,20 +392,17 @@ func (n *Node) EnsureDir(_ string) error { // ---------- Medium interface: existence checks ---------- -// Exists checks if a path exists (file or directory). func (n *Node) Exists(filePath string) bool { _, err := n.Stat(filePath) return err == nil } -// IsFile checks if a path exists and is a regular file. func (n *Node) IsFile(filePath string) bool { filePath = core.TrimPrefix(filePath, "/") _, ok := n.files[filePath] return ok } -// IsDir checks if a path exists and is a directory. 
func (n *Node) IsDir(filePath string) bool { info, err := n.Stat(filePath) if err != nil { @@ -425,7 +413,6 @@ func (n *Node) IsDir(filePath string) bool { // ---------- Medium interface: mutations ---------- -// Delete removes a single file. func (n *Node) Delete(filePath string) error { filePath = core.TrimPrefix(filePath, "/") if _, ok := n.files[filePath]; ok { @@ -435,7 +422,6 @@ func (n *Node) Delete(filePath string) error { return core.E("node.Delete", core.Concat("path not found: ", filePath), fs.ErrNotExist) } -// DeleteAll removes a file or directory and all children. func (n *Node) DeleteAll(filePath string) error { filePath = core.TrimPrefix(filePath, "/") @@ -459,7 +445,6 @@ func (n *Node) DeleteAll(filePath string) error { return nil } -// Rename moves a file from oldPath to newPath. func (n *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") @@ -475,7 +460,6 @@ func (n *Node) Rename(oldPath, newPath string) error { return nil } -// List returns directory entries for the given path. func (n *Node) List(filePath string) ([]fs.DirEntry, error) { filePath = core.TrimPrefix(filePath, "/") if filePath == "" || filePath == "." { @@ -486,15 +470,11 @@ func (n *Node) List(filePath string) ([]fs.DirEntry, error) { // ---------- Medium interface: streams ---------- -// Create creates or truncates the named file, returning a WriteCloser. -// Content is committed to the Node on Close. func (n *Node) Create(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") return &nodeWriter{node: n, path: filePath}, nil } -// Append opens the named file for appending, creating it if needed. -// Content is committed to the Node on Close. 
func (n *Node) Append(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") var existing []byte @@ -505,7 +485,6 @@ func (n *Node) Append(filePath string) (goio.WriteCloser, error) { return &nodeWriter{node: n, path: filePath, buf: existing}, nil } -// ReadStream returns a ReadCloser for the file content. func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { f, err := n.Open(filePath) if err != nil { @@ -514,7 +493,6 @@ func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { return goio.NopCloser(f), nil } -// WriteStream returns a WriteCloser for the file content. func (n *Node) WriteStream(filePath string) (goio.WriteCloser, error) { return n.Create(filePath) } diff --git a/s3/s3.go b/s3/s3.go index 1424f89..256cd1f 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -128,7 +128,6 @@ func (m *Medium) key(filePath string) string { return m.prefix + clean } -// Read retrieves the content of a file as a string. func (m *Medium) Read(filePath string) (string, error) { key := m.key(filePath) if key == "" { @@ -151,7 +150,6 @@ func (m *Medium) Read(filePath string) (string, error) { return string(data), nil } -// Write saves the given content to a file, overwriting it if it exists. func (m *Medium) Write(filePath, content string) error { key := m.key(filePath) if key == "" { @@ -196,17 +194,14 @@ func (m *Medium) IsFile(filePath string) bool { return err == nil } -// FileGet is a convenience function that reads a file from the medium. func (m *Medium) FileGet(filePath string) (string, error) { return m.Read(filePath) } -// FileSet is a convenience function that writes a file to the medium. func (m *Medium) FileSet(filePath, content string) error { return m.Write(filePath, content) } -// Delete removes a single object. 
func (m *Medium) Delete(filePath string) error { key := m.key(filePath) if key == "" { @@ -424,7 +419,6 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { }, nil } -// Open opens the named file for reading. func (m *Medium) Open(filePath string) (fs.File, error) { key := m.key(filePath) if key == "" { @@ -500,7 +494,6 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { }, nil } -// ReadStream returns a reader for the file content. func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := m.key(filePath) if key == "" { @@ -517,7 +510,6 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return out.Body, nil } -// WriteStream returns a writer for the file content. Content is uploaded on Close. func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return m.Create(filePath) } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index f0b0b04..296d5ba 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -98,7 +98,6 @@ func cleanPath(filePath string) string { return core.TrimPrefix(clean, "/") } -// Read retrieves the content of a file as a string. func (m *Medium) Read(filePath string) (string, error) { key := cleanPath(filePath) if key == "" { @@ -122,7 +121,6 @@ func (m *Medium) Read(filePath string) (string, error) { return string(content), nil } -// Write saves the given content to a file, overwriting it if it exists. func (m *Medium) Write(filePath, content string) error { return m.WriteMode(filePath, content, 0644) } @@ -164,7 +162,6 @@ func (m *Medium) EnsureDir(filePath string) error { return nil } -// IsFile checks if a path exists and is a regular file. func (m *Medium) IsFile(filePath string) bool { key := cleanPath(filePath) if key == "" { @@ -181,12 +178,10 @@ func (m *Medium) IsFile(filePath string) bool { return !isDir } -// FileGet is a convenience function that reads a file from the medium. 
func (m *Medium) FileGet(filePath string) (string, error) { return m.Read(filePath) } -// FileSet is a convenience function that writes a file to the medium. func (m *Medium) FileSet(filePath, content string) error { return m.Write(filePath, content) } @@ -434,7 +429,6 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { return entries, rows.Err() } -// Stat returns file information for the given path. func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { key := cleanPath(filePath) if key == "" { @@ -465,7 +459,6 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { }, nil } -// Open opens the named file for reading. func (m *Medium) Open(filePath string) (fs.File, error) { key := cleanPath(filePath) if key == "" { @@ -497,7 +490,6 @@ func (m *Medium) Open(filePath string) (fs.File, error) { }, nil } -// Create creates or truncates the named file. func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { key := cleanPath(filePath) if key == "" { @@ -509,7 +501,6 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { }, nil } -// Append opens the named file for appending, creating it if it doesn't exist. func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { key := cleanPath(filePath) if key == "" { @@ -531,7 +522,6 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { }, nil } -// ReadStream returns a reader for the file content. func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := cleanPath(filePath) if key == "" { @@ -556,12 +546,10 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return goio.NopCloser(bytes.NewReader(content)), nil } -// WriteStream returns a writer for the file content. Content is stored on Close. func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return m.Create(filePath) } -// Exists checks if a path exists (file or directory). 
func (m *Medium) Exists(filePath string) bool { key := cleanPath(filePath) if key == "" { @@ -579,7 +567,6 @@ func (m *Medium) Exists(filePath string) bool { return count > 0 } -// IsDir checks if a path exists and is a directory. func (m *Medium) IsDir(filePath string) bool { key := cleanPath(filePath) if key == "" { diff --git a/store/medium.go b/store/medium.go index 38594cb..6507da7 100644 --- a/store/medium.go +++ b/store/medium.go @@ -64,7 +64,6 @@ func splitPath(entryPath string) (group, key string) { return parts[0], parts[1] } -// Read retrieves the value at group/key. func (m *Medium) Read(entryPath string) (string, error) { group, key := splitPath(entryPath) if key == "" { @@ -73,7 +72,6 @@ func (m *Medium) Read(entryPath string) (string, error) { return m.store.Get(group, key) } -// Write stores a value at group/key. func (m *Medium) Write(entryPath, content string) error { group, key := splitPath(entryPath) if key == "" { @@ -92,7 +90,6 @@ func (m *Medium) EnsureDir(_ string) error { return nil } -// IsFile returns true if a group/key pair exists. func (m *Medium) IsFile(entryPath string) bool { group, key := splitPath(entryPath) if key == "" { @@ -110,7 +107,6 @@ func (m *Medium) FileSet(entryPath, content string) error { return m.Write(entryPath, content) } -// Delete removes a key, or checks that a group is empty. func (m *Medium) Delete(entryPath string) error { group, key := splitPath(entryPath) if group == "" { @@ -129,7 +125,6 @@ func (m *Medium) Delete(entryPath string) error { return m.store.Delete(group, key) } -// DeleteAll removes a key, or all keys in a group. func (m *Medium) DeleteAll(entryPath string) error { group, key := splitPath(entryPath) if group == "" { @@ -141,7 +136,6 @@ func (m *Medium) DeleteAll(entryPath string) error { return m.store.Delete(group, key) } -// Rename moves a key from one path to another. 
func (m *Medium) Rename(oldPath, newPath string) error { oldGroup, oldKey := splitPath(oldPath) newGroup, newKey := splitPath(newPath) @@ -219,7 +213,6 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { return &kvFileInfo{name: key, size: int64(len(val))}, nil } -// Open opens a key for reading. func (m *Medium) Open(entryPath string) (fs.File, error) { group, key := splitPath(entryPath) if key == "" { @@ -232,7 +225,6 @@ func (m *Medium) Open(entryPath string) (fs.File, error) { return &kvFile{name: key, content: []byte(val)}, nil } -// Create creates or truncates a key. Content is stored on Close. func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -241,7 +233,6 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { return &kvWriteCloser{store: m.store, group: group, key: key}, nil } -// Append opens a key for appending. Content is stored on Close. func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -251,7 +242,6 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { return &kvWriteCloser{store: m.store, group: group, key: key, data: []byte(existing)}, nil } -// ReadStream returns a reader for the value. func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { group, key := splitPath(entryPath) if key == "" { @@ -264,12 +254,10 @@ func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { return goio.NopCloser(core.NewReader(val)), nil } -// WriteStream returns a writer. Content is stored on Close. func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { return m.Create(entryPath) } -// Exists returns true if a group or key exists. 
func (m *Medium) Exists(entryPath string) bool { group, key := splitPath(entryPath) if group == "" { @@ -283,7 +271,6 @@ func (m *Medium) Exists(entryPath string) bool { return err == nil } -// IsDir returns true if the path is a group with entries. func (m *Medium) IsDir(entryPath string) bool { group, key := splitPath(entryPath) if key != "" || group == "" { diff --git a/workspace/service.go b/workspace/service.go index a64b40c..c9d5952 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -176,15 +176,15 @@ func (s *Service) WorkspaceFileSet(filename, content string) error { return s.medium.Write(filePath, content) } -// HandleIPCEvents handles workspace-related IPC messages. +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) // -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) // ipcResult := service.HandleIPCEvents(core.New(), map[string]any{ // "action": "workspace.create", // "identifier": "alice", // "password": "pass123", // }) -// _ = ipcResult.OK +// +// _ = ipcResult.OK func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { case map[string]any: From 9fb978dc75faee10030e8fcd2c882bda4b433db6 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:47:41 +0000 Subject: [PATCH 16/83] refactor(ax): make docs and helpers example-driven --- datanode/client.go | 55 ++++++++++++++++++++++--------------------- doc.go | 7 +++++- local/client.go | 11 +++++---- node/node.go | 12 +++++++--- sigil/sigil.go | 15 ++++++------ sqlite/sqlite.go | 48 ++++++++++++++++++------------------- sqlite/sqlite_test.go | 22 ++++++++--------- store/doc.go | 7 +++++- store/medium.go | 41 +++++++++++++++----------------- store/store.go | 10 ++++---- workspace/doc.go | 5 ++++ workspace/service.go | 20 +++++++--------- 12 files changed, 136 insertions(+), 117 deletions(-) diff --git a/datanode/client.go b/datanode/client.go 
index 16d89bc..6a7c4ce 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -1,9 +1,14 @@ // Package datanode provides an in-memory io.Medium backed by Borg's DataNode. // +// medium := datanode.New() +// _ = medium.Write("jobs/run.log", "started") +// snapshot, _ := medium.Snapshot() +// restored, _ := datanode.FromTar(snapshot) +// // DataNode is an in-memory fs.FS that serialises to tar. Wrapping it as a -// Medium lets any code that works with io.Medium transparently operate on -// an in-memory filesystem that can be snapshotted, shipped as a crash report, -// or wrapped in a TIM container for runc execution. +// Medium lets any code that works with io.Medium transparently operate on an +// in-memory filesystem that can be snapshotted, shipped as a crash report, or +// wrapped in a TIM container for runc execution. package datanode import ( @@ -39,9 +44,7 @@ type Medium struct { mu sync.RWMutex } -// Use New when you need an in-memory Medium that snapshots to tar. -// -// Example usage: +// New creates an in-memory Medium that snapshots to tar. // // medium := datanode.New() // _ = medium.Write("jobs/run.log", "started") @@ -52,9 +55,7 @@ func New() *Medium { } } -// Use FromTar(snapshot) to restore a Medium from tar bytes. -// -// Example usage: +// FromTar restores a Medium from tar bytes. // // sourceMedium := datanode.New() // snapshot, _ := sourceMedium.Snapshot() @@ -103,8 +104,8 @@ func (m *Medium) DataNode() *borgdatanode.DataNode { return m.dataNode } -// cleanPath normalises a path: strips leading slash, cleans traversal. -func cleanPath(filePath string) string { +// normaliseEntryPath normalises a path: strips the leading slash and cleans traversal. +func normaliseEntryPath(filePath string) string { filePath = core.TrimPrefix(filePath, "/") filePath = path.Clean(filePath) if filePath == "." 
{ @@ -119,7 +120,7 @@ func (m *Medium) Read(filePath string) (string, error) { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) f, err := m.dataNode.Open(filePath) if err != nil { return "", core.E("datanode.Read", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -145,7 +146,7 @@ func (m *Medium) Write(filePath, content string) error { m.mu.Lock() defer m.mu.Unlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return core.E("datanode.Write", "empty path", fs.ErrInvalid) } @@ -164,7 +165,7 @@ func (m *Medium) EnsureDir(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return nil } @@ -188,7 +189,7 @@ func (m *Medium) IsFile(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) info, err := m.dataNode.Stat(filePath) return err == nil && !info.IsDir() } @@ -205,7 +206,7 @@ func (m *Medium) Delete(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } @@ -252,7 +253,7 @@ func (m *Medium) DeleteAll(filePath string) error { m.mu.Lock() defer m.mu.Unlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return core.E("datanode.DeleteAll", "cannot delete root", fs.ErrPermission) } @@ -301,8 +302,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { m.mu.Lock() defer m.mu.Unlock() - oldPath = cleanPath(oldPath) - newPath = cleanPath(newPath) + oldPath = normaliseEntryPath(oldPath) + newPath = normaliseEntryPath(newPath) // Check if source is a file info, err := m.dataNode.Stat(oldPath) @@ -366,7 +367,7 @@ func (m *Medium) List(filePath string) 
([]fs.DirEntry, error) { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) entries, err := m.dataNode.ReadDir(filePath) if err != nil { @@ -413,7 +414,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil } @@ -433,12 +434,12 @@ func (m *Medium) Open(filePath string) (fs.File, error) { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) return m.dataNode.Open(filePath) } func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } @@ -446,7 +447,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { } func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) } @@ -471,7 +472,7 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) f, err := m.dataNode.Open(filePath) if err != nil { return nil, core.E("datanode.ReadStream", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -487,7 +488,7 @@ func (m *Medium) Exists(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = normaliseEntryPath(filePath) if filePath == "" { return true // root always exists } @@ -502,7 +503,7 @@ func (m *Medium) IsDir(filePath string) bool { m.mu.RLock() defer m.mu.RUnlock() - filePath = cleanPath(filePath) + filePath = 
normaliseEntryPath(filePath) if filePath == "" { return true } diff --git a/doc.go b/doc.go index 83e4627..3b300c1 100644 --- a/doc.go +++ b/doc.go @@ -1,4 +1,9 @@ -// Package io defines the storage abstraction used across CoreGO. +// Package io defines the storage boundary used across CoreGO. +// +// medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") +// backup, _ := io.NewSandboxed("/srv/backup") +// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") // // Callers work against Medium so the same code can read and write state from // sandboxed local paths, in-memory nodes, SQLite, S3, or other backends diff --git a/local/client.go b/local/client.go index a01410b..e990ec2 100644 --- a/local/client.go +++ b/local/client.go @@ -1,4 +1,8 @@ -// Package local provides a local filesystem implementation of the io.Medium interface. +// Package local provides the local filesystem implementation of io.Medium. +// +// medium, _ := local.New("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") +// content, _ := medium.Read("config/app.yaml") package local import ( @@ -16,10 +20,9 @@ type Medium struct { var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() -// Use New to sandbox filesystem access under a root directory. -// Pass "/" for full filesystem access, or a specific path to sandbox. +// New creates a filesystem rooted at root. // -// Example usage: +// Pass "/" for full filesystem access, or a project path to sandbox. // // medium, _ := local.New("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") diff --git a/node/node.go b/node/node.go index 70f18c3..a394119 100644 --- a/node/node.go +++ b/node/node.go @@ -1,6 +1,12 @@ -// Package node provides an in-memory filesystem implementation of io.Medium -// ported from Borg's DataNode. It stores files in memory with implicit -// directory structure and supports tar serialisation. 
+// Package node provides an in-memory filesystem implementation of io.Medium. +// +// nodeTree := node.New() +// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) +// snapshot, _ := nodeTree.ToTar() +// restored, _ := node.FromTar(snapshot) +// +// It stores files in memory with implicit directory structure and supports +// tar serialisation. package node import ( diff --git a/sigil/sigil.go b/sigil/sigil.go index 3bd035c..5336648 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -1,15 +1,14 @@ // Package sigil provides the Sigil transformation framework for composable, // reversible data transformations. // -// Sigils are the core abstraction - each sigil implements a specific transformation -// (encoding, compression, hashing, encryption) with a uniform interface. Sigils can -// be chained together to create transformation pipelines. -// -// Example usage: -// // hexSigil, _ := sigil.NewSigil("hex") -// base64Sigil, _ := sigil.NewSigil("base64") -// result, _ := sigil.Transmute(data, []sigil.Sigil{hexSigil, base64Sigil}) +// gzipSigil, _ := sigil.NewSigil("gzip") +// encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) +// decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) +// +// Sigils are the core abstraction - each sigil implements a specific +// transformation (encoding, compression, hashing, encryption) with a uniform +// interface. Sigils can be chained together to create transformation pipelines. package sigil import core "dappco.re/go/core" diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 296d5ba..9e3c72e 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -1,4 +1,7 @@ -// Package sqlite provides a SQLite-backed implementation of the io.Medium interface. +// Package sqlite persists io.Medium content in a SQLite database. 
+// +// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) +// _ = medium.Write("config/app.yaml", "port: 8080") package sqlite import ( @@ -23,7 +26,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Options configures a Medium. +// Options configures a SQLite-backed Medium. type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string @@ -38,10 +41,7 @@ func normaliseTableName(table string) string { return table } -// Use New to point the medium at a SQLite database path. -// Use ":memory:" for an in-memory database. -// -// Example usage: +// New opens a SQLite-backed Medium at the provided database path. // // medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) // _ = medium.Write("config/app.yaml", "port: 8080") @@ -88,9 +88,9 @@ func (m *Medium) Close() error { return nil } -// cleanPath normalises a path for consistent storage. +// normaliseEntryPath normalises a path for consistent storage. // Uses a leading "/" before Clean to sandbox traversal attempts. -func cleanPath(filePath string) string { +func normaliseEntryPath(filePath string) string { clean := path.Clean("/" + filePath) if clean == "/" { return "" @@ -99,7 +99,7 @@ func cleanPath(filePath string) string { } func (m *Medium) Read(filePath string) (string, error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid) } @@ -127,7 +127,7 @@ func (m *Medium) Write(filePath, content string) error { // WriteMode saves the given content with explicit permissions. 
func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid) } @@ -145,7 +145,7 @@ func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { // EnsureDir makes sure a directory exists, creating it if necessary. func (m *Medium) EnsureDir(filePath string) error { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { // Root always "exists" return nil @@ -163,7 +163,7 @@ func (m *Medium) EnsureDir(filePath string) error { } func (m *Medium) IsFile(filePath string) bool { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return false } @@ -188,7 +188,7 @@ func (m *Medium) FileSet(filePath, content string) error { // Delete removes a file or empty directory. func (m *Medium) Delete(filePath string) error { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return core.E("sqlite.Delete", "path is required", fs.ErrInvalid) } @@ -233,7 +233,7 @@ func (m *Medium) Delete(filePath string) error { // DeleteAll removes a file or directory and all its contents recursively. func (m *Medium) DeleteAll(filePath string) error { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) } @@ -257,8 +257,8 @@ func (m *Medium) DeleteAll(filePath string) error { // Rename moves a file or directory from oldPath to newPath. 
func (m *Medium) Rename(oldPath, newPath string) error { - oldKey := cleanPath(oldPath) - newKey := cleanPath(newPath) + oldKey := normaliseEntryPath(oldPath) + newKey := normaliseEntryPath(newPath) if oldKey == "" || newKey == "" { return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) } @@ -355,7 +355,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns the directory entries for the given path. func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { - prefix := cleanPath(filePath) + prefix := normaliseEntryPath(filePath) if prefix != "" { prefix += "/" } @@ -430,7 +430,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { } func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid) } @@ -460,7 +460,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { } func (m *Medium) Open(filePath string) (fs.File, error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid) } @@ -491,7 +491,7 @@ func (m *Medium) Open(filePath string) (fs.File, error) { } func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid) } @@ -502,7 +502,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { } func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid) } @@ -523,7 +523,7 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { } func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, 
error) { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) } @@ -551,7 +551,7 @@ func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { } func (m *Medium) Exists(filePath string) bool { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { // Root always exists return true @@ -568,7 +568,7 @@ func (m *Medium) Exists(filePath string) bool { } func (m *Medium) IsDir(filePath string) bool { - key := cleanPath(filePath) + key := normaliseEntryPath(filePath) if key == "" { return false } diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 3d157ad..dafbe98 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -597,17 +597,17 @@ func TestSqlite_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("")) } -// --- cleanPath Tests --- - -func TestSqlite_CleanPath_Good(t *testing.T) { - assert.Equal(t, "file.txt", cleanPath("file.txt")) - assert.Equal(t, "dir/file.txt", cleanPath("dir/file.txt")) - assert.Equal(t, "file.txt", cleanPath("/file.txt")) - assert.Equal(t, "file.txt", cleanPath("../file.txt")) - assert.Equal(t, "file.txt", cleanPath("dir/../file.txt")) - assert.Equal(t, "", cleanPath("")) - assert.Equal(t, "", cleanPath(".")) - assert.Equal(t, "", cleanPath("/")) +// --- normaliseEntryPath Tests --- + +func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { + assert.Equal(t, "file.txt", normaliseEntryPath("file.txt")) + assert.Equal(t, "dir/file.txt", normaliseEntryPath("dir/file.txt")) + assert.Equal(t, "file.txt", normaliseEntryPath("/file.txt")) + assert.Equal(t, "file.txt", normaliseEntryPath("../file.txt")) + assert.Equal(t, "file.txt", normaliseEntryPath("dir/../file.txt")) + assert.Equal(t, "", normaliseEntryPath("")) + assert.Equal(t, "", normaliseEntryPath(".")) + assert.Equal(t, "", normaliseEntryPath("/")) } // --- Interface Compliance --- diff --git a/store/doc.go b/store/doc.go 
index 06e62aa..eb6d281 100644 --- a/store/doc.go +++ b/store/doc.go @@ -1,4 +1,9 @@ -// Package store provides a group-namespaced key-value store backed by SQLite. +// Package store provides a SQLite-backed group-namespaced key-value store. +// +// kvStore, _ := store.New(":memory:") +// _ = kvStore.Set("app", "theme", "midnight") +// medium := kvStore.AsMedium() +// _ = medium.Write("app/theme", "midnight") // // It also exposes an io.Medium adapter so grouped values can participate in // the same storage workflows as filesystem-backed mediums. diff --git a/store/medium.go b/store/medium.go index 6507da7..fc5c2ed 100644 --- a/store/medium.go +++ b/store/medium.go @@ -11,7 +11,7 @@ import ( ) // Medium wraps a Store to satisfy the io.Medium interface. -// Paths are mapped as group/key — first segment is the group, +// Paths are mapped as group/key - the first segment is the group, // the rest is the key. List("") returns groups as directories, // List("group") returns keys as files. type Medium struct { @@ -20,9 +20,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Use NewMedium to expose a Store as an io.Medium. -// -// Example usage: +// NewMedium exposes a Store as an io.Medium. // // medium, _ := store.NewMedium("config.db") // _ = medium.Write("app/theme", "midnight") @@ -49,9 +47,8 @@ func (m *Medium) Close() error { return m.store.Close() } -// splitPath splits a medium-style path into group and key. -// First segment = group, remainder = key. -func splitPath(entryPath string) (group, key string) { +// splitEntryPath splits a group/key path into store components. +func splitEntryPath(entryPath string) (group, key string) { clean := path.Clean(entryPath) clean = core.TrimPrefix(clean, "/") if clean == "" || clean == "." 
{ @@ -65,7 +62,7 @@ func splitPath(entryPath string) (group, key string) { } func (m *Medium) Read(entryPath string) (string, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } @@ -73,7 +70,7 @@ func (m *Medium) Read(entryPath string) (string, error) { } func (m *Medium) Write(entryPath, content string) error { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } @@ -91,7 +88,7 @@ func (m *Medium) EnsureDir(_ string) error { } func (m *Medium) IsFile(entryPath string) bool { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return false } @@ -108,7 +105,7 @@ func (m *Medium) FileSet(entryPath, content string) error { } func (m *Medium) Delete(entryPath string) error { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if group == "" { return core.E("store.Delete", "path is required", fs.ErrInvalid) } @@ -126,7 +123,7 @@ func (m *Medium) Delete(entryPath string) error { } func (m *Medium) DeleteAll(entryPath string) error { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if group == "" { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } @@ -137,8 +134,8 @@ func (m *Medium) DeleteAll(entryPath string) error { } func (m *Medium) Rename(oldPath, newPath string) error { - oldGroup, oldKey := splitPath(oldPath) - newGroup, newKey := splitPath(newPath) + oldGroup, oldKey := splitEntryPath(oldPath) + newGroup, newKey := splitEntryPath(newPath) if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } @@ -155,7 +152,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns directory entries. Empty path returns groups. 
// A group path returns keys in that group. func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if group == "" { rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") @@ -192,7 +189,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { // Stat returns file info for a group (dir) or key (file). func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if group == "" { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } @@ -214,7 +211,7 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { } func (m *Medium) Open(entryPath string) (fs.File, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } @@ -226,7 +223,7 @@ func (m *Medium) Open(entryPath string) (fs.File, error) { } func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } @@ -234,7 +231,7 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } @@ -243,7 +240,7 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } @@ -259,7 
+256,7 @@ func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) Exists(entryPath string) bool { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if group == "" { return false } @@ -272,7 +269,7 @@ func (m *Medium) Exists(entryPath string) bool { } func (m *Medium) IsDir(entryPath string) bool { - group, key := splitPath(entryPath) + group, key := splitEntryPath(entryPath) if key != "" || group == "" { return false } diff --git a/store/store.go b/store/store.go index e886a1c..415251a 100644 --- a/store/store.go +++ b/store/store.go @@ -139,13 +139,13 @@ func (s *Store) Render(templateText, group string) (string, error) { } defer rows.Close() - vars := make(map[string]string) + templateValues := make(map[string]string) for rows.Next() { var key, value string if err := rows.Scan(&key, &value); err != nil { return "", core.E("store.Render", "scan", err) } - vars[key] = value + templateValues[key] = value } if err := rows.Err(); err != nil { return "", core.E("store.Render", "rows", err) @@ -155,9 +155,9 @@ func (s *Store) Render(templateText, group string) (string, error) { if err != nil { return "", core.E("store.Render", "parse template", err) } - b := core.NewBuilder() - if err := tmpl.Execute(b, vars); err != nil { + builder := core.NewBuilder() + if err := tmpl.Execute(builder, templateValues); err != nil { return "", core.E("store.Render", "execute template", err) } - return b.String(), nil + return builder.String(), nil } diff --git a/workspace/doc.go b/workspace/doc.go index 9da3c51..3c2140d 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,5 +1,10 @@ // Package workspace provides encrypted user workspaces backed by io.Medium. 
// +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// workspaceID, _ := service.CreateWorkspace("alice", "pass123") +// _ = service.SwitchWorkspace(workspaceID) +// _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") +// // Workspaces are rooted under the caller's configured home directory and keep // file access constrained to the active workspace. package workspace diff --git a/workspace/service.go b/workspace/service.go index c9d5952..21c8483 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -44,14 +44,12 @@ type Service struct { var _ Workspace = (*Service)(nil) -// Use New to manage encrypted user workspaces from a Core runtime. +// New creates an encrypted workspace service from a Core runtime. // -// Example usage: -// -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { - home := workspaceHome() + home := resolveWorkspaceHomeDirectory() if home == "" { return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) } @@ -135,14 +133,14 @@ func (s *Service) SwitchWorkspace(name string) error { return nil } -// activeFilePath returns the full path to a file in the active workspace, -// or an error if no workspace is active. +// activeFilePath resolves a filename inside the active workspace files root. +// It rejects empty names and traversal outside the workspace root. 
func (s *Service) activeFilePath(operation, filename string) (string, error) { if s.activeWorkspace == "" { return "", core.E(operation, "no active workspace", nil) } filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") - filePath, err := joinWithinRoot(filesRoot, filename) + filePath, err := joinPathWithinRoot(filesRoot, filename) if err != nil { return "", core.E(operation, "file path escapes workspace files", fs.ErrPermission) } @@ -209,7 +207,7 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul return core.Result{OK: true} } -func workspaceHome() string { +func resolveWorkspaceHomeDirectory() string { if home := core.Env("CORE_HOME"); home != "" { return home } @@ -219,7 +217,7 @@ func workspaceHome() string { return core.Env("DIR_HOME") } -func joinWithinRoot(root string, parts ...string) (string, error) { +func joinPathWithinRoot(root string, parts ...string) (string, error) { candidate := core.Path(append([]string{root}, parts...)...) sep := core.Env("DS") if candidate == root || core.HasPrefix(candidate, root+sep) { @@ -232,7 +230,7 @@ func (s *Service) workspacePath(operation, workspaceName string) (string, error) if workspaceName == "" { return "", core.E(operation, "workspace name is required", fs.ErrInvalid) } - workspaceDirectory, err := joinWithinRoot(s.rootPath, workspaceName) + workspaceDirectory, err := joinPathWithinRoot(s.rootPath, workspaceName) if err != nil { return "", core.E(operation, "workspace path escapes root", err) } From b0bcdadb2fe40c9c76670726c37a6e32c0736fa2 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:52:34 +0000 Subject: [PATCH 17/83] refactor(ax): make store and traversal explicit --- node/node.go | 11 ---------- node/node_test.go | 20 +++++++++--------- store/doc.go | 2 +- store/medium.go | 6 +++--- store/medium_test.go | 6 ++---- store/store.go | 27 ++++++++++++++++--------- store/store_test.go | 48 +++++++++++++++++++++++++++----------------- 7 files changed, 64 
insertions(+), 56 deletions(-) diff --git a/node/node.go b/node/node.go index a394119..bf8d9e8 100644 --- a/node/node.go +++ b/node/node.go @@ -187,17 +187,6 @@ func (n *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptio }) } -// Walk preserves the historic varargs call shape for compatibility. -// -// For new code, prefer WalkWithOptions so the configuration stays explicit. -func (n *Node) Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error { - var opt WalkOptions - if len(opts) > 0 { - opt = opts[0] - } - return n.WalkWithOptions(root, fn, opt) -} - func (n *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") f, ok := n.files[name] diff --git a/node/node_test.go b/node/node_test.go index 5e030ac..0580ecb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -259,41 +259,41 @@ func TestNode_Exists_Ugly(t *testing.T) { } // --------------------------------------------------------------------------- -// Walk +// WalkWithOptions // --------------------------------------------------------------------------- -func TestNode_Walk_Good(t *testing.T) { +func TestNode_WalkWithOptions_Default_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) n.AddData("bar/qux.txt", []byte("qux")) var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil - }) + }, WalkOptions{}) require.NoError(t, err) sort.Strings(paths) assert.Equal(t, []string{".", "bar", "bar/baz.txt", "bar/qux.txt", "foo.txt"}, paths) } -func TestNode_Walk_Bad(t *testing.T) { +func TestNode_WalkWithOptions_Default_Bad(t *testing.T) { n := New() var called bool - err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true 
assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) return err - }) + }, WalkOptions{}) assert.True(t, called, "walk function must be called for nonexistent root") assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_Walk_Ugly(t *testing.T) { +func TestNode_WalkWithOptions_Default_Ugly(t *testing.T) { n := New() n.AddData("a/b.txt", []byte("b")) n.AddData("a/c.txt", []byte("c")) @@ -301,13 +301,13 @@ func TestNode_Walk_Ugly(t *testing.T) { // Stop walk early with a custom error. walkErr := core.NewError("stop walking") var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { if p == "a/b.txt" { return walkErr } paths = append(paths, p) return nil - }) + }, WalkOptions{}) assert.Equal(t, walkErr, err, "Walk must propagate the callback error") } diff --git a/store/doc.go b/store/doc.go index eb6d281..1fae1c4 100644 --- a/store/doc.go +++ b/store/doc.go @@ -1,6 +1,6 @@ // Package store provides a SQLite-backed group-namespaced key-value store. // -// kvStore, _ := store.New(":memory:") +// kvStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = kvStore.Set("app", "theme", "midnight") // medium := kvStore.AsMedium() // _ = medium.Write("app/theme", "midnight") diff --git a/store/medium.go b/store/medium.go index fc5c2ed..9b86323 100644 --- a/store/medium.go +++ b/store/medium.go @@ -22,10 +22,10 @@ var _ coreio.Medium = (*Medium)(nil) // NewMedium exposes a Store as an io.Medium. 
// -// medium, _ := store.NewMedium("config.db") +// medium, _ := store.NewMedium(store.Options{Path: "config.db"}) // _ = medium.Write("app/theme", "midnight") -func NewMedium(dbPath string) (*Medium, error) { - store, err := New(dbPath) +func NewMedium(options Options) (*Medium, error) { + store, err := New(options) if err != nil { return nil, err } diff --git a/store/medium_test.go b/store/medium_test.go index 31809b4..786ad29 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -11,7 +11,7 @@ import ( func newTestMedium(t *testing.T) *Medium { t.Helper() - m, err := NewMedium(":memory:") + m, err := NewMedium(Options{Path: ":memory:"}) require.NoError(t, err) t.Cleanup(func() { m.Close() }) return m @@ -185,9 +185,7 @@ func TestMedium_Medium_Append_Good(t *testing.T) { } func TestMedium_Medium_AsMedium_Good(t *testing.T) { - s, err := New(":memory:") - require.NoError(t, err) - defer s.Close() + s := newTestStore(t) m := s.AsMedium() require.NoError(t, m.Write("grp/key", "val")) diff --git a/store/store.go b/store/store.go index 415251a..81a4512 100644 --- a/store/store.go +++ b/store/store.go @@ -2,6 +2,8 @@ package store import ( "database/sql" + "errors" + "io/fs" "text/template" core "dappco.re/go/core" @@ -9,22 +11,29 @@ import ( ) // ErrNotFound is returned when a key does not exist in the store. -var ErrNotFound = core.E("store.ErrNotFound", "key not found", nil) +var ErrNotFound = errors.New("key not found") // Store is a group-namespaced key-value store backed by SQLite. type Store struct { database *sql.DB } -// Use New to open a SQLite-backed key-value store. -// Use ":memory:" for tests. -// -// Example usage: +// Options configures a Store. +type Options struct { + // Path is the SQLite database path. Use ":memory:" for tests. + Path string +} + +// New opens a SQLite-backed key-value store. 
// -// kvStore, _ := store.New(":memory:") +// kvStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = kvStore.Set("app", "theme", "midnight") -func New(dbPath string) (*Store, error) { - database, err := sql.Open("sqlite", dbPath) +func New(options Options) (*Store, error) { + if options.Path == "" { + return nil, core.E("store.New", "database path is required", fs.ErrInvalid) + } + + database, err := sql.Open("sqlite", options.Path) if err != nil { return nil, core.E("store.New", "open db", err) } @@ -129,7 +138,7 @@ func (s *Store) GetAll(group string) (map[string]string, error) { // // Example usage: // -// kvStore, _ := store.New(":memory:") +// kvStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = kvStore.Set("user", "name", "alice") // out, _ := kvStore.Render("hello {{ .name }}", "user") func (s *Store) Render(templateText, group string) (string, error) { diff --git a/store/store_test.go b/store/store_test.go index 624ec07..ee384cf 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -7,12 +7,31 @@ import ( "github.com/stretchr/testify/require" ) -func TestStore_SetGet_Good(t *testing.T) { - s, err := New(":memory:") +func newTestStore(t *testing.T) *Store { + t.Helper() + + s, err := New(Options{Path: ":memory:"}) require.NoError(t, err) - defer s.Close() + t.Cleanup(func() { + require.NoError(t, s.Close()) + }) + return s +} + +func TestStore_New_Options_Good(t *testing.T) { + s := newTestStore(t) + assert.NotNil(t, s) +} + +func TestStore_New_Options_Bad(t *testing.T) { + _, err := New(Options{}) + assert.Error(t, err) +} + +func TestStore_SetGet_Good(t *testing.T) { + s := newTestStore(t) - err = s.Set("config", "theme", "dark") + err := s.Set("config", "theme", "dark") require.NoError(t, err) val, err := s.Get("config", "theme") @@ -21,16 +40,14 @@ func TestStore_SetGet_Good(t *testing.T) { } func TestStore_Get_NotFound_Bad(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _, err := 
s.Get("config", "missing") assert.Error(t, err) } func TestStore_Delete_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _ = s.Set("config", "key", "val") err := s.Delete("config", "key") @@ -41,8 +58,7 @@ func TestStore_Delete_Good(t *testing.T) { } func TestStore_Count_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _ = s.Set("grp", "a", "1") _ = s.Set("grp", "b", "2") @@ -54,8 +70,7 @@ func TestStore_Count_Good(t *testing.T) { } func TestStore_DeleteGroup_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _ = s.Set("grp", "a", "1") _ = s.Set("grp", "b", "2") @@ -67,8 +82,7 @@ func TestStore_DeleteGroup_Good(t *testing.T) { } func TestStore_GetAll_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _ = s.Set("grp", "a", "1") _ = s.Set("grp", "b", "2") @@ -80,8 +94,7 @@ func TestStore_GetAll_Good(t *testing.T) { } func TestStore_GetAll_Empty_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) all, err := s.GetAll("empty") require.NoError(t, err) @@ -89,8 +102,7 @@ func TestStore_GetAll_Empty_Good(t *testing.T) { } func TestStore_Render_Good(t *testing.T) { - s, _ := New(":memory:") - defer s.Close() + s := newTestStore(t) _ = s.Set("user", "pool", "pool.lthn.io:3333") _ = s.Set("user", "wallet", "iz...") From d175fc2b6fb6c7c6cadbf1b8032d3d382c1001e0 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 20:58:10 +0000 Subject: [PATCH 18/83] refactor(ax): make names and errors explicit --- s3/s3.go | 34 ++++++++++----------- s3/s3_test.go | 18 +++++------ sigil/crypto_sigil.go | 43 +++++++++++++++++--------- sigil/crypto_sigil_test.go | 22 +++++++------- store/medium.go | 34 ++++++++++----------- store/store.go | 10 ++++-- store/store_test.go | 4 +-- workspace/service.go | 62 +++++++++++++++++++------------------- workspace/service_test.go | 32 ++++++++++---------- 9 files 
changed, 139 insertions(+), 120 deletions(-) diff --git a/s3/s3.go b/s3/s3.go index 256cd1f..eeaa3b7 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -109,8 +109,8 @@ func New(options Options) (*Medium, error) { return m, nil } -// key returns the full S3 object key for a given path. -func (m *Medium) key(filePath string) string { +// objectKey maps a virtual path to the full S3 object key. +func (m *Medium) objectKey(filePath string) string { // Clean the path using a leading "/" to sandbox traversal attempts, // then strip the "/" prefix. This ensures ".." can't escape. clean := path.Clean("/" + filePath) @@ -129,7 +129,7 @@ func (m *Medium) key(filePath string) string { } func (m *Medium) Read(filePath string) (string, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return "", core.E("s3.Read", "path is required", fs.ErrInvalid) } @@ -151,7 +151,7 @@ func (m *Medium) Read(filePath string) (string, error) { } func (m *Medium) Write(filePath, content string) error { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return core.E("s3.Write", "path is required", fs.ErrInvalid) } @@ -179,7 +179,7 @@ func (m *Medium) EnsureDir(_ string) error { // IsFile checks if a path exists and is a regular file (not a "directory" prefix). func (m *Medium) IsFile(filePath string) bool { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return false } @@ -203,7 +203,7 @@ func (m *Medium) FileSet(filePath, content string) error { } func (m *Medium) Delete(filePath string) error { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return core.E("s3.Delete", "path is required", fs.ErrInvalid) } @@ -220,7 +220,7 @@ func (m *Medium) Delete(filePath string) error { // DeleteAll removes all objects under the given prefix. 
func (m *Medium) DeleteAll(filePath string) error { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } @@ -285,8 +285,8 @@ func (m *Medium) DeleteAll(filePath string) error { // Rename moves an object by copying then deleting the original. func (m *Medium) Rename(oldPath, newPath string) error { - oldKey := m.key(oldPath) - newKey := m.key(newPath) + oldKey := m.objectKey(oldPath) + newKey := m.objectKey(newPath) if oldKey == "" || newKey == "" { return core.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid) } @@ -315,7 +315,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List returns directory entries for the given path using ListObjectsV2 with delimiter. func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { - prefix := m.key(filePath) + prefix := m.objectKey(filePath) if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } @@ -388,7 +388,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { // Stat returns file information for the given path using HeadObject. func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid) } @@ -420,7 +420,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { } func (m *Medium) Open(filePath string) (fs.File, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return nil, core.E("s3.Open", "path is required", fs.ErrInvalid) } @@ -459,7 +459,7 @@ func (m *Medium) Open(filePath string) (fs.File, error) { // Create creates or truncates the named file. Returns a writer that // uploads the content on Close. 
func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return nil, core.E("s3.Create", "path is required", fs.ErrInvalid) } @@ -472,7 +472,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { // Append opens the named file for appending. It downloads the existing // content (if any) and re-uploads the combined content on Close. func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return nil, core.E("s3.Append", "path is required", fs.ErrInvalid) } @@ -495,7 +495,7 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { } func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid) } @@ -516,7 +516,7 @@ func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { // Exists checks if a path exists (file or directory prefix). func (m *Medium) Exists(filePath string) bool { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return false } @@ -548,7 +548,7 @@ func (m *Medium) Exists(filePath string) bool { // IsDir checks if a path exists and is a directory (has objects under it as a prefix). 
func (m *Medium) IsDir(filePath string) bool { - key := m.key(filePath) + key := m.objectKey(filePath) if key == "" { return false } diff --git a/s3/s3_test.go b/s3/s3_test.go index 90cc35e..c8c9228 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -637,22 +637,22 @@ func TestS3_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("")) } -func TestS3_Key_Good(t *testing.T) { +func TestS3_ObjectKey_Good(t *testing.T) { mock := newMockS3() // No prefix m, _ := New(Options{Bucket: "bucket", Client: mock}) - assert.Equal(t, "file.txt", m.key("file.txt")) - assert.Equal(t, "dir/file.txt", m.key("dir/file.txt")) - assert.Equal(t, "", m.key("")) - assert.Equal(t, "file.txt", m.key("/file.txt")) - assert.Equal(t, "file.txt", m.key("../file.txt")) + assert.Equal(t, "file.txt", m.objectKey("file.txt")) + assert.Equal(t, "dir/file.txt", m.objectKey("dir/file.txt")) + assert.Equal(t, "", m.objectKey("")) + assert.Equal(t, "file.txt", m.objectKey("/file.txt")) + assert.Equal(t, "file.txt", m.objectKey("../file.txt")) // With prefix m2, _ := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) - assert.Equal(t, "pfx/file.txt", m2.key("file.txt")) - assert.Equal(t, "pfx/dir/file.txt", m2.key("dir/file.txt")) - assert.Equal(t, "pfx/", m2.key("")) + assert.Equal(t, "pfx/file.txt", m2.objectKey("file.txt")) + assert.Equal(t, "pfx/dir/file.txt", m2.objectKey("dir/file.txt")) + assert.Equal(t, "pfx/", m2.objectKey("")) } // Ugly: verify the Medium interface is satisfied at compile time. diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 9117f59..3615cd8 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -23,14 +23,29 @@ import ( ) var ( - // ErrInvalidKey is returned when the encryption key is invalid. - ErrInvalidKey = core.E("sigil.ErrInvalidKey", "invalid key size, must be 32 bytes", nil) - // ErrCiphertextTooShort is returned when the ciphertext is too short to decrypt. 
- ErrCiphertextTooShort = core.E("sigil.ErrCiphertextTooShort", "ciphertext too short", nil) - // ErrDecryptionFailed is returned when decryption or authentication fails. - ErrDecryptionFailed = core.E("sigil.ErrDecryptionFailed", "decryption failed", nil) - // ErrNoKeyConfigured is returned when no encryption key has been set. - ErrNoKeyConfigured = core.E("sigil.ErrNoKeyConfigured", "no encryption key configured", nil) + // InvalidKeyError is returned when the encryption key is not 32 bytes. + InvalidKeyError = core.E("sigil.InvalidKeyError", "invalid key size, must be 32 bytes", nil) + // ErrInvalidKey is kept for compatibility with older callers. + // Deprecated: use InvalidKeyError. + ErrInvalidKey = InvalidKeyError + + // CiphertextTooShortError is returned when the ciphertext is too short to decrypt. + CiphertextTooShortError = core.E("sigil.CiphertextTooShortError", "ciphertext too short", nil) + // ErrCiphertextTooShort is kept for compatibility with older callers. + // Deprecated: use CiphertextTooShortError. + ErrCiphertextTooShort = CiphertextTooShortError + + // DecryptionFailedError is returned when decryption or authentication fails. + DecryptionFailedError = core.E("sigil.DecryptionFailedError", "decryption failed", nil) + // ErrDecryptionFailed is kept for compatibility with older callers. + // Deprecated: use DecryptionFailedError. + ErrDecryptionFailed = DecryptionFailedError + + // NoKeyConfiguredError is returned when no encryption key has been set. + NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) + // ErrNoKeyConfigured is kept for compatibility with older callers. + // Deprecated: use NoKeyConfiguredError. + ErrNoKeyConfigured = NoKeyConfiguredError ) // PreObfuscator applies a reversible transformation to data before encryption. 
@@ -253,7 +268,7 @@ type ChaChaPolySigil struct { // plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { - return nil, ErrInvalidKey + return nil, InvalidKeyError } keyCopy := make([]byte, 32) @@ -289,7 +304,7 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch // The flow is: plaintext -> obfuscate -> encrypt func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { if s.Key == nil { - return nil, ErrNoKeyConfigured + return nil, NoKeyConfiguredError } if data == nil { return nil, nil @@ -328,7 +343,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { // The flow is: decrypt -> deobfuscate -> plaintext func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { if s.Key == nil { - return nil, ErrNoKeyConfigured + return nil, NoKeyConfiguredError } if data == nil { return nil, nil @@ -341,7 +356,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { minLen := aead.NonceSize() + aead.Overhead() if len(data) < minLen { - return nil, ErrCiphertextTooShort + return nil, CiphertextTooShortError } // Extract nonce from ciphertext @@ -351,7 +366,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { // Decrypt obfuscated, err := aead.Open(nil, nonce, ciphertext, nil) if err != nil { - return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", ErrDecryptionFailed) + return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", DecryptionFailedError) } // Deobfuscate using the same nonce as entropy @@ -373,7 +388,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { - return nil, ErrCiphertextTooShort + return nil, CiphertextTooShortError } nonceCopy := make([]byte, nonceSize) copy(nonceCopy, ciphertext[:nonceSize]) diff --git a/sigil/crypto_sigil_test.go 
b/sigil/crypto_sigil_test.go index 5c33f5b..e015ba5 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -173,17 +173,17 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil([]byte("too short")) - assert.ErrorIs(t, err, ErrInvalidKey) + assert.ErrorIs(t, err, InvalidKeyError) } func TestCryptoSigil_NewChaChaPolySigil_LongKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil(make([]byte, 64)) - assert.ErrorIs(t, err, ErrInvalidKey) + assert.ErrorIs(t, err, InvalidKeyError) } func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { _, err := NewChaChaPolySigil(nil) - assert.ErrorIs(t, err, ErrInvalidKey) + assert.ErrorIs(t, err, InvalidKeyError) } // ── NewChaChaPolySigilWithObfuscator ─────────────────────────────── @@ -210,7 +210,7 @@ func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_NilObfuscator_Good(t *test func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_InvalidKey_Bad(t *testing.T) { _, err := NewChaChaPolySigilWithObfuscator([]byte("bad"), &XORObfuscator{}) - assert.ErrorIs(t, err, ErrInvalidKey) + assert.ErrorIs(t, err, InvalidKeyError) } // ── ChaChaPolySigil In/Out (encrypt/decrypt) ─────────────────────── @@ -300,10 +300,10 @@ func TestCryptoSigil_ChaChaPolySigil_NoKey_Bad(t *testing.T) { s := &ChaChaPolySigil{} _, err := s.In([]byte("data")) - assert.ErrorIs(t, err, ErrNoKeyConfigured) + assert.ErrorIs(t, err, NoKeyConfiguredError) _, err = s.Out([]byte("data")) - assert.ErrorIs(t, err, ErrNoKeyConfigured) + assert.ErrorIs(t, err, NoKeyConfiguredError) } func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) { @@ -319,7 +319,7 @@ func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) { require.NoError(t, err) _, err = s2.Out(ciphertext) - assert.ErrorIs(t, err, ErrDecryptionFailed) + assert.ErrorIs(t, err, DecryptionFailedError) } func 
TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) { @@ -328,7 +328,7 @@ func TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) { s, _ := NewChaChaPolySigil(key) _, err := s.Out([]byte("too short")) - assert.ErrorIs(t, err, ErrCiphertextTooShort) + assert.ErrorIs(t, err, CiphertextTooShortError) } func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { @@ -342,7 +342,7 @@ func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { ciphertext[30] ^= 0xFF _, err := s.Out(ciphertext) - assert.ErrorIs(t, err, ErrDecryptionFailed) + assert.ErrorIs(t, err, DecryptionFailedError) } // failReader returns an error on read — for testing nonce generation failure. @@ -416,12 +416,12 @@ func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { func TestCryptoSigil_GetNonceFromCiphertext_TooShort_Bad(t *testing.T) { _, err := GetNonceFromCiphertext([]byte("short")) - assert.ErrorIs(t, err, ErrCiphertextTooShort) + assert.ErrorIs(t, err, CiphertextTooShortError) } func TestCryptoSigil_GetNonceFromCiphertext_Empty_Bad(t *testing.T) { _, err := GetNonceFromCiphertext(nil) - assert.ErrorIs(t, err, ErrCiphertextTooShort) + assert.ErrorIs(t, err, CiphertextTooShortError) } // ── ChaChaPolySigil in Transmute pipeline ────────────────────────── diff --git a/store/medium.go b/store/medium.go index 9b86323..3fe0273 100644 --- a/store/medium.go +++ b/store/medium.go @@ -47,8 +47,8 @@ func (m *Medium) Close() error { return m.store.Close() } -// splitEntryPath splits a group/key path into store components. -func splitEntryPath(entryPath string) (group, key string) { +// splitGroupKeyPath splits a group/key path into store components. +func splitGroupKeyPath(entryPath string) (group, key string) { clean := path.Clean(entryPath) clean = core.TrimPrefix(clean, "/") if clean == "" || clean == "." 
{ @@ -62,7 +62,7 @@ func splitEntryPath(entryPath string) (group, key string) { } func (m *Medium) Read(entryPath string) (string, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } @@ -70,7 +70,7 @@ func (m *Medium) Read(entryPath string) (string, error) { } func (m *Medium) Write(entryPath, content string) error { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } @@ -88,7 +88,7 @@ func (m *Medium) EnsureDir(_ string) error { } func (m *Medium) IsFile(entryPath string) bool { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return false } @@ -105,7 +105,7 @@ func (m *Medium) FileSet(entryPath, content string) error { } func (m *Medium) Delete(entryPath string) error { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if group == "" { return core.E("store.Delete", "path is required", fs.ErrInvalid) } @@ -123,7 +123,7 @@ func (m *Medium) Delete(entryPath string) error { } func (m *Medium) DeleteAll(entryPath string) error { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if group == "" { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } @@ -134,8 +134,8 @@ func (m *Medium) DeleteAll(entryPath string) error { } func (m *Medium) Rename(oldPath, newPath string) error { - oldGroup, oldKey := splitEntryPath(oldPath) - newGroup, newKey := splitEntryPath(newPath) + oldGroup, oldKey := splitGroupKeyPath(oldPath) + newGroup, newKey := splitGroupKeyPath(newPath) if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } @@ -152,7 +152,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // List 
returns directory entries. Empty path returns groups. // A group path returns keys in that group. func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if group == "" { rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") @@ -189,7 +189,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { // Stat returns file info for a group (dir) or key (file). func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if group == "" { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } @@ -211,7 +211,7 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { } func (m *Medium) Open(entryPath string) (fs.File, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } @@ -223,7 +223,7 @@ func (m *Medium) Open(entryPath string) (fs.File, error) { } func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } @@ -231,7 +231,7 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } @@ -240,7 +240,7 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key == 
"" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } @@ -256,7 +256,7 @@ func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { } func (m *Medium) Exists(entryPath string) bool { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if group == "" { return false } @@ -269,7 +269,7 @@ func (m *Medium) Exists(entryPath string) bool { } func (m *Medium) IsDir(entryPath string) bool { - group, key := splitEntryPath(entryPath) + group, key := splitGroupKeyPath(entryPath) if key != "" || group == "" { return false } diff --git a/store/store.go b/store/store.go index 81a4512..533d6ce 100644 --- a/store/store.go +++ b/store/store.go @@ -10,8 +10,12 @@ import ( _ "modernc.org/sqlite" ) -// ErrNotFound is returned when a key does not exist in the store. -var ErrNotFound = errors.New("key not found") +// NotFoundError is returned when a key does not exist in the store. +var NotFoundError = errors.New("key not found") + +// ErrNotFound is kept for compatibility with older callers. +// Deprecated: use NotFoundError. +var ErrNotFound = NotFoundError // Store is a group-namespaced key-value store backed by SQLite. type Store struct { @@ -63,7 +67,7 @@ func (s *Store) Get(group, key string) (string, error) { var value string err := s.database.QueryRow("SELECT value FROM kv WHERE grp = ? 
AND key = ?", group, key).Scan(&value) if err == sql.ErrNoRows { - return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), ErrNotFound) + return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), NotFoundError) } if err != nil { return "", core.E("store.Get", "query", err) diff --git a/store/store_test.go b/store/store_test.go index ee384cf..f30af61 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -43,7 +43,7 @@ func TestStore_Get_NotFound_Bad(t *testing.T) { s := newTestStore(t) _, err := s.Get("config", "missing") - assert.Error(t, err) + assert.ErrorIs(t, err, NotFoundError) } func TestStore_Delete_Good(t *testing.T) { @@ -54,7 +54,7 @@ func TestStore_Delete_Good(t *testing.T) { require.NoError(t, err) _, err = s.Get("config", "key") - assert.Error(t, err) + assert.ErrorIs(t, err, NotFoundError) } func TestStore_Count_Good(t *testing.T) { diff --git a/workspace/service.go b/workspace/service.go index 21c8483..3e5d5da 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -14,9 +14,9 @@ import ( // Workspace provides management for encrypted user workspaces. type Workspace interface { CreateWorkspace(identifier, password string) (string, error) - SwitchWorkspace(name string) error - WorkspaceFileGet(filename string) (string, error) - WorkspaceFileSet(filename, content string) error + SwitchWorkspace(workspaceID string) error + WorkspaceFileGet(workspaceFilePath string) (string, error) + WorkspaceFileSet(workspaceFilePath, content string) error } // CryptProvider is the interface for PGP key generation. @@ -34,12 +34,12 @@ type Options struct { // Service implements the Workspace interface. 
type Service struct { - core *core.Core - crypt CryptProvider - activeWorkspace string - rootPath string - medium io.Medium - mu sync.RWMutex + core *core.Core + crypt CryptProvider + activeWorkspaceID string + rootPath string + medium io.Medium + mu sync.RWMutex } var _ Workspace = (*Service)(nil) @@ -89,7 +89,7 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { hash := sha256.Sum256([]byte(identifier)) workspaceID := hex.EncodeToString(hash[:]) - workspaceDirectory, err := s.workspacePath("workspace.CreateWorkspace", workspaceID) + workspaceDirectory, err := s.resolveWorkspaceDirectory("workspace.CreateWorkspace", workspaceID) if err != nil { return "", err } @@ -117,45 +117,45 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { } // Example: _ = service.SwitchWorkspace(workspaceID) -func (s *Service) SwitchWorkspace(name string) error { +func (s *Service) SwitchWorkspace(workspaceID string) error { s.mu.Lock() defer s.mu.Unlock() - workspaceDirectory, err := s.workspacePath("workspace.SwitchWorkspace", name) + workspaceDirectory, err := s.resolveWorkspaceDirectory("workspace.SwitchWorkspace", workspaceID) if err != nil { return err } if !s.medium.IsDir(workspaceDirectory) { - return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", name), nil) + return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", workspaceID), nil) } - s.activeWorkspace = core.PathBase(workspaceDirectory) + s.activeWorkspaceID = core.PathBase(workspaceDirectory) return nil } -// activeFilePath resolves a filename inside the active workspace files root. +// resolveActiveWorkspaceFilePath resolves a file path inside the active workspace files root. // It rejects empty names and traversal outside the workspace root. 
-func (s *Service) activeFilePath(operation, filename string) (string, error) { - if s.activeWorkspace == "" { +func (s *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath string) (string, error) { + if s.activeWorkspaceID == "" { return "", core.E(operation, "no active workspace", nil) } - filesRoot := core.Path(s.rootPath, s.activeWorkspace, "files") - filePath, err := joinPathWithinRoot(filesRoot, filename) + filesRoot := core.Path(s.rootPath, s.activeWorkspaceID, "files") + filePath, err := joinPathWithinRoot(filesRoot, workspaceFilePath) if err != nil { return "", core.E(operation, "file path escapes workspace files", fs.ErrPermission) } if filePath == filesRoot { - return "", core.E(operation, "filename is required", fs.ErrInvalid) + return "", core.E(operation, "workspace file path is required", fs.ErrInvalid) } return filePath, nil } // Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") -func (s *Service) WorkspaceFileGet(filename string) (string, error) { +func (s *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { s.mu.RLock() defer s.mu.RUnlock() - filePath, err := s.activeFilePath("workspace.WorkspaceFileGet", filename) + filePath, err := s.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) if err != nil { return "", err } @@ -163,11 +163,11 @@ func (s *Service) WorkspaceFileGet(filename string) (string, error) { } // Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") -func (s *Service) WorkspaceFileSet(filename, content string) error { +func (s *Service) WorkspaceFileSet(workspaceFilePath, content string) error { s.mu.Lock() defer s.mu.Unlock() - filePath, err := s.activeFilePath("workspace.WorkspaceFileSet", filename) + filePath, err := s.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) if err != nil { return err } @@ -197,8 +197,8 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul } return 
core.Result{Value: workspaceID, OK: true} case "workspace.switch": - name, _ := payload["name"].(string) - if err := s.SwitchWorkspace(name); err != nil { + workspaceID, _ := payload["name"].(string) + if err := s.SwitchWorkspace(workspaceID); err != nil { return core.Result{}.New(err) } return core.Result{OK: true} @@ -226,16 +226,16 @@ func joinPathWithinRoot(root string, parts ...string) (string, error) { return "", fs.ErrPermission } -func (s *Service) workspacePath(operation, workspaceName string) (string, error) { - if workspaceName == "" { - return "", core.E(operation, "workspace name is required", fs.ErrInvalid) +func (s *Service) resolveWorkspaceDirectory(operation, workspaceID string) (string, error) { + if workspaceID == "" { + return "", core.E(operation, "workspace id is required", fs.ErrInvalid) } - workspaceDirectory, err := joinPathWithinRoot(s.rootPath, workspaceName) + workspaceDirectory, err := joinPathWithinRoot(s.rootPath, workspaceID) if err != nil { return "", core.E(operation, "workspace path escapes root", err) } if core.PathDir(workspaceDirectory) != s.rootPath { - return "", core.E(operation, core.Concat("invalid workspace name: ", workspaceName), fs.ErrPermission) + return "", core.E(operation, core.Concat("invalid workspace id: ", workspaceID), fs.ErrPermission) } return workspaceDirectory, nil } diff --git a/workspace/service_test.go b/workspace/service_test.go index 8fcb435..b0b054c 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -34,18 +34,18 @@ func newTestService(t *testing.T) (*Service, string) { func TestService_Workspace_RoundTrip_Good(t *testing.T) { s, tempHome := newTestService(t) - id, err := s.CreateWorkspace("test-user", "pass123") + workspaceID, err := s.CreateWorkspace("test-user", "pass123") require.NoError(t, err) - assert.NotEmpty(t, id) + assert.NotEmpty(t, workspaceID) - wsPath := core.Path(tempHome, ".core", "workspaces", id) - assert.DirExists(t, wsPath) - assert.DirExists(t, 
core.Path(wsPath, "keys")) - assert.FileExists(t, core.Path(wsPath, "keys", "private.key")) + workspacePath := core.Path(tempHome, ".core", "workspaces", workspaceID) + assert.DirExists(t, workspacePath) + assert.DirExists(t, core.Path(workspacePath, "keys")) + assert.FileExists(t, core.Path(workspacePath, "keys", "private.key")) - err = s.SwitchWorkspace(id) + err = s.SwitchWorkspace(workspaceID) require.NoError(t, err) - assert.Equal(t, id, s.activeWorkspace) + assert.Equal(t, workspaceID, s.activeWorkspaceID) err = s.WorkspaceFileSet("secret.txt", "top secret info") require.NoError(t, err) @@ -63,17 +63,17 @@ func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { err := s.SwitchWorkspace("../escaped") require.Error(t, err) - assert.Empty(t, s.activeWorkspace) + assert.Empty(t, s.activeWorkspaceID) } func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { s, tempHome := newTestService(t) - id, err := s.CreateWorkspace("test-user", "pass123") + workspaceID, err := s.CreateWorkspace("test-user", "pass123") require.NoError(t, err) - require.NoError(t, s.SwitchWorkspace(id)) + require.NoError(t, s.SwitchWorkspace(workspaceID)) - keyPath := core.Path(tempHome, ".core", "workspaces", id, "keys", "private.key") + keyPath := core.Path(tempHome, ".core", "workspaces", workspaceID, "keys", "private.key") before, err := s.medium.Read(keyPath) require.NoError(t, err) @@ -98,16 +98,16 @@ func TestService_HandleIPCEvents_Good(t *testing.T) { }) assert.True(t, create.OK) - id, ok := create.Value.(string) + workspaceID, ok := create.Value.(string) require.True(t, ok) - require.NotEmpty(t, id) + require.NotEmpty(t, workspaceID) switchResult := s.HandleIPCEvents(core.New(), map[string]any{ "action": "workspace.switch", - "name": id, + "name": workspaceID, }) assert.True(t, switchResult.OK) - assert.Equal(t, id, s.activeWorkspace) + assert.Equal(t, workspaceID, s.activeWorkspaceID) failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ "action": 
"workspace.switch", From 48c328f9356fd2228416194e5144ae827bde70de Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:04:19 +0000 Subject: [PATCH 19/83] refactor(ax): tighten names and ipc keys --- datanode/client.go | 46 +++++++++++++++--------------- local/client.go | 22 +++++++------- store/doc.go | 6 ++-- store/medium.go | 60 +++++++++++++++++++-------------------- store/store.go | 24 ++++++++-------- workspace/service.go | 16 +++++++++-- workspace/service_test.go | 14 ++++++--- 7 files changed, 102 insertions(+), 86 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 6a7c4ce..e4e3994 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -39,9 +39,9 @@ var ( // Medium is an in-memory storage backend backed by a Borg DataNode. // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { - dataNode *borgdatanode.DataNode - directories map[string]bool // explicit directory tracking - mu sync.RWMutex + dataNode *borgdatanode.DataNode + directorySet map[string]bool // explicit directories that exist without file contents + mu sync.RWMutex } // New creates an in-memory Medium that snapshots to tar. 
@@ -50,8 +50,8 @@ type Medium struct { // _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ - dataNode: borgdatanode.New(), - directories: make(map[string]bool), + dataNode: borgdatanode.New(), + directorySet: make(map[string]bool), } } @@ -66,8 +66,8 @@ func FromTar(data []byte) (*Medium, error) { return nil, core.E("datanode.FromTar", "failed to restore", err) } return &Medium{ - dataNode: dataNode, - directories: make(map[string]bool), + dataNode: dataNode, + directorySet: make(map[string]bool), }, nil } @@ -92,7 +92,7 @@ func (m *Medium) Restore(data []byte) error { m.mu.Lock() defer m.mu.Unlock() m.dataNode = dataNode - m.directories = make(map[string]bool) + m.directorySet = make(map[string]bool) return nil } @@ -177,7 +177,7 @@ func (m *Medium) EnsureDir(filePath string) error { // Caller must hold m.mu. func (m *Medium) ensureDirsLocked(directoryPath string) { for directoryPath != "" && directoryPath != "." { - m.directories[directoryPath] = true + m.directorySet[directoryPath] = true directoryPath = path.Dir(directoryPath) if directoryPath == "." 
{ break @@ -215,7 +215,7 @@ func (m *Medium) Delete(filePath string) error { info, err := m.dataNode.Stat(filePath) if err != nil { // Check explicit directories - if m.directories[filePath] { + if m.directorySet[filePath] { // Check if dir is empty hasChildren, err := m.hasPrefixLocked(filePath + "/") if err != nil { @@ -224,7 +224,7 @@ func (m *Medium) Delete(filePath string) error { if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directories, filePath) + delete(m.directorySet, filePath) return nil } return core.E("datanode.Delete", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -238,7 +238,7 @@ func (m *Medium) Delete(filePath string) error { if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directories, filePath) + delete(m.directorySet, filePath) return nil } @@ -285,9 +285,9 @@ func (m *Medium) DeleteAll(filePath string) error { } // Remove explicit directories under prefix - for directoryPath := range m.directories { + for directoryPath := range m.directorySet { if directoryPath == filePath || core.HasPrefix(directoryPath, prefix) { - delete(m.directories, directoryPath) + delete(m.directorySet, directoryPath) found = true } } @@ -349,15 +349,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Move explicit directories dirsToMove := make(map[string]string) - for d := range m.directories { + for d := range m.directorySet { if d == oldPath || core.HasPrefix(d, oldPrefix) { newD := core.Concat(newPath, core.TrimPrefix(d, oldPath)) dirsToMove[d] = newD } } for old, nw := range dirsToMove { - delete(m.directories, old) - m.directories[nw] = true + delete(m.directorySet, old) + m.directorySet[nw] = true } return nil @@ -372,7 +372,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { entries, err := m.dataNode.ReadDir(filePath) if err != nil { // Check explicit directories - if 
filePath == "" || m.directories[filePath] { + if filePath == "" || m.directorySet[filePath] { return []fs.DirEntry{}, nil } return nil, core.E("datanode.List", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -388,7 +388,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { seen[e.Name()] = true } - for d := range m.directories { + for d := range m.directorySet { if !core.HasPrefix(d, prefix) { continue } @@ -424,7 +424,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { return info, nil } - if m.directories[filePath] { + if m.directorySet[filePath] { return &fileInfo{name: path.Base(filePath), isDir: true, mode: fs.ModeDir | 0755}, nil } return nil, core.E("datanode.Stat", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -496,7 +496,7 @@ func (m *Medium) Exists(filePath string) bool { if err == nil { return true } - return m.directories[filePath] + return m.directorySet[filePath] } func (m *Medium) IsDir(filePath string) bool { @@ -511,7 +511,7 @@ func (m *Medium) IsDir(filePath string) bool { if err == nil { return info.IsDir() } - return m.directories[filePath] + return m.directorySet[filePath] } // --- internal helpers --- @@ -527,7 +527,7 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { return true, nil } } - for d := range m.directories { + for d := range m.directorySet { if core.HasPrefix(d, prefix) { return true, nil } diff --git a/local/client.go b/local/client.go index e990ec2..149c280 100644 --- a/local/client.go +++ b/local/client.go @@ -260,7 +260,7 @@ func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { if err != nil { return err } - return resultErr("local.WriteMode", core.Concat("write failed: ", path), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) + return resultError("local.WriteMode", core.Concat("write failed: ", path), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } func (m *Medium) EnsureDir(path string) error { @@ -268,7 
+268,7 @@ func (m *Medium) EnsureDir(path string) error { if err != nil { return err } - return resultErr("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) + return resultError("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) } func (m *Medium) IsDir(path string) bool { @@ -359,7 +359,7 @@ func (m *Medium) Delete(path string) error { if isProtectedPath(resolvedPath) { return core.E("local.Delete", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.Delete", core.Concat("delete failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) + return resultError("local.Delete", core.Concat("delete failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) } func (m *Medium) DeleteAll(path string) error { @@ -370,7 +370,7 @@ func (m *Medium) DeleteAll(path string) error { if isProtectedPath(resolvedPath) { return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } - return resultErr("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) + return resultError("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) } func (m *Medium) Rename(oldPath, newPath string) error { @@ -382,7 +382,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { if err != nil { return err } - return resultErr("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) + return resultError("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) } func (m *Medium) FileGet(path string) (string, error) { @@ -420,7 +420,7 @@ func readlink(path string) (string, error) { } } -func resultErr(operation, message string, result core.Result) error { 
+func resultError(operation, message string, result core.Result) error { if result.OK { return nil } @@ -432,7 +432,7 @@ func resultErr(operation, message string, result core.Result) error { func resultString(operation, message string, result core.Result) (string, error) { if !result.OK { - return "", resultErr(operation, message, result) + return "", resultError(operation, message, result) } value, ok := result.Value.(string) if !ok { @@ -443,7 +443,7 @@ func resultString(operation, message string, result core.Result) (string, error) func resultDirEntries(operation, message string, result core.Result) ([]fs.DirEntry, error) { if !result.OK { - return nil, resultErr(operation, message, result) + return nil, resultError(operation, message, result) } entries, ok := result.Value.([]fs.DirEntry) if !ok { @@ -454,7 +454,7 @@ func resultDirEntries(operation, message string, result core.Result) ([]fs.DirEn func resultFileInfo(operation, message string, result core.Result) (fs.FileInfo, error) { if !result.OK { - return nil, resultErr(operation, message, result) + return nil, resultError(operation, message, result) } fileInfo, ok := result.Value.(fs.FileInfo) if !ok { @@ -465,7 +465,7 @@ func resultFileInfo(operation, message string, result core.Result) (fs.FileInfo, func resultFile(operation, message string, result core.Result) (fs.File, error) { if !result.OK { - return nil, resultErr(operation, message, result) + return nil, resultError(operation, message, result) } file, ok := result.Value.(fs.File) if !ok { @@ -476,7 +476,7 @@ func resultFile(operation, message string, result core.Result) (fs.File, error) func resultWriteCloser(operation, message string, result core.Result) (goio.WriteCloser, error) { if !result.OK { - return nil, resultErr(operation, message, result) + return nil, resultError(operation, message, result) } writer, ok := result.Value.(goio.WriteCloser) if !ok { diff --git a/store/doc.go b/store/doc.go index 1fae1c4..abfa5b7 100644 --- a/store/doc.go 
+++ b/store/doc.go @@ -1,8 +1,8 @@ // Package store provides a SQLite-backed group-namespaced key-value store. // -// kvStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = kvStore.Set("app", "theme", "midnight") -// medium := kvStore.AsMedium() +// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("app", "theme", "midnight") +// medium := keyValueStore.AsMedium() // _ = medium.Write("app/theme", "midnight") // // It also exposes an io.Medium adapter so grouped values can participate in diff --git a/store/medium.go b/store/medium.go index 3fe0273..54db6bf 100644 --- a/store/medium.go +++ b/store/medium.go @@ -32,12 +32,12 @@ func NewMedium(options Options) (*Medium, error) { return &Medium{store: store}, nil } -// Example: medium := kvStore.AsMedium() +// Example: medium := keyValueStore.AsMedium() func (s *Store) AsMedium() *Medium { return &Medium{store: s} } -// Example: kvStore := medium.Store() +// Example: keyValueStore := medium.Store() func (m *Medium) Store() *Store { return m.store } @@ -167,7 +167,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { if err := rows.Scan(&groupName); err != nil { return nil, core.E("store.List", "scan", err) } - entries = append(entries, &kvDirEntry{name: groupName, isDir: true}) + entries = append(entries, &keyValueDirEntry{name: groupName, isDir: true}) } return entries, rows.Err() } @@ -182,7 +182,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { } var entries []fs.DirEntry for key, value := range all { - entries = append(entries, &kvDirEntry{name: key, size: int64(len(value))}) + entries = append(entries, &keyValueDirEntry{name: key, size: int64(len(value))}) } return entries, nil } @@ -201,13 +201,13 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { if entryCount == 0 { return nil, core.E("store.Stat", core.Concat("group not found: ", group), fs.ErrNotExist) } - return &kvFileInfo{name: group, isDir: true}, nil + 
return &keyValueFileInfo{name: group, isDir: true}, nil } val, err := m.store.Get(group, key) if err != nil { return nil, err } - return &kvFileInfo{name: key, size: int64(len(val))}, nil + return &keyValueFileInfo{name: key, size: int64(len(val))}, nil } func (m *Medium) Open(entryPath string) (fs.File, error) { @@ -219,7 +219,7 @@ func (m *Medium) Open(entryPath string) (fs.File, error) { if err != nil { return nil, err } - return &kvFile{name: key, content: []byte(val)}, nil + return &keyValueFile{name: key, content: []byte(val)}, nil } func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { @@ -227,7 +227,7 @@ func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } - return &kvWriteCloser{store: m.store, group: group, key: key}, nil + return &keyValueWriteCloser{store: m.store, group: group, key: key}, nil } func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { @@ -236,7 +236,7 @@ func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } existing, _ := m.store.Get(group, key) - return &kvWriteCloser{store: m.store, group: group, key: key, data: []byte(existing)}, nil + return &keyValueWriteCloser{store: m.store, group: group, key: key, data: []byte(existing)}, nil } func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { @@ -279,61 +279,61 @@ func (m *Medium) IsDir(entryPath string) bool { // --- fs helper types --- -type kvFileInfo struct { +type keyValueFileInfo struct { name string size int64 isDir bool } -func (fi *kvFileInfo) Name() string { return fi.name } +func (fi *keyValueFileInfo) Name() string { return fi.name } -func (fi *kvFileInfo) Size() int64 { return fi.size } +func (fi *keyValueFileInfo) Size() int64 { return fi.size } -func (fi *kvFileInfo) Mode() fs.FileMode { +func (fi *keyValueFileInfo) 
Mode() fs.FileMode { if fi.isDir { return fs.ModeDir | 0755 } return 0644 } -func (fi *kvFileInfo) ModTime() time.Time { return time.Time{} } +func (fi *keyValueFileInfo) ModTime() time.Time { return time.Time{} } -func (fi *kvFileInfo) IsDir() bool { return fi.isDir } +func (fi *keyValueFileInfo) IsDir() bool { return fi.isDir } -func (fi *kvFileInfo) Sys() any { return nil } +func (fi *keyValueFileInfo) Sys() any { return nil } -type kvDirEntry struct { +type keyValueDirEntry struct { name string isDir bool size int64 } -func (de *kvDirEntry) Name() string { return de.name } +func (de *keyValueDirEntry) Name() string { return de.name } -func (de *kvDirEntry) IsDir() bool { return de.isDir } +func (de *keyValueDirEntry) IsDir() bool { return de.isDir } -func (de *kvDirEntry) Type() fs.FileMode { +func (de *keyValueDirEntry) Type() fs.FileMode { if de.isDir { return fs.ModeDir } return 0 } -func (de *kvDirEntry) Info() (fs.FileInfo, error) { - return &kvFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil +func (de *keyValueDirEntry) Info() (fs.FileInfo, error) { + return &keyValueFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil } -type kvFile struct { +type keyValueFile struct { name string content []byte offset int64 } -func (f *kvFile) Stat() (fs.FileInfo, error) { - return &kvFileInfo{name: f.name, size: int64(len(f.content))}, nil +func (f *keyValueFile) Stat() (fs.FileInfo, error) { + return &keyValueFileInfo{name: f.name, size: int64(len(f.content))}, nil } -func (f *kvFile) Read(b []byte) (int, error) { +func (f *keyValueFile) Read(b []byte) (int, error) { if f.offset >= int64(len(f.content)) { return 0, goio.EOF } @@ -342,20 +342,20 @@ func (f *kvFile) Read(b []byte) (int, error) { return n, nil } -func (f *kvFile) Close() error { return nil } +func (f *keyValueFile) Close() error { return nil } -type kvWriteCloser struct { +type keyValueWriteCloser struct { store *Store group string key string data []byte } -func (w *kvWriteCloser) 
Write(p []byte) (int, error) { +func (w *keyValueWriteCloser) Write(p []byte) (int, error) { w.data = append(w.data, p...) return len(p), nil } -func (w *kvWriteCloser) Close() error { +func (w *keyValueWriteCloser) Close() error { return w.store.Set(w.group, w.key, string(w.data)) } diff --git a/store/store.go b/store/store.go index 533d6ce..41fac5c 100644 --- a/store/store.go +++ b/store/store.go @@ -30,8 +30,8 @@ type Options struct { // New opens a SQLite-backed key-value store. // -// kvStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = kvStore.Set("app", "theme", "midnight") +// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("app", "theme", "midnight") func New(options Options) (*Store, error) { if options.Path == "" { return nil, core.E("store.New", "database path is required", fs.ErrInvalid) @@ -57,12 +57,12 @@ func New(options Options) (*Store, error) { return &Store{database: database}, nil } -// Example: _ = kvStore.Close() +// Example: _ = keyValueStore.Close() func (s *Store) Close() error { return s.database.Close() } -// Example: theme, _ := kvStore.Get("app", "theme") +// Example: theme, _ := keyValueStore.Get("app", "theme") func (s *Store) Get(group, key string) (string, error) { var value string err := s.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) @@ -75,7 +75,7 @@ func (s *Store) Get(group, key string) (string, error) { return value, nil } -// Example: _ = kvStore.Set("app", "theme", "midnight") +// Example: _ = keyValueStore.Set("app", "theme", "midnight") func (s *Store) Set(group, key, value string) error { _, err := s.database.Exec( `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) 
@@ -88,7 +88,7 @@ func (s *Store) Set(group, key, value string) error { return nil } -// Example: _ = kvStore.Delete("app", "theme") +// Example: _ = keyValueStore.Delete("app", "theme") func (s *Store) Delete(group, key string) error { _, err := s.database.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) if err != nil { @@ -97,7 +97,7 @@ func (s *Store) Delete(group, key string) error { return nil } -// Example: count, _ := kvStore.Count("app") +// Example: count, _ := keyValueStore.Count("app") func (s *Store) Count(group string) (int, error) { var count int err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) @@ -107,7 +107,7 @@ func (s *Store) Count(group string) (int, error) { return count, nil } -// Example: _ = kvStore.DeleteGroup("app") +// Example: _ = keyValueStore.DeleteGroup("app") func (s *Store) DeleteGroup(group string) error { _, err := s.database.Exec("DELETE FROM kv WHERE grp = ?", group) if err != nil { @@ -116,7 +116,7 @@ func (s *Store) DeleteGroup(group string) error { return nil } -// Example: values, _ := kvStore.GetAll("app") +// Example: values, _ := keyValueStore.GetAll("app") func (s *Store) GetAll(group string) (map[string]string, error) { rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { @@ -142,9 +142,9 @@ func (s *Store) GetAll(group string) (map[string]string, error) { // // Example usage: // -// kvStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = kvStore.Set("user", "name", "alice") -// out, _ := kvStore.Render("hello {{ .name }}", "user") +// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("user", "name", "alice") +// out, _ := keyValueStore.Render("hello {{ .name }}", "user") func (s *Store) Render(templateText, group string) (string, error) { rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/service.go 
b/workspace/service.go index 3e5d5da..ba4e915 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -176,13 +176,19 @@ func (s *Service) WorkspaceFileSet(workspaceFilePath, content string) error { // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) // -// ipcResult := service.HandleIPCEvents(core.New(), map[string]any{ +// createResult := service.HandleIPCEvents(core.New(), map[string]any{ // "action": "workspace.create", // "identifier": "alice", // "password": "pass123", // }) // -// _ = ipcResult.OK +// switchResult := service.HandleIPCEvents(core.New(), map[string]any{ +// "action": "workspace.switch", +// "workspaceID": "f3f0d7", +// }) +// +// _ = createResult.OK +// _ = switchResult.OK func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { case map[string]any: @@ -197,7 +203,11 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul } return core.Result{Value: workspaceID, OK: true} case "workspace.switch": - workspaceID, _ := payload["name"].(string) + workspaceID, _ := payload["workspaceID"].(string) + if workspaceID == "" { + // Keep the legacy key as a fallback for older callers. 
+ workspaceID, _ = payload["name"].(string) + } if err := s.SwitchWorkspace(workspaceID); err != nil { return core.Result{}.New(err) } diff --git a/workspace/service_test.go b/workspace/service_test.go index b0b054c..32d5b61 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -103,15 +103,21 @@ func TestService_HandleIPCEvents_Good(t *testing.T) { require.NotEmpty(t, workspaceID) switchResult := s.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.switch", - "name": workspaceID, + "action": "workspace.switch", + "workspaceID": workspaceID, }) assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, s.activeWorkspaceID) - failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ + legacySwitch := s.HandleIPCEvents(core.New(), map[string]any{ "action": "workspace.switch", - "name": "missing", + "name": workspaceID, + }) + assert.True(t, legacySwitch.OK) + + failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ + "action": "workspace.switch", + "workspaceID": "missing", }) assert.False(t, failedSwitch.OK) From f0b828a7e39ecf622ad399b665f8a74c9dcc7639 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:08:22 +0000 Subject: [PATCH 20/83] refactor(ax): drop legacy compatibility shims --- sigil/crypto_sigil.go | 12 ------------ store/store.go | 4 ---- workspace/service.go | 4 ---- workspace/service_test.go | 3 ++- 4 files changed, 2 insertions(+), 21 deletions(-) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 3615cd8..5ee474b 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -25,27 +25,15 @@ import ( var ( // InvalidKeyError is returned when the encryption key is not 32 bytes. InvalidKeyError = core.E("sigil.InvalidKeyError", "invalid key size, must be 32 bytes", nil) - // ErrInvalidKey is kept for compatibility with older callers. - // Deprecated: use InvalidKeyError. 
- ErrInvalidKey = InvalidKeyError // CiphertextTooShortError is returned when the ciphertext is too short to decrypt. CiphertextTooShortError = core.E("sigil.CiphertextTooShortError", "ciphertext too short", nil) - // ErrCiphertextTooShort is kept for compatibility with older callers. - // Deprecated: use CiphertextTooShortError. - ErrCiphertextTooShort = CiphertextTooShortError // DecryptionFailedError is returned when decryption or authentication fails. DecryptionFailedError = core.E("sigil.DecryptionFailedError", "decryption failed", nil) - // ErrDecryptionFailed is kept for compatibility with older callers. - // Deprecated: use DecryptionFailedError. - ErrDecryptionFailed = DecryptionFailedError // NoKeyConfiguredError is returned when no encryption key has been set. NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) - // ErrNoKeyConfigured is kept for compatibility with older callers. - // Deprecated: use NoKeyConfiguredError. - ErrNoKeyConfigured = NoKeyConfiguredError ) // PreObfuscator applies a reversible transformation to data before encryption. diff --git a/store/store.go b/store/store.go index 41fac5c..c430951 100644 --- a/store/store.go +++ b/store/store.go @@ -13,10 +13,6 @@ import ( // NotFoundError is returned when a key does not exist in the store. var NotFoundError = errors.New("key not found") -// ErrNotFound is kept for compatibility with older callers. -// Deprecated: use NotFoundError. -var ErrNotFound = NotFoundError - // Store is a group-namespaced key-value store backed by SQLite. 
type Store struct { database *sql.DB diff --git a/workspace/service.go b/workspace/service.go index ba4e915..b936910 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -204,10 +204,6 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul return core.Result{Value: workspaceID, OK: true} case "workspace.switch": workspaceID, _ := payload["workspaceID"].(string) - if workspaceID == "" { - // Keep the legacy key as a fallback for older callers. - workspaceID, _ = payload["name"].(string) - } if err := s.SwitchWorkspace(workspaceID); err != nil { return core.Result{}.New(err) } diff --git a/workspace/service_test.go b/workspace/service_test.go index 32d5b61..8b5f292 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -113,7 +113,8 @@ func TestService_HandleIPCEvents_Good(t *testing.T) { "action": "workspace.switch", "name": workspaceID, }) - assert.True(t, legacySwitch.OK) + assert.False(t, legacySwitch.OK) + assert.Equal(t, workspaceID, s.activeWorkspaceID) failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ "action": "workspace.switch", From d5b591586399f314ed7f02e92a09bfab6b20ecfb Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:12:40 +0000 Subject: [PATCH 21/83] refactor(ax): make sigil names explicit --- sigil/crypto_sigil.go | 24 +++++++------- sigil/crypto_sigil_test.go | 24 +++++++------- sigil/sigils.go | 64 +++++++++++++++++++------------------- workspace/service_test.go | 4 +-- 4 files changed, 58 insertions(+), 58 deletions(-) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 5ee474b..e430263 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -16,7 +16,7 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" - "io" + goio "io" core "dappco.re/go/core" "golang.org/x/crypto/chacha20poly1305" @@ -240,9 +240,9 @@ func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { // Unlike demo implementations, the nonce is ONLY 
embedded in the ciphertext, // not exposed separately in headers. type ChaChaPolySigil struct { - Key []byte - Obfuscator PreObfuscator - randReader io.Reader // for testing injection + Key []byte + Obfuscator PreObfuscator + randomReader goio.Reader // for testing injection } // Use NewChaChaPolySigil with a 32-byte key to encrypt payloads. @@ -263,9 +263,9 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { copy(keyCopy, key) return &ChaChaPolySigil{ - Key: keyCopy, - Obfuscator: &XORObfuscator{}, - randReader: rand.Reader, + Key: keyCopy, + Obfuscator: &XORObfuscator{}, + randomReader: rand.Reader, }, nil } @@ -278,14 +278,14 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { // ciphertext, _ := cipherSigil.In([]byte("payload")) // plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { - sigil, err := NewChaChaPolySigil(key) + cipherSigil, err := NewChaChaPolySigil(key) if err != nil { return nil, err } if obfuscator != nil { - sigil.Obfuscator = obfuscator + cipherSigil.Obfuscator = obfuscator } - return sigil, nil + return cipherSigil, nil } // In encrypts the data with pre-obfuscation. 
@@ -305,11 +305,11 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { // Generate nonce nonce := make([]byte, aead.NonceSize()) - reader := s.randReader + reader := s.randomReader if reader == nil { reader = rand.Reader } - if _, err := io.ReadFull(reader, nonce); err != nil { + if _, err := goio.ReadFull(reader, nonce); err != nil { return nil, core.E("sigil.ChaChaPolySigil.In", "read nonce", err) } diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index e015ba5..d90fc0a 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -3,7 +3,7 @@ package sigil import ( "bytes" "crypto/rand" - "io" + goio "io" "testing" core "dappco.re/go/core" @@ -352,12 +352,12 @@ func (f *failReader) Read([]byte) (int, error) { return 0, core.NewError("entropy source failed") } -func TestCryptoSigil_ChaChaPolySigil_RandReaderFailure_Bad(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_RandomReaderFailure_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) s, _ := NewChaChaPolySigil(key) - s.randReader = &failReader{} + s.randomReader = &failReader{} _, err := s.In([]byte("data")) assert.Error(t, err) @@ -475,14 +475,14 @@ func TestCryptoSigil_Untransmute_ErrorPropagation_Bad(t *testing.T) { assert.Contains(t, err.Error(), "fail out") } -// ── GzipSigil with custom writer (edge case) ────────────────────── +// ── GzipSigil with custom output writer (edge case) ─────────────── -func TestCryptoSigil_GzipSigil_CustomWriter_Good(t *testing.T) { +func TestCryptoSigil_GzipSigil_CustomOutputWriter_Good(t *testing.T) { var buf bytes.Buffer - s := &GzipSigil{writer: &buf} + s := &GzipSigil{outputWriter: &buf} - // With custom writer, compressed data goes to buf, returned bytes will be empty - // because the internal buffer 'b' is unused when s.writer is set. + // With a custom output writer, compressed data goes to buf, returned bytes will be empty + // because the internal buffer 'b' is unused when s.outputWriter is set. 
_, err := s.In([]byte("test data")) require.NoError(t, err) assert.Greater(t, buf.Len(), 0) @@ -503,14 +503,14 @@ func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) { assert.Equal(t, data, restored) } -// ── io.Reader fallback in In ─────────────────────────────────────── +// ── random reader fallback in In ─────────────────────────────────── -func TestCryptoSigil_ChaChaPolySigil_NilRandReader_Good(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_NilRandomReader_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) s, _ := NewChaChaPolySigil(key) - s.randReader = nil // Should fall back to crypto/rand.Reader. + s.randomReader = nil // Should fall back to crypto/rand.Reader. ciphertext, err := s.In([]byte("fallback reader")) require.NoError(t, err) @@ -528,7 +528,7 @@ type limitReader struct { func (l *limitReader) Read(p []byte) (int, error) { if l.pos >= len(l.data) { - return 0, io.EOF + return 0, goio.EOF } n := copy(p, l.data[l.pos:]) l.pos += n diff --git a/sigil/sigils.go b/sigil/sigils.go index e75d0fc..38cb994 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -10,7 +10,7 @@ import ( "crypto/sha512" "encoding/base64" "encoding/hex" - "io" + goio "io" core "dappco.re/go/core" "golang.org/x/crypto/blake2b" @@ -92,7 +92,7 @@ func (s *Base64Sigil) Out(data []byte) ([]byte, error) { // GzipSigil is a Sigil that compresses/decompresses data using gzip. // The In method compresses the data, and the Out method decompresses it. type GzipSigil struct { - writer io.Writer + outputWriter goio.Writer } // In compresses the data using gzip. 
@@ -101,15 +101,15 @@ func (s *GzipSigil) In(data []byte) ([]byte, error) { return nil, nil } var b bytes.Buffer - w := s.writer - if w == nil { - w = &b + outputWriter := s.outputWriter + if outputWriter == nil { + outputWriter = &b } - gz := gzip.NewWriter(w) - if _, err := gz.Write(data); err != nil { + gzipWriter := gzip.NewWriter(outputWriter) + if _, err := gzipWriter.Write(data); err != nil { return nil, core.E("sigil.GzipSigil.In", "write gzip payload", err) } - if err := gz.Close(); err != nil { + if err := gzipWriter.Close(); err != nil { return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err) } return b.Bytes(), nil @@ -120,12 +120,12 @@ func (s *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } - r, err := gzip.NewReader(bytes.NewReader(data)) + gzipReader, err := gzip.NewReader(bytes.NewReader(data)) if err != nil { return nil, core.E("sigil.GzipSigil.Out", "open gzip reader", err) } - defer r.Close() - out, err := io.ReadAll(r) + defer gzipReader.Close() + out, err := goio.ReadAll(gzipReader) if err != nil { return nil, core.E("sigil.GzipSigil.Out", "read gzip payload", err) } @@ -180,51 +180,51 @@ func NewHashSigil(h crypto.Hash) *HashSigil { // In hashes the data. 
func (s *HashSigil) In(data []byte) ([]byte, error) { - var h io.Writer + var hasher goio.Writer switch s.Hash { case crypto.MD4: - h = md4.New() + hasher = md4.New() case crypto.MD5: - h = md5.New() + hasher = md5.New() case crypto.SHA1: - h = sha1.New() + hasher = sha1.New() case crypto.SHA224: - h = sha256.New224() + hasher = sha256.New224() case crypto.SHA256: - h = sha256.New() + hasher = sha256.New() case crypto.SHA384: - h = sha512.New384() + hasher = sha512.New384() case crypto.SHA512: - h = sha512.New() + hasher = sha512.New() case crypto.RIPEMD160: - h = ripemd160.New() + hasher = ripemd160.New() case crypto.SHA3_224: - h = sha3.New224() + hasher = sha3.New224() case crypto.SHA3_256: - h = sha3.New256() + hasher = sha3.New256() case crypto.SHA3_384: - h = sha3.New384() + hasher = sha3.New384() case crypto.SHA3_512: - h = sha3.New512() + hasher = sha3.New512() case crypto.SHA512_224: - h = sha512.New512_224() + hasher = sha512.New512_224() case crypto.SHA512_256: - h = sha512.New512_256() + hasher = sha512.New512_256() case crypto.BLAKE2s_256: - h, _ = blake2s.New256(nil) + hasher, _ = blake2s.New256(nil) case crypto.BLAKE2b_256: - h, _ = blake2b.New256(nil) + hasher, _ = blake2b.New256(nil) case crypto.BLAKE2b_384: - h, _ = blake2b.New384(nil) + hasher, _ = blake2b.New384(nil) case crypto.BLAKE2b_512: - h, _ = blake2b.New512(nil) + hasher, _ = blake2b.New512(nil) default: // MD5SHA1 is not supported as a direct hash return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", nil) } - h.Write(data) - return h.(interface{ Sum([]byte) []byte }).Sum(nil), nil + hasher.Write(data) + return hasher.(interface{ Sum([]byte) []byte }).Sum(nil), nil } // Out is a no-op for HashSigil. 
diff --git a/workspace/service_test.go b/workspace/service_test.go index 8b5f292..a8b19e3 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -109,11 +109,11 @@ func TestService_HandleIPCEvents_Good(t *testing.T) { assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, s.activeWorkspaceID) - legacySwitch := s.HandleIPCEvents(core.New(), map[string]any{ + rejectedLegacySwitch := s.HandleIPCEvents(core.New(), map[string]any{ "action": "workspace.switch", "name": workspaceID, }) - assert.False(t, legacySwitch.OK) + assert.False(t, rejectedLegacySwitch.OK) assert.Equal(t, workspaceID, s.activeWorkspaceID) failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ From 41dd1110729a6e249b72cd69548fc9619d3bb5a2 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:17:43 +0000 Subject: [PATCH 22/83] refactor(ax): make exported docs example-driven Co-Authored-By: Virgil --- s3/s3.go | 18 ++++----- sigil/crypto_sigil.go | 86 +++++++++++++------------------------------ store/store.go | 6 +-- workspace/service.go | 10 ++--- 4 files changed, 42 insertions(+), 78 deletions(-) diff --git a/s3/s3.go b/s3/s3.go index eeaa3b7..19f0a39 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,4 +1,8 @@ -// Package s3 provides an S3-backed implementation of the io.Medium interface. +// Package s3 provides an S3-backed io.Medium. +// +// client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) +// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// _ = medium.Write("reports/daily.txt", "done") package s3 import ( @@ -29,7 +33,7 @@ type Client interface { CopyObject(ctx context.Context, params *awss3.CopyObjectInput, optFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) } -// Medium is an S3-backed storage backend implementing the io.Medium interface. +// Medium is the concrete io.Medium returned by New. 
type Medium struct { client Client bucket string @@ -38,7 +42,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Options configures a Medium. +// Options configures New. type Options struct { // Bucket is the target S3 bucket name. Bucket string @@ -86,13 +90,9 @@ func normalisePrefix(prefix string) string { return clean } -// Use New to scope writes to a bucket and optional prefix. -// -// Example usage: +// New opens an S3-backed medium for one bucket and optional prefix. // -// config := aws.Config{} -// awsClient := awss3.NewFromConfig(config) -// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: awsClient, Prefix: "daily/"}) +// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) // _ = medium.Write("reports/daily.txt", "done") func New(options Options) (*Medium, error) { if options.Bucket == "" { diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index e430263..6d3fca6 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -1,15 +1,11 @@ -// This file implements the Pre-Obfuscation Layer Protocol with -// XChaCha20-Poly1305 encryption. The protocol applies a reversible transformation -// to plaintext BEFORE it reaches CPU encryption routines, providing defence-in-depth -// against side-channel attacks. +// Package sigil provides pre-obfuscation helpers for XChaCha20-Poly1305. // -// The encryption flow is: -// -// plaintext -> obfuscate(nonce) -> encrypt -> [nonce || ciphertext || tag] -// -// The decryption flow is: +// cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) +// ciphertext, _ := cipherSigil.In([]byte("payload")) +// plaintext, _ := cipherSigil.Out(ciphertext) // -// [nonce || ciphertext || tag] -> decrypt -> deobfuscate(nonce) -> plaintext +// Use NewChaChaPolySigilWithObfuscator when you want ShuffleMaskObfuscator +// instead of the default XOR pre-obfuscation layer. 
package sigil import ( @@ -36,12 +32,10 @@ var ( NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) ) -// PreObfuscator applies a reversible transformation to data before encryption. -// This ensures that raw plaintext patterns are never sent directly to CPU -// encryption routines, providing defence against side-channel attacks. +// PreObfuscator is the hook ChaChaPolySigil uses before and after encryption. // -// Implementations must be deterministic: given the same entropy, the transformation -// must be perfectly reversible: Deobfuscate(Obfuscate(x, e), e) == x +// XORObfuscator is the default. ShuffleMaskObfuscator is available when you +// want byte shuffling as well as masking. type PreObfuscator interface { // Obfuscate transforms plaintext before encryption using the provided entropy. // The entropy is typically the encryption nonce, ensuring the transformation @@ -53,15 +47,7 @@ type PreObfuscator interface { Deobfuscate(data []byte, entropy []byte) []byte } -// XORObfuscator performs XOR-based obfuscation using an entropy-derived key stream. -// -// The key stream is generated using SHA-256 in counter mode: -// -// keyStream[i*32:(i+1)*32] = SHA256(entropy || BigEndian64(i)) -// -// This provides a cryptographically uniform key stream that decorrelates -// plaintext patterns from the data seen by the encryption routine. -// XOR is symmetric, so obfuscation and deobfuscation use the same operation. +// XORObfuscator is the default pre-obfuscator returned by NewChaChaPolySigil. type XORObfuscator struct{} // Obfuscate XORs the data with a key stream derived from the entropy. @@ -114,16 +100,7 @@ func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { return stream } -// ShuffleMaskObfuscator provides stronger obfuscation through byte shuffling and masking. -// -// The obfuscation process: -// 1. Generate a mask from entropy using SHA-256 in counter mode -// 2. 
XOR the data with the mask -// 3. Generate a deterministic permutation using Fisher-Yates shuffle -// 4. Reorder bytes according to the permutation -// -// This provides both value transformation (XOR mask) and position transformation -// (shuffle), making pattern analysis more difficult than XOR alone. +// ShuffleMaskObfuscator adds byte shuffling on top of XOR masking. type ShuffleMaskObfuscator struct{} // Obfuscate shuffles bytes and applies a mask derived from entropy. @@ -230,28 +207,17 @@ func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { return mask } -// ChaChaPolySigil is a Sigil that encrypts/decrypts data using ChaCha20-Poly1305. -// It applies pre-obfuscation before encryption to ensure raw plaintext never -// goes directly to CPU encryption routines. -// -// The output format is: -// [24-byte nonce][encrypted(obfuscated(plaintext))] -// -// Unlike demo implementations, the nonce is ONLY embedded in the ciphertext, -// not exposed separately in headers. +// ChaChaPolySigil is returned by NewChaChaPolySigil and +// NewChaChaPolySigilWithObfuscator. type ChaChaPolySigil struct { Key []byte Obfuscator PreObfuscator randomReader goio.Reader // for testing injection } -// Use NewChaChaPolySigil with a 32-byte key to encrypt payloads. -// The key must be exactly 32 bytes. +// NewChaChaPolySigil returns a ChaChaPolySigil backed by a 32-byte key. // -// Example usage: -// -// key := []byte("0123456789abcdef0123456789abcdef") -// cipherSigil, _ := sigil.NewChaChaPolySigil(key) +// cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) // ciphertext, _ := cipherSigil.In([]byte("payload")) // plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { @@ -269,12 +235,12 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { }, nil } -// Use NewChaChaPolySigilWithObfuscator when you want a custom pre-obfuscator. 
-// -// Example usage: +// NewChaChaPolySigilWithObfuscator returns a ChaChaPolySigil with a custom pre-obfuscator. // -// key := []byte("0123456789abcdef0123456789abcdef") -// cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) +// cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator( +// []byte("0123456789abcdef0123456789abcdef"), +// &sigil.ShuffleMaskObfuscator{}, +// ) // ciphertext, _ := cipherSigil.In([]byte("payload")) // plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { @@ -288,8 +254,7 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch return cipherSigil, nil } -// In encrypts the data with pre-obfuscation. -// The flow is: plaintext -> obfuscate -> encrypt +// In encrypts plaintext with the configured pre-obfuscator. func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { if s.Key == nil { return nil, NoKeyConfiguredError @@ -327,8 +292,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { return ciphertext, nil } -// Out decrypts the data and reverses obfuscation. -// The flow is: decrypt -> deobfuscate -> plaintext +// Out decrypts ciphertext and reverses the pre-obfuscation step. func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { if s.Key == nil { return nil, NoKeyConfiguredError @@ -370,9 +334,9 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return plaintext, nil } -// GetNonceFromCiphertext extracts the nonce from encrypted output. -// This is provided for debugging/logging purposes only. -// The nonce should NOT be stored separately in headers. +// GetNonceFromCiphertext returns the nonce embedded in ciphertext. 
+// +// nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { diff --git a/store/store.go b/store/store.go index c430951..9b5a265 100644 --- a/store/store.go +++ b/store/store.go @@ -13,18 +13,18 @@ import ( // NotFoundError is returned when a key does not exist in the store. var NotFoundError = errors.New("key not found") -// Store is a group-namespaced key-value store backed by SQLite. +// Store is returned by New for grouped key/value access. type Store struct { database *sql.DB } -// Options configures a Store. +// Options configures New. type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string } -// New opens a SQLite-backed key-value store. +// New opens a SQLite-backed key/value store. // // keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = keyValueStore.Set("app", "theme", "midnight") diff --git a/workspace/service.go b/workspace/service.go index b936910..6b93ae4 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,7 @@ import ( "dappco.re/go/core/io" ) -// Workspace provides management for encrypted user workspaces. +// Workspace is the interface returned by New. type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -19,12 +19,12 @@ type Workspace interface { WorkspaceFileSet(workspaceFilePath, content string) error } -// CryptProvider is the interface for PGP key generation. +// CryptProvider generates the encrypted private key stored with each workspace. type CryptProvider interface { CreateKeyPair(name, passphrase string) (string, error) } -// Options configures the workspace service. +// Options configures New. type Options struct { // Core is the Core runtime used by the service. 
Core *core.Core @@ -32,7 +32,7 @@ type Options struct { Crypt CryptProvider } -// Service implements the Workspace interface. +// Service is the concrete Workspace implementation. type Service struct { core *core.Core crypt CryptProvider @@ -44,7 +44,7 @@ type Service struct { var _ Workspace = (*Service)(nil) -// New creates an encrypted workspace service from a Core runtime. +// New creates an encrypted workspace service. // // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") From 16d968b551483ef7dc899237b5647c5beaeb0221 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:23:35 +0000 Subject: [PATCH 23/83] refactor(ax): make public docs example-driven Co-Authored-By: Virgil --- datanode/client.go | 11 ++++++----- io.go | 33 ++++++++++++++++++--------------- local/client.go | 7 +++---- node/node.go | 16 +++++++++------- s3/s3.go | 37 ++++++++++++++++++------------------- s3/s3_test.go | 2 +- sigil/crypto_sigil.go | 4 ++-- sqlite/sqlite.go | 25 +++++++++++++------------ store/medium.go | 20 ++++++++++---------- store/store.go | 12 ++++++------ workspace/service.go | 14 +++++++------- 11 files changed, 93 insertions(+), 88 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index e4e3994..b2c00cd 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -36,7 +36,10 @@ var ( } ) -// Medium is an in-memory storage backend backed by a Borg DataNode. +// Example: medium := datanode.New() +// _ = medium.Write("jobs/run.log", "started") +// snapshot, _ := medium.Snapshot() +// // All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { dataNode *borgdatanode.DataNode @@ -44,10 +47,8 @@ type Medium struct { mu sync.RWMutex } -// New creates an in-memory Medium that snapshots to tar. 
-// -// medium := datanode.New() -// _ = medium.Write("jobs/run.log", "started") +// Example: medium := datanode.New() +// _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ dataNode: borgdatanode.New(), diff --git a/io.go b/io.go index 83b85cb..1ab88a7 100644 --- a/io.go +++ b/io.go @@ -10,16 +10,18 @@ import ( "dappco.re/go/core/io/local" ) -// Medium defines the standard interface for a storage backend. -// This allows for different implementations (e.g., local disk, S3, SFTP) -// to be used interchangeably. +// Medium is the storage boundary used across CoreGO. +// +// medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") +// backup, _ := io.NewSandboxed("/srv/backup") +// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") type Medium interface { Read(path string) (string, error) Write(path, content string) error - // WriteMode saves content with explicit file permissions. - // Use 0600 for sensitive files (keys, secrets, encrypted output). + // Example: _ = medium.WriteMode("keys/private.key", key, 0600) WriteMode(path, content string, mode fs.FileMode) error EnsureDir(path string) error @@ -46,18 +48,16 @@ type Medium interface { Append(path string) (goio.WriteCloser, error) - // ReadStream returns a reader for the file content. - // Use this for large files to avoid loading the entire content into memory. + // Example: reader, _ := medium.ReadStream("logs/app.log") ReadStream(path string) (goio.ReadCloser, error) - // WriteStream returns a writer for the file content. - // Use this for large files to avoid loading the entire content into memory. + // Example: writer, _ := medium.WriteStream("logs/app.log") WriteStream(path string) (goio.WriteCloser, error) - // Exists checks if a path exists (file or directory). + // Example: ok := medium.Exists("config/app.yaml") Exists(path string) bool - // IsDir checks if a path exists and is a directory. 
+ // Example: ok := medium.IsDir("config") IsDir(path string) bool } @@ -98,9 +98,9 @@ func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } -// Local is a pre-initialised medium for the local filesystem. -// It uses "/" as root, providing unsandboxed access to the filesystem. -// For sandboxed access, use NewSandboxed with a specific root path. +// Local is the unsandboxed filesystem medium rooted at "/". +// +// io.Local.Read("/etc/hostname") var Local Medium var _ Medium = (*local.Medium)(nil) @@ -171,7 +171,10 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath // --- MockMedium --- -// MockMedium is an in-memory implementation of Medium for testing. +// MockMedium is an in-memory Medium for tests. +// +// medium := io.NewMockMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") type MockMedium struct { Files map[string]string Dirs map[string]bool diff --git a/local/client.go b/local/client.go index 149c280..29ba6ec 100644 --- a/local/client.go +++ b/local/client.go @@ -13,16 +13,15 @@ import ( core "dappco.re/go/core" ) -// Medium is a local filesystem storage backend. +// Medium is the local filesystem backend returned by New. type Medium struct { filesystemRoot string } var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() -// New creates a filesystem rooted at root. -// -// Pass "/" for full filesystem access, or a project path to sandbox. +// local.New("/") exposes the full filesystem. +// local.New("/srv/app") confines access to a project root. // // medium, _ := local.New("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") diff --git a/node/node.go b/node/node.go index bf8d9e8..71fd217 100644 --- a/node/node.go +++ b/node/node.go @@ -23,7 +23,11 @@ import ( coreio "dappco.re/go/core/io" ) -// Node is an in-memory filesystem that satisfies coreio.Medium and fs.FS. 
+// Example: nodeTree := node.New() +// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) +// snapshot, _ := nodeTree.ToTar() +// restored, _ := node.FromTar(snapshot) +// // Directories are implicit: they exist whenever a file path contains a "/". type Node struct { files map[string]*dataFile @@ -33,10 +37,8 @@ type Node struct { var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) -// Use New when you need an in-memory filesystem that can be snapshotted. -// -// nodeTree := New() -// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) +// Example: nodeTree := node.New() +// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func New() *Node { return &Node{files: make(map[string]*dataFile)} } @@ -135,7 +137,7 @@ func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(n, root, fn) } -// WalkOptions configures WalkWithOptions. +// Example: options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} type WalkOptions struct { // MaxDepth limits how many directory levels to descend. 0 means unlimited. MaxDepth int @@ -380,7 +382,7 @@ func (n *Node) FileSet(filePath, content string) error { return n.Write(filePath, content) } -// EnsureDir is a no-op because directories are implicit in Node. +// Example: _ = nodeTree.EnsureDir("config") func (n *Node) EnsureDir(_ string) error { return nil } diff --git a/s3/s3.go b/s3/s3.go index 19f0a39..4884522 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -33,7 +33,10 @@ type Client interface { CopyObject(ctx context.Context, params *awss3.CopyObjectInput, optFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) } -// Medium is the concrete io.Medium returned by New. +// Medium is the S3-backed io.Medium returned by New. 
+// +// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// _ = medium.Write("reports/daily.txt", "done") type Medium struct { client Client bucket string @@ -42,7 +45,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Options configures New. +// Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) type Options struct { // Bucket is the target S3 bucket name. Bucket string @@ -90,10 +93,8 @@ func normalisePrefix(prefix string) string { return clean } -// New opens an S3-backed medium for one bucket and optional prefix. -// -// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) -// _ = medium.Write("reports/daily.txt", "done") +// Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// _ = medium.Write("reports/daily.txt", "done") func New(options Options) (*Medium, error) { if options.Bucket == "" { return nil, core.E("s3.New", "bucket name is required", nil) @@ -167,17 +168,17 @@ func (m *Medium) Write(filePath, content string) error { return nil } -// WriteMode ignores the requested mode because S3 objects do not store POSIX permissions. +// Example: _ = medium.WriteMode("keys/private.key", key, 0600) func (m *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { return m.Write(filePath, content) } -// EnsureDir is a no-op for S3 (S3 has no real directories). +// Example: _ = medium.EnsureDir("reports/2026") func (m *Medium) EnsureDir(_ string) error { return nil } -// IsFile checks if a path exists and is a regular file (not a "directory" prefix). +// Example: ok := medium.IsFile("reports/daily.txt") func (m *Medium) IsFile(filePath string) bool { key := m.objectKey(filePath) if key == "" { @@ -218,7 +219,7 @@ func (m *Medium) Delete(filePath string) error { return nil } -// DeleteAll removes all objects under the given prefix. 
+// Example: _ = medium.DeleteAll("reports/2026") func (m *Medium) DeleteAll(filePath string) error { key := m.objectKey(filePath) if key == "" { @@ -283,7 +284,7 @@ func (m *Medium) DeleteAll(filePath string) error { return nil } -// Rename moves an object by copying then deleting the original. +// Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") func (m *Medium) Rename(oldPath, newPath string) error { oldKey := m.objectKey(oldPath) newKey := m.objectKey(newPath) @@ -313,7 +314,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { return nil } -// List returns directory entries for the given path using ListObjectsV2 with delimiter. +// Example: entries, _ := medium.List("reports") func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix := m.objectKey(filePath) if prefix != "" && !core.HasSuffix(prefix, "/") { @@ -386,7 +387,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { return entries, nil } -// Stat returns file information for the given path using HeadObject. +// Example: info, _ := medium.Stat("reports/daily.txt") func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { key := m.objectKey(filePath) if key == "" { @@ -456,8 +457,7 @@ func (m *Medium) Open(filePath string) (fs.File, error) { }, nil } -// Create creates or truncates the named file. Returns a writer that -// uploads the content on Close. +// Example: writer, _ := medium.Create("reports/daily.txt") func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { key := m.objectKey(filePath) if key == "" { @@ -469,8 +469,7 @@ func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { }, nil } -// Append opens the named file for appending. It downloads the existing -// content (if any) and re-uploads the combined content on Close. 
+// Example: writer, _ := medium.Append("reports/daily.txt") func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { key := m.objectKey(filePath) if key == "" { @@ -514,7 +513,7 @@ func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return m.Create(filePath) } -// Exists checks if a path exists (file or directory prefix). +// Example: ok := medium.Exists("reports/daily.txt") func (m *Medium) Exists(filePath string) bool { key := m.objectKey(filePath) if key == "" { @@ -546,7 +545,7 @@ func (m *Medium) Exists(filePath string) bool { return len(listOut.Contents) > 0 || len(listOut.CommonPrefixes) > 0 } -// IsDir checks if a path exists and is a directory (has objects under it as a prefix). +// Example: ok := medium.IsDir("reports") func (m *Medium) IsDir(filePath string) bool { key := m.objectKey(filePath) if key == "" { diff --git a/s3/s3_test.go b/s3/s3_test.go index c8c9228..c4fad8e 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -295,7 +295,7 @@ func TestS3_ReadWrite_Prefix_Good(t *testing.T) { func TestS3_EnsureDir_Good(t *testing.T) { m, _ := newTestMedium(t) - // EnsureDir is a no-op for S3 + // Example: err := m.EnsureDir("any/path") err := m.EnsureDir("any/path") assert.NoError(t, err) } diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 6d3fca6..8903868 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -207,8 +207,8 @@ func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { return mask } -// ChaChaPolySigil is returned by NewChaChaPolySigil and -// NewChaChaPolySigilWithObfuscator. 
+// Example: cipherSigil, _ := sigil.NewChaChaPolySigil(key) +// Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) type ChaChaPolySigil struct { Key []byte Obfuscator PreObfuscator diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 9e3c72e..9fd936d 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -18,7 +18,10 @@ import ( _ "modernc.org/sqlite" // Pure Go SQLite driver ) -// Medium is a SQLite-backed storage backend implementing the io.Medium interface. +// Medium stores filesystem-shaped content in SQLite. +// +// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) +// _ = medium.Write("config/app.yaml", "port: 8080") type Medium struct { database *sql.DB table string @@ -26,7 +29,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Options configures a SQLite-backed Medium. +// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string @@ -41,10 +44,8 @@ func normaliseTableName(table string) string { return table } -// New opens a SQLite-backed Medium at the provided database path. -// -// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) +// _ = medium.Write("config/app.yaml", "port: 8080") func New(options Options) (*Medium, error) { if options.Path == "" { return nil, core.E("sqlite.New", "database path is required", nil) @@ -125,7 +126,7 @@ func (m *Medium) Write(filePath, content string) error { return m.WriteMode(filePath, content, 0644) } -// WriteMode saves the given content with explicit permissions. 
+// Example: _ = medium.WriteMode("keys/private.key", key, 0600) func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { key := normaliseEntryPath(filePath) if key == "" { @@ -143,7 +144,7 @@ func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { return nil } -// EnsureDir makes sure a directory exists, creating it if necessary. +// Example: _ = medium.EnsureDir("config") func (m *Medium) EnsureDir(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { @@ -186,7 +187,7 @@ func (m *Medium) FileSet(filePath, content string) error { return m.Write(filePath, content) } -// Delete removes a file or empty directory. +// Example: _ = medium.Delete("config/app.yaml") func (m *Medium) Delete(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { @@ -231,7 +232,7 @@ func (m *Medium) Delete(filePath string) error { return nil } -// DeleteAll removes a file or directory and all its contents recursively. +// Example: _ = medium.DeleteAll("config") func (m *Medium) DeleteAll(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { @@ -255,7 +256,7 @@ func (m *Medium) DeleteAll(filePath string) error { return nil } -// Rename moves a file or directory from oldPath to newPath. +// Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") func (m *Medium) Rename(oldPath, newPath string) error { oldKey := normaliseEntryPath(oldPath) newKey := normaliseEntryPath(newPath) @@ -353,7 +354,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { return tx.Commit() } -// List returns the directory entries for the given path. 
+// Example: entries, _ := medium.List("config") func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix := normaliseEntryPath(filePath) if prefix != "" { diff --git a/store/medium.go b/store/medium.go index 54db6bf..3a548d3 100644 --- a/store/medium.go +++ b/store/medium.go @@ -10,7 +10,10 @@ import ( coreio "dappco.re/go/core/io" ) -// Medium wraps a Store to satisfy the io.Medium interface. +// Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) +// _ = medium.Write("app/theme", "midnight") +// entries, _ := medium.List("app") +// // Paths are mapped as group/key - the first segment is the group, // the rest is the key. List("") returns groups as directories, // List("group") returns keys as files. @@ -20,10 +23,8 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// NewMedium exposes a Store as an io.Medium. -// -// medium, _ := store.NewMedium(store.Options{Path: "config.db"}) -// _ = medium.Write("app/theme", "midnight") +// Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) +// _ = medium.Write("app/theme", "midnight") func NewMedium(options Options) (*Medium, error) { store, err := New(options) if err != nil { @@ -77,12 +78,12 @@ func (m *Medium) Write(entryPath, content string) error { return m.store.Set(group, key, content) } -// WriteMode ignores the requested mode because key-value entries do not store POSIX permissions. +// Example: _ = medium.WriteMode("app/theme", "midnight", 0600) func (m *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { return m.Write(entryPath, content) } -// EnsureDir is a no-op — groups are created implicitly on Set. +// Example: _ = medium.EnsureDir("app") func (m *Medium) EnsureDir(_ string) error { return nil } @@ -149,8 +150,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { return m.store.Delete(oldGroup, oldKey) } -// List returns directory entries. Empty path returns groups. -// A group path returns keys in that group. 
+// Example: entries, _ := medium.List("app") func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitGroupKeyPath(entryPath) @@ -187,7 +187,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { return entries, nil } -// Stat returns file info for a group (dir) or key (file). +// Example: info, _ := medium.Stat("app/theme") func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { diff --git a/store/store.go b/store/store.go index 9b5a265..1929093 100644 --- a/store/store.go +++ b/store/store.go @@ -13,21 +13,21 @@ import ( // NotFoundError is returned when a key does not exist in the store. var NotFoundError = errors.New("key not found") -// Store is returned by New for grouped key/value access. +// Store is the grouped key/value database returned by New. +// +// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type Store struct { database *sql.DB } -// Options configures New. +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string } -// New opens a SQLite-backed key/value store. -// -// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("app", "theme", "midnight") +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("app", "theme", "midnight") func New(options Options) (*Store, error) { if options.Path == "" { return nil, core.E("store.New", "database path is required", fs.ErrInvalid) diff --git a/workspace/service.go b/workspace/service.go index 6b93ae4..c2bfc88 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,9 @@ import ( "dappco.re/go/core/io" ) -// Workspace is the interface returned by New. +// Workspace is the workspace service interface returned by New. 
+// +// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -24,7 +26,7 @@ type CryptProvider interface { CreateKeyPair(name, passphrase string) (string, error) } -// Options configures New. +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Options struct { // Core is the Core runtime used by the service. Core *core.Core @@ -32,7 +34,7 @@ type Options struct { Crypt CryptProvider } -// Service is the concrete Workspace implementation. +// Service is the Workspace implementation returned by New. type Service struct { core *core.Core crypt CryptProvider @@ -44,10 +46,8 @@ type Service struct { var _ Workspace = (*Service)(nil) -// New creates an encrypted workspace service. -// -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) -// workspaceID, _ := service.CreateWorkspace("alice", "pass123") +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := resolveWorkspaceHomeDirectory() if home == "" { From a8eaaa15817ed53a56671896754eef12cf6148fa Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:29:35 +0000 Subject: [PATCH 24/83] refactor(ax): tighten AX-facing docs --- datanode/client.go | 21 ++++----------------- doc.go | 6 +----- io.go | 40 +++++++++++++--------------------------- local/client.go | 14 ++++++-------- node/node.go | 13 +++---------- s3/s3.go | 12 +++++------- sigil/crypto_sigil.go | 38 ++++++++++++++------------------------ sigil/sigil.go | 42 ++++++------------------------------------ sqlite/sqlite.go | 9 +++------ store/doc.go | 5 +---- store/medium.go | 5 +---- store/store.go | 18 ++++++------------ workspace/doc.go | 5 
+---- workspace/service.go | 8 ++------ 14 files changed, 66 insertions(+), 170 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index b2c00cd..f94cccf 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -1,14 +1,9 @@ -// Package datanode provides an in-memory io.Medium backed by Borg's DataNode. +// Package datanode keeps io.Medium data in Borg's DataNode. // // medium := datanode.New() // _ = medium.Write("jobs/run.log", "started") // snapshot, _ := medium.Snapshot() // restored, _ := datanode.FromTar(snapshot) -// -// DataNode is an in-memory fs.FS that serialises to tar. Wrapping it as a -// Medium lets any code that works with io.Medium transparently operate on an -// in-memory filesystem that can be snapshotted, shipped as a crash report, or -// wrapped in a TIM container for runc execution. package datanode import ( @@ -39,16 +34,12 @@ var ( // Example: medium := datanode.New() // _ = medium.Write("jobs/run.log", "started") // snapshot, _ := medium.Snapshot() -// -// All paths are relative (no leading slash). Thread-safe via RWMutex. type Medium struct { dataNode *borgdatanode.DataNode directorySet map[string]bool // explicit directories that exist without file contents mu sync.RWMutex } -// Example: medium := datanode.New() -// _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ dataNode: borgdatanode.New(), @@ -56,11 +47,9 @@ func New() *Medium { } } -// FromTar restores a Medium from tar bytes. 
-// -// sourceMedium := datanode.New() -// snapshot, _ := sourceMedium.Snapshot() -// restored, _ := datanode.FromTar(snapshot) +// Example: sourceMedium := datanode.New() +// snapshot, _ := sourceMedium.Snapshot() +// restored, _ := datanode.FromTar(snapshot) func FromTar(data []byte) (*Medium, error) { dataNode, err := borgdatanode.FromTar(data) if err != nil { @@ -73,7 +62,6 @@ func FromTar(data []byte) (*Medium, error) { } // Example: snapshot, _ := medium.Snapshot() -// Use this for crash reports, workspace packaging, or TIM creation. func (m *Medium) Snapshot() ([]byte, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -98,7 +86,6 @@ func (m *Medium) Restore(data []byte) error { } // Example: dataNode := medium.DataNode() -// Use this to wrap the filesystem in a TIM container. func (m *Medium) DataNode() *borgdatanode.DataNode { m.mu.RLock() defer m.mu.RUnlock() diff --git a/doc.go b/doc.go index 3b300c1..14eb1cb 100644 --- a/doc.go +++ b/doc.go @@ -1,11 +1,7 @@ -// Package io defines the storage boundary used across CoreGO. +// Package io gives CoreGO a single storage surface. // // medium, _ := io.NewSandboxed("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") // backup, _ := io.NewSandboxed("/srv/backup") // _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") -// -// Callers work against Medium so the same code can read and write state from -// sandboxed local paths, in-memory nodes, SQLite, S3, or other backends -// without changing application logic. package io diff --git a/io.go b/io.go index 1ab88a7..57362a1 100644 --- a/io.go +++ b/io.go @@ -10,12 +10,10 @@ import ( "dappco.re/go/core/io/local" ) -// Medium is the storage boundary used across CoreGO. 
-// -// medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") -// backup, _ := io.NewSandboxed("/srv/backup") -// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") +// Example: medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") +// backup, _ := io.NewSandboxed("/srv/backup") +// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") type Medium interface { Read(path string) (string, error) @@ -61,7 +59,7 @@ type Medium interface { IsDir(path string) bool } -// FileInfo provides a simple implementation of fs.FileInfo for mock testing. +// FileInfo is a test helper that satisfies fs.FileInfo. type FileInfo struct { name string size int64 @@ -82,7 +80,7 @@ func (fi FileInfo) IsDir() bool { return fi.isDir } func (fi FileInfo) Sys() any { return nil } -// DirEntry provides a simple implementation of fs.DirEntry for mock testing. +// DirEntry is a test helper that satisfies fs.DirEntry. type DirEntry struct { name string isDir bool @@ -98,9 +96,7 @@ func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } -// Local is the unsandboxed filesystem medium rooted at "/". -// -// io.Local.Read("/etc/hostname") +// Example: io.Local.Read("/etc/hostname") var Local Medium var _ Medium = (*local.Medium)(nil) @@ -113,14 +109,8 @@ func init() { } } -// Use NewSandboxed to confine file operations to a root directory. -// All file operations are restricted to paths within the root, and the root -// directory will be created if it does not exist. 
-// -// Example usage: -// -// medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") func NewSandboxed(root string) (Medium, error) { return local.New(root) } @@ -171,10 +161,8 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath // --- MockMedium --- -// MockMedium is an in-memory Medium for tests. -// -// medium := io.NewMockMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium := io.NewMockMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") type MockMedium struct { Files map[string]string Dirs map[string]bool @@ -183,10 +171,8 @@ type MockMedium struct { var _ Medium = (*MockMedium)(nil) -// Use NewMockMedium when tests need an in-memory Medium. -// -// medium := io.NewMockMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium := io.NewMockMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") func NewMockMedium() *MockMedium { return &MockMedium{ Files: make(map[string]string), diff --git a/local/client.go b/local/client.go index 29ba6ec..81c7f9e 100644 --- a/local/client.go +++ b/local/client.go @@ -1,4 +1,4 @@ -// Package local provides the local filesystem implementation of io.Medium. +// Package local binds io.Medium to the local filesystem. // // medium, _ := local.New("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") @@ -13,18 +13,16 @@ import ( core "dappco.re/go/core" ) -// Medium is the local filesystem backend returned by New. +// Example: medium, _ := local.New("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") type Medium struct { filesystemRoot string } var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() -// local.New("/") exposes the full filesystem. -// local.New("/srv/app") confines access to a project root. 
-// -// medium, _ := local.New("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium, _ := local.New("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { absoluteRoot := absolutePath(root) // Resolve symlinks so sandbox checks compare like-for-like. @@ -179,7 +177,7 @@ func logSandboxEscape(root, path, attempted string) { core.Security("sandbox escape detected", "root", root, "path", path, "attempted", attempted, "user", username) } -// sandboxedPath sanitises and returns the full filesystem path. +// sandboxedPath resolves a path inside the filesystem root. // Absolute paths are sandboxed under root (unless root is "/"). func (m *Medium) sandboxedPath(path string) string { if path == "" { diff --git a/node/node.go b/node/node.go index 71fd217..36f491a 100644 --- a/node/node.go +++ b/node/node.go @@ -1,12 +1,9 @@ -// Package node provides an in-memory filesystem implementation of io.Medium. +// Package node keeps io.Medium data in memory. // // nodeTree := node.New() // nodeTree.AddData("config/app.yaml", []byte("port: 8080")) // snapshot, _ := nodeTree.ToTar() // restored, _ := node.FromTar(snapshot) -// -// It stores files in memory with implicit directory structure and supports -// tar serialisation. package node import ( @@ -27,18 +24,14 @@ import ( // nodeTree.AddData("config/app.yaml", []byte("port: 8080")) // snapshot, _ := nodeTree.ToTar() // restored, _ := node.FromTar(snapshot) -// -// Directories are implicit: they exist whenever a file path contains a "/". type Node struct { files map[string]*dataFile } -// compile-time interface checks +// Compile-time interface checks. 
var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) -// Example: nodeTree := node.New() -// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func New() *Node { return &Node{files: make(map[string]*dataFile)} } @@ -89,7 +82,7 @@ func (n *Node) ToTar() ([]byte, error) { return buf.Bytes(), nil } -// Use FromTar(data) to restore an in-memory tree from tar bytes. +// Example: restored, _ := node.FromTar(snapshot) func FromTar(data []byte) (*Node, error) { n := New() if err := n.LoadTar(data); err != nil { diff --git a/s3/s3.go b/s3/s3.go index 4884522..055c949 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,4 +1,4 @@ -// Package s3 provides an S3-backed io.Medium. +// Package s3 stores io.Medium data in S3 objects. // // client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) // medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) @@ -21,8 +21,8 @@ import ( coreio "dappco.re/go/core/io" ) -// Client is the subset of the AWS S3 client API used by this package. -// Tests can provide any mock that satisfies the same method set. +// Example: client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) +// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) type Client interface { GetObject(ctx context.Context, params *awss3.GetObjectInput, optFns ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) PutObject(ctx context.Context, params *awss3.PutObjectInput, optFns ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) @@ -33,10 +33,8 @@ type Client interface { CopyObject(ctx context.Context, params *awss3.CopyObjectInput, optFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) } -// Medium is the S3-backed io.Medium returned by New. 
-// -// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) -// _ = medium.Write("reports/daily.txt", "done") +// Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// _ = medium.Write("reports/daily.txt", "done") type Medium struct { client Client bucket string diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 8903868..802878a 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -1,11 +1,8 @@ -// Package sigil provides pre-obfuscation helpers for XChaCha20-Poly1305. +// Package sigil wraps XChaCha20-Poly1305 with deterministic pre-obfuscation. // // cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) // ciphertext, _ := cipherSigil.In([]byte("payload")) // plaintext, _ := cipherSigil.Out(ciphertext) -// -// Use NewChaChaPolySigilWithObfuscator when you want ShuffleMaskObfuscator -// instead of the default XOR pre-obfuscation layer. package sigil import ( @@ -32,10 +29,7 @@ var ( NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) ) -// PreObfuscator is the hook ChaChaPolySigil uses before and after encryption. -// -// XORObfuscator is the default. ShuffleMaskObfuscator is available when you -// want byte shuffling as well as masking. +// PreObfuscator customises the bytes mixed in before and after encryption. type PreObfuscator interface { // Obfuscate transforms plaintext before encryption using the provided entropy. // The entropy is typically the encryption nonce, ensuring the transformation @@ -47,7 +41,7 @@ type PreObfuscator interface { Deobfuscate(data []byte, entropy []byte) []byte } -// XORObfuscator is the default pre-obfuscator returned by NewChaChaPolySigil. +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil(key) type XORObfuscator struct{} // Obfuscate XORs the data with a key stream derived from the entropy. 
@@ -215,11 +209,9 @@ type ChaChaPolySigil struct { randomReader goio.Reader // for testing injection } -// NewChaChaPolySigil returns a ChaChaPolySigil backed by a 32-byte key. -// -// cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) -// ciphertext, _ := cipherSigil.In([]byte("payload")) -// plaintext, _ := cipherSigil.Out(ciphertext) +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) +// ciphertext, _ := cipherSigil.In([]byte("payload")) +// plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, InvalidKeyError @@ -235,14 +227,14 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { }, nil } -// NewChaChaPolySigilWithObfuscator returns a ChaChaPolySigil with a custom pre-obfuscator. +// Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator( // -// cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator( -// []byte("0123456789abcdef0123456789abcdef"), -// &sigil.ShuffleMaskObfuscator{}, -// ) -// ciphertext, _ := cipherSigil.In([]byte("payload")) -// plaintext, _ := cipherSigil.Out(ciphertext) +// []byte("0123456789abcdef0123456789abcdef"), +// &sigil.ShuffleMaskObfuscator{}, +// +// ) +// ciphertext, _ := cipherSigil.In([]byte("payload")) +// plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { cipherSigil, err := NewChaChaPolySigil(key) if err != nil { @@ -334,9 +326,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return plaintext, nil } -// GetNonceFromCiphertext returns the nonce embedded in ciphertext. 
-// -// nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) +// Example: nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { diff --git a/sigil/sigil.go b/sigil/sigil.go index 5336648..e12d847 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -1,49 +1,23 @@ -// Package sigil provides the Sigil transformation framework for composable, -// reversible data transformations. +// Package sigil chains reversible byte transformations. // // hexSigil, _ := sigil.NewSigil("hex") // gzipSigil, _ := sigil.NewSigil("gzip") // encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) // decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) -// -// Sigils are the core abstraction - each sigil implements a specific -// transformation (encoding, compression, hashing, encryption) with a uniform -// interface. Sigils can be chained together to create transformation pipelines. package sigil import core "dappco.re/go/core" -// Sigil defines the interface for a data transformer. -// -// A Sigil represents a single transformation unit that can be applied to byte data. -// Sigils may be reversible (encoding, compression, encryption) or irreversible (hashing). -// -// For reversible sigils: Out(In(x)) == x for all valid x -// For irreversible sigils: Out returns the input unchanged -// For symmetric sigils: In(x) == Out(x) -// -// Implementations must handle nil input by returning nil without error, -// and empty input by returning an empty slice without error. +// Sigil transforms byte slices. type Sigil interface { - // In applies the forward transformation to the data. - // For encoding sigils, this encodes the data. - // For compression sigils, this compresses the data. - // For hash sigils, this computes the digest. 
+ // Example: encoded, _ := hexSigil.In([]byte("payload")) In(data []byte) ([]byte, error) - // Out applies the reverse transformation to the data. - // For reversible sigils, this recovers the original data. - // For irreversible sigils (e.g., hashing), this returns the input unchanged. + // Example: decoded, _ := hexSigil.Out(encoded) Out(data []byte) ([]byte, error) } -// Transmute applies a series of sigils to data in sequence. -// -// Each sigil's In method is called in order, with the output of one sigil -// becoming the input of the next. If any sigil returns an error, Transmute -// stops immediately and returns nil with that error. -// -// To reverse a transmutation, call each sigil's Out method in reverse order. +// Example: encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func Transmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for _, s := range sigils { @@ -55,11 +29,7 @@ func Transmute(data []byte, sigils []Sigil) ([]byte, error) { return data, nil } -// Untransmute reverses a transmutation by applying Out in reverse order. -// -// Each sigil's Out method is called in reverse order, with the output of one sigil -// becoming the input of the next. If any sigil returns an error, Untransmute -// stops immediately and returns nil with that error. +// Example: decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) func Untransmute(data []byte, sigils []Sigil) ([]byte, error) { var err error for i := len(sigils) - 1; i >= 0; i-- { diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 9fd936d..87a5d99 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -1,4 +1,4 @@ -// Package sqlite persists io.Medium content in a SQLite database. +// Package sqlite stores io.Medium content in SQLite. 
// // medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) // _ = medium.Write("config/app.yaml", "port: 8080") @@ -18,10 +18,8 @@ import ( _ "modernc.org/sqlite" // Pure Go SQLite driver ) -// Medium stores filesystem-shaped content in SQLite. -// -// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) +// _ = medium.Write("config/app.yaml", "port: 8080") type Medium struct { database *sql.DB table string @@ -29,7 +27,6 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) -// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string diff --git a/store/doc.go b/store/doc.go index abfa5b7..5101af0 100644 --- a/store/doc.go +++ b/store/doc.go @@ -1,10 +1,7 @@ -// Package store provides a SQLite-backed group-namespaced key-value store. +// Package store maps grouped keys onto SQLite rows. // // keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = keyValueStore.Set("app", "theme", "midnight") // medium := keyValueStore.AsMedium() // _ = medium.Write("app/theme", "midnight") -// -// It also exposes an io.Medium adapter so grouped values can participate in -// the same storage workflows as filesystem-backed mediums. package store diff --git a/store/medium.go b/store/medium.go index 3a548d3..1d5feca 100644 --- a/store/medium.go +++ b/store/medium.go @@ -12,11 +12,8 @@ import ( // Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) // _ = medium.Write("app/theme", "midnight") +// entries, _ := medium.List("") // entries, _ := medium.List("app") -// -// Paths are mapped as group/key - the first segment is the group, -// the rest is the key. List("") returns groups as directories, -// List("group") returns keys as files. 
type Medium struct { store *Store } diff --git a/store/store.go b/store/store.go index 1929093..4d43f30 100644 --- a/store/store.go +++ b/store/store.go @@ -10,17 +10,15 @@ import ( _ "modernc.org/sqlite" ) -// NotFoundError is returned when a key does not exist in the store. +// Example: _, err := keyValueStore.Get("app", "theme") +// err matches store.NotFoundError when the key is missing. var NotFoundError = errors.New("key not found") -// Store is the grouped key/value database returned by New. -// -// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type Store struct { database *sql.DB } -// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type Options struct { // Path is the SQLite database path. Use ":memory:" for tests. Path string @@ -134,13 +132,9 @@ func (s *Store) GetAll(group string) (map[string]string, error) { return result, nil } -// Render loads all key-value pairs from a group and renders a Go template. -// -// Example usage: -// -// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("user", "name", "alice") -// out, _ := keyValueStore.Render("hello {{ .name }}", "user") +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("user", "name", "alice") +// out, _ := keyValueStore.Render("hello {{ .name }}", "user") func (s *Store) Render(templateText, group string) (string, error) { rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/doc.go b/workspace/doc.go index 3c2140d..a0ea740 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,10 +1,7 @@ -// Package workspace provides encrypted user workspaces backed by io.Medium. +// Package workspace creates encrypted workspaces on top of io.Medium. 
// // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") // _ = service.SwitchWorkspace(workspaceID) // _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") -// -// Workspaces are rooted under the caller's configured home directory and keep -// file access constrained to the active workspace. package workspace diff --git a/workspace/service.go b/workspace/service.go index c2bfc88..a55c8c1 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,9 +11,7 @@ import ( "dappco.re/go/core/io" ) -// Workspace is the workspace service interface returned by New. -// -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -34,7 +32,7 @@ type Options struct { Crypt CryptProvider } -// Service is the Workspace implementation returned by New. +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Service struct { core *core.Core crypt CryptProvider @@ -77,8 +75,6 @@ func New(options Options) (*Service, error) { } // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") -// Identifier is hashed (SHA-256) to create the directory name. -// A PGP keypair is generated using the password. 
func (s *Service) CreateWorkspace(identifier, password string) (string, error) { s.mu.Lock() defer s.mu.Unlock() From bab889e9acb5446f74ddd9d5cbc774d4d56a9573 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:39:03 +0000 Subject: [PATCH 25/83] refactor(ax): clarify core storage names --- datanode/client.go | 250 ++++++++++++++++++------------------- doc.go | 2 +- io.go | 284 ++++++++++++++++++++---------------------- local/client.go | 103 ++++++++------- node/node.go | 144 ++++++++++----------- s3/s3.go | 162 ++++++++++++------------ sigil/crypto_sigil.go | 52 ++++---- sigil/sigil.go | 4 +- sigil/sigils.go | 32 ++--- sqlite/sqlite.go | 128 +++++++++---------- store/medium.go | 158 +++++++++++------------ store/store.go | 32 ++--- workspace/service.go | 76 +++++------ 13 files changed, 704 insertions(+), 723 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index f94cccf..008b662 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -62,10 +62,10 @@ func FromTar(data []byte) (*Medium, error) { } // Example: snapshot, _ := medium.Snapshot() -func (m *Medium) Snapshot() ([]byte, error) { - m.mu.RLock() - defer m.mu.RUnlock() - data, err := m.dataNode.ToTar() +func (medium *Medium) Snapshot() ([]byte, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() + data, err := medium.dataNode.ToTar() if err != nil { return nil, core.E("datanode.Snapshot", "tar failed", err) } @@ -73,23 +73,23 @@ func (m *Medium) Snapshot() ([]byte, error) { } // Example: _ = medium.Restore(snapshot) -func (m *Medium) Restore(data []byte) error { +func (medium *Medium) Restore(data []byte) error { dataNode, err := borgdatanode.FromTar(data) if err != nil { return core.E("datanode.Restore", "tar failed", err) } - m.mu.Lock() - defer m.mu.Unlock() - m.dataNode = dataNode - m.directorySet = make(map[string]bool) + medium.mu.Lock() + defer medium.mu.Unlock() + medium.dataNode = dataNode + medium.directorySet = make(map[string]bool) return nil } // Example: 
dataNode := medium.DataNode() -func (m *Medium) DataNode() *borgdatanode.DataNode { - m.mu.RLock() - defer m.mu.RUnlock() - return m.dataNode +func (medium *Medium) DataNode() *borgdatanode.DataNode { + medium.mu.RLock() + defer medium.mu.RUnlock() + return medium.dataNode } // normaliseEntryPath normalises a path: strips the leading slash and cleans traversal. @@ -104,12 +104,12 @@ func normaliseEntryPath(filePath string) string { // --- io.Medium interface --- -func (m *Medium) Read(filePath string) (string, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) Read(filePath string) (string, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - f, err := m.dataNode.Open(filePath) + f, err := medium.dataNode.Open(filePath) if err != nil { return "", core.E("datanode.Read", core.Concat("not found: ", filePath), fs.ErrNotExist) } @@ -130,42 +130,42 @@ func (m *Medium) Read(filePath string) (string, error) { return string(data), nil } -func (m *Medium) Write(filePath, content string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (medium *Medium) Write(filePath, content string) error { + medium.mu.Lock() + defer medium.mu.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return core.E("datanode.Write", "empty path", fs.ErrInvalid) } - m.dataNode.AddData(filePath, []byte(content)) + medium.dataNode.AddData(filePath, []byte(content)) // ensure parent directories are tracked - m.ensureDirsLocked(path.Dir(filePath)) + medium.ensureDirsLocked(path.Dir(filePath)) return nil } -func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { - return m.Write(filePath, content) +func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + return medium.Write(filePath, content) } -func (m *Medium) EnsureDir(filePath string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (medium *Medium) EnsureDir(filePath string) error { + medium.mu.Lock() + defer 
medium.mu.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return nil } - m.ensureDirsLocked(filePath) + medium.ensureDirsLocked(filePath) return nil } // ensureDirsLocked marks a directory and all ancestors as existing. -// Caller must hold m.mu. -func (m *Medium) ensureDirsLocked(directoryPath string) { +// Caller must hold medium.mu. +func (medium *Medium) ensureDirsLocked(directoryPath string) { for directoryPath != "" && directoryPath != "." { - m.directorySet[directoryPath] = true + medium.directorySet[directoryPath] = true directoryPath = path.Dir(directoryPath) if directoryPath == "." { break @@ -173,26 +173,26 @@ func (m *Medium) ensureDirsLocked(directoryPath string) { } } -func (m *Medium) IsFile(filePath string) bool { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) IsFile(filePath string) bool { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - info, err := m.dataNode.Stat(filePath) + info, err := medium.dataNode.Stat(filePath) return err == nil && !info.IsDir() } -func (m *Medium) FileGet(filePath string) (string, error) { - return m.Read(filePath) +func (medium *Medium) FileGet(filePath string) (string, error) { + return medium.Read(filePath) } -func (m *Medium) FileSet(filePath, content string) error { - return m.Write(filePath, content) +func (medium *Medium) FileSet(filePath, content string) error { + return medium.Write(filePath, content) } -func (m *Medium) Delete(filePath string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (medium *Medium) Delete(filePath string) error { + medium.mu.Lock() + defer medium.mu.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -200,46 +200,46 @@ func (m *Medium) Delete(filePath string) error { } // Check if it's a file in the DataNode - info, err := m.dataNode.Stat(filePath) + info, err := medium.dataNode.Stat(filePath) if err != nil { // Check explicit directories - if m.directorySet[filePath] { + if 
medium.directorySet[filePath] { // Check if dir is empty - hasChildren, err := m.hasPrefixLocked(filePath + "/") + hasChildren, err := medium.hasPrefixLocked(filePath + "/") if err != nil { return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", filePath), err) } if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directorySet, filePath) + delete(medium.directorySet, filePath) return nil } return core.E("datanode.Delete", core.Concat("not found: ", filePath), fs.ErrNotExist) } if info.IsDir() { - hasChildren, err := m.hasPrefixLocked(filePath + "/") + hasChildren, err := medium.hasPrefixLocked(filePath + "/") if err != nil { return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", filePath), err) } if hasChildren { return core.E("datanode.Delete", core.Concat("directory not empty: ", filePath), fs.ErrExist) } - delete(m.directorySet, filePath) + delete(medium.directorySet, filePath) return nil } // Remove the file by creating a new DataNode without it - if err := m.removeFileLocked(filePath); err != nil { + if err := medium.removeFileLocked(filePath); err != nil { return core.E("datanode.Delete", core.Concat("failed to delete file: ", filePath), err) } return nil } -func (m *Medium) DeleteAll(filePath string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (medium *Medium) DeleteAll(filePath string) error { + medium.mu.Lock() + defer medium.mu.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -250,22 +250,22 @@ func (m *Medium) DeleteAll(filePath string) error { found := false // Check if filePath itself is a file - info, err := m.dataNode.Stat(filePath) + info, err := medium.dataNode.Stat(filePath) if err == nil && !info.IsDir() { - if err := m.removeFileLocked(filePath); err != nil { + if err := medium.removeFileLocked(filePath); err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", filePath), 
err) } found = true } // Remove all files under prefix - entries, err := m.collectAllLocked() + entries, err := medium.collectAllLocked() if err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", filePath), err) } for _, name := range entries { if name == filePath || core.HasPrefix(name, prefix) { - if err := m.removeFileLocked(name); err != nil { + if err := medium.removeFileLocked(name); err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to delete file: ", name), err) } found = true @@ -273,9 +273,9 @@ func (m *Medium) DeleteAll(filePath string) error { } // Remove explicit directories under prefix - for directoryPath := range m.directorySet { + for directoryPath := range medium.directorySet { if directoryPath == filePath || core.HasPrefix(directoryPath, prefix) { - delete(m.directorySet, directoryPath) + delete(medium.directorySet, directoryPath) found = true } } @@ -286,28 +286,28 @@ func (m *Medium) DeleteAll(filePath string) error { return nil } -func (m *Medium) Rename(oldPath, newPath string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (medium *Medium) Rename(oldPath, newPath string) error { + medium.mu.Lock() + defer medium.mu.Unlock() oldPath = normaliseEntryPath(oldPath) newPath = normaliseEntryPath(newPath) // Check if source is a file - info, err := m.dataNode.Stat(oldPath) + info, err := medium.dataNode.Stat(oldPath) if err != nil { return core.E("datanode.Rename", core.Concat("not found: ", oldPath), fs.ErrNotExist) } if !info.IsDir() { // Read old, write new, delete old - data, err := m.readFileLocked(oldPath) + data, err := medium.readFileLocked(oldPath) if err != nil { return core.E("datanode.Rename", core.Concat("failed to read source file: ", oldPath), err) } - m.dataNode.AddData(newPath, data) - m.ensureDirsLocked(path.Dir(newPath)) - if err := m.removeFileLocked(oldPath); err != nil { + medium.dataNode.AddData(newPath, data) + medium.ensureDirsLocked(path.Dir(newPath)) + if err := 
medium.removeFileLocked(oldPath); err != nil { return core.E("datanode.Rename", core.Concat("failed to remove source file: ", oldPath), err) } return nil @@ -317,19 +317,19 @@ func (m *Medium) Rename(oldPath, newPath string) error { oldPrefix := oldPath + "/" newPrefix := newPath + "/" - entries, err := m.collectAllLocked() + entries, err := medium.collectAllLocked() if err != nil { return core.E("datanode.Rename", core.Concat("failed to inspect tree: ", oldPath), err) } for _, name := range entries { if core.HasPrefix(name, oldPrefix) { newName := core.Concat(newPrefix, core.TrimPrefix(name, oldPrefix)) - data, err := m.readFileLocked(name) + data, err := medium.readFileLocked(name) if err != nil { return core.E("datanode.Rename", core.Concat("failed to read source file: ", name), err) } - m.dataNode.AddData(newName, data) - if err := m.removeFileLocked(name); err != nil { + medium.dataNode.AddData(newName, data) + if err := medium.removeFileLocked(name); err != nil { return core.E("datanode.Rename", core.Concat("failed to remove source file: ", name), err) } } @@ -337,30 +337,30 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Move explicit directories dirsToMove := make(map[string]string) - for d := range m.directorySet { + for d := range medium.directorySet { if d == oldPath || core.HasPrefix(d, oldPrefix) { newD := core.Concat(newPath, core.TrimPrefix(d, oldPath)) dirsToMove[d] = newD } } for old, nw := range dirsToMove { - delete(m.directorySet, old) - m.directorySet[nw] = true + delete(medium.directorySet, old) + medium.directorySet[nw] = true } return nil } -func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - entries, err := m.dataNode.ReadDir(filePath) + entries, err := medium.dataNode.ReadDir(filePath) if err != nil { // Check explicit 
directories - if filePath == "" || m.directorySet[filePath] { + if filePath == "" || medium.directorySet[filePath] { return []fs.DirEntry{}, nil } return nil, core.E("datanode.List", core.Concat("not found: ", filePath), fs.ErrNotExist) @@ -376,7 +376,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { seen[e.Name()] = true } - for d := range m.directorySet { + for d := range medium.directorySet { if !core.HasPrefix(d, prefix) { continue } @@ -398,43 +398,43 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { return entries, nil } -func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return &fileInfo{name: ".", isDir: true, mode: fs.ModeDir | 0755}, nil } - info, err := m.dataNode.Stat(filePath) + info, err := medium.dataNode.Stat(filePath) if err == nil { return info, nil } - if m.directorySet[filePath] { + if medium.directorySet[filePath] { return &fileInfo{name: path.Base(filePath), isDir: true, mode: fs.ModeDir | 0755}, nil } return nil, core.E("datanode.Stat", core.Concat("not found: ", filePath), fs.ErrNotExist) } -func (m *Medium) Open(filePath string) (fs.File, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) Open(filePath string) (fs.File, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - return m.dataNode.Open(filePath) + return medium.dataNode.Open(filePath) } -func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { filePath = normaliseEntryPath(filePath) if filePath == "" { return nil, core.E("datanode.Create", "empty path", fs.ErrInvalid) } - return &writeCloser{medium: m, path: filePath}, nil + return &writeCloser{medium: medium, path: filePath}, 
nil } -func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { filePath = normaliseEntryPath(filePath) if filePath == "" { return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) @@ -442,71 +442,71 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { // Read existing content var existing []byte - m.mu.RLock() - if m.IsFile(filePath) { - data, err := m.readFileLocked(filePath) + medium.mu.RLock() + if medium.IsFile(filePath) { + data, err := medium.readFileLocked(filePath) if err != nil { - m.mu.RUnlock() + medium.mu.RUnlock() return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", filePath), err) } existing = data } - m.mu.RUnlock() + medium.mu.RUnlock() - return &writeCloser{medium: m, path: filePath, buf: existing}, nil + return &writeCloser{medium: medium, path: filePath, buf: existing}, nil } -func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - f, err := m.dataNode.Open(filePath) + f, err := medium.dataNode.Open(filePath) if err != nil { return nil, core.E("datanode.ReadStream", core.Concat("not found: ", filePath), fs.ErrNotExist) } return f.(goio.ReadCloser), nil } -func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { - return m.Create(filePath) +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return medium.Create(filePath) } -func (m *Medium) Exists(filePath string) bool { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) Exists(filePath string) bool { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return true // root always exists } - _, err := 
m.dataNode.Stat(filePath) + _, err := medium.dataNode.Stat(filePath) if err == nil { return true } - return m.directorySet[filePath] + return medium.directorySet[filePath] } -func (m *Medium) IsDir(filePath string) bool { - m.mu.RLock() - defer m.mu.RUnlock() +func (medium *Medium) IsDir(filePath string) bool { + medium.mu.RLock() + defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return true } - info, err := m.dataNode.Stat(filePath) + info, err := medium.dataNode.Stat(filePath) if err == nil { return info.IsDir() } - return m.directorySet[filePath] + return medium.directorySet[filePath] } // --- internal helpers --- // hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. -func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { - entries, err := m.collectAllLocked() +func (medium *Medium) hasPrefixLocked(prefix string) (bool, error) { + entries, err := medium.collectAllLocked() if err != nil { return false, err } @@ -515,7 +515,7 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { return true, nil } } - for d := range m.directorySet { + for d := range medium.directorySet { if core.HasPrefix(d, prefix) { return true, nil } @@ -524,9 +524,9 @@ func (m *Medium) hasPrefixLocked(prefix string) (bool, error) { } // collectAllLocked returns all file paths in the DataNode. Caller holds lock. 
-func (m *Medium) collectAllLocked() ([]string, error) { +func (medium *Medium) collectAllLocked() ([]string, error) { var names []string - err := dataNodeWalkDir(m.dataNode, ".", func(filePath string, entry fs.DirEntry, err error) error { + err := dataNodeWalkDir(medium.dataNode, ".", func(filePath string, entry fs.DirEntry, err error) error { if err != nil { return err } @@ -538,8 +538,8 @@ func (m *Medium) collectAllLocked() ([]string, error) { return names, err } -func (m *Medium) readFileLocked(name string) ([]byte, error) { - f, err := dataNodeOpen(m.dataNode, name) +func (medium *Medium) readFileLocked(name string) ([]byte, error) { + f, err := dataNodeOpen(medium.dataNode, name) if err != nil { return nil, err } @@ -556,9 +556,9 @@ func (m *Medium) readFileLocked(name string) ([]byte, error) { // removeFileLocked removes a single file by rebuilding the DataNode. // This is necessary because Borg's DataNode doesn't expose a Remove method. -// Caller must hold m.mu write lock. -func (m *Medium) removeFileLocked(target string) error { - entries, err := m.collectAllLocked() +// Caller must hold medium.mu write lock. +func (medium *Medium) removeFileLocked(target string) error { + entries, err := medium.collectAllLocked() if err != nil { return err } @@ -567,13 +567,13 @@ func (m *Medium) removeFileLocked(target string) error { if name == target { continue } - data, err := m.readFileLocked(name) + data, err := medium.readFileLocked(name) if err != nil { return err } newDN.AddData(name, data) } - m.dataNode = newDN + medium.dataNode = newDN return nil } diff --git a/doc.go b/doc.go index 14eb1cb..6b938f8 100644 --- a/doc.go +++ b/doc.go @@ -1,4 +1,4 @@ -// Package io gives CoreGO a single storage surface. +// Package io exposes CoreGO's storage surface. 
// // medium, _ := io.NewSandboxed("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") diff --git a/io.go b/io.go index 57362a1..f22e20c 100644 --- a/io.go +++ b/io.go @@ -59,7 +59,7 @@ type Medium interface { IsDir(path string) bool } -// FileInfo is a test helper that satisfies fs.FileInfo. +// Example: info := io.FileInfo{name: "app.yaml", size: 8, mode: 0644} type FileInfo struct { name string size int64 @@ -68,19 +68,19 @@ type FileInfo struct { isDir bool } -func (fi FileInfo) Name() string { return fi.name } +func (info FileInfo) Name() string { return info.name } -func (fi FileInfo) Size() int64 { return fi.size } +func (info FileInfo) Size() int64 { return info.size } -func (fi FileInfo) Mode() fs.FileMode { return fi.mode } +func (info FileInfo) Mode() fs.FileMode { return info.mode } -func (fi FileInfo) ModTime() time.Time { return fi.modTime } +func (info FileInfo) ModTime() time.Time { return info.modTime } -func (fi FileInfo) IsDir() bool { return fi.isDir } +func (info FileInfo) IsDir() bool { return info.isDir } -func (fi FileInfo) Sys() any { return nil } +func (info FileInfo) Sys() any { return nil } -// DirEntry is a test helper that satisfies fs.DirEntry. 
+// Example: entry := io.DirEntry{name: "app.yaml", mode: 0644} type DirEntry struct { name string isDir bool @@ -88,15 +88,15 @@ type DirEntry struct { info fs.FileInfo } -func (de DirEntry) Name() string { return de.name } +func (entry DirEntry) Name() string { return entry.name } -func (de DirEntry) IsDir() bool { return de.isDir } +func (entry DirEntry) IsDir() bool { return entry.isDir } -func (de DirEntry) Type() fs.FileMode { return de.mode.Type() } +func (entry DirEntry) Type() fs.FileMode { return entry.mode.Type() } -func (de DirEntry) Info() (fs.FileInfo, error) { return de.info, nil } +func (entry DirEntry) Info() (fs.FileInfo, error) { return entry.info, nil } -// Example: io.Local.Read("/etc/hostname") +// Example: _ = io.Local.Read("/etc/hostname") var Local Medium var _ Medium = (*local.Medium)(nil) @@ -115,36 +115,34 @@ func NewSandboxed(root string) (Medium, error) { return local.New(root) } -// --- Helper Functions --- - // Example: content, _ := io.Read(medium, "config/app.yaml") -func Read(m Medium, path string) (string, error) { - return m.Read(path) +func Read(medium Medium, path string) (string, error) { + return medium.Read(path) } // Example: _ = io.Write(medium, "config/app.yaml", "port: 8080") -func Write(m Medium, path, content string) error { - return m.Write(path, content) +func Write(medium Medium, path, content string) error { + return medium.Write(path, content) } // Example: reader, _ := io.ReadStream(medium, "logs/app.log") -func ReadStream(m Medium, path string) (goio.ReadCloser, error) { - return m.ReadStream(path) +func ReadStream(medium Medium, path string) (goio.ReadCloser, error) { + return medium.ReadStream(path) } // Example: writer, _ := io.WriteStream(medium, "logs/app.log") -func WriteStream(m Medium, path string) (goio.WriteCloser, error) { - return m.WriteStream(path) +func WriteStream(medium Medium, path string) (goio.WriteCloser, error) { + return medium.WriteStream(path) } // Example: _ = io.EnsureDir(medium, 
"config") -func EnsureDir(m Medium, path string) error { - return m.EnsureDir(path) +func EnsureDir(medium Medium, path string) error { + return medium.EnsureDir(path) } // Example: ok := io.IsFile(medium, "config/app.yaml") -func IsFile(m Medium, path string) bool { - return m.IsFile(path) +func IsFile(medium Medium, path string) bool { + return medium.IsFile(path) } // Example: _ = io.Copy(source, "input.txt", destination, "backup/input.txt") @@ -159,8 +157,6 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath return nil } -// --- MockMedium --- - // Example: medium := io.NewMockMedium() // _ = medium.Write("config/app.yaml", "port: 8080") type MockMedium struct { @@ -181,94 +177,91 @@ func NewMockMedium() *MockMedium { } } -func (m *MockMedium) Read(path string) (string, error) { - content, ok := m.Files[path] +func (medium *MockMedium) Read(path string) (string, error) { + content, ok := medium.Files[path] if !ok { return "", core.E("io.MockMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) } return content, nil } -func (m *MockMedium) Write(path, content string) error { - m.Files[path] = content - m.ModTimes[path] = time.Now() +func (medium *MockMedium) Write(path, content string) error { + medium.Files[path] = content + medium.ModTimes[path] = time.Now() return nil } -func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { - return m.Write(path, content) +func (medium *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { + return medium.Write(path, content) } -func (m *MockMedium) EnsureDir(path string) error { - m.Dirs[path] = true +func (medium *MockMedium) EnsureDir(path string) error { + medium.Dirs[path] = true return nil } -func (m *MockMedium) IsFile(path string) bool { - _, ok := m.Files[path] +func (medium *MockMedium) IsFile(path string) bool { + _, ok := medium.Files[path] return ok } -func (m *MockMedium) FileGet(path string) (string, error) { - return 
m.Read(path) +func (medium *MockMedium) FileGet(path string) (string, error) { + return medium.Read(path) } -func (m *MockMedium) FileSet(path, content string) error { - return m.Write(path, content) +func (medium *MockMedium) FileSet(path, content string) error { + return medium.Write(path, content) } -func (m *MockMedium) Delete(path string) error { - if _, ok := m.Files[path]; ok { - delete(m.Files, path) +func (medium *MockMedium) Delete(path string) error { + if _, ok := medium.Files[path]; ok { + delete(medium.Files, path) return nil } - if _, ok := m.Dirs[path]; ok { - // Check if directory is empty (no files or subdirs with this prefix) + if _, ok := medium.Dirs[path]; ok { prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for f := range m.Files { - if core.HasPrefix(f, prefix) { + for filePath := range medium.Files { + if core.HasPrefix(filePath, prefix) { return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - for d := range m.Dirs { - if d != path && core.HasPrefix(d, prefix) { + for directoryPath := range medium.Dirs { + if directoryPath != path && core.HasPrefix(directoryPath, prefix) { return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - delete(m.Dirs, path) + delete(medium.Dirs, path) return nil } return core.E("io.MockMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) } -func (m *MockMedium) DeleteAll(path string) error { +func (medium *MockMedium) DeleteAll(path string) error { found := false - if _, ok := m.Files[path]; ok { - delete(m.Files, path) + if _, ok := medium.Files[path]; ok { + delete(medium.Files, path) found = true } - if _, ok := m.Dirs[path]; ok { - delete(m.Dirs, path) + if _, ok := medium.Dirs[path]; ok { + delete(medium.Dirs, path) found = true } - - // Delete all entries under this path prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for f := range m.Files { - if core.HasPrefix(f, 
prefix) { - delete(m.Files, f) + for filePath := range medium.Files { + if core.HasPrefix(filePath, prefix) { + delete(medium.Files, filePath) found = true } } - for d := range m.Dirs { - if core.HasPrefix(d, prefix) { - delete(m.Dirs, d) + for directoryPath := range medium.Dirs { + if core.HasPrefix(directoryPath, prefix) { + delete(medium.Dirs, directoryPath) found = true } } @@ -279,20 +272,19 @@ func (m *MockMedium) DeleteAll(path string) error { return nil } -func (m *MockMedium) Rename(oldPath, newPath string) error { - if content, ok := m.Files[oldPath]; ok { - m.Files[newPath] = content - delete(m.Files, oldPath) - if mt, ok := m.ModTimes[oldPath]; ok { - m.ModTimes[newPath] = mt - delete(m.ModTimes, oldPath) +func (medium *MockMedium) Rename(oldPath, newPath string) error { + if content, ok := medium.Files[oldPath]; ok { + medium.Files[newPath] = content + delete(medium.Files, oldPath) + if modTime, ok := medium.ModTimes[oldPath]; ok { + medium.ModTimes[newPath] = modTime + delete(medium.ModTimes, oldPath) } return nil } - if _, ok := m.Dirs[oldPath]; ok { - // Move directory and all contents - m.Dirs[newPath] = true - delete(m.Dirs, oldPath) + if _, ok := medium.Dirs[oldPath]; ok { + medium.Dirs[newPath] = true + delete(medium.Dirs, oldPath) oldPrefix := oldPath if !core.HasSuffix(oldPrefix, "/") { @@ -303,42 +295,40 @@ func (m *MockMedium) Rename(oldPath, newPath string) error { newPrefix += "/" } - // Collect files to move first (don't mutate during iteration) filesToMove := make(map[string]string) - for f := range m.Files { - if core.HasPrefix(f, oldPrefix) { - newF := core.Concat(newPrefix, core.TrimPrefix(f, oldPrefix)) - filesToMove[f] = newF + for filePath := range medium.Files { + if core.HasPrefix(filePath, oldPrefix) { + newFilePath := core.Concat(newPrefix, core.TrimPrefix(filePath, oldPrefix)) + filesToMove[filePath] = newFilePath } } - for oldF, newF := range filesToMove { - m.Files[newF] = m.Files[oldF] - delete(m.Files, oldF) - if mt, ok := 
m.ModTimes[oldF]; ok { - m.ModTimes[newF] = mt - delete(m.ModTimes, oldF) + for oldFilePath, newFilePath := range filesToMove { + medium.Files[newFilePath] = medium.Files[oldFilePath] + delete(medium.Files, oldFilePath) + if modTime, ok := medium.ModTimes[oldFilePath]; ok { + medium.ModTimes[newFilePath] = modTime + delete(medium.ModTimes, oldFilePath) } } - // Collect directories to move first dirsToMove := make(map[string]string) - for d := range m.Dirs { - if core.HasPrefix(d, oldPrefix) { - newD := core.Concat(newPrefix, core.TrimPrefix(d, oldPrefix)) - dirsToMove[d] = newD + for directoryPath := range medium.Dirs { + if core.HasPrefix(directoryPath, oldPrefix) { + newDirectoryPath := core.Concat(newPrefix, core.TrimPrefix(directoryPath, oldPrefix)) + dirsToMove[directoryPath] = newDirectoryPath } } - for oldD, newD := range dirsToMove { - m.Dirs[newD] = true - delete(m.Dirs, oldD) + for oldDirectoryPath, newDirectoryPath := range dirsToMove { + medium.Dirs[newDirectoryPath] = true + delete(medium.Dirs, oldDirectoryPath) } return nil } return core.E("io.MockMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } -func (m *MockMedium) Open(path string) (fs.File, error) { - content, ok := m.Files[path] +func (medium *MockMedium) Open(path string) (fs.File, error) { + content, ok := medium.Files[path] if !ok { return nil, core.E("io.MockMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } @@ -348,28 +338,28 @@ func (m *MockMedium) Open(path string) (fs.File, error) { }, nil } -func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { +func (medium *MockMedium) Create(path string) (goio.WriteCloser, error) { return &MockWriteCloser{ - medium: m, + medium: medium, path: path, }, nil } -func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { - content := m.Files[path] +func (medium *MockMedium) Append(path string) (goio.WriteCloser, error) { + content := medium.Files[path] return &MockWriteCloser{ - 
medium: m, + medium: medium, path: path, data: []byte(content), }, nil } -func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { - return m.Open(path) +func (medium *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { + return medium.Open(path) } -func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { - return m.Create(path) +func (medium *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { + return medium.Create(path) } // MockFile implements fs.File for MockMedium. @@ -379,23 +369,23 @@ type MockFile struct { offset int64 } -func (f *MockFile) Stat() (fs.FileInfo, error) { +func (file *MockFile) Stat() (fs.FileInfo, error) { return FileInfo{ - name: f.name, - size: int64(len(f.content)), + name: file.name, + size: int64(len(file.content)), }, nil } -func (f *MockFile) Read(b []byte) (int, error) { - if f.offset >= int64(len(f.content)) { +func (file *MockFile) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(b, f.content[f.offset:]) - f.offset += int64(n) - return n, nil + readCount := copy(buffer, file.content[file.offset:]) + file.offset += int64(readCount) + return readCount, nil } -func (f *MockFile) Close() error { +func (file *MockFile) Close() error { return nil } @@ -406,34 +396,33 @@ type MockWriteCloser struct { data []byte } -func (w *MockWriteCloser) Write(p []byte) (int, error) { - w.data = append(w.data, p...) - return len(p), nil +func (writeCloser *MockWriteCloser) Write(data []byte) (int, error) { + writeCloser.data = append(writeCloser.data, data...) 
+ return len(data), nil } -func (w *MockWriteCloser) Close() error { - w.medium.Files[w.path] = string(w.data) - w.medium.ModTimes[w.path] = time.Now() +func (writeCloser *MockWriteCloser) Close() error { + writeCloser.medium.Files[writeCloser.path] = string(writeCloser.data) + writeCloser.medium.ModTimes[writeCloser.path] = time.Now() return nil } -func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { - if _, ok := m.Dirs[path]; !ok { - // Check if it's the root or has children +func (medium *MockMedium) List(path string) ([]fs.DirEntry, error) { + if _, ok := medium.Dirs[path]; !ok { hasChildren := false prefix := path if path != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } - for f := range m.Files { - if core.HasPrefix(f, prefix) { + for filePath := range medium.Files { + if core.HasPrefix(filePath, prefix) { hasChildren = true break } } if !hasChildren { - for d := range m.Dirs { - if core.HasPrefix(d, prefix) { + for directoryPath := range medium.Dirs { + if core.HasPrefix(directoryPath, prefix) { hasChildren = true break } @@ -452,16 +441,13 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { seen := make(map[string]bool) var entries []fs.DirEntry - // Find immediate children (files) - for f, content := range m.Files { - if !core.HasPrefix(f, prefix) { + for filePath, content := range medium.Files { + if !core.HasPrefix(filePath, prefix) { continue } - rest := core.TrimPrefix(f, prefix) + rest := core.TrimPrefix(filePath, prefix) if rest == "" || core.Contains(rest, "/") { - // Skip if it's not an immediate child if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 { - // This is a subdirectory dirName := rest[:idx] if !seen[dirName] { seen[dirName] = true @@ -494,16 +480,14 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { } } - // Find immediate subdirectories - for d := range m.Dirs { - if !core.HasPrefix(d, prefix) { + for directoryPath := range medium.Dirs { + if !core.HasPrefix(directoryPath, prefix) 
{ continue } - rest := core.TrimPrefix(d, prefix) + rest := core.TrimPrefix(directoryPath, prefix) if rest == "" { continue } - // Get only immediate child if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 { rest = rest[:idx] } @@ -525,9 +509,9 @@ func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { return entries, nil } -func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { - if content, ok := m.Files[path]; ok { - modTime, ok := m.ModTimes[path] +func (medium *MockMedium) Stat(path string) (fs.FileInfo, error) { + if content, ok := medium.Files[path]; ok { + modTime, ok := medium.ModTimes[path] if !ok { modTime = time.Now() } @@ -538,7 +522,7 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { modTime: modTime, }, nil } - if _, ok := m.Dirs[path]; ok { + if _, ok := medium.Dirs[path]; ok { return FileInfo{ name: core.PathBase(path), isDir: true, @@ -548,17 +532,17 @@ func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { return nil, core.E("io.MockMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } -func (m *MockMedium) Exists(path string) bool { - if _, ok := m.Files[path]; ok { +func (medium *MockMedium) Exists(path string) bool { + if _, ok := medium.Files[path]; ok { return true } - if _, ok := m.Dirs[path]; ok { + if _, ok := medium.Dirs[path]; ok { return true } return false } -func (m *MockMedium) IsDir(path string) bool { - _, ok := m.Dirs[path] +func (medium *MockMedium) IsDir(path string) bool { + _, ok := medium.Dirs[path] return ok } diff --git a/local/client.go b/local/client.go index 81c7f9e..3eebd11 100644 --- a/local/client.go +++ b/local/client.go @@ -177,17 +177,15 @@ func logSandboxEscape(root, path, attempted string) { core.Security("sandbox escape detected", "root", root, "path", path, "attempted", attempted, "user", username) } -// sandboxedPath resolves a path inside the filesystem root. -// Absolute paths are sandboxed under root (unless root is "/"). 
-func (m *Medium) sandboxedPath(path string) string { +func (medium *Medium) sandboxedPath(path string) string { if path == "" { - return m.filesystemRoot + return medium.filesystemRoot } // If the path is relative and the medium is rooted at "/", // treat it as relative to the current working directory. // This makes io.Local behave more like the standard 'os' package. - if m.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(path)) { + if medium.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(path)) { return core.Path(currentWorkingDir(), normalisePath(path)) } @@ -196,23 +194,22 @@ func (m *Medium) sandboxedPath(path string) string { clean := cleanSandboxPath(path) // If root is "/", allow absolute paths through - if m.filesystemRoot == dirSeparator() { + if medium.filesystemRoot == dirSeparator() { return clean } // Join cleaned relative path with root - return core.Path(m.filesystemRoot, core.TrimPrefix(clean, dirSeparator())) + return core.Path(medium.filesystemRoot, core.TrimPrefix(clean, dirSeparator())) } -// validatePath ensures the path is within the sandbox, following symlinks if they exist. 
-func (m *Medium) validatePath(path string) (string, error) { - if m.filesystemRoot == dirSeparator() { - return m.sandboxedPath(path), nil +func (medium *Medium) validatePath(path string) (string, error) { + if medium.filesystemRoot == dirSeparator() { + return medium.sandboxedPath(path), nil } // Split the cleaned path into components parts := splitPathParts(cleanSandboxPath(path)) - current := m.filesystemRoot + current := medium.filesystemRoot for _, part := range parts { next := core.Path(current, part) @@ -229,9 +226,9 @@ func (m *Medium) validatePath(path string) (string, error) { } // Verify the resolved part is still within the root - if !isWithinRoot(m.filesystemRoot, realNext) { + if !isWithinRoot(medium.filesystemRoot, realNext) { // Security event: sandbox escape attempt - logSandboxEscape(m.filesystemRoot, path, realNext) + logSandboxEscape(medium.filesystemRoot, path, realNext) return "", fs.ErrPermission } current = realNext @@ -240,98 +237,98 @@ func (m *Medium) validatePath(path string) (string, error) { return current, nil } -func (m *Medium) Read(path string) (string, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Read(path string) (string, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return "", err } return resultString("local.Read", core.Concat("read failed: ", path), unrestrictedFileSystem.Read(resolvedPath)) } -func (m *Medium) Write(path, content string) error { - return m.WriteMode(path, content, 0644) +func (medium *Medium) Write(path, content string) error { + return medium.WriteMode(path, content, 0644) } -func (m *Medium) WriteMode(path, content string, mode fs.FileMode) error { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) WriteMode(path, content string, mode fs.FileMode) error { + resolvedPath, err := medium.validatePath(path) if err != nil { return err } return resultError("local.WriteMode", core.Concat("write failed: ", path), 
unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } -func (m *Medium) EnsureDir(path string) error { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) EnsureDir(path string) error { + resolvedPath, err := medium.validatePath(path) if err != nil { return err } return resultError("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) } -func (m *Medium) IsDir(path string) bool { +func (medium *Medium) IsDir(path string) bool { if path == "" { return false } - resolvedPath, err := m.validatePath(path) + resolvedPath, err := medium.validatePath(path) if err != nil { return false } return unrestrictedFileSystem.IsDir(resolvedPath) } -func (m *Medium) IsFile(path string) bool { +func (medium *Medium) IsFile(path string) bool { if path == "" { return false } - resolvedPath, err := m.validatePath(path) + resolvedPath, err := medium.validatePath(path) if err != nil { return false } return unrestrictedFileSystem.IsFile(resolvedPath) } -func (m *Medium) Exists(path string) bool { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Exists(path string) bool { + resolvedPath, err := medium.validatePath(path) if err != nil { return false } return unrestrictedFileSystem.Exists(resolvedPath) } -func (m *Medium) List(path string) ([]fs.DirEntry, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) List(path string) ([]fs.DirEntry, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return nil, err } return resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) } -func (m *Medium) Stat(path string) (fs.FileInfo, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Stat(path string) (fs.FileInfo, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return nil, err } return resultFileInfo("local.Stat", core.Concat("stat failed: ", path), 
unrestrictedFileSystem.Stat(resolvedPath)) } -func (m *Medium) Open(path string) (fs.File, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Open(path string) (fs.File, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return nil, err } return resultFile("local.Open", core.Concat("open failed: ", path), unrestrictedFileSystem.Open(resolvedPath)) } -func (m *Medium) Create(path string) (goio.WriteCloser, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Create(path string) (goio.WriteCloser, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return nil, err } return resultWriteCloser("local.Create", core.Concat("create failed: ", path), unrestrictedFileSystem.Create(resolvedPath)) } -func (m *Medium) Append(path string) (goio.WriteCloser, error) { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Append(path string) (goio.WriteCloser, error) { + resolvedPath, err := medium.validatePath(path) if err != nil { return nil, err } @@ -339,17 +336,17 @@ func (m *Medium) Append(path string) (goio.WriteCloser, error) { } // Example: reader, _ := medium.ReadStream("logs/app.log") -func (m *Medium) ReadStream(path string) (goio.ReadCloser, error) { - return m.Open(path) +func (medium *Medium) ReadStream(path string) (goio.ReadCloser, error) { + return medium.Open(path) } // Example: writer, _ := medium.WriteStream("logs/app.log") -func (m *Medium) WriteStream(path string) (goio.WriteCloser, error) { - return m.Create(path) +func (medium *Medium) WriteStream(path string) (goio.WriteCloser, error) { + return medium.Create(path) } -func (m *Medium) Delete(path string) error { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) Delete(path string) error { + resolvedPath, err := medium.validatePath(path) if err != nil { return err } @@ -359,8 +356,8 @@ func (m *Medium) Delete(path string) error { return resultError("local.Delete", core.Concat("delete 
failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) } -func (m *Medium) DeleteAll(path string) error { - resolvedPath, err := m.validatePath(path) +func (medium *Medium) DeleteAll(path string) error { + resolvedPath, err := medium.validatePath(path) if err != nil { return err } @@ -370,24 +367,24 @@ func (m *Medium) DeleteAll(path string) error { return resultError("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) } -func (m *Medium) Rename(oldPath, newPath string) error { - oldResolvedPath, err := m.validatePath(oldPath) +func (medium *Medium) Rename(oldPath, newPath string) error { + oldResolvedPath, err := medium.validatePath(oldPath) if err != nil { return err } - newResolvedPath, err := m.validatePath(newPath) + newResolvedPath, err := medium.validatePath(newPath) if err != nil { return err } return resultError("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) } -func (m *Medium) FileGet(path string) (string, error) { - return m.Read(path) +func (medium *Medium) FileGet(path string) (string, error) { + return medium.Read(path) } -func (m *Medium) FileSet(path, content string) error { - return m.Write(path, content) +func (medium *Medium) FileSet(path, content string) error { + return medium.Write(path, content) } func lstat(path string) (*syscall.Stat_t, error) { diff --git a/node/node.go b/node/node.go index 36f491a..82d7dfe 100644 --- a/node/node.go +++ b/node/node.go @@ -39,7 +39,7 @@ func New() *Node { // ---------- Node-specific methods ---------- // AddData stages content in the in-memory filesystem. 
-func (n *Node) AddData(name string, content []byte) { +func (node *Node) AddData(name string, content []byte) { name = core.TrimPrefix(name, "/") if name == "" { return @@ -48,7 +48,7 @@ func (n *Node) AddData(name string, content []byte) { if core.HasSuffix(name, "/") { return } - n.files[name] = &dataFile{ + node.files[name] = &dataFile{ name: name, content: content, modTime: time.Now(), @@ -56,11 +56,11 @@ func (n *Node) AddData(name string, content []byte) { } // ToTar serialises the entire in-memory tree to a tar archive. -func (n *Node) ToTar() ([]byte, error) { +func (node *Node) ToTar() ([]byte, error) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) - for _, file := range n.files { + for _, file := range node.files { hdr := &tar.Header{ Name: file.name, Mode: 0600, @@ -92,7 +92,7 @@ func FromTar(data []byte) (*Node, error) { } // LoadTar replaces the in-memory tree with the contents of a tar archive. -func (n *Node) LoadTar(data []byte) error { +func (node *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) tr := tar.NewReader(bytes.NewReader(data)) @@ -122,12 +122,12 @@ func (n *Node) LoadTar(data []byte) error { } } - n.files = newFiles + node.files = newFiles return nil } -func (n *Node) WalkNode(root string, fn fs.WalkDirFunc) error { - return fs.WalkDir(n, root, fn) +func (node *Node) WalkNode(root string, fn fs.WalkDirFunc) error { + return fs.WalkDir(node, root, fn) } // Example: options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} @@ -147,15 +147,15 @@ type WalkOptions struct { // nodeTree := New() // options := WalkOptions{MaxDepth: 1, SkipErrors: true} // _ = nodeTree.WalkWithOptions(".", func(path string, entry fs.DirEntry, err error) error { return nil }, options) -func (n *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { +func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { if options.SkipErrors { // If root doesn't exist, silently 
return nil. - if _, err := n.Stat(root); err != nil { + if _, err := node.Stat(root); err != nil { return nil } } - return fs.WalkDir(n, root, func(entryPath string, entry fs.DirEntry, err error) error { + return fs.WalkDir(node, root, func(entryPath string, entry fs.DirEntry, err error) error { if options.Filter != nil && err == nil { if !options.Filter(entryPath, entry) { if entry != nil && entry.IsDir() { @@ -182,9 +182,9 @@ func (n *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptio }) } -func (n *Node) ReadFile(name string) ([]byte, error) { +func (node *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") - f, ok := n.files[name] + f, ok := node.files[name] if !ok { return nil, core.E("node.ReadFile", core.Concat("path not found: ", name), fs.ErrNotExist) } @@ -195,12 +195,12 @@ func (n *Node) ReadFile(name string) ([]byte, error) { } // CopyFile copies a file from the in-memory tree to the local filesystem. -func (n *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { +func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { sourcePath = core.TrimPrefix(sourcePath, "/") - f, ok := n.files[sourcePath] + f, ok := node.files[sourcePath] if !ok { // Check if it's a directory — can't copy directories this way. 
- info, err := n.Stat(sourcePath) + info, err := node.Stat(sourcePath) if err != nil { return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) } @@ -221,17 +221,17 @@ func (n *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) er // Example usage: // // dst := io.NewMockMedium() -// _ = n.CopyTo(dst, "config", "backup/config") -func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { +// _ = node.CopyTo(dst, "config", "backup/config") +func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { sourcePath = core.TrimPrefix(sourcePath, "/") - info, err := n.Stat(sourcePath) + info, err := node.Stat(sourcePath) if err != nil { return err } if !info.IsDir() { // Single file copy - f, ok := n.files[sourcePath] + f, ok := node.files[sourcePath] if !ok { return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist) } @@ -244,7 +244,7 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { prefix += "/" } - for filePath, f := range n.files { + for filePath, f := range node.files { if !core.HasPrefix(filePath, prefix) && filePath != sourcePath { continue } @@ -262,9 +262,9 @@ func (n *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { // ---------- Medium interface: fs.FS methods ---------- -func (n *Node) Open(name string) (fs.File, error) { +func (node *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") - if file, ok := n.files[name]; ok { + if file, ok := node.files[name]; ok { return &dataFileReader{file: file}, nil } // Check if it's a directory @@ -272,7 +272,7 @@ func (n *Node) Open(name string) (fs.File, error) { if name == "." 
|| name == "" { prefix = "" } - for filePath := range n.files { + for filePath := range node.files { if core.HasPrefix(filePath, prefix) { return &dirFile{path: name, modTime: time.Now()}, nil } @@ -280,9 +280,9 @@ func (n *Node) Open(name string) (fs.File, error) { return nil, core.E("node.Open", core.Concat("path not found: ", name), fs.ErrNotExist) } -func (n *Node) Stat(name string) (fs.FileInfo, error) { +func (node *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") - if file, ok := n.files[name]; ok { + if file, ok := node.files[name]; ok { return file.Stat() } // Check if it's a directory @@ -290,7 +290,7 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { if name == "." || name == "" { prefix = "" } - for filePath := range n.files { + for filePath := range node.files { if core.HasPrefix(filePath, prefix) { return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil } @@ -298,14 +298,14 @@ func (n *Node) Stat(name string) (fs.FileInfo, error) { return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist) } -func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { +func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = core.TrimPrefix(name, "/") if name == "." { name = "" } // Disallow reading a file as a directory. 
- if info, err := n.Stat(name); err == nil && !info.IsDir() { + if info, err := node.Stat(name); err == nil && !info.IsDir() { return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} } @@ -317,7 +317,7 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { prefix = name + "/" } - for filePath := range n.files { + for filePath := range node.files { if !core.HasPrefix(filePath, prefix) { continue } @@ -334,7 +334,7 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { dir := &dirInfo{name: firstComponent, modTime: time.Now()} entries = append(entries, fs.FileInfoToDirEntry(dir)) } else { - file := n.files[filePath] + file := node.files[filePath] info, _ := file.Stat() entries = append(entries, fs.FileInfoToDirEntry(info)) } @@ -349,52 +349,52 @@ func (n *Node) ReadDir(name string) ([]fs.DirEntry, error) { // ---------- Medium interface: read/write ---------- -func (n *Node) Read(filePath string) (string, error) { +func (node *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") - f, ok := n.files[filePath] + f, ok := node.files[filePath] if !ok { return "", core.E("node.Read", core.Concat("path not found: ", filePath), fs.ErrNotExist) } return string(f.content), nil } -func (n *Node) Write(filePath, content string) error { - n.AddData(filePath, []byte(content)) +func (node *Node) Write(filePath, content string) error { + node.AddData(filePath, []byte(content)) return nil } -func (n *Node) WriteMode(filePath, content string, mode fs.FileMode) error { - return n.Write(filePath, content) +func (node *Node) WriteMode(filePath, content string, mode fs.FileMode) error { + return node.Write(filePath, content) } -func (n *Node) FileGet(filePath string) (string, error) { - return n.Read(filePath) +func (node *Node) FileGet(filePath string) (string, error) { + return node.Read(filePath) } -func (n *Node) FileSet(filePath, content string) error { - return n.Write(filePath, content) +func (node *Node) 
FileSet(filePath, content string) error { + return node.Write(filePath, content) } // Example: _ = nodeTree.EnsureDir("config") -func (n *Node) EnsureDir(_ string) error { +func (node *Node) EnsureDir(_ string) error { return nil } // ---------- Medium interface: existence checks ---------- -func (n *Node) Exists(filePath string) bool { - _, err := n.Stat(filePath) +func (node *Node) Exists(filePath string) bool { + _, err := node.Stat(filePath) return err == nil } -func (n *Node) IsFile(filePath string) bool { +func (node *Node) IsFile(filePath string) bool { filePath = core.TrimPrefix(filePath, "/") - _, ok := n.files[filePath] + _, ok := node.files[filePath] return ok } -func (n *Node) IsDir(filePath string) bool { - info, err := n.Stat(filePath) +func (node *Node) IsDir(filePath string) bool { + info, err := node.Stat(filePath) if err != nil { return false } @@ -403,28 +403,28 @@ func (n *Node) IsDir(filePath string) bool { // ---------- Medium interface: mutations ---------- -func (n *Node) Delete(filePath string) error { +func (node *Node) Delete(filePath string) error { filePath = core.TrimPrefix(filePath, "/") - if _, ok := n.files[filePath]; ok { - delete(n.files, filePath) + if _, ok := node.files[filePath]; ok { + delete(node.files, filePath) return nil } return core.E("node.Delete", core.Concat("path not found: ", filePath), fs.ErrNotExist) } -func (n *Node) DeleteAll(filePath string) error { +func (node *Node) DeleteAll(filePath string) error { filePath = core.TrimPrefix(filePath, "/") found := false - if _, ok := n.files[filePath]; ok { - delete(n.files, filePath) + if _, ok := node.files[filePath]; ok { + delete(node.files, filePath) found = true } prefix := filePath + "/" - for entryPath := range n.files { + for entryPath := range node.files { if core.HasPrefix(entryPath, prefix) { - delete(n.files, entryPath) + delete(node.files, entryPath) found = true } } @@ -435,56 +435,56 @@ func (n *Node) DeleteAll(filePath string) error { return nil } -func 
(n *Node) Rename(oldPath, newPath string) error { +func (node *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") - f, ok := n.files[oldPath] + f, ok := node.files[oldPath] if !ok { return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } f.name = newPath - n.files[newPath] = f - delete(n.files, oldPath) + node.files[newPath] = f + delete(node.files, oldPath) return nil } -func (n *Node) List(filePath string) ([]fs.DirEntry, error) { +func (node *Node) List(filePath string) ([]fs.DirEntry, error) { filePath = core.TrimPrefix(filePath, "/") if filePath == "" || filePath == "." { - return n.ReadDir(".") + return node.ReadDir(".") } - return n.ReadDir(filePath) + return node.ReadDir(filePath) } // ---------- Medium interface: streams ---------- -func (n *Node) Create(filePath string) (goio.WriteCloser, error) { +func (node *Node) Create(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") - return &nodeWriter{node: n, path: filePath}, nil + return &nodeWriter{node: node, path: filePath}, nil } -func (n *Node) Append(filePath string) (goio.WriteCloser, error) { +func (node *Node) Append(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") var existing []byte - if f, ok := n.files[filePath]; ok { + if f, ok := node.files[filePath]; ok { existing = make([]byte, len(f.content)) copy(existing, f.content) } - return &nodeWriter{node: n, path: filePath, buf: existing}, nil + return &nodeWriter{node: node, path: filePath, buf: existing}, nil } -func (n *Node) ReadStream(filePath string) (goio.ReadCloser, error) { - f, err := n.Open(filePath) +func (node *Node) ReadStream(filePath string) (goio.ReadCloser, error) { + f, err := node.Open(filePath) if err != nil { return nil, err } return goio.NopCloser(f), nil } -func (n *Node) WriteStream(filePath string) (goio.WriteCloser, error) { - return 
n.Create(filePath) +func (node *Node) WriteStream(filePath string) (goio.WriteCloser, error) { + return node.Create(filePath) } // ---------- Internal types ---------- diff --git a/s3/s3.go b/s3/s3.go index 055c949..c1b270b 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -100,16 +100,16 @@ func New(options Options) (*Medium, error) { if options.Client == nil { return nil, core.E("s3.New", "client is required", nil) } - m := &Medium{ + medium := &Medium{ client: options.Client, bucket: options.Bucket, prefix: normalisePrefix(options.Prefix), } - return m, nil + return medium, nil } // objectKey maps a virtual path to the full S3 object key. -func (m *Medium) objectKey(filePath string) string { +func (medium *Medium) objectKey(filePath string) string { // Clean the path using a leading "/" to sandbox traversal attempts, // then strip the "/" prefix. This ensures ".." can't escape. clean := path.Clean("/" + filePath) @@ -118,23 +118,23 @@ func (m *Medium) objectKey(filePath string) string { } clean = core.TrimPrefix(clean, "/") - if m.prefix == "" { + if medium.prefix == "" { return clean } if clean == "" { - return m.prefix + return medium.prefix } - return m.prefix + clean + return medium.prefix + clean } -func (m *Medium) Read(filePath string) (string, error) { - key := m.objectKey(filePath) +func (medium *Medium) Read(filePath string) (string, error) { + key := medium.objectKey(filePath) if key == "" { return "", core.E("s3.Read", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ - Bucket: aws.String(m.bucket), + out, err := medium.client.GetObject(context.Background(), &awss3.GetObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -149,14 +149,14 @@ func (m *Medium) Read(filePath string) (string, error) { return string(data), nil } -func (m *Medium) Write(filePath, content string) error { - key := m.objectKey(filePath) +func (medium *Medium) Write(filePath, content 
string) error { + key := medium.objectKey(filePath) if key == "" { return core.E("s3.Write", "path is required", fs.ErrInvalid) } - _, err := m.client.PutObject(context.Background(), &awss3.PutObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), Body: core.NewReader(content), }) @@ -167,18 +167,18 @@ func (m *Medium) Write(filePath, content string) error { } // Example: _ = medium.WriteMode("keys/private.key", key, 0600) -func (m *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { - return m.Write(filePath, content) +func (medium *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { + return medium.Write(filePath, content) } // Example: _ = medium.EnsureDir("reports/2026") -func (m *Medium) EnsureDir(_ string) error { +func (medium *Medium) EnsureDir(_ string) error { return nil } // Example: ok := medium.IsFile("reports/daily.txt") -func (m *Medium) IsFile(filePath string) bool { - key := m.objectKey(filePath) +func (medium *Medium) IsFile(filePath string) bool { + key := medium.objectKey(filePath) if key == "" { return false } @@ -186,29 +186,29 @@ func (m *Medium) IsFile(filePath string) bool { if core.HasSuffix(key, "/") { return false } - _, err := m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) return err == nil } -func (m *Medium) FileGet(filePath string) (string, error) { - return m.Read(filePath) +func (medium *Medium) FileGet(filePath string) (string, error) { + return medium.Read(filePath) } -func (m *Medium) FileSet(filePath, content string) error { - return m.Write(filePath, content) +func (medium *Medium) FileSet(filePath, content string) error { + return medium.Write(filePath, content) } -func (m 
*Medium) Delete(filePath string) error { - key := m.objectKey(filePath) +func (medium *Medium) Delete(filePath string) error { + key := medium.objectKey(filePath) if key == "" { return core.E("s3.Delete", "path is required", fs.ErrInvalid) } - _, err := m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -218,15 +218,15 @@ func (m *Medium) Delete(filePath string) error { } // Example: _ = medium.DeleteAll("reports/2026") -func (m *Medium) DeleteAll(filePath string) error { - key := m.objectKey(filePath) +func (medium *Medium) DeleteAll(filePath string) error { + key := medium.objectKey(filePath) if key == "" { return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } // First, try deleting the exact key - _, err := m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -243,8 +243,8 @@ func (m *Medium) DeleteAll(filePath string) error { var continuationToken *string for paginator { - listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ - Bucket: aws.String(m.bucket), + listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), ContinuationToken: continuationToken, }) @@ -261,8 +261,8 @@ func (m *Medium) DeleteAll(filePath string) error { objects[i] = types.ObjectIdentifier{Key: obj.Key} } - deleteOut, err := m.client.DeleteObjects(context.Background(), &awss3.DeleteObjectsInput{ - Bucket: aws.String(m.bucket), + deleteOut, err := medium.client.DeleteObjects(context.Background(), 
&awss3.DeleteObjectsInput{ + Bucket: aws.String(medium.bucket), Delete: &types.Delete{Objects: objects, Quiet: aws.Bool(true)}, }) if err != nil { @@ -283,17 +283,17 @@ func (m *Medium) DeleteAll(filePath string) error { } // Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") -func (m *Medium) Rename(oldPath, newPath string) error { - oldKey := m.objectKey(oldPath) - newKey := m.objectKey(newPath) +func (medium *Medium) Rename(oldPath, newPath string) error { + oldKey := medium.objectKey(oldPath) + newKey := medium.objectKey(newPath) if oldKey == "" || newKey == "" { return core.E("s3.Rename", "both old and new paths are required", fs.ErrInvalid) } - copySource := m.bucket + "/" + oldKey + copySource := medium.bucket + "/" + oldKey - _, err := m.client.CopyObject(context.Background(), &awss3.CopyObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.CopyObject(context.Background(), &awss3.CopyObjectInput{ + Bucket: aws.String(medium.bucket), CopySource: aws.String(copySource), Key: aws.String(newKey), }) @@ -301,8 +301,8 @@ func (m *Medium) Rename(oldPath, newPath string) error { return core.E("s3.Rename", core.Concat("failed to copy object: ", oldKey, " -> ", newKey), err) } - _, err = m.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ - Bucket: aws.String(m.bucket), + _, err = medium.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(oldKey), }) if err != nil { @@ -313,16 +313,16 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // Example: entries, _ := medium.List("reports") -func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { - prefix := m.objectKey(filePath) +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + prefix := medium.objectKey(filePath) if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } var entries []fs.DirEntry - listOut, err := m.client.ListObjectsV2(context.Background(), 
&awss3.ListObjectsV2Input{ - Bucket: aws.String(m.bucket), + listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), Delimiter: aws.String("/"), }) @@ -386,14 +386,14 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Example: info, _ := medium.Stat("reports/daily.txt") -func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { - key := m.objectKey(filePath) +func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + key := medium.objectKey(filePath) if key == "" { return nil, core.E("s3.Stat", "path is required", fs.ErrInvalid) } - out, err := m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ - Bucket: aws.String(m.bucket), + out, err := medium.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -418,14 +418,14 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { }, nil } -func (m *Medium) Open(filePath string) (fs.File, error) { - key := m.objectKey(filePath) +func (medium *Medium) Open(filePath string) (fs.File, error) { + key := medium.objectKey(filePath) if key == "" { return nil, core.E("s3.Open", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ - Bucket: aws.String(m.bucket), + out, err := medium.client.GetObject(context.Background(), &awss3.GetObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -456,27 +456,27 @@ func (m *Medium) Open(filePath string) (fs.File, error) { } // Example: writer, _ := medium.Create("reports/daily.txt") -func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { - key := m.objectKey(filePath) +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { + key := medium.objectKey(filePath) if key == "" { return nil, core.E("s3.Create", "path is 
required", fs.ErrInvalid) } return &s3WriteCloser{ - medium: m, + medium: medium, key: key, }, nil } // Example: writer, _ := medium.Append("reports/daily.txt") -func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { - key := m.objectKey(filePath) +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { + key := medium.objectKey(filePath) if key == "" { return nil, core.E("s3.Append", "path is required", fs.ErrInvalid) } var existing []byte - out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ - Bucket: aws.String(m.bucket), + out, err := medium.client.GetObject(context.Background(), &awss3.GetObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err == nil { @@ -485,20 +485,20 @@ func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { } return &s3WriteCloser{ - medium: m, + medium: medium, key: key, data: existing, }, nil } -func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { - key := m.objectKey(filePath) +func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + key := medium.objectKey(filePath) if key == "" { return nil, core.E("s3.ReadStream", "path is required", fs.ErrInvalid) } - out, err := m.client.GetObject(context.Background(), &awss3.GetObjectInput{ - Bucket: aws.String(m.bucket), + out, err := medium.client.GetObject(context.Background(), &awss3.GetObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err != nil { @@ -507,20 +507,20 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return out.Body, nil } -func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { - return m.Create(filePath) +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return medium.Create(filePath) } // Example: ok := medium.Exists("reports/daily.txt") -func (m *Medium) Exists(filePath string) bool { - key := m.objectKey(filePath) +func (medium *Medium) 
Exists(filePath string) bool { + key := medium.objectKey(filePath) if key == "" { return false } // Check as an exact object - _, err := m.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ - Bucket: aws.String(m.bucket), + _, err := medium.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ + Bucket: aws.String(medium.bucket), Key: aws.String(key), }) if err == nil { @@ -532,8 +532,8 @@ func (m *Medium) Exists(filePath string) bool { if !core.HasSuffix(prefix, "/") { prefix += "/" } - listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ - Bucket: aws.String(m.bucket), + listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), }) @@ -544,8 +544,8 @@ func (m *Medium) Exists(filePath string) bool { } // Example: ok := medium.IsDir("reports") -func (m *Medium) IsDir(filePath string) bool { - key := m.objectKey(filePath) +func (medium *Medium) IsDir(filePath string) bool { + key := medium.objectKey(filePath) if key == "" { return false } @@ -555,8 +555,8 @@ func (m *Medium) IsDir(filePath string) bool { prefix += "/" } - listOut, err := m.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ - Bucket: aws.String(m.bucket), + listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), }) diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 802878a..52e4ba5 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -45,25 +45,25 @@ type PreObfuscator interface { type XORObfuscator struct{} // Obfuscate XORs the data with a key stream derived from the entropy. 
-func (x *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { +func (obfuscator *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data } - return x.transform(data, entropy) + return obfuscator.transform(data, entropy) } // Deobfuscate reverses the XOR transformation (XOR is symmetric). -func (x *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { +func (obfuscator *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data } - return x.transform(data, entropy) + return obfuscator.transform(data, entropy) } // transform applies XOR with an entropy-derived key stream. -func (x *XORObfuscator) transform(data []byte, entropy []byte) []byte { +func (obfuscator *XORObfuscator) transform(data []byte, entropy []byte) []byte { result := make([]byte, len(data)) - keyStream := x.deriveKeyStream(entropy, len(data)) + keyStream := obfuscator.deriveKeyStream(entropy, len(data)) for i := range data { result[i] = data[i] ^ keyStream[i] } @@ -71,7 +71,7 @@ func (x *XORObfuscator) transform(data []byte, entropy []byte) []byte { } // deriveKeyStream creates a deterministic key stream from entropy. -func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { +func (obfuscator *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { stream := make([]byte, length) h := sha256.New() @@ -98,7 +98,7 @@ func (x *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { type ShuffleMaskObfuscator struct{} // Obfuscate shuffles bytes and applies a mask derived from entropy. 
-func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { +func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data } @@ -107,8 +107,8 @@ func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { copy(result, data) // Generate permutation and mask from entropy - perm := s.generatePermutation(entropy, len(data)) - mask := s.deriveMask(entropy, len(data)) + perm := obfuscator.generatePermutation(entropy, len(data)) + mask := obfuscator.deriveMask(entropy, len(data)) // Apply mask first, then shuffle for i := range result { @@ -125,7 +125,7 @@ func (s *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { } // Deobfuscate reverses the shuffle and mask operations. -func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { +func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data } @@ -133,8 +133,8 @@ func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte result := make([]byte, len(data)) // Generate permutation and mask from entropy - perm := s.generatePermutation(entropy, len(data)) - mask := s.deriveMask(entropy, len(data)) + perm := obfuscator.generatePermutation(entropy, len(data)) + mask := obfuscator.deriveMask(entropy, len(data)) // Unshuffle first for i, p := range perm { @@ -150,7 +150,7 @@ func (s *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte } // generatePermutation creates a deterministic permutation from entropy. 
-func (s *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int { +func (obfuscator *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int { perm := make([]int, length) for i := range perm { perm[i] = i @@ -178,7 +178,7 @@ func (s *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) } // deriveMask creates a mask byte array from entropy. -func (s *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { +func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { mask := make([]byte, length) h := sha256.New() @@ -247,22 +247,22 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch } // In encrypts plaintext with the configured pre-obfuscator. -func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { - if s.Key == nil { +func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { + if sigil.Key == nil { return nil, NoKeyConfiguredError } if data == nil { return nil, nil } - aead, err := chacha20poly1305.NewX(s.Key) + aead, err := chacha20poly1305.NewX(sigil.Key) if err != nil { return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err) } // Generate nonce nonce := make([]byte, aead.NonceSize()) - reader := s.randomReader + reader := sigil.randomReader if reader == nil { reader = rand.Reader } @@ -273,8 +273,8 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { // Pre-obfuscate the plaintext using nonce as entropy // This ensures CPU encryption routines never see raw plaintext obfuscated := data - if s.Obfuscator != nil { - obfuscated = s.Obfuscator.Obfuscate(data, nonce) + if sigil.Obfuscator != nil { + obfuscated = sigil.Obfuscator.Obfuscate(data, nonce) } // Encrypt the obfuscated data @@ -285,15 +285,15 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { } // Out decrypts ciphertext and reverses the pre-obfuscation step. 
-func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { - if s.Key == nil { +func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { + if sigil.Key == nil { return nil, NoKeyConfiguredError } if data == nil { return nil, nil } - aead, err := chacha20poly1305.NewX(s.Key) + aead, err := chacha20poly1305.NewX(sigil.Key) if err != nil { return nil, core.E("sigil.ChaChaPolySigil.Out", "create cipher", err) } @@ -315,8 +315,8 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { // Deobfuscate using the same nonce as entropy plaintext := obfuscated - if s.Obfuscator != nil { - plaintext = s.Obfuscator.Deobfuscate(obfuscated, nonce) + if sigil.Obfuscator != nil { + plaintext = sigil.Obfuscator.Deobfuscate(obfuscated, nonce) } if len(plaintext) == 0 { diff --git a/sigil/sigil.go b/sigil/sigil.go index e12d847..77df934 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -20,8 +20,8 @@ type Sigil interface { // Example: encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func Transmute(data []byte, sigils []Sigil) ([]byte, error) { var err error - for _, s := range sigils { - data, err = s.In(data) + for _, sigilValue := range sigils { + data, err = sigilValue.In(data) if err != nil { return nil, core.E("sigil.Transmute", "sigil in failed", err) } diff --git a/sigil/sigils.go b/sigil/sigils.go index 38cb994..36d82df 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -25,7 +25,7 @@ import ( type ReverseSigil struct{} // In reverses the bytes of the data. -func (s *ReverseSigil) In(data []byte) ([]byte, error) { +func (sigil *ReverseSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -37,8 +37,8 @@ func (s *ReverseSigil) In(data []byte) ([]byte, error) { } // Out reverses the bytes of the data. 
-func (s *ReverseSigil) Out(data []byte) ([]byte, error) { - return s.In(data) +func (sigil *ReverseSigil) Out(data []byte) ([]byte, error) { + return sigil.In(data) } // HexSigil is a Sigil that encodes/decodes data to/from hexadecimal. @@ -46,7 +46,7 @@ func (s *ReverseSigil) Out(data []byte) ([]byte, error) { type HexSigil struct{} // In encodes the data to hexadecimal. -func (s *HexSigil) In(data []byte) ([]byte, error) { +func (sigil *HexSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -56,7 +56,7 @@ func (s *HexSigil) In(data []byte) ([]byte, error) { } // Out decodes the data from hexadecimal. -func (s *HexSigil) Out(data []byte) ([]byte, error) { +func (sigil *HexSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -70,7 +70,7 @@ func (s *HexSigil) Out(data []byte) ([]byte, error) { type Base64Sigil struct{} // In encodes the data to base64. -func (s *Base64Sigil) In(data []byte) ([]byte, error) { +func (sigil *Base64Sigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -80,7 +80,7 @@ func (s *Base64Sigil) In(data []byte) ([]byte, error) { } // Out decodes the data from base64. -func (s *Base64Sigil) Out(data []byte) ([]byte, error) { +func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -96,12 +96,12 @@ type GzipSigil struct { } // In compresses the data using gzip. -func (s *GzipSigil) In(data []byte) ([]byte, error) { +func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } var b bytes.Buffer - outputWriter := s.outputWriter + outputWriter := sigil.outputWriter if outputWriter == nil { outputWriter = &b } @@ -116,7 +116,7 @@ func (s *GzipSigil) In(data []byte) ([]byte, error) { } // Out decompresses the data using gzip. 
-func (s *GzipSigil) Out(data []byte) ([]byte, error) { +func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -137,7 +137,7 @@ func (s *GzipSigil) Out(data []byte) ([]byte, error) { type JSONSigil struct{ Indent bool } // In compacts or indents the JSON data. -func (s *JSONSigil) In(data []byte) ([]byte, error) { +func (sigil *JSONSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } @@ -152,14 +152,14 @@ func (s *JSONSigil) In(data []byte) ([]byte, error) { } compact := core.JSONMarshalString(decoded) - if s.Indent { + if sigil.Indent { return []byte(indentJSON(compact)), nil } return []byte(compact), nil } // Out is a no-op for JSONSigil. -func (s *JSONSigil) Out(data []byte) ([]byte, error) { +func (sigil *JSONSigil) Out(data []byte) ([]byte, error) { // For simplicity, Out is a no-op. The primary use is formatting. return data, nil } @@ -179,9 +179,9 @@ func NewHashSigil(h crypto.Hash) *HashSigil { } // In hashes the data. -func (s *HashSigil) In(data []byte) ([]byte, error) { +func (sigil *HashSigil) In(data []byte) ([]byte, error) { var hasher goio.Writer - switch s.Hash { + switch sigil.Hash { case crypto.MD4: hasher = md4.New() case crypto.MD5: @@ -228,7 +228,7 @@ func (s *HashSigil) In(data []byte) ([]byte, error) { } // Out is a no-op for HashSigil. -func (s *HashSigil) Out(data []byte) ([]byte, error) { +func (sigil *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 87a5d99..bd02cca 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -79,9 +79,9 @@ func New(options Options) (*Medium, error) { } // Close closes the underlying database connection. 
-func (m *Medium) Close() error { - if m.database != nil { - return m.database.Close() +func (medium *Medium) Close() error { + if medium.database != nil { + return medium.database.Close() } return nil } @@ -96,7 +96,7 @@ func normaliseEntryPath(filePath string) string { return core.TrimPrefix(clean, "/") } -func (m *Medium) Read(filePath string) (string, error) { +func (medium *Medium) Read(filePath string) (string, error) { key := normaliseEntryPath(filePath) if key == "" { return "", core.E("sqlite.Read", "path is required", fs.ErrInvalid) @@ -104,8 +104,8 @@ func (m *Medium) Read(filePath string) (string, error) { var content []byte var isDir bool - err := m.database.QueryRow( - `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT content, is_dir FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { return "", core.E("sqlite.Read", core.Concat("file not found: ", key), fs.ErrNotExist) @@ -119,19 +119,19 @@ func (m *Medium) Read(filePath string) (string, error) { return string(content), nil } -func (m *Medium) Write(filePath, content string) error { - return m.WriteMode(filePath, content, 0644) +func (medium *Medium) Write(filePath, content string) error { + return medium.WriteMode(filePath, content, 0644) } // Example: _ = medium.WriteMode("keys/private.key", key, 0600) -func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { +func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { key := normaliseEntryPath(filePath) if key == "" { return core.E("sqlite.WriteMode", "path is required", fs.ErrInvalid) } - _, err := m.database.Exec( - `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?) + _, err := medium.database.Exec( + `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = FALSE, mtime = excluded.mtime`, key, []byte(content), int(mode), time.Now().UTC(), ) @@ -142,15 +142,15 @@ func (m *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { } // Example: _ = medium.EnsureDir("config") -func (m *Medium) EnsureDir(filePath string) error { +func (medium *Medium) EnsureDir(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { // Root always "exists" return nil } - _, err := m.database.Exec( - `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, '', 493, TRUE, ?) + _, err := medium.database.Exec( + `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, '', 493, TRUE, ?) ON CONFLICT(path) DO NOTHING`, key, time.Now().UTC(), ) @@ -160,15 +160,15 @@ func (m *Medium) EnsureDir(filePath string) error { return nil } -func (m *Medium) IsFile(filePath string) bool { +func (medium *Medium) IsFile(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { return false } var isDir bool - err := m.database.QueryRow( - `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT is_dir FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&isDir) if err != nil { return false @@ -176,16 +176,16 @@ func (m *Medium) IsFile(filePath string) bool { return !isDir } -func (m *Medium) FileGet(filePath string) (string, error) { - return m.Read(filePath) +func (medium *Medium) FileGet(filePath string) (string, error) { + return medium.Read(filePath) } -func (m *Medium) FileSet(filePath, content string) error { - return m.Write(filePath, content) +func (medium *Medium) FileSet(filePath, content string) error { + return medium.Write(filePath, content) } // Example: _ = medium.Delete("config/app.yaml") -func (m *Medium) Delete(filePath string) error { +func (medium *Medium) Delete(filePath string) error { key := normaliseEntryPath(filePath) if 
key == "" { return core.E("sqlite.Delete", "path is required", fs.ErrInvalid) @@ -193,8 +193,8 @@ func (m *Medium) Delete(filePath string) error { // Check if it's a directory with children var isDir bool - err := m.database.QueryRow( - `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT is_dir FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&isDir) if err == sql.ErrNoRows { return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist) @@ -207,8 +207,8 @@ func (m *Medium) Delete(filePath string) error { // Check for children prefix := key + "/" var count int - err := m.database.QueryRow( - `SELECT COUNT(*) FROM `+m.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key, + err := medium.database.QueryRow( + `SELECT COUNT(*) FROM `+medium.table+` WHERE path LIKE ? AND path != ?`, prefix+"%", key, ).Scan(&count) if err != nil { return core.E("sqlite.Delete", core.Concat("count failed: ", key), err) @@ -218,7 +218,7 @@ func (m *Medium) Delete(filePath string) error { } } - res, err := m.database.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, key) + res, err := medium.database.Exec(`DELETE FROM `+medium.table+` WHERE path = ?`, key) if err != nil { return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err) } @@ -230,7 +230,7 @@ func (m *Medium) Delete(filePath string) error { } // Example: _ = medium.DeleteAll("config") -func (m *Medium) DeleteAll(filePath string) error { +func (medium *Medium) DeleteAll(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { return core.E("sqlite.DeleteAll", "path is required", fs.ErrInvalid) @@ -239,8 +239,8 @@ func (m *Medium) DeleteAll(filePath string) error { prefix := key + "/" // Delete the exact path and all children - res, err := m.database.Exec( - `DELETE FROM `+m.table+` WHERE path = ? OR path LIKE ?`, + res, err := medium.database.Exec( + `DELETE FROM `+medium.table+` WHERE path = ? 
OR path LIKE ?`, key, prefix+"%", ) if err != nil { @@ -254,14 +254,14 @@ func (m *Medium) DeleteAll(filePath string) error { } // Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") -func (m *Medium) Rename(oldPath, newPath string) error { +func (medium *Medium) Rename(oldPath, newPath string) error { oldKey := normaliseEntryPath(oldPath) newKey := normaliseEntryPath(newPath) if oldKey == "" || newKey == "" { return core.E("sqlite.Rename", "both old and new paths are required", fs.ErrInvalid) } - tx, err := m.database.Begin() + tx, err := medium.database.Begin() if err != nil { return core.E("sqlite.Rename", "begin tx failed", err) } @@ -273,7 +273,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { var isDir bool var mtime time.Time err = tx.QueryRow( - `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, oldKey, + `SELECT content, mode, is_dir, mtime FROM `+medium.table+` WHERE path = ?`, oldKey, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { return core.E("sqlite.Rename", core.Concat("source not found: ", oldKey), fs.ErrNotExist) @@ -284,7 +284,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { // Insert or replace at new path _, err = tx.Exec( - `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) + `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`, newKey, content, mode, isDir, mtime, ) @@ -293,7 +293,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // Delete old path - _, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path = ?`, oldKey) + _, err = tx.Exec(`DELETE FROM `+medium.table+` WHERE path = ?`, oldKey) if err != nil { return core.E("sqlite.Rename", core.Concat("delete old path failed: ", oldKey), err) } @@ -304,7 +304,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { newPrefix := newKey + "/" rows, err := tx.Query( - `SELECT path, content, mode, is_dir, mtime FROM `+m.table+` WHERE path LIKE ?`, + `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ?`, oldPrefix+"%", ) if err != nil { @@ -332,7 +332,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { for _, c := range children { newChildPath := core.Concat(newPrefix, core.TrimPrefix(c.path, oldPrefix)) _, err = tx.Exec( - `INSERT INTO `+m.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) + `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`, newChildPath, c.content, c.mode, c.isDir, c.mtime, ) @@ -342,7 +342,7 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // Delete old children - _, err = tx.Exec(`DELETE FROM `+m.table+` WHERE path LIKE ?`, oldPrefix+"%") + _, err = tx.Exec(`DELETE FROM `+medium.table+` WHERE path LIKE ?`, oldPrefix+"%") if err != nil { return core.E("sqlite.Rename", "delete old children failed", err) } @@ -352,15 +352,15 @@ func (m *Medium) Rename(oldPath, newPath string) error { } // Example: entries, _ := medium.List("config") -func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix := normaliseEntryPath(filePath) if prefix != "" { prefix += "/" } // Query all paths under the prefix - rows, err := m.database.Query( - `SELECT path, content, mode, is_dir, mtime FROM `+m.table+` WHERE path LIKE ? OR path LIKE ?`, + rows, err := medium.database.Query( + `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ? 
OR path LIKE ?`, prefix+"%", prefix+"%", ) if err != nil { @@ -427,7 +427,7 @@ func (m *Medium) List(filePath string) ([]fs.DirEntry, error) { return entries, rows.Err() } -func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { +func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Stat", "path is required", fs.ErrInvalid) @@ -437,8 +437,8 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { var mode int var isDir bool var mtime time.Time - err := m.database.QueryRow( - `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT content, mode, is_dir, mtime FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { return nil, core.E("sqlite.Stat", core.Concat("path not found: ", key), fs.ErrNotExist) @@ -457,7 +457,7 @@ func (m *Medium) Stat(filePath string) (fs.FileInfo, error) { }, nil } -func (m *Medium) Open(filePath string) (fs.File, error) { +func (medium *Medium) Open(filePath string) (fs.File, error) { key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Open", "path is required", fs.ErrInvalid) @@ -467,8 +467,8 @@ func (m *Medium) Open(filePath string) (fs.File, error) { var mode int var isDir bool var mtime time.Time - err := m.database.QueryRow( - `SELECT content, mode, is_dir, mtime FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT content, mode, is_dir, mtime FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&content, &mode, &isDir, &mtime) if err == sql.ErrNoRows { return nil, core.E("sqlite.Open", core.Concat("file not found: ", key), fs.ErrNotExist) @@ -488,39 +488,39 @@ func (m *Medium) Open(filePath string) (fs.File, error) { }, nil } -func (m *Medium) Create(filePath string) (goio.WriteCloser, error) { +func (medium *Medium) Create(filePath string) (goio.WriteCloser, 
error) { key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Create", "path is required", fs.ErrInvalid) } return &sqliteWriteCloser{ - medium: m, + medium: medium, path: key, }, nil } -func (m *Medium) Append(filePath string) (goio.WriteCloser, error) { +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.Append", "path is required", fs.ErrInvalid) } var existing []byte - err := m.database.QueryRow( - `SELECT content FROM `+m.table+` WHERE path = ? AND is_dir = FALSE`, key, + err := medium.database.QueryRow( + `SELECT content FROM `+medium.table+` WHERE path = ? AND is_dir = FALSE`, key, ).Scan(&existing) if err != nil && err != sql.ErrNoRows { return nil, core.E("sqlite.Append", core.Concat("query failed: ", key), err) } return &sqliteWriteCloser{ - medium: m, + medium: medium, path: key, data: existing, }, nil } -func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { +func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := normaliseEntryPath(filePath) if key == "" { return nil, core.E("sqlite.ReadStream", "path is required", fs.ErrInvalid) @@ -528,8 +528,8 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { var content []byte var isDir bool - err := m.database.QueryRow( - `SELECT content, is_dir FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT content, is_dir FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&content, &isDir) if err == sql.ErrNoRows { return nil, core.E("sqlite.ReadStream", core.Concat("file not found: ", key), fs.ErrNotExist) @@ -544,11 +544,11 @@ func (m *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return goio.NopCloser(bytes.NewReader(content)), nil } -func (m *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { - return m.Create(filePath) +func (medium *Medium) WriteStream(filePath 
string) (goio.WriteCloser, error) { + return medium.Create(filePath) } -func (m *Medium) Exists(filePath string) bool { +func (medium *Medium) Exists(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { // Root always exists @@ -556,8 +556,8 @@ func (m *Medium) Exists(filePath string) bool { } var count int - err := m.database.QueryRow( - `SELECT COUNT(*) FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT COUNT(*) FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&count) if err != nil { return false @@ -565,15 +565,15 @@ func (m *Medium) Exists(filePath string) bool { return count > 0 } -func (m *Medium) IsDir(filePath string) bool { +func (medium *Medium) IsDir(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { return false } var isDir bool - err := m.database.QueryRow( - `SELECT is_dir FROM `+m.table+` WHERE path = ?`, key, + err := medium.database.QueryRow( + `SELECT is_dir FROM `+medium.table+` WHERE path = ?`, key, ).Scan(&isDir) if err != nil { return false diff --git a/store/medium.go b/store/medium.go index 1d5feca..7461264 100644 --- a/store/medium.go +++ b/store/medium.go @@ -31,18 +31,18 @@ func NewMedium(options Options) (*Medium, error) { } // Example: medium := keyValueStore.AsMedium() -func (s *Store) AsMedium() *Medium { - return &Medium{store: s} +func (store *Store) AsMedium() *Medium { + return &Medium{store: store} } // Example: keyValueStore := medium.Store() -func (m *Medium) Store() *Store { - return m.store +func (medium *Medium) Store() *Store { + return medium.store } // Example: _ = medium.Close() -func (m *Medium) Close() error { - return m.store.Close() +func (medium *Medium) Close() error { + return medium.store.Close() } // splitGroupKeyPath splits a group/key path into store components. 
@@ -59,56 +59,56 @@ func splitGroupKeyPath(entryPath string) (group, key string) { return parts[0], parts[1] } -func (m *Medium) Read(entryPath string) (string, error) { +func (medium *Medium) Read(entryPath string) (string, error) { group, key := splitGroupKeyPath(entryPath) if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } - return m.store.Get(group, key) + return medium.store.Get(group, key) } -func (m *Medium) Write(entryPath, content string) error { +func (medium *Medium) Write(entryPath, content string) error { group, key := splitGroupKeyPath(entryPath) if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } - return m.store.Set(group, key, content) + return medium.store.Set(group, key, content) } // Example: _ = medium.WriteMode("app/theme", "midnight", 0600) -func (m *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { - return m.Write(entryPath, content) +func (medium *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { + return medium.Write(entryPath, content) } // Example: _ = medium.EnsureDir("app") -func (m *Medium) EnsureDir(_ string) error { +func (medium *Medium) EnsureDir(_ string) error { return nil } -func (m *Medium) IsFile(entryPath string) bool { +func (medium *Medium) IsFile(entryPath string) bool { group, key := splitGroupKeyPath(entryPath) if key == "" { return false } - _, err := m.store.Get(group, key) + _, err := medium.store.Get(group, key) return err == nil } -func (m *Medium) FileGet(entryPath string) (string, error) { - return m.Read(entryPath) +func (medium *Medium) FileGet(entryPath string) (string, error) { + return medium.Read(entryPath) } -func (m *Medium) FileSet(entryPath, content string) error { - return m.Write(entryPath, content) +func (medium *Medium) FileSet(entryPath, content string) error { + return medium.Write(entryPath, content) } -func (m *Medium) Delete(entryPath string) error { +func (medium *Medium) 
Delete(entryPath string) error { group, key := splitGroupKeyPath(entryPath) if group == "" { return core.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { - entryCount, err := m.store.Count(group) + entryCount, err := medium.store.Count(group) if err != nil { return err } @@ -117,42 +117,42 @@ func (m *Medium) Delete(entryPath string) error { } return nil } - return m.store.Delete(group, key) + return medium.store.Delete(group, key) } -func (m *Medium) DeleteAll(entryPath string) error { +func (medium *Medium) DeleteAll(entryPath string) error { group, key := splitGroupKeyPath(entryPath) if group == "" { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } if key == "" { - return m.store.DeleteGroup(group) + return medium.store.DeleteGroup(group) } - return m.store.Delete(group, key) + return medium.store.Delete(group, key) } -func (m *Medium) Rename(oldPath, newPath string) error { +func (medium *Medium) Rename(oldPath, newPath string) error { oldGroup, oldKey := splitGroupKeyPath(oldPath) newGroup, newKey := splitGroupKeyPath(newPath) if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } - val, err := m.store.Get(oldGroup, oldKey) + val, err := medium.store.Get(oldGroup, oldKey) if err != nil { return err } - if err := m.store.Set(newGroup, newKey, val); err != nil { + if err := medium.store.Set(newGroup, newKey, val); err != nil { return err } - return m.store.Delete(oldGroup, oldKey) + return medium.store.Delete(oldGroup, oldKey) } // Example: entries, _ := medium.List("app") -func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { +func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { - rows, err := m.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") + rows, err := medium.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") if err != nil { return nil, 
core.E("store.List", "query groups", err) } @@ -173,7 +173,7 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { return nil, nil // leaf node, nothing beneath } - all, err := m.store.GetAll(group) + all, err := medium.store.GetAll(group) if err != nil { return nil, err } @@ -185,13 +185,13 @@ func (m *Medium) List(entryPath string) ([]fs.DirEntry, error) { } // Example: info, _ := medium.Stat("app/theme") -func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { +func (medium *Medium) Stat(entryPath string) (fs.FileInfo, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { - entryCount, err := m.store.Count(group) + entryCount, err := medium.store.Count(group) if err != nil { return nil, err } @@ -200,77 +200,77 @@ func (m *Medium) Stat(entryPath string) (fs.FileInfo, error) { } return &keyValueFileInfo{name: group, isDir: true}, nil } - val, err := m.store.Get(group, key) + value, err := medium.store.Get(group, key) if err != nil { return nil, err } - return &keyValueFileInfo{name: key, size: int64(len(val))}, nil + return &keyValueFileInfo{name: key, size: int64(len(value))}, nil } -func (m *Medium) Open(entryPath string) (fs.File, error) { +func (medium *Medium) Open(entryPath string) (fs.File, error) { group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } - val, err := m.store.Get(group, key) + value, err := medium.store.Get(group, key) if err != nil { return nil, err } - return &keyValueFile{name: key, content: []byte(val)}, nil + return &keyValueFile{name: key, content: []byte(value)}, nil } -func (m *Medium) Create(entryPath string) (goio.WriteCloser, error) { +func (medium *Medium) Create(entryPath string) (goio.WriteCloser, error) { group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Create", "path must include 
group/key", fs.ErrInvalid) } - return &keyValueWriteCloser{store: m.store, group: group, key: key}, nil + return &keyValueWriteCloser{store: medium.store, group: group, key: key}, nil } -func (m *Medium) Append(entryPath string) (goio.WriteCloser, error) { +func (medium *Medium) Append(entryPath string) (goio.WriteCloser, error) { group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } - existing, _ := m.store.Get(group, key) - return &keyValueWriteCloser{store: m.store, group: group, key: key, data: []byte(existing)}, nil + existingValue, _ := medium.store.Get(group, key) + return &keyValueWriteCloser{store: medium.store, group: group, key: key, data: []byte(existingValue)}, nil } -func (m *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { +func (medium *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { group, key := splitGroupKeyPath(entryPath) if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } - val, err := m.store.Get(group, key) + val, err := medium.store.Get(group, key) if err != nil { return nil, err } return goio.NopCloser(core.NewReader(val)), nil } -func (m *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { - return m.Create(entryPath) +func (medium *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { + return medium.Create(entryPath) } -func (m *Medium) Exists(entryPath string) bool { +func (medium *Medium) Exists(entryPath string) bool { group, key := splitGroupKeyPath(entryPath) if group == "" { return false } if key == "" { - entryCount, err := m.store.Count(group) + entryCount, err := medium.store.Count(group) return err == nil && entryCount > 0 } - _, err := m.store.Get(group, key) + _, err := medium.store.Get(group, key) return err == nil } -func (m *Medium) IsDir(entryPath string) bool { +func (medium *Medium) IsDir(entryPath string) bool { group, key := 
splitGroupKeyPath(entryPath) if key != "" || group == "" { return false } - entryCount, err := m.store.Count(group) + entryCount, err := medium.store.Count(group) return err == nil && entryCount > 0 } @@ -282,22 +282,22 @@ type keyValueFileInfo struct { isDir bool } -func (fi *keyValueFileInfo) Name() string { return fi.name } +func (fileInfo *keyValueFileInfo) Name() string { return fileInfo.name } -func (fi *keyValueFileInfo) Size() int64 { return fi.size } +func (fileInfo *keyValueFileInfo) Size() int64 { return fileInfo.size } -func (fi *keyValueFileInfo) Mode() fs.FileMode { - if fi.isDir { +func (fileInfo *keyValueFileInfo) Mode() fs.FileMode { + if fileInfo.isDir { return fs.ModeDir | 0755 } return 0644 } -func (fi *keyValueFileInfo) ModTime() time.Time { return time.Time{} } +func (fileInfo *keyValueFileInfo) ModTime() time.Time { return time.Time{} } -func (fi *keyValueFileInfo) IsDir() bool { return fi.isDir } +func (fileInfo *keyValueFileInfo) IsDir() bool { return fileInfo.isDir } -func (fi *keyValueFileInfo) Sys() any { return nil } +func (fileInfo *keyValueFileInfo) Sys() any { return nil } type keyValueDirEntry struct { name string @@ -305,19 +305,19 @@ type keyValueDirEntry struct { size int64 } -func (de *keyValueDirEntry) Name() string { return de.name } +func (entry *keyValueDirEntry) Name() string { return entry.name } -func (de *keyValueDirEntry) IsDir() bool { return de.isDir } +func (entry *keyValueDirEntry) IsDir() bool { return entry.isDir } -func (de *keyValueDirEntry) Type() fs.FileMode { - if de.isDir { +func (entry *keyValueDirEntry) Type() fs.FileMode { + if entry.isDir { return fs.ModeDir } return 0 } -func (de *keyValueDirEntry) Info() (fs.FileInfo, error) { - return &keyValueFileInfo{name: de.name, size: de.size, isDir: de.isDir}, nil +func (entry *keyValueDirEntry) Info() (fs.FileInfo, error) { + return &keyValueFileInfo{name: entry.name, size: entry.size, isDir: entry.isDir}, nil } type keyValueFile struct { @@ -326,20 +326,20 @@ 
type keyValueFile struct { offset int64 } -func (f *keyValueFile) Stat() (fs.FileInfo, error) { - return &keyValueFileInfo{name: f.name, size: int64(len(f.content))}, nil +func (file *keyValueFile) Stat() (fs.FileInfo, error) { + return &keyValueFileInfo{name: file.name, size: int64(len(file.content))}, nil } -func (f *keyValueFile) Read(b []byte) (int, error) { - if f.offset >= int64(len(f.content)) { +func (file *keyValueFile) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(b, f.content[f.offset:]) - f.offset += int64(n) - return n, nil + readCount := copy(buffer, file.content[file.offset:]) + file.offset += int64(readCount) + return readCount, nil } -func (f *keyValueFile) Close() error { return nil } +func (file *keyValueFile) Close() error { return nil } type keyValueWriteCloser struct { store *Store @@ -348,11 +348,11 @@ type keyValueWriteCloser struct { data []byte } -func (w *keyValueWriteCloser) Write(p []byte) (int, error) { - w.data = append(w.data, p...) - return len(p), nil +func (writer *keyValueWriteCloser) Write(data []byte) (int, error) { + writer.data = append(writer.data, data...) 
+ return len(data), nil } -func (w *keyValueWriteCloser) Close() error { - return w.store.Set(w.group, w.key, string(w.data)) +func (writer *keyValueWriteCloser) Close() error { + return writer.store.Set(writer.group, writer.key, string(writer.data)) } diff --git a/store/store.go b/store/store.go index 4d43f30..f82216c 100644 --- a/store/store.go +++ b/store/store.go @@ -52,14 +52,14 @@ func New(options Options) (*Store, error) { } // Example: _ = keyValueStore.Close() -func (s *Store) Close() error { - return s.database.Close() +func (store *Store) Close() error { + return store.database.Close() } // Example: theme, _ := keyValueStore.Get("app", "theme") -func (s *Store) Get(group, key string) (string, error) { +func (store *Store) Get(group, key string) (string, error) { var value string - err := s.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) + err := store.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) if err == sql.ErrNoRows { return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), NotFoundError) } @@ -70,8 +70,8 @@ func (s *Store) Get(group, key string) (string, error) { } // Example: _ = keyValueStore.Set("app", "theme", "midnight") -func (s *Store) Set(group, key, value string) error { - _, err := s.database.Exec( +func (store *Store) Set(group, key, value string) error { + _, err := store.database.Exec( `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) ON CONFLICT(grp, key) DO UPDATE SET value = excluded.value`, group, key, value, @@ -83,8 +83,8 @@ func (s *Store) Set(group, key, value string) error { } // Example: _ = keyValueStore.Delete("app", "theme") -func (s *Store) Delete(group, key string) error { - _, err := s.database.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) +func (store *Store) Delete(group, key string) error { + _, err := store.database.Exec("DELETE FROM kv WHERE grp = ? 
AND key = ?", group, key) if err != nil { return core.E("store.Delete", "exec", err) } @@ -92,9 +92,9 @@ func (s *Store) Delete(group, key string) error { } // Example: count, _ := keyValueStore.Count("app") -func (s *Store) Count(group string) (int, error) { +func (store *Store) Count(group string) (int, error) { var count int - err := s.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) + err := store.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) if err != nil { return 0, core.E("store.Count", "query", err) } @@ -102,8 +102,8 @@ func (s *Store) Count(group string) (int, error) { } // Example: _ = keyValueStore.DeleteGroup("app") -func (s *Store) DeleteGroup(group string) error { - _, err := s.database.Exec("DELETE FROM kv WHERE grp = ?", group) +func (store *Store) DeleteGroup(group string) error { + _, err := store.database.Exec("DELETE FROM kv WHERE grp = ?", group) if err != nil { return core.E("store.DeleteGroup", "exec", err) } @@ -111,8 +111,8 @@ func (s *Store) DeleteGroup(group string) error { } // Example: values, _ := keyValueStore.GetAll("app") -func (s *Store) GetAll(group string) (map[string]string, error) { - rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) +func (store *Store) GetAll(group string) (map[string]string, error) { + rows, err := store.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { return nil, core.E("store.GetAll", "query", err) } @@ -135,8 +135,8 @@ func (s *Store) GetAll(group string) (map[string]string, error) { // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // _ = keyValueStore.Set("user", "name", "alice") // out, _ := keyValueStore.Render("hello {{ .name }}", "user") -func (s *Store) Render(templateText, group string) (string, error) { - rows, err := s.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) +func (store *Store) Render(templateText, group string) (string, 
error) { + rows, err := store.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { return "", core.E("store.Render", "query", err) } diff --git a/workspace/service.go b/workspace/service.go index a55c8c1..d382d3a 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -57,55 +57,55 @@ func New(options Options) (*Service, error) { return nil, core.E("workspace.New", "core is required", fs.ErrInvalid) } - s := &Service{ + service := &Service{ core: options.Core, rootPath: rootPath, medium: io.Local, } if options.Crypt != nil { - s.crypt = options.Crypt + service.crypt = options.Crypt } - if err := s.medium.EnsureDir(rootPath); err != nil { + if err := service.medium.EnsureDir(rootPath); err != nil { return nil, core.E("workspace.New", "failed to ensure root directory", err) } - return s, nil + return service, nil } // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") -func (s *Service) CreateWorkspace(identifier, password string) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() +func (service *Service) CreateWorkspace(identifier, password string) (string, error) { + service.mu.Lock() + defer service.mu.Unlock() - if s.crypt == nil { + if service.crypt == nil { return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil) } hash := sha256.Sum256([]byte(identifier)) workspaceID := hex.EncodeToString(hash[:]) - workspaceDirectory, err := s.resolveWorkspaceDirectory("workspace.CreateWorkspace", workspaceID) + workspaceDirectory, err := service.resolveWorkspaceDirectory("workspace.CreateWorkspace", workspaceID) if err != nil { return "", err } - if s.medium.Exists(workspaceDirectory) { + if service.medium.Exists(workspaceDirectory) { return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil) } for _, d := range []string{"config", "log", "data", "files", "keys"} { - if err := s.medium.EnsureDir(core.Path(workspaceDirectory, d)); err != nil { + if err := 
service.medium.EnsureDir(core.Path(workspaceDirectory, d)); err != nil { return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", d), err) } } - privKey, err := s.crypt.CreateKeyPair(identifier, password) + privKey, err := service.crypt.CreateKeyPair(identifier, password) if err != nil { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := s.medium.WriteMode(core.Path(workspaceDirectory, "keys", "private.key"), privKey, 0600); err != nil { + if err := service.medium.WriteMode(core.Path(workspaceDirectory, "keys", "private.key"), privKey, 0600); err != nil { return "", core.E("workspace.CreateWorkspace", "failed to save private key", err) } @@ -113,29 +113,29 @@ func (s *Service) CreateWorkspace(identifier, password string) (string, error) { } // Example: _ = service.SwitchWorkspace(workspaceID) -func (s *Service) SwitchWorkspace(workspaceID string) error { - s.mu.Lock() - defer s.mu.Unlock() +func (service *Service) SwitchWorkspace(workspaceID string) error { + service.mu.Lock() + defer service.mu.Unlock() - workspaceDirectory, err := s.resolveWorkspaceDirectory("workspace.SwitchWorkspace", workspaceID) + workspaceDirectory, err := service.resolveWorkspaceDirectory("workspace.SwitchWorkspace", workspaceID) if err != nil { return err } - if !s.medium.IsDir(workspaceDirectory) { + if !service.medium.IsDir(workspaceDirectory) { return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", workspaceID), nil) } - s.activeWorkspaceID = core.PathBase(workspaceDirectory) + service.activeWorkspaceID = core.PathBase(workspaceDirectory) return nil } // resolveActiveWorkspaceFilePath resolves a file path inside the active workspace files root. // It rejects empty names and traversal outside the workspace root. 
-func (s *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath string) (string, error) { - if s.activeWorkspaceID == "" { +func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath string) (string, error) { + if service.activeWorkspaceID == "" { return "", core.E(operation, "no active workspace", nil) } - filesRoot := core.Path(s.rootPath, s.activeWorkspaceID, "files") + filesRoot := core.Path(service.rootPath, service.activeWorkspaceID, "files") filePath, err := joinPathWithinRoot(filesRoot, workspaceFilePath) if err != nil { return "", core.E(operation, "file path escapes workspace files", fs.ErrPermission) @@ -147,27 +147,27 @@ func (s *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath st } // Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") -func (s *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { - s.mu.RLock() - defer s.mu.RUnlock() +func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { + service.mu.RLock() + defer service.mu.RUnlock() - filePath, err := s.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) + filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) if err != nil { return "", err } - return s.medium.Read(filePath) + return service.medium.Read(filePath) } // Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") -func (s *Service) WorkspaceFileSet(workspaceFilePath, content string) error { - s.mu.Lock() - defer s.mu.Unlock() +func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) error { + service.mu.Lock() + defer service.mu.Unlock() - filePath, err := s.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) + filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) if err != nil { return err } - return s.medium.Write(filePath, content) 
+ return service.medium.Write(filePath, content) } // service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) @@ -185,7 +185,7 @@ func (s *Service) WorkspaceFileSet(workspaceFilePath, content string) error { // // _ = createResult.OK // _ = switchResult.OK -func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { +func (service *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { case map[string]any: action, _ := payload["action"].(string) @@ -193,14 +193,14 @@ func (s *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Resul case "workspace.create": identifier, _ := payload["identifier"].(string) password, _ := payload["password"].(string) - workspaceID, err := s.CreateWorkspace(identifier, password) + workspaceID, err := service.CreateWorkspace(identifier, password) if err != nil { return core.Result{}.New(err) } return core.Result{Value: workspaceID, OK: true} case "workspace.switch": workspaceID, _ := payload["workspaceID"].(string) - if err := s.SwitchWorkspace(workspaceID); err != nil { + if err := service.SwitchWorkspace(workspaceID); err != nil { return core.Result{}.New(err) } return core.Result{OK: true} @@ -228,15 +228,15 @@ func joinPathWithinRoot(root string, parts ...string) (string, error) { return "", fs.ErrPermission } -func (s *Service) resolveWorkspaceDirectory(operation, workspaceID string) (string, error) { +func (service *Service) resolveWorkspaceDirectory(operation, workspaceID string) (string, error) { if workspaceID == "" { return "", core.E(operation, "workspace id is required", fs.ErrInvalid) } - workspaceDirectory, err := joinPathWithinRoot(s.rootPath, workspaceID) + workspaceDirectory, err := joinPathWithinRoot(service.rootPath, workspaceID) if err != nil { return "", core.E(operation, "workspace path escapes root", err) } - if core.PathDir(workspaceDirectory) != s.rootPath { + if 
core.PathDir(workspaceDirectory) != service.rootPath { return "", core.E(operation, core.Concat("invalid workspace id: ", workspaceID), fs.ErrPermission) } return workspaceDirectory, nil From d4615a2ad8f2c56e1b0dd1dcc064242c1f183b16 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:48:42 +0000 Subject: [PATCH 26/83] refactor(ax): align backend names and examples --- datanode/client.go | 76 ++++++++++----------- node/node.go | 163 +++++++++++++++++++-------------------------- s3/s3.go | 76 ++++++++++----------- sqlite/sqlite.go | 70 +++++++++---------- 4 files changed, 176 insertions(+), 209 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index 008b662..f72f0ac 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -20,14 +20,14 @@ import ( ) var ( - dataNodeWalkDir = func(fsys fs.FS, root string, fn fs.WalkDirFunc) error { - return fs.WalkDir(fsys, root, fn) + dataNodeWalkDir = func(fileSystem fs.FS, root string, callback fs.WalkDirFunc) error { + return fs.WalkDir(fileSystem, root, callback) } - dataNodeOpen = func(dn *borgdatanode.DataNode, name string) (fs.File, error) { - return dn.Open(name) + dataNodeOpen = func(dataNode *borgdatanode.DataNode, filePath string) (fs.File, error) { + return dataNode.Open(filePath) } - dataNodeReadAll = func(r goio.Reader) ([]byte, error) { - return goio.ReadAll(r) + dataNodeReadAll = func(reader goio.Reader) ([]byte, error) { + return goio.ReadAll(reader) } ) @@ -109,13 +109,13 @@ func (medium *Medium) Read(filePath string) (string, error) { defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - f, err := medium.dataNode.Open(filePath) + file, err := medium.dataNode.Open(filePath) if err != nil { return "", core.E("datanode.Read", core.Concat("not found: ", filePath), fs.ErrNotExist) } - defer f.Close() + defer file.Close() - info, err := f.Stat() + info, err := file.Stat() if err != nil { return "", core.E("datanode.Read", core.Concat("stat failed: ", filePath), err) } @@ 
-123,7 +123,7 @@ func (medium *Medium) Read(filePath string) (string, error) { return "", core.E("datanode.Read", core.Concat("is a directory: ", filePath), fs.ErrInvalid) } - data, err := goio.ReadAll(f) + data, err := goio.ReadAll(file) if err != nil { return "", core.E("datanode.Read", core.Concat("read failed: ", filePath), err) } @@ -461,11 +461,11 @@ func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { defer medium.mu.RUnlock() filePath = normaliseEntryPath(filePath) - f, err := medium.dataNode.Open(filePath) + file, err := medium.dataNode.Open(filePath) if err != nil { return nil, core.E("datanode.ReadStream", core.Concat("not found: ", filePath), fs.ErrNotExist) } - return f.(goio.ReadCloser), nil + return file.(goio.ReadCloser), nil } func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { @@ -538,13 +538,13 @@ func (medium *Medium) collectAllLocked() ([]string, error) { return names, err } -func (medium *Medium) readFileLocked(name string) ([]byte, error) { - f, err := dataNodeOpen(medium.dataNode, name) +func (medium *Medium) readFileLocked(filePath string) ([]byte, error) { + file, err := dataNodeOpen(medium.dataNode, filePath) if err != nil { return nil, err } - data, readErr := dataNodeReadAll(f) - closeErr := f.Close() + data, readErr := dataNodeReadAll(file) + closeErr := file.Close() if readErr != nil { return nil, readErr } @@ -562,7 +562,7 @@ func (medium *Medium) removeFileLocked(target string) error { if err != nil { return err } - newDN := borgdatanode.New() + newDataNode := borgdatanode.New() for _, name := range entries { if name == target { continue @@ -571,9 +571,9 @@ func (medium *Medium) removeFileLocked(target string) error { if err != nil { return err } - newDN.AddData(name, data) + newDataNode.AddData(name, data) } - medium.dataNode = newDN + medium.dataNode = newDataNode return nil } @@ -585,17 +585,17 @@ type writeCloser struct { buf []byte } -func (w *writeCloser) Write(p []byte) (int, 
error) { - w.buf = append(w.buf, p...) - return len(p), nil +func (writer *writeCloser) Write(data []byte) (int, error) { + writer.buf = append(writer.buf, data...) + return len(data), nil } -func (w *writeCloser) Close() error { - w.medium.mu.Lock() - defer w.medium.mu.Unlock() +func (writer *writeCloser) Close() error { + writer.medium.mu.Lock() + defer writer.medium.mu.Unlock() - w.medium.dataNode.AddData(w.path, w.buf) - w.medium.ensureDirsLocked(path.Dir(w.path)) + writer.medium.dataNode.AddData(writer.path, writer.buf) + writer.medium.ensureDirsLocked(path.Dir(writer.path)) return nil } @@ -605,14 +605,14 @@ type dirEntry struct { name string } -func (d *dirEntry) Name() string { return d.name } +func (entry *dirEntry) Name() string { return entry.name } -func (d *dirEntry) IsDir() bool { return true } +func (entry *dirEntry) IsDir() bool { return true } -func (d *dirEntry) Type() fs.FileMode { return fs.ModeDir } +func (entry *dirEntry) Type() fs.FileMode { return fs.ModeDir } -func (d *dirEntry) Info() (fs.FileInfo, error) { - return &fileInfo{name: d.name, isDir: true, mode: fs.ModeDir | 0755}, nil +func (entry *dirEntry) Info() (fs.FileInfo, error) { + return &fileInfo{name: entry.name, isDir: true, mode: fs.ModeDir | 0755}, nil } type fileInfo struct { @@ -623,14 +623,14 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } +func (info *fileInfo) Name() string { return info.name } -func (fi *fileInfo) Size() int64 { return fi.size } +func (info *fileInfo) Size() int64 { return info.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +func (info *fileInfo) Mode() fs.FileMode { return info.mode } -func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (info *fileInfo) ModTime() time.Time { return info.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (info *fileInfo) IsDir() bool { return info.isDir } -func (fi *fileInfo) Sys() any { return nil } +func (info *fileInfo) 
Sys() any { return nil } diff --git a/node/node.go b/node/node.go index 82d7dfe..2dab424 100644 --- a/node/node.go +++ b/node/node.go @@ -1,9 +1,9 @@ // Package node keeps io.Medium data in memory. // -// nodeTree := node.New() -// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) -// snapshot, _ := nodeTree.ToTar() -// restored, _ := node.FromTar(snapshot) +// Example: nodeTree := node.New() +// Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) +// Example: snapshot, _ := nodeTree.ToTar() +// Example: restored, _ := node.FromTar(snapshot) package node import ( @@ -21,14 +21,13 @@ import ( ) // Example: nodeTree := node.New() -// nodeTree.AddData("config/app.yaml", []byte("port: 8080")) -// snapshot, _ := nodeTree.ToTar() -// restored, _ := node.FromTar(snapshot) +// Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) +// Example: snapshot, _ := nodeTree.ToTar() +// Example: restored, _ := node.FromTar(snapshot) type Node struct { files map[string]*dataFile } -// Compile-time interface checks. var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) @@ -36,9 +35,7 @@ func New() *Node { return &Node{files: make(map[string]*dataFile)} } -// ---------- Node-specific methods ---------- - -// AddData stages content in the in-memory filesystem. +// Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func (node *Node) AddData(name string, content []byte) { name = core.TrimPrefix(name, "/") if name == "" { @@ -55,7 +52,7 @@ func (node *Node) AddData(name string, content []byte) { } } -// ToTar serialises the entire in-memory tree to a tar archive. +// Example: snapshot, _ := nodeTree.ToTar() func (node *Node) ToTar() ([]byte, error) { buf := new(bytes.Buffer) tw := tar.NewWriter(buf) @@ -91,7 +88,7 @@ func FromTar(data []byte) (*Node, error) { return n, nil } -// LoadTar replaces the in-memory tree with the contents of a tar archive. 
+// Example: _ = nodeTree.LoadTar(snapshot) func (node *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) tr := tar.NewReader(bytes.NewReader(data)) @@ -142,11 +139,7 @@ type WalkOptions struct { SkipErrors bool } -// WalkWithOptions walks the in-memory tree with an explicit configuration. -// -// nodeTree := New() -// options := WalkOptions{MaxDepth: 1, SkipErrors: true} -// _ = nodeTree.WalkWithOptions(".", func(path string, entry fs.DirEntry, err error) error { return nil }, options) +// Example: _ = nodeTree.WalkWithOptions(".", callback, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { if options.SkipErrors { // If root doesn't exist, silently return nil. @@ -184,20 +177,20 @@ func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOp func (node *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") - f, ok := node.files[name] + file, ok := node.files[name] if !ok { return nil, core.E("node.ReadFile", core.Concat("path not found: ", name), fs.ErrNotExist) } // Return a copy to prevent callers from mutating internal state. - result := make([]byte, len(f.content)) - copy(result, f.content) + result := make([]byte, len(file.content)) + copy(result, file.content) return result, nil } -// CopyFile copies a file from the in-memory tree to the local filesystem. +// Example: _ = nodeTree.CopyFile("config/app.yaml", "/tmp/app.yaml", 0644) func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { sourcePath = core.TrimPrefix(sourcePath, "/") - f, ok := node.files[sourcePath] + file, ok := node.files[sourcePath] if !ok { // Check if it's a directory — can't copy directories this way. info, err := node.Stat(sourcePath) @@ -213,15 +206,10 @@ func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) if parent != "." 
&& parent != "" && parent != destinationPath && !coreio.Local.IsDir(parent) { return &fs.PathError{Op: "copyfile", Path: destinationPath, Err: fs.ErrNotExist} } - return coreio.Local.WriteMode(destinationPath, string(f.content), perm) + return coreio.Local.WriteMode(destinationPath, string(file.content), perm) } -// CopyTo copies a file (or directory tree) from the node to any Medium. -// -// Example usage: -// -// dst := io.NewMockMedium() -// _ = node.CopyTo(dst, "config", "backup/config") +// Example: _ = nodeTree.CopyTo(io.NewMockMedium(), "config", "backup/config") func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { sourcePath = core.TrimPrefix(sourcePath, "/") info, err := node.Stat(sourcePath) @@ -231,11 +219,11 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro if !info.IsDir() { // Single file copy - f, ok := node.files[sourcePath] + file, ok := node.files[sourcePath] if !ok { return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist) } - return target.Write(destPath, string(f.content)) + return target.Write(destPath, string(file.content)) } // Directory: walk and copy all files underneath @@ -244,7 +232,7 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro prefix += "/" } - for filePath, f := range node.files { + for filePath, file := range node.files { if !core.HasPrefix(filePath, prefix) && filePath != sourcePath { continue } @@ -253,7 +241,7 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro if rel != "" { dest = core.Concat(destPath, "/", rel) } - if err := target.Write(dest, string(f.content)); err != nil { + if err := target.Write(dest, string(file.content)); err != nil { return err } } @@ -264,8 +252,8 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro func (node *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") - if file, ok := 
node.files[name]; ok { - return &dataFileReader{file: file}, nil + if dataFile, ok := node.files[name]; ok { + return &dataFileReader{file: dataFile}, nil } // Check if it's a directory prefix := name + "/" @@ -282,8 +270,8 @@ func (node *Node) Open(name string) (fs.File, error) { func (node *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") - if file, ok := node.files[name]; ok { - return file.Stat() + if dataFile, ok := node.files[name]; ok { + return dataFile.Stat() } // Check if it's a directory prefix := name + "/" @@ -351,11 +339,11 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { func (node *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") - f, ok := node.files[filePath] + file, ok := node.files[filePath] if !ok { return "", core.E("node.Read", core.Concat("path not found: ", filePath), fs.ErrNotExist) } - return string(f.content), nil + return string(file.content), nil } func (node *Node) Write(filePath, content string) error { @@ -439,13 +427,13 @@ func (node *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") - f, ok := node.files[oldPath] + file, ok := node.files[oldPath] if !ok { return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } - f.name = newPath - node.files[newPath] = f + file.name = newPath + node.files[newPath] = file delete(node.files, oldPath) return nil } @@ -468,19 +456,19 @@ func (node *Node) Create(filePath string) (goio.WriteCloser, error) { func (node *Node) Append(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") var existing []byte - if f, ok := node.files[filePath]; ok { - existing = make([]byte, len(f.content)) - copy(existing, f.content) + if file, ok := node.files[filePath]; ok { + existing = make([]byte, len(file.content)) + copy(existing, file.content) } return &nodeWriter{node: node, path: 
filePath, buf: existing}, nil } func (node *Node) ReadStream(filePath string) (goio.ReadCloser, error) { - f, err := node.Open(filePath) + file, err := node.Open(filePath) if err != nil { return nil, err } - return goio.NopCloser(f), nil + return goio.NopCloser(file), nil } func (node *Node) WriteStream(filePath string) (goio.WriteCloser, error) { @@ -496,15 +484,15 @@ type nodeWriter struct { buf []byte } -func (w *nodeWriter) Write(p []byte) (int, error) { - w.buf = append(w.buf, p...) - return len(p), nil +func (writer *nodeWriter) Write(data []byte) (int, error) { + writer.buf = append(writer.buf, data...) + return len(data), nil } -func (w *nodeWriter) Close() error { - w.node.files[w.path] = &dataFile{ - name: w.path, - content: w.buf, +func (writer *nodeWriter) Close() error { + writer.node.files[writer.path] = &dataFile{ + name: writer.path, + content: writer.buf, modTime: time.Now(), } return nil @@ -517,26 +505,26 @@ type dataFile struct { modTime time.Time } -func (d *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: d}, nil } +func (file *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: file}, nil } -func (d *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } +func (file *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } -func (d *dataFile) Close() error { return nil } +func (file *dataFile) Close() error { return nil } // dataFileInfo implements fs.FileInfo for a dataFile. 
type dataFileInfo struct{ file *dataFile } -func (d *dataFileInfo) Name() string { return path.Base(d.file.name) } +func (info *dataFileInfo) Name() string { return path.Base(info.file.name) } -func (d *dataFileInfo) Size() int64 { return int64(len(d.file.content)) } +func (info *dataFileInfo) Size() int64 { return int64(len(info.file.content)) } -func (d *dataFileInfo) Mode() fs.FileMode { return 0444 } +func (info *dataFileInfo) Mode() fs.FileMode { return 0444 } -func (d *dataFileInfo) ModTime() time.Time { return d.file.modTime } +func (info *dataFileInfo) ModTime() time.Time { return info.file.modTime } -func (d *dataFileInfo) IsDir() bool { return false } +func (info *dataFileInfo) IsDir() bool { return false } -func (d *dataFileInfo) Sys() any { return nil } +func (info *dataFileInfo) Sys() any { return nil } // dataFileReader implements fs.File for reading a dataFile. type dataFileReader struct { @@ -544,16 +532,16 @@ type dataFileReader struct { reader *bytes.Reader } -func (d *dataFileReader) Stat() (fs.FileInfo, error) { return d.file.Stat() } +func (reader *dataFileReader) Stat() (fs.FileInfo, error) { return reader.file.Stat() } -func (d *dataFileReader) Read(p []byte) (int, error) { - if d.reader == nil { - d.reader = bytes.NewReader(d.file.content) +func (reader *dataFileReader) Read(buffer []byte) (int, error) { + if reader.reader == nil { + reader.reader = bytes.NewReader(reader.file.content) } - return d.reader.Read(p) + return reader.reader.Read(buffer) } -func (d *dataFileReader) Close() error { return nil } +func (reader *dataFileReader) Close() error { return nil } // dirInfo implements fs.FileInfo for an implicit directory. 
type dirInfo struct { @@ -561,17 +549,17 @@ type dirInfo struct { modTime time.Time } -func (d *dirInfo) Name() string { return d.name } +func (info *dirInfo) Name() string { return info.name } -func (d *dirInfo) Size() int64 { return 0 } +func (info *dirInfo) Size() int64 { return 0 } -func (d *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } +func (info *dirInfo) Mode() fs.FileMode { return fs.ModeDir | 0555 } -func (d *dirInfo) ModTime() time.Time { return d.modTime } +func (info *dirInfo) ModTime() time.Time { return info.modTime } -func (d *dirInfo) IsDir() bool { return true } +func (info *dirInfo) IsDir() bool { return true } -func (d *dirInfo) Sys() any { return nil } +func (info *dirInfo) Sys() any { return nil } // dirFile implements fs.File for a directory. type dirFile struct { @@ -579,38 +567,25 @@ type dirFile struct { modTime time.Time } -func (d *dirFile) Stat() (fs.FileInfo, error) { - return &dirInfo{name: path.Base(d.path), modTime: d.modTime}, nil +func (directory *dirFile) Stat() (fs.FileInfo, error) { + return &dirInfo{name: path.Base(directory.path), modTime: directory.modTime}, nil } -func (d *dirFile) Read([]byte) (int, error) { - return 0, core.E("node.dirFile.Read", core.Concat("cannot read directory: ", d.path), &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}) +func (directory *dirFile) Read([]byte) (int, error) { + return 0, core.E("node.dirFile.Read", core.Concat("cannot read directory: ", directory.path), &fs.PathError{Op: "read", Path: directory.path, Err: fs.ErrInvalid}) } -func (d *dirFile) Close() error { return nil } +func (directory *dirFile) Close() error { return nil } -// Ensure Node implements fs.FS so WalkDir works. var _ fs.FS = (*Node)(nil) -// Ensure Node also satisfies fs.StatFS and fs.ReadDirFS for WalkDir. var _ fs.StatFS = (*Node)(nil) var _ fs.ReadDirFS = (*Node)(nil) -// Unexported helper: ensure ReadStream result also satisfies fs.File -// (for cases where callers do a type assertion). 
var _ goio.ReadCloser = goio.NopCloser(nil) -// Ensure nodeWriter satisfies goio.WriteCloser. var _ goio.WriteCloser = (*nodeWriter)(nil) -// Ensure dirFile satisfies fs.File. var _ fs.File = (*dirFile)(nil) -// Ensure dataFileReader satisfies fs.File. var _ fs.File = (*dataFileReader)(nil) - -// ReadDirFile is not needed since fs.WalkDir works via ReadDirFS on the FS itself, -// but we need the Node to satisfy fs.ReadDirFS. - -// ensure all internal compile-time checks are grouped above -// no further type assertions needed diff --git a/s3/s3.go b/s3/s3.go index c1b270b..04096b4 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,8 +1,8 @@ // Package s3 stores io.Medium data in S3 objects. // -// client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) -// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) -// _ = medium.Write("reports/daily.txt", "done") +// Example: client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) +// Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// Example: _ = medium.Write("reports/daily.txt", "done") package s3 import ( @@ -22,7 +22,7 @@ import ( ) // Example: client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) -// medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) +// Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) type Client interface { GetObject(ctx context.Context, params *awss3.GetObjectInput, optFns ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) PutObject(ctx context.Context, params *awss3.PutObjectInput, optFns ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) @@ -34,7 +34,7 @@ type Client interface { } // Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) -// _ = medium.Write("reports/daily.txt", "done") +// Example: _ = medium.Write("reports/daily.txt", "done") type Medium struct { client 
Client bucket string @@ -92,7 +92,7 @@ func normalisePrefix(prefix string) string { } // Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) -// _ = medium.Write("reports/daily.txt", "done") +// Example: _ = medium.Write("reports/daily.txt", "done") func New(options Options) (*Medium, error) { if options.Bucket == "" { return nil, core.E("s3.New", "bucket name is required", nil) @@ -331,11 +331,11 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Common prefixes are "directories" - for _, cp := range listOut.CommonPrefixes { - if cp.Prefix == nil { + for _, commonPrefix := range listOut.CommonPrefixes { + if commonPrefix.Prefix == nil { continue } - name := core.TrimPrefix(*cp.Prefix, prefix) + name := core.TrimPrefix(*commonPrefix.Prefix, prefix) name = core.TrimSuffix(name, "/") if name == "" { continue @@ -568,7 +568,6 @@ func (medium *Medium) IsDir(filePath string) bool { // --- Internal types --- -// fileInfo implements fs.FileInfo for S3 objects. type fileInfo struct { name string size int64 @@ -577,19 +576,18 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } +func (info *fileInfo) Name() string { return info.name } -func (fi *fileInfo) Size() int64 { return fi.size } +func (info *fileInfo) Size() int64 { return info.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +func (info *fileInfo) Mode() fs.FileMode { return info.mode } -func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (info *fileInfo) ModTime() time.Time { return info.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (info *fileInfo) IsDir() bool { return info.isDir } -func (fi *fileInfo) Sys() any { return nil } +func (info *fileInfo) Sys() any { return nil } -// dirEntry implements fs.DirEntry for S3 listings. 
type dirEntry struct { name string isDir bool @@ -597,15 +595,14 @@ type dirEntry struct { info fs.FileInfo } -func (de *dirEntry) Name() string { return de.name } +func (entry *dirEntry) Name() string { return entry.name } -func (de *dirEntry) IsDir() bool { return de.isDir } +func (entry *dirEntry) IsDir() bool { return entry.isDir } -func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } +func (entry *dirEntry) Type() fs.FileMode { return entry.mode.Type() } -func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } +func (entry *dirEntry) Info() (fs.FileInfo, error) { return entry.info, nil } -// s3File implements fs.File for S3 objects. type s3File struct { name string content []byte @@ -614,45 +611,44 @@ type s3File struct { modTime time.Time } -func (f *s3File) Stat() (fs.FileInfo, error) { +func (file *s3File) Stat() (fs.FileInfo, error) { return &fileInfo{ - name: f.name, - size: int64(len(f.content)), + name: file.name, + size: int64(len(file.content)), mode: 0644, - modTime: f.modTime, + modTime: file.modTime, }, nil } -func (f *s3File) Read(b []byte) (int, error) { - if f.offset >= int64(len(f.content)) { +func (file *s3File) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(b, f.content[f.offset:]) - f.offset += int64(n) + n := copy(buffer, file.content[file.offset:]) + file.offset += int64(n) return n, nil } -func (f *s3File) Close() error { +func (file *s3File) Close() error { return nil } -// s3WriteCloser buffers writes and uploads to S3 on Close. type s3WriteCloser struct { medium *Medium key string data []byte } -func (w *s3WriteCloser) Write(p []byte) (int, error) { - w.data = append(w.data, p...) - return len(p), nil +func (writer *s3WriteCloser) Write(data []byte) (int, error) { + writer.data = append(writer.data, data...) 
+ return len(data), nil } -func (w *s3WriteCloser) Close() error { - _, err := w.medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ - Bucket: aws.String(w.medium.bucket), - Key: aws.String(w.key), - Body: bytes.NewReader(w.data), +func (writer *s3WriteCloser) Close() error { + _, err := writer.medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ + Bucket: aws.String(writer.medium.bucket), + Key: aws.String(writer.key), + Body: bytes.NewReader(writer.data), }) if err != nil { return core.E("s3.writeCloser.Close", "failed to upload on close", err) diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index bd02cca..bbd1aa3 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -1,7 +1,7 @@ // Package sqlite stores io.Medium content in SQLite. // -// medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) +// Example: _ = medium.Write("config/app.yaml", "port: 8080") package sqlite import ( @@ -19,7 +19,7 @@ import ( ) // Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") type Medium struct { database *sql.DB table string @@ -42,7 +42,7 @@ func normaliseTableName(table string) string { } // Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func New(options Options) (*Medium, error) { if options.Path == "" { return nil, core.E("sqlite.New", "database path is required", nil) @@ -78,7 +78,7 @@ func New(options Options) (*Medium, error) { return medium, nil } -// Close closes the underlying database connection. 
+// Example: _ = medium.Close() func (medium *Medium) Close() error { if medium.database != nil { return medium.database.Close() @@ -583,7 +583,6 @@ func (medium *Medium) IsDir(filePath string) bool { // --- Internal types --- -// fileInfo implements fs.FileInfo for SQLite entries. type fileInfo struct { name string size int64 @@ -592,19 +591,18 @@ type fileInfo struct { isDir bool } -func (fi *fileInfo) Name() string { return fi.name } +func (info *fileInfo) Name() string { return info.name } -func (fi *fileInfo) Size() int64 { return fi.size } +func (info *fileInfo) Size() int64 { return info.size } -func (fi *fileInfo) Mode() fs.FileMode { return fi.mode } +func (info *fileInfo) Mode() fs.FileMode { return info.mode } -func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (info *fileInfo) ModTime() time.Time { return info.modTime } -func (fi *fileInfo) IsDir() bool { return fi.isDir } +func (info *fileInfo) IsDir() bool { return info.isDir } -func (fi *fileInfo) Sys() any { return nil } +func (info *fileInfo) Sys() any { return nil } -// dirEntry implements fs.DirEntry for SQLite listings. type dirEntry struct { name string isDir bool @@ -612,15 +610,14 @@ type dirEntry struct { info fs.FileInfo } -func (de *dirEntry) Name() string { return de.name } +func (entry *dirEntry) Name() string { return entry.name } -func (de *dirEntry) IsDir() bool { return de.isDir } +func (entry *dirEntry) IsDir() bool { return entry.isDir } -func (de *dirEntry) Type() fs.FileMode { return de.mode.Type() } +func (entry *dirEntry) Type() fs.FileMode { return entry.mode.Type() } -func (de *dirEntry) Info() (fs.FileInfo, error) { return de.info, nil } +func (entry *dirEntry) Info() (fs.FileInfo, error) { return entry.info, nil } -// sqliteFile implements fs.File for SQLite entries. 
type sqliteFile struct { name string content []byte @@ -629,48 +626,47 @@ type sqliteFile struct { modTime time.Time } -func (f *sqliteFile) Stat() (fs.FileInfo, error) { +func (file *sqliteFile) Stat() (fs.FileInfo, error) { return &fileInfo{ - name: f.name, - size: int64(len(f.content)), - mode: f.mode, - modTime: f.modTime, + name: file.name, + size: int64(len(file.content)), + mode: file.mode, + modTime: file.modTime, }, nil } -func (f *sqliteFile) Read(b []byte) (int, error) { - if f.offset >= int64(len(f.content)) { +func (file *sqliteFile) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(b, f.content[f.offset:]) - f.offset += int64(n) + n := copy(buffer, file.content[file.offset:]) + file.offset += int64(n) return n, nil } -func (f *sqliteFile) Close() error { +func (file *sqliteFile) Close() error { return nil } -// sqliteWriteCloser buffers writes and stores to SQLite on Close. type sqliteWriteCloser struct { medium *Medium path string data []byte } -func (w *sqliteWriteCloser) Write(p []byte) (int, error) { - w.data = append(w.data, p...) - return len(p), nil +func (writer *sqliteWriteCloser) Write(data []byte) (int, error) { + writer.data = append(writer.data, data...) + return len(data), nil } -func (w *sqliteWriteCloser) Close() error { - _, err := w.medium.database.Exec( - `INSERT INTO `+w.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) +func (writer *sqliteWriteCloser) Close() error { + _, err := writer.medium.database.Exec( + `INSERT INTO `+writer.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`, - w.path, w.data, time.Now().UTC(), + writer.path, writer.data, time.Now().UTC(), ) if err != nil { - return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", w.path), err) + return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", writer.path), err) } return nil } From c0ee58201b33d6d8e8e2bc72be17c83ca407544c Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 21:52:52 +0000 Subject: [PATCH 27/83] refactor(ax): expand semantic backend naming --- datanode/client.go | 24 +++++++++---------- node/node.go | 10 ++++---- s3/s3.go | 60 +++++++++++++++++++++++----------------------- sqlite/sqlite.go | 30 +++++++++++------------ 4 files changed, 62 insertions(+), 62 deletions(-) diff --git a/datanode/client.go b/datanode/client.go index f72f0ac..d551ec5 100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -337,15 +337,15 @@ func (medium *Medium) Rename(oldPath, newPath string) error { // Move explicit directories dirsToMove := make(map[string]string) - for d := range medium.directorySet { - if d == oldPath || core.HasPrefix(d, oldPrefix) { - newD := core.Concat(newPath, core.TrimPrefix(d, oldPath)) - dirsToMove[d] = newD + for directoryPath := range medium.directorySet { + if directoryPath == oldPath || core.HasPrefix(directoryPath, oldPrefix) { + newDirectoryPath := core.Concat(newPath, core.TrimPrefix(directoryPath, oldPath)) + dirsToMove[directoryPath] = newDirectoryPath } } - for old, nw := range dirsToMove { - delete(medium.directorySet, old) - medium.directorySet[nw] = true + for oldDirectoryPath, newDirectoryPath := range dirsToMove { + delete(medium.directorySet, oldDirectoryPath) + medium.directorySet[newDirectoryPath] = true } return nil @@ -376,11 +376,11 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { seen[e.Name()] = true } - for d := range medium.directorySet { - if !core.HasPrefix(d, 
prefix) { + for directoryPath := range medium.directorySet { + if !core.HasPrefix(directoryPath, prefix) { continue } - rest := core.TrimPrefix(d, prefix) + rest := core.TrimPrefix(directoryPath, prefix) if rest == "" { continue } @@ -515,8 +515,8 @@ func (medium *Medium) hasPrefixLocked(prefix string) (bool, error) { return true, nil } } - for d := range medium.directorySet { - if core.HasPrefix(d, prefix) { + for directoryPath := range medium.directorySet { + if core.HasPrefix(directoryPath, prefix) { return true, nil } } diff --git a/node/node.go b/node/node.go index 2dab424..0dc308b 100644 --- a/node/node.go +++ b/node/node.go @@ -81,11 +81,11 @@ func (node *Node) ToTar() ([]byte, error) { // Example: restored, _ := node.FromTar(snapshot) func FromTar(data []byte) (*Node, error) { - n := New() - if err := n.LoadTar(data); err != nil { + restoredNode := New() + if err := restoredNode.LoadTar(data); err != nil { return nil, err } - return n, nil + return restoredNode, nil } // Example: _ = nodeTree.LoadTar(snapshot) @@ -319,8 +319,8 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { seen[firstComponent] = true if core.Contains(relPath, "/") { - dir := &dirInfo{name: firstComponent, modTime: time.Now()} - entries = append(entries, fs.FileInfoToDirEntry(dir)) + directoryInfo := &dirInfo{name: firstComponent, modTime: time.Now()} + entries = append(entries, fs.FileInfoToDirEntry(directoryInfo)) } else { file := node.files[filePath] info, _ := file.Stat() diff --git a/s3/s3.go b/s3/s3.go index 04096b4..0a32220 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -58,10 +58,10 @@ func deleteObjectsError(prefix string, errs []types.Error) error { return nil } details := make([]string, 0, len(errs)) - for _, item := range errs { - key := aws.ToString(item.Key) - code := aws.ToString(item.Code) - message := aws.ToString(item.Message) + for _, errorItem := range errs { + key := aws.ToString(errorItem.Key) + code := aws.ToString(errorItem.Code) + message := 
aws.ToString(errorItem.Message) switch { case code != "" && message != "": details = append(details, core.Concat(key, ": ", code, " ", message)) @@ -239,11 +239,11 @@ func (medium *Medium) DeleteAll(filePath string) error { prefix += "/" } - paginator := true + continueListing := true var continuationToken *string - for paginator { - listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + for continueListing { + listOutput, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), ContinuationToken: continuationToken, @@ -252,13 +252,13 @@ func (medium *Medium) DeleteAll(filePath string) error { return core.E("s3.DeleteAll", core.Concat("failed to list objects: ", prefix), err) } - if len(listOut.Contents) == 0 { + if len(listOutput.Contents) == 0 { break } - objects := make([]types.ObjectIdentifier, len(listOut.Contents)) - for i, obj := range listOut.Contents { - objects[i] = types.ObjectIdentifier{Key: obj.Key} + objects := make([]types.ObjectIdentifier, len(listOutput.Contents)) + for i, object := range listOutput.Contents { + objects[i] = types.ObjectIdentifier{Key: object.Key} } deleteOut, err := medium.client.DeleteObjects(context.Background(), &awss3.DeleteObjectsInput{ @@ -272,10 +272,10 @@ func (medium *Medium) DeleteAll(filePath string) error { return err } - if listOut.IsTruncated != nil && *listOut.IsTruncated { - continuationToken = listOut.NextContinuationToken + if listOutput.IsTruncated != nil && *listOutput.IsTruncated { + continuationToken = listOutput.NextContinuationToken } else { - paginator = false + continueListing = false } } @@ -321,7 +321,7 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { var entries []fs.DirEntry - listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + listOutput, err := medium.client.ListObjectsV2(context.Background(), 
&awss3.ListObjectsV2Input{ Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), Delimiter: aws.String("/"), @@ -331,7 +331,7 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Common prefixes are "directories" - for _, commonPrefix := range listOut.CommonPrefixes { + for _, commonPrefix := range listOutput.CommonPrefixes { if commonPrefix.Prefix == nil { continue } @@ -353,21 +353,21 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { } // Contents are "files" (excluding the prefix itself) - for _, obj := range listOut.Contents { - if obj.Key == nil { + for _, object := range listOutput.Contents { + if object.Key == nil { continue } - name := core.TrimPrefix(*obj.Key, prefix) + name := core.TrimPrefix(*object.Key, prefix) if name == "" || core.Contains(name, "/") { continue } var size int64 - if obj.Size != nil { - size = *obj.Size + if object.Size != nil { + size = *object.Size } var modTime time.Time - if obj.LastModified != nil { - modTime = *obj.LastModified + if object.LastModified != nil { + modTime = *object.LastModified } entries = append(entries, &dirEntry{ name: name, @@ -532,7 +532,7 @@ func (medium *Medium) Exists(filePath string) bool { if !core.HasSuffix(prefix, "/") { prefix += "/" } - listOut, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + listOutput, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), @@ -540,7 +540,7 @@ func (medium *Medium) Exists(filePath string) bool { if err != nil { return false } - return len(listOut.Contents) > 0 || len(listOut.CommonPrefixes) > 0 + return len(listOutput.Contents) > 0 || len(listOutput.CommonPrefixes) > 0 } // Example: ok := medium.IsDir("reports") @@ -555,7 +555,7 @@ func (medium *Medium) IsDir(filePath string) bool { prefix += "/" } - listOut, err := 
medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ + listOutput, err := medium.client.ListObjectsV2(context.Background(), &awss3.ListObjectsV2Input{ Bucket: aws.String(medium.bucket), Prefix: aws.String(prefix), MaxKeys: aws.Int32(1), @@ -563,7 +563,7 @@ func (medium *Medium) IsDir(filePath string) bool { if err != nil { return false } - return len(listOut.Contents) > 0 || len(listOut.CommonPrefixes) > 0 + return len(listOutput.Contents) > 0 || len(listOutput.CommonPrefixes) > 0 } // --- Internal types --- @@ -624,9 +624,9 @@ func (file *s3File) Read(buffer []byte) (int, error) { if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(buffer, file.content[file.offset:]) - file.offset += int64(n) - return n, nil + bytesRead := copy(buffer, file.content[file.offset:]) + file.offset += int64(bytesRead) + return bytesRead, nil } func (file *s3File) Close() error { diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index bbd1aa3..10729d4 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -246,8 +246,8 @@ func (medium *Medium) DeleteAll(filePath string) error { if err != nil { return core.E("sqlite.DeleteAll", core.Concat("delete failed: ", key), err) } - n, _ := res.RowsAffected() - if n == 0 { + rowsAffected, _ := res.RowsAffected() + if rowsAffected == 0 { return core.E("sqlite.DeleteAll", core.Concat("path not found: ", key), fs.ErrNotExist) } return nil @@ -303,7 +303,7 @@ func (medium *Medium) Rename(oldPath, newPath string) error { oldPrefix := oldKey + "/" newPrefix := newKey + "/" - rows, err := tx.Query( + childRows, err := tx.Query( `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ?`, oldPrefix+"%", ) @@ -319,22 +319,22 @@ func (medium *Medium) Rename(oldPath, newPath string) error { mtime time.Time } var children []child - for rows.Next() { - var c child - if err := rows.Scan(&c.path, &c.content, &c.mode, &c.isDir, &c.mtime); err != nil { - rows.Close() + for childRows.Next() { 
+ var childEntry child + if err := childRows.Scan(&childEntry.path, &childEntry.content, &childEntry.mode, &childEntry.isDir, &childEntry.mtime); err != nil { + childRows.Close() return core.E("sqlite.Rename", "scan child failed", err) } - children = append(children, c) + children = append(children, childEntry) } - rows.Close() + childRows.Close() - for _, c := range children { - newChildPath := core.Concat(newPrefix, core.TrimPrefix(c.path, oldPrefix)) + for _, childEntry := range children { + newChildPath := core.Concat(newPrefix, core.TrimPrefix(childEntry.path, oldPrefix)) _, err = tx.Exec( `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`, - newChildPath, c.content, c.mode, c.isDir, c.mtime, + newChildPath, childEntry.content, childEntry.mode, childEntry.isDir, childEntry.mtime, ) if err != nil { return core.E("sqlite.Rename", "insert child failed", err) @@ -639,9 +639,9 @@ func (file *sqliteFile) Read(buffer []byte) (int, error) { if file.offset >= int64(len(file.content)) { return 0, goio.EOF } - n := copy(buffer, file.content[file.offset:]) - file.offset += int64(n) - return n, nil + bytesRead := copy(buffer, file.content[file.offset:]) + file.offset += int64(bytesRead) + return bytesRead, nil } func (file *sqliteFile) Close() error { From 25b12a22a447a32b69bea342e02471fcc674dc29 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:00:45 +0000 Subject: [PATCH 28/83] refactor(ax): add memory medium aliases --- bench_test.go | 24 ++++++------ client_test.go | 18 ++++----- datanode/client.go | 90 ++++++++++++++++++++------------------------ io.go | 22 ++++++++++- node/node.go | 2 +- node/node_test.go | 6 +-- workspace/service.go | 20 +++++----- 7 files changed, 94 insertions(+), 88 deletions(-) diff --git a/bench_test.go b/bench_test.go index df24267..dd259d0 100644 --- 
a/bench_test.go +++ b/bench_test.go @@ -4,31 +4,31 @@ import ( "testing" ) -func BenchmarkMockMedium_Write(b *testing.B) { - m := NewMockMedium() +func BenchmarkMemoryMedium_Write(b *testing.B) { + medium := NewMemoryMedium() b.ResetTimer() for i := 0; i < b.N; i++ { - _ = m.Write("test.txt", "some content") + _ = medium.Write("test.txt", "some content") } } -func BenchmarkMockMedium_Read(b *testing.B) { - m := NewMockMedium() - _ = m.Write("test.txt", "some content") +func BenchmarkMemoryMedium_Read(b *testing.B) { + medium := NewMemoryMedium() + _ = medium.Write("test.txt", "some content") b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = m.Read("test.txt") + _, _ = medium.Read("test.txt") } } -func BenchmarkMockMedium_List(b *testing.B) { - m := NewMockMedium() - _ = m.EnsureDir("dir") +func BenchmarkMemoryMedium_List(b *testing.B) { + medium := NewMemoryMedium() + _ = medium.EnsureDir("dir") for i := 0; i < 100; i++ { - _ = m.Write("dir/file"+string(rune(i))+".txt", "content") + _ = medium.Write("dir/file"+string(rune(i))+".txt", "content") } b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = m.List("dir") + _, _ = medium.List("dir") } } diff --git a/client_test.go b/client_test.go index d59219e..f6e3e26 100644 --- a/client_test.go +++ b/client_test.go @@ -9,15 +9,15 @@ import ( "github.com/stretchr/testify/require" ) -// --- MockMedium Tests --- - -func TestClient_NewMockMedium_Good(t *testing.T) { - m := NewMockMedium() - assert.NotNil(t, m) - assert.NotNil(t, m.Files) - assert.NotNil(t, m.Dirs) - assert.Empty(t, m.Files) - assert.Empty(t, m.Dirs) +// --- MemoryMedium Compatibility Tests --- + +func TestClient_NewMemoryMedium_Good(t *testing.T) { + medium := NewMemoryMedium() + assert.NotNil(t, medium) + assert.NotNil(t, medium.Files) + assert.NotNil(t, medium.Dirs) + assert.Empty(t, medium.Files) + assert.Empty(t, medium.Dirs) } func TestClient_MockMedium_Read_Good(t *testing.T) { diff --git a/datanode/client.go b/datanode/client.go index d551ec5..05e11e6 
100644 --- a/datanode/client.go +++ b/datanode/client.go @@ -37,7 +37,7 @@ var ( type Medium struct { dataNode *borgdatanode.DataNode directorySet map[string]bool // explicit directories that exist without file contents - mu sync.RWMutex + lock sync.RWMutex } func New() *Medium { @@ -63,8 +63,8 @@ func FromTar(data []byte) (*Medium, error) { // Example: snapshot, _ := medium.Snapshot() func (medium *Medium) Snapshot() ([]byte, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() data, err := medium.dataNode.ToTar() if err != nil { return nil, core.E("datanode.Snapshot", "tar failed", err) @@ -78,8 +78,8 @@ func (medium *Medium) Restore(data []byte) error { if err != nil { return core.E("datanode.Restore", "tar failed", err) } - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() medium.dataNode = dataNode medium.directorySet = make(map[string]bool) return nil @@ -87,8 +87,8 @@ func (medium *Medium) Restore(data []byte) error { // Example: dataNode := medium.DataNode() func (medium *Medium) DataNode() *borgdatanode.DataNode { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() return medium.dataNode } @@ -105,8 +105,8 @@ func normaliseEntryPath(filePath string) string { // --- io.Medium interface --- func (medium *Medium) Read(filePath string) (string, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) file, err := medium.dataNode.Open(filePath) @@ -131,8 +131,8 @@ func (medium *Medium) Read(filePath string) (string, error) { } func (medium *Medium) Write(filePath, content string) error { - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -150,8 +150,8 @@ func (medium *Medium) WriteMode(filePath, content string, mode 
fs.FileMode) erro } func (medium *Medium) EnsureDir(filePath string) error { - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -162,7 +162,7 @@ func (medium *Medium) EnsureDir(filePath string) error { } // ensureDirsLocked marks a directory and all ancestors as existing. -// Caller must hold medium.mu. +// Caller must hold medium.lock. func (medium *Medium) ensureDirsLocked(directoryPath string) { for directoryPath != "" && directoryPath != "." { medium.directorySet[directoryPath] = true @@ -174,8 +174,8 @@ func (medium *Medium) ensureDirsLocked(directoryPath string) { } func (medium *Medium) IsFile(filePath string) bool { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) info, err := medium.dataNode.Stat(filePath) @@ -191,20 +191,17 @@ func (medium *Medium) FileSet(filePath, content string) error { } func (medium *Medium) Delete(filePath string) error { - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { return core.E("datanode.Delete", "cannot delete root", fs.ErrPermission) } - // Check if it's a file in the DataNode info, err := medium.dataNode.Stat(filePath) if err != nil { - // Check explicit directories if medium.directorySet[filePath] { - // Check if dir is empty hasChildren, err := medium.hasPrefixLocked(filePath + "/") if err != nil { return core.E("datanode.Delete", core.Concat("failed to inspect directory: ", filePath), err) @@ -238,8 +235,8 @@ func (medium *Medium) Delete(filePath string) error { } func (medium *Medium) DeleteAll(filePath string) error { - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -249,7 +246,6 @@ func (medium *Medium) 
DeleteAll(filePath string) error { prefix := filePath + "/" found := false - // Check if filePath itself is a file info, err := medium.dataNode.Stat(filePath) if err == nil && !info.IsDir() { if err := medium.removeFileLocked(filePath); err != nil { @@ -287,20 +283,18 @@ func (medium *Medium) DeleteAll(filePath string) error { } func (medium *Medium) Rename(oldPath, newPath string) error { - medium.mu.Lock() - defer medium.mu.Unlock() + medium.lock.Lock() + defer medium.lock.Unlock() oldPath = normaliseEntryPath(oldPath) newPath = normaliseEntryPath(newPath) - // Check if source is a file info, err := medium.dataNode.Stat(oldPath) if err != nil { return core.E("datanode.Rename", core.Concat("not found: ", oldPath), fs.ErrNotExist) } if !info.IsDir() { - // Read old, write new, delete old data, err := medium.readFileLocked(oldPath) if err != nil { return core.E("datanode.Rename", core.Concat("failed to read source file: ", oldPath), err) @@ -313,7 +307,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { return nil } - // Directory rename: move all files under oldPath to newPath oldPrefix := oldPath + "/" newPrefix := newPath + "/" @@ -335,7 +328,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { } } - // Move explicit directories dirsToMove := make(map[string]string) for directoryPath := range medium.directorySet { if directoryPath == oldPath || core.HasPrefix(directoryPath, oldPrefix) { @@ -352,14 +344,13 @@ func (medium *Medium) Rename(oldPath, newPath string) error { } func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) entries, err := medium.dataNode.ReadDir(filePath) if err != nil { - // Check explicit directories if filePath == "" || medium.directorySet[filePath] { return []fs.DirEntry{}, nil } @@ -399,8 +390,8 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, 
error) { } func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -419,8 +410,8 @@ func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { } func (medium *Medium) Open(filePath string) (fs.File, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) return medium.dataNode.Open(filePath) @@ -440,25 +431,24 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { return nil, core.E("datanode.Append", "empty path", fs.ErrInvalid) } - // Read existing content var existing []byte - medium.mu.RLock() + medium.lock.RLock() if medium.IsFile(filePath) { data, err := medium.readFileLocked(filePath) if err != nil { - medium.mu.RUnlock() + medium.lock.RUnlock() return nil, core.E("datanode.Append", core.Concat("failed to read existing content: ", filePath), err) } existing = data } - medium.mu.RUnlock() + medium.lock.RUnlock() return &writeCloser{medium: medium, path: filePath, buf: existing}, nil } func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) file, err := medium.dataNode.Open(filePath) @@ -473,8 +463,8 @@ func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { } func (medium *Medium) Exists(filePath string) bool { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -488,8 +478,8 @@ func (medium *Medium) Exists(filePath string) bool { } func (medium *Medium) IsDir(filePath string) bool { - medium.mu.RLock() - defer medium.mu.RUnlock() + medium.lock.RLock() + defer 
medium.lock.RUnlock() filePath = normaliseEntryPath(filePath) if filePath == "" { @@ -556,7 +546,7 @@ func (medium *Medium) readFileLocked(filePath string) ([]byte, error) { // removeFileLocked removes a single file by rebuilding the DataNode. // This is necessary because Borg's DataNode doesn't expose a Remove method. -// Caller must hold medium.mu write lock. +// Caller must hold medium.lock write lock. func (medium *Medium) removeFileLocked(target string) error { entries, err := medium.collectAllLocked() if err != nil { @@ -591,8 +581,8 @@ func (writer *writeCloser) Write(data []byte) (int, error) { } func (writer *writeCloser) Close() error { - writer.medium.mu.Lock() - defer writer.medium.mu.Unlock() + writer.medium.lock.Lock() + defer writer.medium.lock.Unlock() writer.medium.dataNode.AddData(writer.path, writer.buf) writer.medium.ensureDirsLocked(path.Dir(writer.path)) diff --git a/io.go b/io.go index f22e20c..cf6423f 100644 --- a/io.go +++ b/io.go @@ -165,11 +165,17 @@ type MockMedium struct { ModTimes map[string]time.Time } -var _ Medium = (*MockMedium)(nil) +// Example: medium := io.NewMemoryMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") +type MemoryMedium = MockMedium + +var _ Medium = (*MemoryMedium)(nil) +// NewMockMedium returns MemoryMedium for compatibility. 
+// // Example: medium := io.NewMockMedium() // _ = medium.Write("config/app.yaml", "port: 8080") -func NewMockMedium() *MockMedium { +func NewMockMedium() *MemoryMedium { return &MockMedium{ Files: make(map[string]string), Dirs: make(map[string]bool), @@ -177,6 +183,12 @@ func NewMockMedium() *MockMedium { } } +// Example: medium := io.NewMemoryMedium() +// _ = medium.Write("config/app.yaml", "port: 8080") +func NewMemoryMedium() *MemoryMedium { + return NewMockMedium() +} + func (medium *MockMedium) Read(path string) (string, error) { content, ok := medium.Files[path] if !ok { @@ -369,6 +381,9 @@ type MockFile struct { offset int64 } +// MemoryFile is the preferred alias for MockFile. +type MemoryFile = MockFile + func (file *MockFile) Stat() (fs.FileInfo, error) { return FileInfo{ name: file.name, @@ -396,6 +411,9 @@ type MockWriteCloser struct { data []byte } +// MemoryWriteCloser is the preferred alias for MockWriteCloser. +type MemoryWriteCloser = MockWriteCloser + func (writeCloser *MockWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil diff --git a/node/node.go b/node/node.go index 0dc308b..8b1c04e 100644 --- a/node/node.go +++ b/node/node.go @@ -209,7 +209,7 @@ func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) return coreio.Local.WriteMode(destinationPath, string(file.content), perm) } -// Example: _ = nodeTree.CopyTo(io.NewMockMedium(), "config", "backup/config") +// Example: _ = nodeTree.CopyTo(io.NewMemoryMedium(), "config", "backup/config") func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { sourcePath = core.TrimPrefix(sourcePath, "/") info, err := node.Stat(sourcePath) diff --git a/node/node_test.go b/node/node_test.go index 0580ecb..6918e0e 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -420,12 +420,12 @@ func TestNode_CopyTo_Good(t *testing.T) { n.AddData("config/app.yaml", []byte("port: 8080")) n.AddData("config/env/app.env", []byte("MODE=test")) - fileTarget := coreio.NewMockMedium() + fileTarget := coreio.NewMemoryMedium() err := n.CopyTo(fileTarget, "config/app.yaml", "backup/app.yaml") require.NoError(t, err) assert.Equal(t, "port: 8080", fileTarget.Files["backup/app.yaml"]) - dirTarget := coreio.NewMockMedium() + dirTarget := coreio.NewMemoryMedium() err = n.CopyTo(dirTarget, "config", "backup/config") require.NoError(t, err) assert.Equal(t, "port: 8080", dirTarget.Files["backup/config/app.yaml"]) @@ -434,7 +434,7 @@ func TestNode_CopyTo_Good(t *testing.T) { func TestNode_CopyTo_Bad(t *testing.T) { n := New() - err := n.CopyTo(coreio.NewMockMedium(), "missing", "backup/missing") + err := n.CopyTo(coreio.NewMemoryMedium(), "missing", "backup/missing") assert.Error(t, err) } diff --git a/workspace/service.go b/workspace/service.go index d382d3a..ea3179f 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -34,12 +34,11 @@ type Options struct { // Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Service struct { - core 
*core.Core crypt CryptProvider activeWorkspaceID string rootPath string medium io.Medium - mu sync.RWMutex + lock sync.RWMutex } var _ Workspace = (*Service)(nil) @@ -58,7 +57,6 @@ func New(options Options) (*Service, error) { } service := &Service{ - core: options.Core, rootPath: rootPath, medium: io.Local, } @@ -76,8 +74,8 @@ func New(options Options) (*Service, error) { // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") func (service *Service) CreateWorkspace(identifier, password string) (string, error) { - service.mu.Lock() - defer service.mu.Unlock() + service.lock.Lock() + defer service.lock.Unlock() if service.crypt == nil { return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil) @@ -114,8 +112,8 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er // Example: _ = service.SwitchWorkspace(workspaceID) func (service *Service) SwitchWorkspace(workspaceID string) error { - service.mu.Lock() - defer service.mu.Unlock() + service.lock.Lock() + defer service.lock.Unlock() workspaceDirectory, err := service.resolveWorkspaceDirectory("workspace.SwitchWorkspace", workspaceID) if err != nil { @@ -148,8 +146,8 @@ func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFileP // Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { - service.mu.RLock() - defer service.mu.RUnlock() + service.lock.RLock() + defer service.lock.RUnlock() filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) if err != nil { @@ -160,8 +158,8 @@ func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, erro // Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) error { - service.mu.Lock() - defer service.mu.Unlock() + service.lock.Lock() + defer 
service.lock.Unlock() filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) if err != nil { From e8b87dfbeee6d1698f4e0beea452e3b678f619ca Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:26:50 +0000 Subject: [PATCH 29/83] refactor(ax): make memory medium primary --- io.go | 115 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 57 insertions(+), 58 deletions(-) diff --git a/io.go b/io.go index cf6423f..6f8fb45 100644 --- a/io.go +++ b/io.go @@ -157,75 +157,74 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath return nil } -// Example: medium := io.NewMockMedium() +// Example: medium := io.NewMemoryMedium() // _ = medium.Write("config/app.yaml", "port: 8080") -type MockMedium struct { +type MemoryMedium struct { Files map[string]string Dirs map[string]bool ModTimes map[string]time.Time } -// Example: medium := io.NewMemoryMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") -type MemoryMedium = MockMedium +// MockMedium is a compatibility alias for MemoryMedium. +type MockMedium = MemoryMedium var _ Medium = (*MemoryMedium)(nil) -// NewMockMedium returns MemoryMedium for compatibility. -// -// Example: medium := io.NewMockMedium() +// Example: medium := io.NewMemoryMedium() // _ = medium.Write("config/app.yaml", "port: 8080") -func NewMockMedium() *MemoryMedium { - return &MockMedium{ +func NewMemoryMedium() *MemoryMedium { + return &MemoryMedium{ Files: make(map[string]string), Dirs: make(map[string]bool), ModTimes: make(map[string]time.Time), } } -// Example: medium := io.NewMemoryMedium() +// NewMockMedium is a compatibility alias for NewMemoryMedium. 
+// +// Example: medium := io.NewMockMedium() // _ = medium.Write("config/app.yaml", "port: 8080") -func NewMemoryMedium() *MemoryMedium { - return NewMockMedium() +func NewMockMedium() *MemoryMedium { + return NewMemoryMedium() } -func (medium *MockMedium) Read(path string) (string, error) { +func (medium *MemoryMedium) Read(path string) (string, error) { content, ok := medium.Files[path] if !ok { - return "", core.E("io.MockMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) + return "", core.E("io.MemoryMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) } return content, nil } -func (medium *MockMedium) Write(path, content string) error { +func (medium *MemoryMedium) Write(path, content string) error { medium.Files[path] = content medium.ModTimes[path] = time.Now() return nil } -func (medium *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { +func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { return medium.Write(path, content) } -func (medium *MockMedium) EnsureDir(path string) error { +func (medium *MemoryMedium) EnsureDir(path string) error { medium.Dirs[path] = true return nil } -func (medium *MockMedium) IsFile(path string) bool { +func (medium *MemoryMedium) IsFile(path string) bool { _, ok := medium.Files[path] return ok } -func (medium *MockMedium) FileGet(path string) (string, error) { +func (medium *MemoryMedium) FileGet(path string) (string, error) { return medium.Read(path) } -func (medium *MockMedium) FileSet(path, content string) error { +func (medium *MemoryMedium) FileSet(path, content string) error { return medium.Write(path, content) } -func (medium *MockMedium) Delete(path string) error { +func (medium *MemoryMedium) Delete(path string) error { if _, ok := medium.Files[path]; ok { delete(medium.Files, path) return nil @@ -237,21 +236,21 @@ func (medium *MockMedium) Delete(path string) error { } for filePath := range medium.Files { if 
core.HasPrefix(filePath, prefix) { - return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) + return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } for directoryPath := range medium.Dirs { if directoryPath != path && core.HasPrefix(directoryPath, prefix) { - return core.E("io.MockMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) + return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } delete(medium.Dirs, path) return nil } - return core.E("io.MockMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) + return core.E("io.MemoryMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) } -func (medium *MockMedium) DeleteAll(path string) error { +func (medium *MemoryMedium) DeleteAll(path string) error { found := false if _, ok := medium.Files[path]; ok { delete(medium.Files, path) @@ -279,12 +278,12 @@ func (medium *MockMedium) DeleteAll(path string) error { } if !found { - return core.E("io.MockMedium.DeleteAll", core.Concat("path not found: ", path), fs.ErrNotExist) + return core.E("io.MemoryMedium.DeleteAll", core.Concat("path not found: ", path), fs.ErrNotExist) } return nil } -func (medium *MockMedium) Rename(oldPath, newPath string) error { +func (medium *MemoryMedium) Rename(oldPath, newPath string) error { if content, ok := medium.Files[oldPath]; ok { medium.Files[newPath] = content delete(medium.Files, oldPath) @@ -336,62 +335,62 @@ func (medium *MockMedium) Rename(oldPath, newPath string) error { } return nil } - return core.E("io.MockMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) + return core.E("io.MemoryMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } -func (medium *MockMedium) Open(path string) (fs.File, error) { +func (medium *MemoryMedium) Open(path string) (fs.File, error) { content, ok := medium.Files[path] if !ok { 
- return nil, core.E("io.MockMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) + return nil, core.E("io.MemoryMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } - return &MockFile{ + return &MemoryFile{ name: core.PathBase(path), content: []byte(content), }, nil } -func (medium *MockMedium) Create(path string) (goio.WriteCloser, error) { - return &MockWriteCloser{ +func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { + return &MemoryWriteCloser{ medium: medium, path: path, }, nil } -func (medium *MockMedium) Append(path string) (goio.WriteCloser, error) { +func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { content := medium.Files[path] - return &MockWriteCloser{ + return &MemoryWriteCloser{ medium: medium, path: path, data: []byte(content), }, nil } -func (medium *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { +func (medium *MemoryMedium) ReadStream(path string) (goio.ReadCloser, error) { return medium.Open(path) } -func (medium *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { +func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } -// MockFile implements fs.File for MockMedium. -type MockFile struct { +// MemoryFile implements fs.File for MemoryMedium. +type MemoryFile struct { name string content []byte offset int64 } -// MemoryFile is the preferred alias for MockFile. -type MemoryFile = MockFile +// MockFile is a compatibility alias for MemoryFile. 
+type MockFile = MemoryFile -func (file *MockFile) Stat() (fs.FileInfo, error) { +func (file *MemoryFile) Stat() (fs.FileInfo, error) { return FileInfo{ name: file.name, size: int64(len(file.content)), }, nil } -func (file *MockFile) Read(buffer []byte) (int, error) { +func (file *MemoryFile) Read(buffer []byte) (int, error) { if file.offset >= int64(len(file.content)) { return 0, goio.EOF } @@ -400,32 +399,32 @@ func (file *MockFile) Read(buffer []byte) (int, error) { return readCount, nil } -func (file *MockFile) Close() error { +func (file *MemoryFile) Close() error { return nil } -// MockWriteCloser implements WriteCloser for MockMedium. -type MockWriteCloser struct { - medium *MockMedium +// MemoryWriteCloser implements WriteCloser for MemoryMedium. +type MemoryWriteCloser struct { + medium *MemoryMedium path string data []byte } -// MemoryWriteCloser is the preferred alias for MockWriteCloser. -type MemoryWriteCloser = MockWriteCloser +// MockWriteCloser is a compatibility alias for MemoryWriteCloser. +type MockWriteCloser = MemoryWriteCloser -func (writeCloser *MockWriteCloser) Write(data []byte) (int, error) { +func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil } -func (writeCloser *MockWriteCloser) Close() error { +func (writeCloser *MemoryWriteCloser) Close() error { writeCloser.medium.Files[writeCloser.path] = string(writeCloser.data) writeCloser.medium.ModTimes[writeCloser.path] = time.Now() return nil } -func (medium *MockMedium) List(path string) ([]fs.DirEntry, error) { +func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := medium.Dirs[path]; !ok { hasChildren := false prefix := path @@ -447,7 +446,7 @@ func (medium *MockMedium) List(path string) ([]fs.DirEntry, error) { } } if !hasChildren && path != "" { - return nil, core.E("io.MockMedium.List", core.Concat("directory not found: ", path), fs.ErrNotExist) + return nil, core.E("io.MemoryMedium.List", core.Concat("directory not found: ", path), fs.ErrNotExist) } } @@ -527,7 +526,7 @@ func (medium *MockMedium) List(path string) ([]fs.DirEntry, error) { return entries, nil } -func (medium *MockMedium) Stat(path string) (fs.FileInfo, error) { +func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { if content, ok := medium.Files[path]; ok { modTime, ok := medium.ModTimes[path] if !ok { @@ -547,10 +546,10 @@ func (medium *MockMedium) Stat(path string) (fs.FileInfo, error) { mode: fs.ModeDir | 0755, }, nil } - return nil, core.E("io.MockMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) + return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } -func (medium *MockMedium) Exists(path string) bool { +func (medium *MemoryMedium) Exists(path string) bool { if _, ok := medium.Files[path]; ok { return true } @@ -560,7 +559,7 @@ func (medium *MockMedium) Exists(path string) bool { return false } -func (medium *MockMedium) IsDir(path string) bool { +func (medium *MemoryMedium) IsDir(path string) bool { _, ok := medium.Dirs[path] return ok } From 0927aab29d33264b364d9b66f3efdb7ef46ba6f1 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:33:03 
+0000 Subject: [PATCH 30/83] refactor: align AX surfaces and semantic file names --- datanode/{client.go => medium.go} | 2 +- datanode/{client_test.go => medium_test.go} | 2 +- local/{client.go => medium.go} | 2 +- local/{client_test.go => medium_test.go} | 2 +- client_test.go => medium_test.go | 2 +- workspace/service.go | 84 +++++++++++++-------- workspace/service_test.go | 40 +++++++--- 7 files changed, 86 insertions(+), 48 deletions(-) rename datanode/{client.go => medium.go} (99%) rename datanode/{client_test.go => medium_test.go} (99%) rename local/{client.go => medium.go} (99%) rename local/{client_test.go => medium_test.go} (99%) rename client_test.go => medium_test.go (99%) diff --git a/datanode/client.go b/datanode/medium.go similarity index 99% rename from datanode/client.go rename to datanode/medium.go index 05e11e6..49979fa 100644 --- a/datanode/client.go +++ b/datanode/medium.go @@ -1,4 +1,4 @@ -// Package datanode keeps io.Medium data in Borg's DataNode. +// Package datanode provides an io.Medium implementation backed by Borg's DataNode. // // medium := datanode.New() // _ = medium.Write("jobs/run.log", "started") diff --git a/datanode/client_test.go b/datanode/medium_test.go similarity index 99% rename from datanode/client_test.go rename to datanode/medium_test.go index 123e8c8..89084eb 100644 --- a/datanode/client_test.go +++ b/datanode/medium_test.go @@ -96,7 +96,7 @@ func TestClient_Delete_Good(t *testing.T) { func TestClient_Delete_Bad(t *testing.T) { m := New() - // Delete non-existent + // Example: m.Delete("ghost.txt") assert.Error(t, m.Delete("ghost.txt")) // Delete non-empty dir diff --git a/local/client.go b/local/medium.go similarity index 99% rename from local/client.go rename to local/medium.go index 3eebd11..5f80f06 100644 --- a/local/client.go +++ b/local/medium.go @@ -1,4 +1,4 @@ -// Package local binds io.Medium to the local filesystem. +// Package local provides the io.Medium implementation for the local filesystem. 
// // medium, _ := local.New("/srv/app") // _ = medium.Write("config/app.yaml", "port: 8080") diff --git a/local/client_test.go b/local/medium_test.go similarity index 99% rename from local/client_test.go rename to local/medium_test.go index 50e70e3..8a22b8b 100644 --- a/local/client_test.go +++ b/local/medium_test.go @@ -15,7 +15,7 @@ func TestClient_New_ResolvesRoot_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) - // New() resolves symlinks (macOS /var → /private/var), so compare resolved paths. + // Example: local.New("/srv/app") resolves macOS "/var" to "/private/var" before sandbox checks. resolved, err := resolveSymlinksPath(root) require.NoError(t, err) assert.Equal(t, resolved, m.filesystemRoot) diff --git a/client_test.go b/medium_test.go similarity index 99% rename from client_test.go rename to medium_test.go index f6e3e26..30d875f 100644 --- a/client_test.go +++ b/medium_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -// --- MemoryMedium Compatibility Tests --- +// --- MemoryMedium Tests --- func TestClient_NewMemoryMedium_Good(t *testing.T) { medium := NewMemoryMedium() diff --git a/workspace/service.go b/workspace/service.go index ea3179f..50e2ba9 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -24,6 +24,23 @@ type CryptProvider interface { CreateKeyPair(name, passphrase string) (string, error) } +const ( + WorkspaceCreateAction = "workspace.create" + WorkspaceSwitchAction = "workspace.switch" +) + +// Example: command := WorkspaceCommand{ +// Action: WorkspaceCreateAction, +// Identifier: "alice", +// Password: "pass123", +// } +type WorkspaceCommand struct { + Action string + Identifier string + Password string + WorkspaceID string +} + // Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) type Options struct { // Core is the Core runtime used by the service. 
@@ -168,41 +185,44 @@ func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) erro return service.medium.Write(filePath, content) } -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: myCryptProvider}) -// -// createResult := service.HandleIPCEvents(core.New(), map[string]any{ -// "action": "workspace.create", -// "identifier": "alice", -// "password": "pass123", -// }) -// -// switchResult := service.HandleIPCEvents(core.New(), map[string]any{ -// "action": "workspace.switch", -// "workspaceID": "f3f0d7", -// }) -// -// _ = createResult.OK -// _ = switchResult.OK +// Example: result := service.HandleWorkspaceCommand(WorkspaceCommand{ +// Action: WorkspaceCreateAction, +// Identifier: "alice", +// Password: "pass123", +// }) +func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { + switch command.Action { + case WorkspaceCreateAction: + workspaceID, err := service.CreateWorkspace(command.Identifier, command.Password) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: workspaceID, OK: true} + case WorkspaceSwitchAction: + if err := service.SwitchWorkspace(command.WorkspaceID); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} + } + return core.Result{OK: true} +} + +// Example: result := service.HandleIPCEvents(core.New(), map[string]any{ +// "action": WorkspaceSwitchAction, +// "workspaceID": "f3f0d7", +// }) +// HandleIPCEvents preserves the legacy map[string]any payload and still accepts WorkspaceCommand values. 
func (service *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { + case WorkspaceCommand: + return service.HandleWorkspaceCommand(payload) case map[string]any: - action, _ := payload["action"].(string) - switch action { - case "workspace.create": - identifier, _ := payload["identifier"].(string) - password, _ := payload["password"].(string) - workspaceID, err := service.CreateWorkspace(identifier, password) - if err != nil { - return core.Result{}.New(err) - } - return core.Result{Value: workspaceID, OK: true} - case "workspace.switch": - workspaceID, _ := payload["workspaceID"].(string) - if err := service.SwitchWorkspace(workspaceID); err != nil { - return core.Result{}.New(err) - } - return core.Result{OK: true} - } + command := WorkspaceCommand{} + command.Action, _ = payload["action"].(string) + command.Identifier, _ = payload["identifier"].(string) + command.Password, _ = payload["password"].(string) + command.WorkspaceID, _ = payload["workspaceID"].(string) + return service.HandleWorkspaceCommand(command) } return core.Result{OK: true} } diff --git a/workspace/service_test.go b/workspace/service_test.go index a8b19e3..7dd5042 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -88,13 +88,13 @@ func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { require.Error(t, err) } -func TestService_HandleIPCEvents_Good(t *testing.T) { +func TestService_HandleWorkspaceCommand_Good(t *testing.T) { s, _ := newTestService(t) - create := s.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.create", - "identifier": "ipc-user", - "password": "pass123", + create := s.HandleWorkspaceCommand(WorkspaceCommand{ + Action: WorkspaceCreateAction, + Identifier: "ipc-user", + Password: "pass123", }) assert.True(t, create.OK) @@ -102,22 +102,40 @@ func TestService_HandleIPCEvents_Good(t *testing.T) { require.True(t, ok) require.NotEmpty(t, workspaceID) - switchResult := 
s.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.switch", - "workspaceID": workspaceID, + switchResult := s.HandleWorkspaceCommand(WorkspaceCommand{ + Action: WorkspaceSwitchAction, + WorkspaceID: workspaceID, }) assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, s.activeWorkspaceID) + legacyCreate := s.HandleIPCEvents(core.New(), map[string]any{ + "action": WorkspaceCreateAction, + "identifier": "legacy-user", + "password": "pass123", + }) + assert.True(t, legacyCreate.OK) + + legacyWorkspaceID, ok := legacyCreate.Value.(string) + require.True(t, ok) + require.NotEmpty(t, legacyWorkspaceID) + + legacySwitch := s.HandleIPCEvents(core.New(), WorkspaceCommand{ + Action: WorkspaceSwitchAction, + WorkspaceID: legacyWorkspaceID, + }) + assert.True(t, legacySwitch.OK) + assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) + rejectedLegacySwitch := s.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.switch", + "action": WorkspaceSwitchAction, "name": workspaceID, }) assert.False(t, rejectedLegacySwitch.OK) - assert.Equal(t, workspaceID, s.activeWorkspaceID) + assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.switch", + "action": WorkspaceSwitchAction, "workspaceID": "missing", }) assert.False(t, failedSwitch.OK) From a8caedaf55b8ae923e89c676f6515c606c006cec Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:33:41 +0000 Subject: [PATCH 31/83] docs(local): convert constructor note to usage example --- local/medium.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/local/medium.go b/local/medium.go index 5f80f06..9247b15 100644 --- a/local/medium.go +++ b/local/medium.go @@ -25,10 +25,7 @@ var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() // _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { absoluteRoot := absolutePath(root) - // Resolve symlinks so 
sandbox checks compare like-for-like. - // On macOS, /var is a symlink to /private/var — without this, - // resolving child paths resolves to /private/var/... while - // root stays /var/..., causing false sandbox escape detections. + // Example: local.New("/srv/app") resolves macOS "/var" to "/private/var" before sandbox checks. if resolvedRoot, err := resolveSymlinksPath(absoluteRoot); err == nil { absoluteRoot = resolvedRoot } From fc34a75fb2a25b8039394fe304167a1fac932872 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:39:50 +0000 Subject: [PATCH 32/83] refactor(ax): continue AX surface alignment --- go.sum | 62 ++++++++++++++++++++++++++ io.go | 93 +++++++++++++++++++-------------------- local/medium_test.go | 2 - medium_test.go | 28 +++++++++++- node/node.go | 4 ++ s3/s3.go | 1 - sqlite/sqlite.go | 7 +-- store/medium.go | 6 ++- workspace/doc.go | 2 +- workspace/service.go | 52 ++++++++++------------ workspace/service_test.go | 11 +++-- 11 files changed, 180 insertions(+), 88 deletions(-) diff --git a/go.sum b/go.sum index 0164e68..0cd917c 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,11 @@ dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk= dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= +forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod 
h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -24,47 +28,105 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/clipperhouse/uax29/v2 v2.4.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= +github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/godbus/dbus/v5 
v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1/go.mod h1:alcuEEnZsY1WQsagKhZDsoPCRoOijYqhZvPwLG0kzVs= +github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text 
v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/leaanthony/go-ansi-parser v1.6.1/go.mod h1:+vva/2y4alzVmmIEpk9QDhA7vLC5zKDTRwfZGOp3IWU= +github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw= +github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8= +github.com/leaanthony/u v1.1.1/go.mod h1:9+o6hejoRljvZ3BzdYlVL0JYCwtnAsVuN9pVTQcaRfI= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tkrajina/go-reflector v0.5.8/go.mod h1:ECbqLgccecY5kPmPmXg1MrHW585yMcDkVl6IvJe64T4= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/wailsapp/go-webview2 v1.0.23/go.mod h1:qJmWAmAmaniuKGZPWwne+uor3AHMB5PFhqiK0Bbj8kc= +github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o= +github.com/wailsapp/wails/v2 v2.11.0/go.mod h1:jrf0ZaM6+GBc1wRmXsM8cIvzlg0karYin3erahI4+0k= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.49.0 
h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= +golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/cc/v4 v4.27.1 
h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= diff --git a/io.go b/io.go index 6f8fb45..b907975 100644 --- a/io.go +++ b/io.go @@ -59,7 +59,7 @@ type Medium interface { IsDir(path string) bool } -// Example: info := io.FileInfo{name: "app.yaml", size: 8, mode: 0644} +// Example: info := io.NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) type FileInfo struct { name string size int64 @@ -80,7 +80,8 @@ func (info FileInfo) IsDir() bool { return info.isDir } func (info FileInfo) Sys() any { return nil } -// Example: entry := io.DirEntry{name: "app.yaml", mode: 0644} +// Example: info := io.NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) +// Example: entry := io.NewDirEntry("app.yaml", false, 0644, info) type DirEntry struct { name string isDir bool @@ -96,6 +97,28 @@ func (entry DirEntry) Type() fs.FileMode { return entry.mode.Type() } func (entry DirEntry) Info() (fs.FileInfo, error) { return entry.info, nil } +// Example: info := io.NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) +func NewFileInfo(name string, size int64, mode fs.FileMode, modTime time.Time, isDir bool) FileInfo { + return FileInfo{ + name: name, + size: size, + mode: mode, + modTime: modTime, + isDir: isDir, + } +} + +// Example: info := io.NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) +// Example: entry := io.NewDirEntry("app.yaml", false, 0644, info) +func NewDirEntry(name string, isDir bool, mode fs.FileMode, info fs.FileInfo) DirEntry { + return DirEntry{ + name: name, + isDir: isDir, + mode: mode, + info: info, + } +} + // Example: _ = io.Local.Read("/etc/hostname") var Local Medium @@ -384,10 +407,7 @@ type MemoryFile struct { type MockFile = MemoryFile func (file *MemoryFile) Stat() (fs.FileInfo, error) { - return FileInfo{ - name: file.name, - size: int64(len(file.content)), - }, nil + return NewFileInfo(file.name, int64(len(file.content)), 0, time.Time{}, false), nil } func (file *MemoryFile) Read(buffer []byte) (int, error) { @@ -468,32 +488,24 @@ 
func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { dirName := rest[:idx] if !seen[dirName] { seen[dirName] = true - entries = append(entries, DirEntry{ - name: dirName, - isDir: true, - mode: fs.ModeDir | 0755, - info: FileInfo{ - name: dirName, - isDir: true, - mode: fs.ModeDir | 0755, - }, - }) + entries = append(entries, NewDirEntry( + dirName, + true, + fs.ModeDir|0755, + NewFileInfo(dirName, 0, fs.ModeDir|0755, time.Time{}, true), + )) } } continue } if !seen[rest] { seen[rest] = true - entries = append(entries, DirEntry{ - name: rest, - isDir: false, - mode: 0644, - info: FileInfo{ - name: rest, - size: int64(len(content)), - mode: 0644, - }, - }) + entries = append(entries, NewDirEntry( + rest, + false, + 0644, + NewFileInfo(rest, int64(len(content)), 0644, time.Time{}, false), + )) } } @@ -510,16 +522,12 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } if !seen[rest] { seen[rest] = true - entries = append(entries, DirEntry{ - name: rest, - isDir: true, - mode: fs.ModeDir | 0755, - info: FileInfo{ - name: rest, - isDir: true, - mode: fs.ModeDir | 0755, - }, - }) + entries = append(entries, NewDirEntry( + rest, + true, + fs.ModeDir|0755, + NewFileInfo(rest, 0, fs.ModeDir|0755, time.Time{}, true), + )) } } @@ -532,19 +540,10 @@ func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { if !ok { modTime = time.Now() } - return FileInfo{ - name: core.PathBase(path), - size: int64(len(content)), - mode: 0644, - modTime: modTime, - }, nil + return NewFileInfo(core.PathBase(path), int64(len(content)), 0644, modTime, false), nil } if _, ok := medium.Dirs[path]; ok { - return FileInfo{ - name: core.PathBase(path), - isDir: true, - mode: fs.ModeDir | 0755, - }, nil + return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil } return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } diff --git a/local/medium_test.go b/local/medium_test.go index 
8a22b8b..f353d68 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -511,8 +511,6 @@ func TestClient_EmptyPaths_Ugly(t *testing.T) { err = m.EnsureDir("") assert.NoError(t, err) - // IsDir empty path (should be true for root, but current impl returns false for "") - // Wait, I noticed IsDir returns false for "" in the code. assert.False(t, m.IsDir("")) // Exists empty path (root exists) diff --git a/medium_test.go b/medium_test.go index 30d875f..390dcd2 100644 --- a/medium_test.go +++ b/medium_test.go @@ -4,6 +4,7 @@ import ( goio "io" "io/fs" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -11,7 +12,7 @@ import ( // --- MemoryMedium Tests --- -func TestClient_NewMemoryMedium_Good(t *testing.T) { +func TestMemoryMedium_NewMemoryMedium_Good(t *testing.T) { medium := NewMemoryMedium() assert.NotNil(t, medium) assert.NotNil(t, medium.Files) @@ -20,6 +21,31 @@ func TestClient_NewMemoryMedium_Good(t *testing.T) { assert.Empty(t, medium.Dirs) } +func TestMemoryMedium_NewFileInfo_Good(t *testing.T) { + info := NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) + + assert.Equal(t, "app.yaml", info.Name()) + assert.Equal(t, int64(8), info.Size()) + assert.Equal(t, fs.FileMode(0644), info.Mode()) + assert.True(t, info.ModTime().Equal(time.Unix(0, 0))) + assert.False(t, info.IsDir()) + assert.Nil(t, info.Sys()) +} + +func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { + info := NewFileInfo("app.yaml", 8, 0644, time.Unix(0, 0), false) + entry := NewDirEntry("app.yaml", false, 0644, info) + + assert.Equal(t, "app.yaml", entry.Name()) + assert.False(t, entry.IsDir()) + assert.Equal(t, fs.FileMode(0), entry.Type()) + + entryInfo, err := entry.Info() + require.NoError(t, err) + assert.Equal(t, "app.yaml", entryInfo.Name()) + assert.Equal(t, int64(8), entryInfo.Size()) +} + func TestClient_MockMedium_Read_Good(t *testing.T) { m := NewMockMedium() m.Files["test.txt"] = "hello world" diff --git a/node/node.go 
b/node/node.go index 8b1c04e..0436dcb 100644 --- a/node/node.go +++ b/node/node.go @@ -31,6 +31,8 @@ type Node struct { var _ coreio.Medium = (*Node)(nil) var _ fs.ReadFileFS = (*Node)(nil) +// Example: nodeTree := node.New() +// Example: _ = nodeTree.Write("config/app.yaml", "port: 8080") func New() *Node { return &Node{files: make(map[string]*dataFile)} } @@ -123,6 +125,7 @@ func (node *Node) LoadTar(data []byte) error { return nil } +// Example: _ = nodeTree.WalkNode("config", func(_ string, _ fs.DirEntry, _ error) error { return nil }) func (node *Node) WalkNode(root string, fn fs.WalkDirFunc) error { return fs.WalkDir(node, root, fn) } @@ -175,6 +178,7 @@ func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOp }) } +// Example: content, _ := nodeTree.ReadFile("config/app.yaml") func (node *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") file, ok := node.files[name] diff --git a/s3/s3.go b/s3/s3.go index 0a32220..53366e6 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -108,7 +108,6 @@ func New(options Options) (*Medium, error) { return medium, nil } -// objectKey maps a virtual path to the full S3 object key. func (medium *Medium) objectKey(filePath string) string { // Clean the path using a leading "/" to sandbox traversal attempts, // then strip the "/" prefix. This ensures ".." can't escape. diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 10729d4..6173722 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -86,8 +86,6 @@ func (medium *Medium) Close() error { return nil } -// normaliseEntryPath normalises a path for consistent storage. -// Uses a leading "/" before Clean to sandbox traversal attempts. 
func normaliseEntryPath(filePath string) string { clean := path.Clean("/" + filePath) if clean == "/" { @@ -424,7 +422,10 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { } } - return entries, rows.Err() + if err := rows.Err(); err != nil { + return nil, core.E("sqlite.List", "rows", err) + } + return entries, nil } func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { diff --git a/store/medium.go b/store/medium.go index 7461264..b33c937 100644 --- a/store/medium.go +++ b/store/medium.go @@ -45,7 +45,6 @@ func (medium *Medium) Close() error { return medium.store.Close() } -// splitGroupKeyPath splits a group/key path into store components. func splitGroupKeyPath(entryPath string) (group, key string) { clean := path.Clean(entryPath) clean = core.TrimPrefix(clean, "/") @@ -166,7 +165,10 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { } entries = append(entries, &keyValueDirEntry{name: groupName, isDir: true}) } - return entries, rows.Err() + if err := rows.Err(); err != nil { + return nil, core.E("store.List", "rows", err) + } + return entries, nil } if key != "" { diff --git a/workspace/doc.go b/workspace/doc.go index a0ea740..a704cea 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,6 +1,6 @@ // Package workspace creates encrypted workspaces on top of io.Medium. 
// -// service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") // _ = service.SwitchWorkspace(workspaceID) // _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") diff --git a/workspace/service.go b/workspace/service.go index 50e2ba9..78990bd 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,7 @@ import ( "dappco.re/go/core/io" ) -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -41,26 +41,24 @@ type WorkspaceCommand struct { WorkspaceID string } -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) type Options struct { - // Core is the Core runtime used by the service. Core *core.Core - // Crypt is the PGP key generation dependency. 
- Crypt CryptProvider + CryptProvider CryptProvider } -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) type Service struct { - crypt CryptProvider + cryptProvider CryptProvider activeWorkspaceID string rootPath string medium io.Medium - lock sync.RWMutex + stateLock sync.RWMutex } var _ Workspace = (*Service)(nil) -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := resolveWorkspaceHomeDirectory() @@ -72,14 +70,14 @@ func New(options Options) (*Service, error) { if options.Core == nil { return nil, core.E("workspace.New", "core is required", fs.ErrInvalid) } - - service := &Service{ - rootPath: rootPath, - medium: io.Local, + if options.CryptProvider == nil { + return nil, core.E("workspace.New", "crypt provider is required", fs.ErrInvalid) } - if options.Crypt != nil { - service.crypt = options.Crypt + service := &Service{ + cryptProvider: options.CryptProvider, + rootPath: rootPath, + medium: io.Local, } if err := service.medium.EnsureDir(rootPath); err != nil { @@ -91,11 +89,11 @@ func New(options Options) (*Service, error) { // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") func (service *Service) CreateWorkspace(identifier, password string) (string, error) { - service.lock.Lock() - defer service.lock.Unlock() + service.stateLock.Lock() + defer service.stateLock.Unlock() - if service.crypt == nil { - return "", core.E("workspace.CreateWorkspace", "crypt service not available", nil) + if service.cryptProvider == nil { + return "", core.E("workspace.CreateWorkspace", "crypt provider not available", nil) } hash := 
sha256.Sum256([]byte(identifier)) @@ -115,7 +113,7 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er } } - privKey, err := service.crypt.CreateKeyPair(identifier, password) + privKey, err := service.cryptProvider.CreateKeyPair(identifier, password) if err != nil { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } @@ -129,8 +127,8 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er // Example: _ = service.SwitchWorkspace(workspaceID) func (service *Service) SwitchWorkspace(workspaceID string) error { - service.lock.Lock() - defer service.lock.Unlock() + service.stateLock.Lock() + defer service.stateLock.Unlock() workspaceDirectory, err := service.resolveWorkspaceDirectory("workspace.SwitchWorkspace", workspaceID) if err != nil { @@ -144,8 +142,6 @@ func (service *Service) SwitchWorkspace(workspaceID string) error { return nil } -// resolveActiveWorkspaceFilePath resolves a file path inside the active workspace files root. -// It rejects empty names and traversal outside the workspace root. 
func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath string) (string, error) { if service.activeWorkspaceID == "" { return "", core.E(operation, "no active workspace", nil) @@ -163,8 +159,8 @@ func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFileP // Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { - service.lock.RLock() - defer service.lock.RUnlock() + service.stateLock.RLock() + defer service.stateLock.RUnlock() filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) if err != nil { @@ -175,8 +171,8 @@ func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, erro // Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) error { - service.lock.Lock() - defer service.lock.Unlock() + service.stateLock.Lock() + defer service.stateLock.Unlock() filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) if err != nil { diff --git a/workspace/service_test.go b/workspace/service_test.go index 7dd5042..13c65b0 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -8,12 +8,12 @@ import ( "github.com/stretchr/testify/require" ) -type stubCrypt struct { +type stubCryptProvider struct { key string err error } -func (s stubCrypt) CreateKeyPair(_, _ string) (string, error) { +func (s stubCryptProvider) CreateKeyPair(_, _ string) (string, error) { if s.err != nil { return "", s.err } @@ -26,11 +26,16 @@ func newTestService(t *testing.T) (*Service, string) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(Options{Core: core.New(), Crypt: stubCrypt{key: "private-key"}}) + svc, err := New(Options{Core: core.New(), CryptProvider: stubCryptProvider{key: "private-key"}}) require.NoError(t, 
err) return svc, tempHome } +func TestService_New_MissingCryptProvider_Bad(t *testing.T) { + _, err := New(Options{Core: core.New()}) + require.Error(t, err) +} + func TestService_Workspace_RoundTrip_Good(t *testing.T) { s, tempHome := newTestService(t) From 14418b7782c87196279e14d7012085e76be6847d Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:41:48 +0000 Subject: [PATCH 33/83] refactor: tighten AX-facing comments --- datanode/medium.go | 1 - node/node.go | 5 ----- workspace/service.go | 19 ++++--------------- 3 files changed, 4 insertions(+), 21 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 49979fa..bad0d75 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -92,7 +92,6 @@ func (medium *Medium) DataNode() *borgdatanode.DataNode { return medium.dataNode } -// normaliseEntryPath normalises a path: strips the leading slash and cleans traversal. func normaliseEntryPath(filePath string) string { filePath = core.TrimPrefix(filePath, "/") filePath = path.Clean(filePath) diff --git a/node/node.go b/node/node.go index 0436dcb..164a534 100644 --- a/node/node.go +++ b/node/node.go @@ -502,7 +502,6 @@ func (writer *nodeWriter) Close() error { return nil } -// dataFile represents a file in the Node. type dataFile struct { name string content []byte @@ -515,7 +514,6 @@ func (file *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } func (file *dataFile) Close() error { return nil } -// dataFileInfo implements fs.FileInfo for a dataFile. type dataFileInfo struct{ file *dataFile } func (info *dataFileInfo) Name() string { return path.Base(info.file.name) } @@ -530,7 +528,6 @@ func (info *dataFileInfo) IsDir() bool { return false } func (info *dataFileInfo) Sys() any { return nil } -// dataFileReader implements fs.File for reading a dataFile. 
type dataFileReader struct { file *dataFile reader *bytes.Reader @@ -547,7 +544,6 @@ func (reader *dataFileReader) Read(buffer []byte) (int, error) { func (reader *dataFileReader) Close() error { return nil } -// dirInfo implements fs.FileInfo for an implicit directory. type dirInfo struct { name string modTime time.Time @@ -565,7 +561,6 @@ func (info *dirInfo) IsDir() bool { return true } func (info *dirInfo) Sys() any { return nil } -// dirFile implements fs.File for a directory. type dirFile struct { path string modTime time.Time diff --git a/workspace/service.go b/workspace/service.go index 78990bd..efe2269 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -29,11 +29,7 @@ const ( WorkspaceSwitchAction = "workspace.switch" ) -// Example: command := WorkspaceCommand{ -// Action: WorkspaceCreateAction, -// Identifier: "alice", -// Password: "pass123", -// } +// Example: command := WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"} type WorkspaceCommand struct { Action string Identifier string @@ -43,7 +39,7 @@ type WorkspaceCommand struct { // Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) type Options struct { - Core *core.Core + Core *core.Core CryptProvider CryptProvider } @@ -181,11 +177,7 @@ func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) erro return service.medium.Write(filePath, content) } -// Example: result := service.HandleWorkspaceCommand(WorkspaceCommand{ -// Action: WorkspaceCreateAction, -// Identifier: "alice", -// Password: "pass123", -// }) +// Example: result := service.HandleWorkspaceCommand(WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { switch command.Action { case WorkspaceCreateAction: @@ -203,10 +195,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) 
core.Re return core.Result{OK: true} } -// Example: result := service.HandleIPCEvents(core.New(), map[string]any{ -// "action": WorkspaceSwitchAction, -// "workspaceID": "f3f0d7", -// }) +// Example: result := service.HandleIPCEvents(core.New(), map[string]any{"action": WorkspaceSwitchAction, "workspaceID": "f3f0d7"}) // HandleIPCEvents preserves the legacy map[string]any payload and still accepts WorkspaceCommand values. func (service *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { switch payload := message.(type) { From 64427aec1b43b19ea01acb0e892e19464096a82a Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:45:15 +0000 Subject: [PATCH 34/83] refactor(ax): add semantic workspace message handler --- workspace/service.go | 25 +++++++++++++++++++------ workspace/service_test.go | 31 +++++++++++++++++++++++-------- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/workspace/service.go b/workspace/service.go index efe2269..22681ba 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -195,21 +195,34 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re return core.Result{OK: true} } -// Example: result := service.HandleIPCEvents(core.New(), map[string]any{"action": WorkspaceSwitchAction, "workspaceID": "f3f0d7"}) -// HandleIPCEvents preserves the legacy map[string]any payload and still accepts WorkspaceCommand values. 
-func (service *Service) HandleIPCEvents(_ *core.Core, message core.Message) core.Result { +// Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) +// Example: legacy := service.HandleWorkspaceMessage(core.New(), map[string]any{"action": WorkspaceCreateAction, "identifier": "alice", "password": "pass123"}) +func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { + command, ok := workspaceCommandFromMessage(message) + if !ok { + return core.Result{OK: true} + } + return service.HandleWorkspaceCommand(command) +} + +// Example: result := service.HandleIPCEvents(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) +func (service *Service) HandleIPCEvents(coreRuntime *core.Core, message core.Message) core.Result { + return service.HandleWorkspaceMessage(coreRuntime, message) +} + +func workspaceCommandFromMessage(message core.Message) (WorkspaceCommand, bool) { switch payload := message.(type) { case WorkspaceCommand: - return service.HandleWorkspaceCommand(payload) + return payload, true case map[string]any: command := WorkspaceCommand{} command.Action, _ = payload["action"].(string) command.Identifier, _ = payload["identifier"].(string) command.Password, _ = payload["password"].(string) command.WorkspaceID, _ = payload["workspaceID"].(string) - return service.HandleWorkspaceCommand(command) + return command, true } - return core.Result{OK: true} + return WorkspaceCommand{}, false } func resolveWorkspaceHomeDirectory() string { diff --git a/workspace/service_test.go b/workspace/service_test.go index 13c65b0..31ddf94 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -93,10 +93,10 @@ func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { require.Error(t, err) } -func TestService_HandleWorkspaceCommand_Good(t *testing.T) { +func TestService_HandleWorkspaceMessage_Good(t 
*testing.T) { s, _ := newTestService(t) - create := s.HandleWorkspaceCommand(WorkspaceCommand{ + create := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceCreateAction, Identifier: "ipc-user", Password: "pass123", @@ -107,14 +107,14 @@ func TestService_HandleWorkspaceCommand_Good(t *testing.T) { require.True(t, ok) require.NotEmpty(t, workspaceID) - switchResult := s.HandleWorkspaceCommand(WorkspaceCommand{ + switchResult := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceSwitchAction, WorkspaceID: workspaceID, }) assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, s.activeWorkspaceID) - legacyCreate := s.HandleIPCEvents(core.New(), map[string]any{ + legacyCreate := s.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceCreateAction, "identifier": "legacy-user", "password": "pass123", @@ -125,26 +125,41 @@ func TestService_HandleWorkspaceCommand_Good(t *testing.T) { require.True(t, ok) require.NotEmpty(t, legacyWorkspaceID) - legacySwitch := s.HandleIPCEvents(core.New(), WorkspaceCommand{ + legacySwitch := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceSwitchAction, WorkspaceID: legacyWorkspaceID, }) assert.True(t, legacySwitch.OK) assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) - rejectedLegacySwitch := s.HandleIPCEvents(core.New(), map[string]any{ + rejectedLegacySwitch := s.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceSwitchAction, "name": workspaceID, }) assert.False(t, rejectedLegacySwitch.OK) assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) - failedSwitch := s.HandleIPCEvents(core.New(), map[string]any{ + failedSwitch := s.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceSwitchAction, "workspaceID": "missing", }) assert.False(t, failedSwitch.OK) - unknown := s.HandleIPCEvents(core.New(), "noop") + unknown := s.HandleWorkspaceMessage(core.New(), "noop") assert.True(t, unknown.OK) } + +func 
TestService_HandleIPCEvents_Compatibility_Good(t *testing.T) { + s, _ := newTestService(t) + + result := s.HandleIPCEvents(core.New(), WorkspaceCommand{ + Action: WorkspaceCreateAction, + Identifier: "compat-user", + Password: "pass123", + }) + + assert.True(t, result.OK) + workspaceID, ok := result.Value.(string) + require.True(t, ok) + require.NotEmpty(t, workspaceID) +} From 64854a8268145295b35761037ab2f79d5309d239 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:46:05 +0000 Subject: [PATCH 35/83] refactor(ax): simplify workspace options --- workspace/doc.go | 8 ++++---- workspace/service.go | 14 +++++--------- workspace/service_test.go | 4 ++-- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/workspace/doc.go b/workspace/doc.go index a704cea..b7e301b 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,7 +1,7 @@ // Package workspace creates encrypted workspaces on top of io.Medium. // -// service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) -// workspaceID, _ := service.CreateWorkspace("alice", "pass123") -// _ = service.SwitchWorkspace(workspaceID) -// _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") +// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// workspaceID, _ := service.CreateWorkspace("alice", "pass123") +// _ = service.SwitchWorkspace(workspaceID) +// _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") package workspace diff --git a/workspace/service.go b/workspace/service.go index 22681ba..a53a18a 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,7 @@ import ( "dappco.re/go/core/io" ) -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID 
string) error @@ -19,7 +19,7 @@ type Workspace interface { WorkspaceFileSet(workspaceFilePath, content string) error } -// CryptProvider generates the encrypted private key stored with each workspace. +// Example: key, _ := cryptProvider.CreateKeyPair("alice", "pass123") type CryptProvider interface { CreateKeyPair(name, passphrase string) (string, error) } @@ -37,13 +37,12 @@ type WorkspaceCommand struct { WorkspaceID string } -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) type Options struct { - Core *core.Core CryptProvider CryptProvider } -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) type Service struct { cryptProvider CryptProvider activeWorkspaceID string @@ -54,7 +53,7 @@ type Service struct { var _ Workspace = (*Service)(nil) -// Example: service, _ := workspace.New(workspace.Options{Core: core.New(), CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := resolveWorkspaceHomeDirectory() @@ -63,9 +62,6 @@ func New(options Options) (*Service, error) { } rootPath := core.Path(home, ".core", "workspaces") - if options.Core == nil { - return nil, core.E("workspace.New", "core is required", fs.ErrInvalid) - } if options.CryptProvider == nil { return nil, core.E("workspace.New", "crypt provider is required", fs.ErrInvalid) } diff --git a/workspace/service_test.go b/workspace/service_test.go index 31ddf94..c707754 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -26,13 +26,13 @@ func newTestService(t *testing.T) (*Service, string) { tempHome := t.TempDir() 
t.Setenv("HOME", tempHome) - svc, err := New(Options{Core: core.New(), CryptProvider: stubCryptProvider{key: "private-key"}}) + svc, err := New(Options{CryptProvider: stubCryptProvider{key: "private-key"}}) require.NoError(t, err) return svc, tempHome } func TestService_New_MissingCryptProvider_Bad(t *testing.T) { - _, err := New(Options{Core: core.New()}) + _, err := New(Options{}) require.Error(t, err) } From 3a5f9bb005818a16af1845fa4971074ebdfe8a25 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:47:27 +0000 Subject: [PATCH 36/83] refactor(ax): encapsulate memory medium internals --- io.go | 116 +++++++++++++++++++++--------------------- local/medium.go | 8 ++- medium_test.go | 92 ++++++++++++++++----------------- node/node.go | 2 - node/node_test.go | 12 +++-- sigil/crypto_sigil.go | 8 ++- sqlite/sqlite.go | 2 - 7 files changed, 119 insertions(+), 121 deletions(-) diff --git a/io.go b/io.go index b907975..f4c4744 100644 --- a/io.go +++ b/io.go @@ -183,9 +183,9 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath // Example: medium := io.NewMemoryMedium() // _ = medium.Write("config/app.yaml", "port: 8080") type MemoryMedium struct { - Files map[string]string - Dirs map[string]bool - ModTimes map[string]time.Time + files map[string]string + dirs map[string]bool + modTimes map[string]time.Time } // MockMedium is a compatibility alias for MemoryMedium. 
@@ -197,9 +197,9 @@ var _ Medium = (*MemoryMedium)(nil) // _ = medium.Write("config/app.yaml", "port: 8080") func NewMemoryMedium() *MemoryMedium { return &MemoryMedium{ - Files: make(map[string]string), - Dirs: make(map[string]bool), - ModTimes: make(map[string]time.Time), + files: make(map[string]string), + dirs: make(map[string]bool), + modTimes: make(map[string]time.Time), } } @@ -212,7 +212,7 @@ func NewMockMedium() *MemoryMedium { } func (medium *MemoryMedium) Read(path string) (string, error) { - content, ok := medium.Files[path] + content, ok := medium.files[path] if !ok { return "", core.E("io.MemoryMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) } @@ -220,8 +220,8 @@ func (medium *MemoryMedium) Read(path string) (string, error) { } func (medium *MemoryMedium) Write(path, content string) error { - medium.Files[path] = content - medium.ModTimes[path] = time.Now() + medium.files[path] = content + medium.modTimes[path] = time.Now() return nil } @@ -230,12 +230,12 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er } func (medium *MemoryMedium) EnsureDir(path string) error { - medium.Dirs[path] = true + medium.dirs[path] = true return nil } func (medium *MemoryMedium) IsFile(path string) bool { - _, ok := medium.Files[path] + _, ok := medium.files[path] return ok } @@ -248,26 +248,26 @@ func (medium *MemoryMedium) FileSet(path, content string) error { } func (medium *MemoryMedium) Delete(path string) error { - if _, ok := medium.Files[path]; ok { - delete(medium.Files, path) + if _, ok := medium.files[path]; ok { + delete(medium.files, path) return nil } - if _, ok := medium.Dirs[path]; ok { + if _, ok := medium.dirs[path]; ok { prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.Files { + for filePath := range medium.files { if core.HasPrefix(filePath, prefix) { return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - 
for directoryPath := range medium.Dirs { + for directoryPath := range medium.dirs { if directoryPath != path && core.HasPrefix(directoryPath, prefix) { return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - delete(medium.Dirs, path) + delete(medium.dirs, path) return nil } return core.E("io.MemoryMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) @@ -275,27 +275,27 @@ func (medium *MemoryMedium) Delete(path string) error { func (medium *MemoryMedium) DeleteAll(path string) error { found := false - if _, ok := medium.Files[path]; ok { - delete(medium.Files, path) + if _, ok := medium.files[path]; ok { + delete(medium.files, path) found = true } - if _, ok := medium.Dirs[path]; ok { - delete(medium.Dirs, path) + if _, ok := medium.dirs[path]; ok { + delete(medium.dirs, path) found = true } prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.Files { + for filePath := range medium.files { if core.HasPrefix(filePath, prefix) { - delete(medium.Files, filePath) + delete(medium.files, filePath) found = true } } - for directoryPath := range medium.Dirs { + for directoryPath := range medium.dirs { if core.HasPrefix(directoryPath, prefix) { - delete(medium.Dirs, directoryPath) + delete(medium.dirs, directoryPath) found = true } } @@ -307,18 +307,18 @@ func (medium *MemoryMedium) DeleteAll(path string) error { } func (medium *MemoryMedium) Rename(oldPath, newPath string) error { - if content, ok := medium.Files[oldPath]; ok { - medium.Files[newPath] = content - delete(medium.Files, oldPath) - if modTime, ok := medium.ModTimes[oldPath]; ok { - medium.ModTimes[newPath] = modTime - delete(medium.ModTimes, oldPath) + if content, ok := medium.files[oldPath]; ok { + medium.files[newPath] = content + delete(medium.files, oldPath) + if modTime, ok := medium.modTimes[oldPath]; ok { + medium.modTimes[newPath] = modTime + delete(medium.modTimes, oldPath) } return nil } - if _, 
ok := medium.Dirs[oldPath]; ok { - medium.Dirs[newPath] = true - delete(medium.Dirs, oldPath) + if _, ok := medium.dirs[oldPath]; ok { + medium.dirs[newPath] = true + delete(medium.dirs, oldPath) oldPrefix := oldPath if !core.HasSuffix(oldPrefix, "/") { @@ -330,31 +330,31 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { } filesToMove := make(map[string]string) - for filePath := range medium.Files { + for filePath := range medium.files { if core.HasPrefix(filePath, oldPrefix) { newFilePath := core.Concat(newPrefix, core.TrimPrefix(filePath, oldPrefix)) filesToMove[filePath] = newFilePath } } for oldFilePath, newFilePath := range filesToMove { - medium.Files[newFilePath] = medium.Files[oldFilePath] - delete(medium.Files, oldFilePath) - if modTime, ok := medium.ModTimes[oldFilePath]; ok { - medium.ModTimes[newFilePath] = modTime - delete(medium.ModTimes, oldFilePath) + medium.files[newFilePath] = medium.files[oldFilePath] + delete(medium.files, oldFilePath) + if modTime, ok := medium.modTimes[oldFilePath]; ok { + medium.modTimes[newFilePath] = modTime + delete(medium.modTimes, oldFilePath) } } dirsToMove := make(map[string]string) - for directoryPath := range medium.Dirs { + for directoryPath := range medium.dirs { if core.HasPrefix(directoryPath, oldPrefix) { newDirectoryPath := core.Concat(newPrefix, core.TrimPrefix(directoryPath, oldPrefix)) dirsToMove[directoryPath] = newDirectoryPath } } for oldDirectoryPath, newDirectoryPath := range dirsToMove { - medium.Dirs[newDirectoryPath] = true - delete(medium.Dirs, oldDirectoryPath) + medium.dirs[newDirectoryPath] = true + delete(medium.dirs, oldDirectoryPath) } return nil } @@ -362,7 +362,7 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { } func (medium *MemoryMedium) Open(path string) (fs.File, error) { - content, ok := medium.Files[path] + content, ok := medium.files[path] if !ok { return nil, core.E("io.MemoryMedium.Open", core.Concat("file not found: ", path), 
fs.ErrNotExist) } @@ -380,7 +380,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { } func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { - content := medium.Files[path] + content := medium.files[path] return &MemoryWriteCloser{ medium: medium, path: path, @@ -439,26 +439,26 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { } func (writeCloser *MemoryWriteCloser) Close() error { - writeCloser.medium.Files[writeCloser.path] = string(writeCloser.data) - writeCloser.medium.ModTimes[writeCloser.path] = time.Now() + writeCloser.medium.files[writeCloser.path] = string(writeCloser.data) + writeCloser.medium.modTimes[writeCloser.path] = time.Now() return nil } func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { - if _, ok := medium.Dirs[path]; !ok { + if _, ok := medium.dirs[path]; !ok { hasChildren := false prefix := path if path != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.Files { + for filePath := range medium.files { if core.HasPrefix(filePath, prefix) { hasChildren = true break } } if !hasChildren { - for directoryPath := range medium.Dirs { + for directoryPath := range medium.dirs { if core.HasPrefix(directoryPath, prefix) { hasChildren = true break @@ -478,7 +478,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { seen := make(map[string]bool) var entries []fs.DirEntry - for filePath, content := range medium.Files { + for filePath, content := range medium.files { if !core.HasPrefix(filePath, prefix) { continue } @@ -509,7 +509,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } } - for directoryPath := range medium.Dirs { + for directoryPath := range medium.dirs { if !core.HasPrefix(directoryPath, prefix) { continue } @@ -535,30 +535,30 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { 
- if content, ok := medium.Files[path]; ok { - modTime, ok := medium.ModTimes[path] + if content, ok := medium.files[path]; ok { + modTime, ok := medium.modTimes[path] if !ok { modTime = time.Now() } return NewFileInfo(core.PathBase(path), int64(len(content)), 0644, modTime, false), nil } - if _, ok := medium.Dirs[path]; ok { + if _, ok := medium.dirs[path]; ok { return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil } return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } func (medium *MemoryMedium) Exists(path string) bool { - if _, ok := medium.Files[path]; ok { + if _, ok := medium.files[path]; ok { return true } - if _, ok := medium.Dirs[path]; ok { + if _, ok := medium.dirs[path]; ok { return true } return false } func (medium *MemoryMedium) IsDir(path string) bool { - _, ok := medium.Dirs[path] + _, ok := medium.dirs[path] return ok } diff --git a/local/medium.go b/local/medium.go index 9247b15..b1c43ed 100644 --- a/local/medium.go +++ b/local/medium.go @@ -1,8 +1,6 @@ -// Package local provides the io.Medium implementation for the local filesystem. 
-// -// medium, _ := local.New("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") -// content, _ := medium.Read("config/app.yaml") +// Example: medium, _ := local.New("/srv/app") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") +// Example: content, _ := medium.Read("config/app.yaml") package local import ( diff --git a/medium_test.go b/medium_test.go index 390dcd2..1faab06 100644 --- a/medium_test.go +++ b/medium_test.go @@ -15,10 +15,10 @@ import ( func TestMemoryMedium_NewMemoryMedium_Good(t *testing.T) { medium := NewMemoryMedium() assert.NotNil(t, medium) - assert.NotNil(t, medium.Files) - assert.NotNil(t, medium.Dirs) - assert.Empty(t, medium.Files) - assert.Empty(t, medium.Dirs) + assert.NotNil(t, medium.files) + assert.NotNil(t, medium.dirs) + assert.Empty(t, medium.files) + assert.Empty(t, medium.dirs) } func TestMemoryMedium_NewFileInfo_Good(t *testing.T) { @@ -48,7 +48,7 @@ func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { func TestClient_MockMedium_Read_Good(t *testing.T) { m := NewMockMedium() - m.Files["test.txt"] = "hello world" + m.files["test.txt"] = "hello world" content, err := m.Read("test.txt") assert.NoError(t, err) assert.Equal(t, "hello world", content) @@ -64,12 +64,12 @@ func TestClient_MockMedium_Write_Good(t *testing.T) { m := NewMockMedium() err := m.Write("test.txt", "content") assert.NoError(t, err) - assert.Equal(t, "content", m.Files["test.txt"]) + assert.Equal(t, "content", m.files["test.txt"]) // Overwrite existing file err = m.Write("test.txt", "new content") assert.NoError(t, err) - assert.Equal(t, "new content", m.Files["test.txt"]) + assert.Equal(t, "new content", m.files["test.txt"]) } func TestClient_MockMedium_WriteMode_Good(t *testing.T) { @@ -87,12 +87,12 @@ func TestClient_MockMedium_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := m.EnsureDir("/path/to/dir") assert.NoError(t, err) - assert.True(t, m.Dirs["/path/to/dir"]) + assert.True(t, m.dirs["/path/to/dir"]) } func 
TestClient_MockMedium_IsFile_Good(t *testing.T) { m := NewMockMedium() - m.Files["exists.txt"] = "content" + m.files["exists.txt"] = "content" assert.True(t, m.IsFile("exists.txt")) assert.False(t, m.IsFile("nonexistent.txt")) @@ -100,7 +100,7 @@ func TestClient_MockMedium_IsFile_Good(t *testing.T) { func TestClient_MockMedium_FileGet_Good(t *testing.T) { m := NewMockMedium() - m.Files["test.txt"] = "content" + m.files["test.txt"] = "content" content, err := m.FileGet("test.txt") assert.NoError(t, err) assert.Equal(t, "content", content) @@ -110,12 +110,12 @@ func TestClient_MockMedium_FileSet_Good(t *testing.T) { m := NewMockMedium() err := m.FileSet("test.txt", "content") assert.NoError(t, err) - assert.Equal(t, "content", m.Files["test.txt"]) + assert.Equal(t, "content", m.files["test.txt"]) } func TestClient_MockMedium_Delete_Good(t *testing.T) { m := NewMockMedium() - m.Files["test.txt"] = "content" + m.files["test.txt"] = "content" err := m.Delete("test.txt") assert.NoError(t, err) @@ -130,8 +130,8 @@ func TestClient_MockMedium_Delete_NotFound_Bad(t *testing.T) { func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { m := NewMockMedium() - m.Dirs["mydir"] = true - m.Files["mydir/file.txt"] = "content" + m.dirs["mydir"] = true + m.files["mydir/file.txt"] = "content" err := m.Delete("mydir") assert.Error(t, err) @@ -139,46 +139,46 @@ func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { func TestClient_MockMedium_DeleteAll_Good(t *testing.T) { m := NewMockMedium() - m.Dirs["mydir"] = true - m.Dirs["mydir/subdir"] = true - m.Files["mydir/file.txt"] = "content" - m.Files["mydir/subdir/nested.txt"] = "nested" + m.dirs["mydir"] = true + m.dirs["mydir/subdir"] = true + m.files["mydir/file.txt"] = "content" + m.files["mydir/subdir/nested.txt"] = "nested" err := m.DeleteAll("mydir") assert.NoError(t, err) - assert.Empty(t, m.Dirs) - assert.Empty(t, m.Files) + assert.Empty(t, m.dirs) + assert.Empty(t, m.files) } func 
TestClient_MockMedium_Rename_Good(t *testing.T) { m := NewMockMedium() - m.Files["old.txt"] = "content" + m.files["old.txt"] = "content" err := m.Rename("old.txt", "new.txt") assert.NoError(t, err) assert.False(t, m.IsFile("old.txt")) assert.True(t, m.IsFile("new.txt")) - assert.Equal(t, "content", m.Files["new.txt"]) + assert.Equal(t, "content", m.files["new.txt"]) } func TestClient_MockMedium_Rename_Dir_Good(t *testing.T) { m := NewMockMedium() - m.Dirs["olddir"] = true - m.Files["olddir/file.txt"] = "content" + m.dirs["olddir"] = true + m.files["olddir/file.txt"] = "content" err := m.Rename("olddir", "newdir") assert.NoError(t, err) - assert.False(t, m.Dirs["olddir"]) - assert.True(t, m.Dirs["newdir"]) - assert.Equal(t, "content", m.Files["newdir/file.txt"]) + assert.False(t, m.dirs["olddir"]) + assert.True(t, m.dirs["newdir"]) + assert.Equal(t, "content", m.files["newdir/file.txt"]) } func TestClient_MockMedium_List_Good(t *testing.T) { m := NewMockMedium() - m.Dirs["mydir"] = true - m.Files["mydir/file1.txt"] = "content1" - m.Files["mydir/file2.txt"] = "content2" - m.Dirs["mydir/subdir"] = true + m.dirs["mydir"] = true + m.files["mydir/file1.txt"] = "content1" + m.files["mydir/file2.txt"] = "content2" + m.dirs["mydir/subdir"] = true entries, err := m.List("mydir") assert.NoError(t, err) @@ -195,7 +195,7 @@ func TestClient_MockMedium_List_Good(t *testing.T) { func TestClient_MockMedium_Stat_Good(t *testing.T) { m := NewMockMedium() - m.Files["test.txt"] = "hello world" + m.files["test.txt"] = "hello world" info, err := m.Stat("test.txt") assert.NoError(t, err) @@ -206,7 +206,7 @@ func TestClient_MockMedium_Stat_Good(t *testing.T) { func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) { m := NewMockMedium() - m.Dirs["mydir"] = true + m.dirs["mydir"] = true info, err := m.Stat("mydir") assert.NoError(t, err) @@ -216,8 +216,8 @@ func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) { func TestClient_MockMedium_Exists_Good(t *testing.T) { m := NewMockMedium() - 
m.Files["file.txt"] = "content" - m.Dirs["mydir"] = true + m.files["file.txt"] = "content" + m.dirs["mydir"] = true assert.True(t, m.Exists("file.txt")) assert.True(t, m.Exists("mydir")) @@ -226,8 +226,8 @@ func TestClient_MockMedium_Exists_Good(t *testing.T) { func TestClient_MockMedium_IsDir_Good(t *testing.T) { m := NewMockMedium() - m.Files["file.txt"] = "content" - m.Dirs["mydir"] = true + m.files["file.txt"] = "content" + m.dirs["mydir"] = true assert.False(t, m.IsDir("file.txt")) assert.True(t, m.IsDir("mydir")) @@ -293,14 +293,14 @@ func TestClient_MockMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, err) require.NoError(t, writeStream.Close()) - assert.Equal(t, "stream output", m.Files["streamed.txt"]) + assert.Equal(t, "stream output", m.files["streamed.txt"]) } // --- Wrapper Function Tests --- func TestClient_Read_Good(t *testing.T) { m := NewMockMedium() - m.Files["test.txt"] = "hello" + m.files["test.txt"] = "hello" content, err := Read(m, "test.txt") assert.NoError(t, err) assert.Equal(t, "hello", content) @@ -310,19 +310,19 @@ func TestClient_Write_Good(t *testing.T) { m := NewMockMedium() err := Write(m, "test.txt", "hello") assert.NoError(t, err) - assert.Equal(t, "hello", m.Files["test.txt"]) + assert.Equal(t, "hello", m.files["test.txt"]) } func TestClient_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := EnsureDir(m, "/my/dir") assert.NoError(t, err) - assert.True(t, m.Dirs["/my/dir"]) + assert.True(t, m.dirs["/my/dir"]) } func TestClient_IsFile_Good(t *testing.T) { m := NewMockMedium() - m.Files["exists.txt"] = "content" + m.files["exists.txt"] = "content" assert.True(t, IsFile(m, "exists.txt")) assert.False(t, IsFile(m, "nonexistent.txt")) @@ -362,16 +362,16 @@ func TestClient_ReadWriteStream_Good(t *testing.T) { func TestClient_Copy_Good(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() - source.Files["test.txt"] = "hello" + source.files["test.txt"] = "hello" err := Copy(source, "test.txt", dest, 
"test.txt") assert.NoError(t, err) - assert.Equal(t, "hello", dest.Files["test.txt"]) + assert.Equal(t, "hello", dest.files["test.txt"]) // Copy to different path - source.Files["original.txt"] = "content" + source.files["original.txt"] = "content" err = Copy(source, "original.txt", dest, "copied.txt") assert.NoError(t, err) - assert.Equal(t, "content", dest.Files["copied.txt"]) + assert.Equal(t, "content", dest.files["copied.txt"]) } func TestClient_Copy_Bad(t *testing.T) { diff --git a/node/node.go b/node/node.go index 164a534..73f3b68 100644 --- a/node/node.go +++ b/node/node.go @@ -1,5 +1,3 @@ -// Package node keeps io.Medium data in memory. -// // Example: nodeTree := node.New() // Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) // Example: snapshot, _ := nodeTree.ToTar() diff --git a/node/node_test.go b/node/node_test.go index 6918e0e..e0a95d8 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -423,13 +423,19 @@ func TestNode_CopyTo_Good(t *testing.T) { fileTarget := coreio.NewMemoryMedium() err := n.CopyTo(fileTarget, "config/app.yaml", "backup/app.yaml") require.NoError(t, err) - assert.Equal(t, "port: 8080", fileTarget.Files["backup/app.yaml"]) + content, err := fileTarget.Read("backup/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) dirTarget := coreio.NewMemoryMedium() err = n.CopyTo(dirTarget, "config", "backup/config") require.NoError(t, err) - assert.Equal(t, "port: 8080", dirTarget.Files["backup/config/app.yaml"]) - assert.Equal(t, "MODE=test", dirTarget.Files["backup/config/env/app.env"]) + content, err = dirTarget.Read("backup/config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) + content, err = dirTarget.Read("backup/config/env/app.env") + require.NoError(t, err) + assert.Equal(t, "MODE=test", content) } func TestNode_CopyTo_Bad(t *testing.T) { diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 52e4ba5..f3a27c0 100644 --- a/sigil/crypto_sigil.go 
+++ b/sigil/crypto_sigil.go @@ -1,8 +1,6 @@ -// Package sigil wraps XChaCha20-Poly1305 with deterministic pre-obfuscation. -// -// cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) -// ciphertext, _ := cipherSigil.In([]byte("payload")) -// plaintext, _ := cipherSigil.Out(ciphertext) +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) +// Example: ciphertext, _ := cipherSigil.In([]byte("payload")) +// Example: plaintext, _ := cipherSigil.Out(ciphertext) package sigil import ( diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 6173722..a6fc0ae 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -1,5 +1,3 @@ -// Package sqlite stores io.Medium content in SQLite. -// // Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) // Example: _ = medium.Write("config/app.yaml", "port: 8080") package sqlite From b80a1623731bb18c0c8893e1ac2e27903d4f613b Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:52:35 +0000 Subject: [PATCH 37/83] refactor(ax): rename placeholder test cases Co-Authored-By: Virgil --- datanode/medium_test.go | 6 +++--- local/medium_test.go | 4 ++-- node/node_test.go | 16 ++++++++-------- s3/s3_test.go | 4 ++-- sigil/sigil_test.go | 16 ++++++++-------- sqlite/sqlite_test.go | 2 +- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 89084eb..bd5e8ec 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -430,11 +430,11 @@ func TestClient_Exists_Good(t *testing.T) { assert.True(t, m.Exists("x")) } -func TestClient_ReadDir_Ugly(t *testing.T) { +func TestClient_ReadExistingFile_Good(t *testing.T) { m := New() - // Read from a file path (not a dir) should return empty or error require.NoError(t, m.Write("file.txt", "content")) - _, err := m.Read("file.txt") + got, err := m.Read("file.txt") require.NoError(t, err) + assert.Equal(t, "content", got) } diff --git 
a/local/medium_test.go b/local/medium_test.go index f353d68..506b3a5 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -438,7 +438,7 @@ func TestClient_WriteStream_Basic_Good(t *testing.T) { assert.Equal(t, "piped data", content) } -func TestClient_Path_TraversalAdvanced_Ugly(t *testing.T) { +func TestClient_Path_TraversalSandbox_Good(t *testing.T) { m := &Medium{filesystemRoot: "/sandbox"} // Multiple levels of traversal @@ -494,7 +494,7 @@ func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrPermission) } -func TestClient_EmptyPaths_Ugly(t *testing.T) { +func TestClient_EmptyPaths_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) diff --git a/node/node_test.go b/node/node_test.go index e0a95d8..934324d 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -53,7 +53,7 @@ func TestNode_AddData_Bad(t *testing.T) { assert.Empty(t, n.files, "directory entry must not be stored") } -func TestNode_AddData_Ugly(t *testing.T) { +func TestNode_AddData_EdgeCases_Good(t *testing.T) { t.Run("Overwrite", func(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -96,7 +96,7 @@ func TestNode_Open_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_Open_Ugly(t *testing.T) { +func TestNode_Open_Directory_Good(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) @@ -144,7 +144,7 @@ func TestNode_Stat_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_Stat_Ugly(t *testing.T) { +func TestNode_Stat_RootDirectory_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -175,7 +175,7 @@ func TestNode_ReadFile_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_ReadFile_Ugly(t *testing.T) { +func TestNode_ReadFile_ReturnsCopy_Good(t *testing.T) { n := New() n.AddData("data.bin", []byte("original")) @@ -222,7 +222,7 @@ func TestNode_ReadDir_Bad(t *testing.T) { assert.Equal(t, 
fs.ErrInvalid, pathErr.Err) } -func TestNode_ReadDir_Ugly(t *testing.T) { +func TestNode_ReadDir_IgnoresEmptyEntry_Good(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) n.AddData("empty_dir/", nil) // Ignored by AddData. @@ -250,7 +250,7 @@ func TestNode_Exists_Bad(t *testing.T) { assert.False(t, n.Exists("nonexistent")) } -func TestNode_Exists_Ugly(t *testing.T) { +func TestNode_Exists_RootAndEmptyPath_Good(t *testing.T) { n := New() n.AddData("dummy.txt", []byte("dummy")) @@ -293,7 +293,7 @@ func TestNode_WalkWithOptions_Default_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_WalkWithOptions_Default_Ugly(t *testing.T) { +func TestNode_WalkWithOptions_CallbackError_Good(t *testing.T) { n := New() n.AddData("a/b.txt", []byte("b")) n.AddData("a/c.txt", []byte("c")) @@ -405,7 +405,7 @@ func TestNode_CopyFile_Bad(t *testing.T) { assert.Error(t, err) } -func TestNode_CopyFile_Ugly(t *testing.T) { +func TestNode_CopyFile_DirectorySource_Bad(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) tmpfile := core.Path(t.TempDir(), "test.txt") diff --git a/s3/s3_test.go b/s3/s3_test.go index c4fad8e..692a893 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -655,8 +655,8 @@ func TestS3_ObjectKey_Good(t *testing.T) { assert.Equal(t, "pfx/", m2.objectKey("")) } -// Ugly: verify the Medium interface is satisfied at compile time. -func TestS3_InterfaceCompliance_Ugly(t *testing.T) { +// Compile-time check: Medium satisfies the io.Medium interface. 
+func TestS3_InterfaceCompliance(t *testing.T) { mock := newMockS3() m, err := New(Options{Bucket: "bucket", Client: mock}) require.NoError(t, err) diff --git a/sigil/sigil_test.go b/sigil/sigil_test.go index 98c9759..eaa6675 100644 --- a/sigil/sigil_test.go +++ b/sigil/sigil_test.go @@ -39,7 +39,7 @@ func TestSigil_ReverseSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestSigil_ReverseSigil_Ugly(t *testing.T) { +func TestSigil_ReverseSigil_NilInput_Good(t *testing.T) { s := &ReverseSigil{} // Nil input returns nil. @@ -82,7 +82,7 @@ func TestSigil_HexSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestSigil_HexSigil_Ugly(t *testing.T) { +func TestSigil_HexSigil_NilInput_Good(t *testing.T) { s := &HexSigil{} out, err := s.In(nil) @@ -124,7 +124,7 @@ func TestSigil_Base64Sigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, out) } -func TestSigil_Base64Sigil_Ugly(t *testing.T) { +func TestSigil_Base64Sigil_NilInput_Good(t *testing.T) { s := &Base64Sigil{} out, err := s.In(nil) @@ -170,7 +170,7 @@ func TestSigil_GzipSigil_Bad(t *testing.T) { assert.Equal(t, []byte{}, decompressed) } -func TestSigil_GzipSigil_Ugly(t *testing.T) { +func TestSigil_GzipSigil_NilInput_Good(t *testing.T) { s := &GzipSigil{} out, err := s.In(nil) @@ -218,7 +218,7 @@ func TestSigil_JSONSigil_Bad(t *testing.T) { assert.Error(t, err) } -func TestSigil_JSONSigil_Ugly(t *testing.T) { +func TestSigil_JSONSigil_NilInput_Good(t *testing.T) { s := &JSONSigil{Indent: false} // Nil input is passed through without error, matching the Sigil contract. @@ -289,7 +289,7 @@ func TestSigil_HashSigil_Bad(t *testing.T) { assert.Contains(t, err.Error(), "not available") } -func TestSigil_HashSigil_Ugly(t *testing.T) { +func TestSigil_HashSigil_EmptyInput_Good(t *testing.T) { // Hashing empty data should still produce a valid digest. 
s, err := NewSigil("sha256") require.NoError(t, err) @@ -328,7 +328,7 @@ func TestSigil_NewSigil_Bad(t *testing.T) { assert.Contains(t, err.Error(), "unknown sigil name") } -func TestSigil_NewSigil_Ugly(t *testing.T) { +func TestSigil_NewSigil_EmptyName_Bad(t *testing.T) { _, err := NewSigil("") assert.Error(t, err) } @@ -403,7 +403,7 @@ func TestSigil_Transmute_Bad(t *testing.T) { assert.Error(t, err) } -func TestSigil_Transmute_Ugly(t *testing.T) { +func TestSigil_Transmute_NilAndEmptyInput_Good(t *testing.T) { // Empty sigil chain is a no-op. data := []byte("unchanged") diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index dafbe98..99a521b 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -612,7 +612,7 @@ func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { // --- Interface Compliance --- -func TestSqlite_InterfaceCompliance_Ugly(t *testing.T) { +func TestSqlite_InterfaceCompliance(t *testing.T) { m := newTestMedium(t) // Verify all methods exist by asserting the interface shape. 
From f8988c51cb94709cdda13715d1758c6d8ec7de97 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 22:56:51 +0000 Subject: [PATCH 38/83] refactor(ax): tighten naming and comment surfaces --- datanode/medium.go | 1 - local/medium.go | 4 --- node/node.go | 5 --- s3/s3.go | 4 --- sqlite/sqlite.go | 16 --------- sqlite/sqlite_test.go | 2 -- workspace/service.go | 6 ++-- workspace/service_test.go | 72 +++++++++++++++++++-------------------- 8 files changed, 39 insertions(+), 71 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index bad0d75..574679c 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -356,7 +356,6 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { return nil, core.E("datanode.List", core.Concat("not found: ", filePath), fs.ErrNotExist) } - // Also include explicit subdirectories not discovered via files prefix := filePath if prefix != "" { prefix += "/" diff --git a/local/medium.go b/local/medium.go index b1c43ed..95aa5bb 100644 --- a/local/medium.go +++ b/local/medium.go @@ -177,9 +177,6 @@ func (medium *Medium) sandboxedPath(path string) string { return medium.filesystemRoot } - // If the path is relative and the medium is rooted at "/", - // treat it as relative to the current working directory. - // This makes io.Local behave more like the standard 'os' package. 
if medium.filesystemRoot == dirSeparator() && !core.PathIsAbs(normalisePath(path)) { return core.Path(currentWorkingDir(), normalisePath(path)) } @@ -222,7 +219,6 @@ func (medium *Medium) validatePath(path string) (string, error) { // Verify the resolved part is still within the root if !isWithinRoot(medium.filesystemRoot, realNext) { - // Security event: sandbox escape attempt logSandboxEscape(medium.filesystemRoot, path, realNext) return "", fs.ErrPermission } diff --git a/node/node.go b/node/node.go index 73f3b68..a62d88a 100644 --- a/node/node.go +++ b/node/node.go @@ -41,7 +41,6 @@ func (node *Node) AddData(name string, content []byte) { if name == "" { return } - // Directories are implicit, so we don't store them. if core.HasSuffix(name, "/") { return } @@ -159,7 +158,6 @@ func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOp } } - // Call the user's function first so the entry is visited. result := fn(entryPath, entry, err) // After visiting a directory at MaxDepth, prevent descending further. @@ -194,7 +192,6 @@ func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) sourcePath = core.TrimPrefix(sourcePath, "/") file, ok := node.files[sourcePath] if !ok { - // Check if it's a directory — can't copy directories this way. info, err := node.Stat(sourcePath) if err != nil { return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) @@ -257,7 +254,6 @@ func (node *Node) Open(name string) (fs.File, error) { if dataFile, ok := node.files[name]; ok { return &dataFileReader{file: dataFile}, nil } - // Check if it's a directory prefix := name + "/" if name == "." || name == "" { prefix = "" @@ -275,7 +271,6 @@ func (node *Node) Stat(name string) (fs.FileInfo, error) { if dataFile, ok := node.files[name]; ok { return dataFile.Stat() } - // Check if it's a directory prefix := name + "/" if name == "." 
|| name == "" { prefix = "" diff --git a/s3/s3.go b/s3/s3.go index 53366e6..384b8ce 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -329,7 +329,6 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { return nil, core.E("s3.List", core.Concat("failed to list objects: ", prefix), err) } - // Common prefixes are "directories" for _, commonPrefix := range listOutput.CommonPrefixes { if commonPrefix.Prefix == nil { continue @@ -351,7 +350,6 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { }) } - // Contents are "files" (excluding the prefix itself) for _, object := range listOutput.Contents { if object.Key == nil { continue @@ -517,7 +515,6 @@ func (medium *Medium) Exists(filePath string) bool { return false } - // Check as an exact object _, err := medium.client.HeadObject(context.Background(), &awss3.HeadObjectInput{ Bucket: aws.String(medium.bucket), Key: aws.String(key), @@ -526,7 +523,6 @@ func (medium *Medium) Exists(filePath string) bool { return true } - // Check as a "directory" prefix prefix := key if !core.HasSuffix(prefix, "/") { prefix += "/" diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index a6fc0ae..081e11c 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -53,13 +53,11 @@ func New(options Options) (*Medium, error) { return nil, core.E("sqlite.New", "failed to open database", err) } - // Enable WAL mode for better concurrency if _, err := database.Exec("PRAGMA journal_mode=WAL"); err != nil { database.Close() return nil, core.E("sqlite.New", "failed to set WAL mode", err) } - // Create the schema createSQL := `CREATE TABLE IF NOT EXISTS ` + medium.table + ` ( path TEXT PRIMARY KEY, content BLOB NOT NULL, @@ -141,7 +139,6 @@ func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) erro func (medium *Medium) EnsureDir(filePath string) error { key := normaliseEntryPath(filePath) if key == "" { - // Root always "exists" return nil } @@ -187,7 +184,6 @@ func (medium *Medium) Delete(filePath string) 
error { return core.E("sqlite.Delete", "path is required", fs.ErrInvalid) } - // Check if it's a directory with children var isDir bool err := medium.database.QueryRow( `SELECT is_dir FROM `+medium.table+` WHERE path = ?`, key, @@ -200,7 +196,6 @@ func (medium *Medium) Delete(filePath string) error { } if isDir { - // Check for children prefix := key + "/" var count int err := medium.database.QueryRow( @@ -234,7 +229,6 @@ func (medium *Medium) DeleteAll(filePath string) error { prefix := key + "/" - // Delete the exact path and all children res, err := medium.database.Exec( `DELETE FROM `+medium.table+` WHERE path = ? OR path LIKE ?`, key, prefix+"%", @@ -263,7 +257,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { } defer tx.Rollback() - // Check if source exists var content []byte var mode int var isDir bool @@ -278,7 +271,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { return core.E("sqlite.Rename", core.Concat("query failed: ", oldKey), err) } - // Insert or replace at new path _, err = tx.Exec( `INSERT INTO `+medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = excluded.is_dir, mtime = excluded.mtime`, @@ -288,13 +280,11 @@ func (medium *Medium) Rename(oldPath, newPath string) error { return core.E("sqlite.Rename", core.Concat("insert at new path failed: ", newKey), err) } - // Delete old path _, err = tx.Exec(`DELETE FROM `+medium.table+` WHERE path = ?`, oldKey) if err != nil { return core.E("sqlite.Rename", core.Concat("delete old path failed: ", oldKey), err) } - // If it's a directory, move all children if isDir { oldPrefix := oldKey + "/" newPrefix := newKey + "/" @@ -337,7 +327,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { } } - // Delete old children _, err = tx.Exec(`DELETE FROM `+medium.table+` WHERE path LIKE ?`, oldPrefix+"%") if err != nil { return core.E("sqlite.Rename", "delete old children failed", err) @@ -354,7 +343,6 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix += "/" } - // Query all paths under the prefix rows, err := medium.database.Query( `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ? 
OR path LIKE ?`, prefix+"%", prefix+"%", @@ -382,10 +370,8 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { continue } - // Check if this is a direct child or nested parts := core.SplitN(rest, "/", 2) if len(parts) == 2 { - // Nested - register as a directory dirName := parts[0] if !seen[dirName] { seen[dirName] = true @@ -401,7 +387,6 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { }) } } else { - // Direct child if !seen[rest] { seen[rest] = true entries = append(entries, &dirEntry{ @@ -550,7 +535,6 @@ func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { func (medium *Medium) Exists(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { - // Root always exists return true } diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 99a521b..dbe4f30 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -116,7 +116,6 @@ func TestSqlite_EnsureDir_Good(t *testing.T) { func TestSqlite_EnsureDir_EmptyPath_Good(t *testing.T) { m := newTestMedium(t) - // Root always exists, no-op err := m.EnsureDir("") assert.NoError(t, err) } @@ -579,7 +578,6 @@ func TestSqlite_Exists_Good(t *testing.T) { func TestSqlite_Exists_EmptyPath_Good(t *testing.T) { m := newTestMedium(t) - // Root always exists assert.True(t, m.Exists("")) } diff --git a/workspace/service.go b/workspace/service.go index a53a18a..8b767a3 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -99,9 +99,9 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil) } - for _, d := range []string{"config", "log", "data", "files", "keys"} { - if err := service.medium.EnsureDir(core.Path(workspaceDirectory, d)); err != nil { - return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", d), err) + for _, directoryName := range []string{"config", "log", "data", "files", 
"keys"} { + if err := service.medium.EnsureDir(core.Path(workspaceDirectory, directoryName)); err != nil { + return "", core.E("workspace.CreateWorkspace", core.Concat("failed to create directory: ", directoryName), err) } } diff --git a/workspace/service_test.go b/workspace/service_test.go index c707754..d218cd0 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -13,11 +13,11 @@ type stubCryptProvider struct { err error } -func (s stubCryptProvider) CreateKeyPair(_, _ string) (string, error) { - if s.err != nil { - return "", s.err +func (provider stubCryptProvider) CreateKeyPair(_, _ string) (string, error) { + if provider.err != nil { + return "", provider.err } - return s.key, nil + return provider.key, nil } func newTestService(t *testing.T) (*Service, string) { @@ -26,9 +26,9 @@ func newTestService(t *testing.T) (*Service, string) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - svc, err := New(Options{CryptProvider: stubCryptProvider{key: "private-key"}}) + service, err := New(Options{CryptProvider: stubCryptProvider{key: "private-key"}}) require.NoError(t, err) - return svc, tempHome + return service, tempHome } func TestService_New_MissingCryptProvider_Bad(t *testing.T) { @@ -37,9 +37,9 @@ func TestService_New_MissingCryptProvider_Bad(t *testing.T) { } func TestService_Workspace_RoundTrip_Good(t *testing.T) { - s, tempHome := newTestService(t) + service, tempHome := newTestService(t) - workspaceID, err := s.CreateWorkspace("test-user", "pass123") + workspaceID, err := service.CreateWorkspace("test-user", "pass123") require.NoError(t, err) assert.NotEmpty(t, workspaceID) @@ -48,55 +48,55 @@ func TestService_Workspace_RoundTrip_Good(t *testing.T) { assert.DirExists(t, core.Path(workspacePath, "keys")) assert.FileExists(t, core.Path(workspacePath, "keys", "private.key")) - err = s.SwitchWorkspace(workspaceID) + err = service.SwitchWorkspace(workspaceID) require.NoError(t, err) - assert.Equal(t, workspaceID, s.activeWorkspaceID) + 
assert.Equal(t, workspaceID, service.activeWorkspaceID) - err = s.WorkspaceFileSet("secret.txt", "top secret info") + err = service.WorkspaceFileSet("secret.txt", "top secret info") require.NoError(t, err) - got, err := s.WorkspaceFileGet("secret.txt") + got, err := service.WorkspaceFileGet("secret.txt") require.NoError(t, err) assert.Equal(t, "top secret info", got) } func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { - s, tempHome := newTestService(t) + service, tempHome := newTestService(t) outside := core.Path(tempHome, ".core", "escaped") - require.NoError(t, s.medium.EnsureDir(outside)) + require.NoError(t, service.medium.EnsureDir(outside)) - err := s.SwitchWorkspace("../escaped") + err := service.SwitchWorkspace("../escaped") require.Error(t, err) - assert.Empty(t, s.activeWorkspaceID) + assert.Empty(t, service.activeWorkspaceID) } func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { - s, tempHome := newTestService(t) + service, tempHome := newTestService(t) - workspaceID, err := s.CreateWorkspace("test-user", "pass123") + workspaceID, err := service.CreateWorkspace("test-user", "pass123") require.NoError(t, err) - require.NoError(t, s.SwitchWorkspace(workspaceID)) + require.NoError(t, service.SwitchWorkspace(workspaceID)) keyPath := core.Path(tempHome, ".core", "workspaces", workspaceID, "keys", "private.key") - before, err := s.medium.Read(keyPath) + before, err := service.medium.Read(keyPath) require.NoError(t, err) - err = s.WorkspaceFileSet("../keys/private.key", "hijack") + err = service.WorkspaceFileSet("../keys/private.key", "hijack") require.Error(t, err) - after, err := s.medium.Read(keyPath) + after, err := service.medium.Read(keyPath) require.NoError(t, err) assert.Equal(t, before, after) - _, err = s.WorkspaceFileGet("../keys/private.key") + _, err = service.WorkspaceFileGet("../keys/private.key") require.Error(t, err) } func TestService_HandleWorkspaceMessage_Good(t *testing.T) { - s, _ := newTestService(t) 
+ service, _ := newTestService(t) - create := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ + create := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceCreateAction, Identifier: "ipc-user", Password: "pass123", @@ -107,14 +107,14 @@ func TestService_HandleWorkspaceMessage_Good(t *testing.T) { require.True(t, ok) require.NotEmpty(t, workspaceID) - switchResult := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ + switchResult := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceSwitchAction, WorkspaceID: workspaceID, }) assert.True(t, switchResult.OK) - assert.Equal(t, workspaceID, s.activeWorkspaceID) + assert.Equal(t, workspaceID, service.activeWorkspaceID) - legacyCreate := s.HandleWorkspaceMessage(core.New(), map[string]any{ + legacyCreate := service.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceCreateAction, "identifier": "legacy-user", "password": "pass123", @@ -125,34 +125,34 @@ func TestService_HandleWorkspaceMessage_Good(t *testing.T) { require.True(t, ok) require.NotEmpty(t, legacyWorkspaceID) - legacySwitch := s.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ + legacySwitch := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceSwitchAction, WorkspaceID: legacyWorkspaceID, }) assert.True(t, legacySwitch.OK) - assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) + assert.Equal(t, legacyWorkspaceID, service.activeWorkspaceID) - rejectedLegacySwitch := s.HandleWorkspaceMessage(core.New(), map[string]any{ + rejectedLegacySwitch := service.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceSwitchAction, "name": workspaceID, }) assert.False(t, rejectedLegacySwitch.OK) - assert.Equal(t, legacyWorkspaceID, s.activeWorkspaceID) + assert.Equal(t, legacyWorkspaceID, service.activeWorkspaceID) - failedSwitch := s.HandleWorkspaceMessage(core.New(), map[string]any{ + failedSwitch := 
service.HandleWorkspaceMessage(core.New(), map[string]any{ "action": WorkspaceSwitchAction, "workspaceID": "missing", }) assert.False(t, failedSwitch.OK) - unknown := s.HandleWorkspaceMessage(core.New(), "noop") + unknown := service.HandleWorkspaceMessage(core.New(), "noop") assert.True(t, unknown.OK) } func TestService_HandleIPCEvents_Compatibility_Good(t *testing.T) { - s, _ := newTestService(t) + service, _ := newTestService(t) - result := s.HandleIPCEvents(core.New(), WorkspaceCommand{ + result := service.HandleIPCEvents(core.New(), WorkspaceCommand{ Action: WorkspaceCreateAction, Identifier: "compat-user", Password: "pass123", From 347c4b1b5702b5da8f776cce9cfc3fa17ec2f186 Mon Sep 17 00:00:00 2001 From: Virgil Date: Mon, 30 Mar 2026 23:02:53 +0000 Subject: [PATCH 39/83] refactor(ax): trim prose comments to examples --- datanode/medium.go | 33 ++++++--------------------------- doc.go | 10 ++++------ io.go | 7 ------- local/medium.go | 10 ---------- node/node.go | 28 ++-------------------------- s3/s3.go | 12 ------------ sigil/crypto_sigil.go | 42 +----------------------------------------- sigil/sigil.go | 11 ++++------- sigil/sigils.go | 40 +++++----------------------------------- sqlite/sqlite.go | 8 ++------ store/doc.go | 10 ++++------ store/medium.go | 4 +--- store/store.go | 2 -- workspace/doc.go | 2 -- 14 files changed, 29 insertions(+), 190 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 574679c..b05c647 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -1,9 +1,7 @@ -// Package datanode provides an io.Medium implementation backed by Borg's DataNode. 
-// -// medium := datanode.New() -// _ = medium.Write("jobs/run.log", "started") -// snapshot, _ := medium.Snapshot() -// restored, _ := datanode.FromTar(snapshot) +// medium := datanode.New() +// _ = medium.Write("jobs/run.log", "started") +// snapshot, _ := medium.Snapshot() +// restored, _ := datanode.FromTar(snapshot) package datanode import ( @@ -36,7 +34,7 @@ var ( // snapshot, _ := medium.Snapshot() type Medium struct { dataNode *borgdatanode.DataNode - directorySet map[string]bool // explicit directories that exist without file contents + directorySet map[string]bool lock sync.RWMutex } @@ -101,8 +99,6 @@ func normaliseEntryPath(filePath string) string { return filePath } -// --- io.Medium interface --- - func (medium *Medium) Read(filePath string) (string, error) { medium.lock.RLock() defer medium.lock.RUnlock() @@ -139,7 +135,6 @@ func (medium *Medium) Write(filePath, content string) error { } medium.dataNode.AddData(filePath, []byte(content)) - // ensure parent directories are tracked medium.ensureDirsLocked(path.Dir(filePath)) return nil } @@ -160,8 +155,6 @@ func (medium *Medium) EnsureDir(filePath string) error { return nil } -// ensureDirsLocked marks a directory and all ancestors as existing. -// Caller must hold medium.lock. func (medium *Medium) ensureDirsLocked(directoryPath string) { for directoryPath != "" && directoryPath != "." 
{ medium.directorySet[directoryPath] = true @@ -226,7 +219,6 @@ func (medium *Medium) Delete(filePath string) error { return nil } - // Remove the file by creating a new DataNode without it if err := medium.removeFileLocked(filePath); err != nil { return core.E("datanode.Delete", core.Concat("failed to delete file: ", filePath), err) } @@ -253,7 +245,6 @@ func (medium *Medium) DeleteAll(filePath string) error { found = true } - // Remove all files under prefix entries, err := medium.collectAllLocked() if err != nil { return core.E("datanode.DeleteAll", core.Concat("failed to inspect tree: ", filePath), err) @@ -267,7 +258,6 @@ func (medium *Medium) DeleteAll(filePath string) error { } } - // Remove explicit directories under prefix for directoryPath := range medium.directorySet { if directoryPath == filePath || core.HasPrefix(directoryPath, prefix) { delete(medium.directorySet, directoryPath) @@ -466,7 +456,7 @@ func (medium *Medium) Exists(filePath string) bool { filePath = normaliseEntryPath(filePath) if filePath == "" { - return true // root always exists + return true } _, err := medium.dataNode.Stat(filePath) if err == nil { @@ -490,9 +480,6 @@ func (medium *Medium) IsDir(filePath string) bool { return medium.directorySet[filePath] } -// --- internal helpers --- - -// hasPrefixLocked checks if any file path starts with prefix. Caller holds lock. func (medium *Medium) hasPrefixLocked(prefix string) (bool, error) { entries, err := medium.collectAllLocked() if err != nil { @@ -511,7 +498,6 @@ func (medium *Medium) hasPrefixLocked(prefix string) (bool, error) { return false, nil } -// collectAllLocked returns all file paths in the DataNode. Caller holds lock. 
func (medium *Medium) collectAllLocked() ([]string, error) { var names []string err := dataNodeWalkDir(medium.dataNode, ".", func(filePath string, entry fs.DirEntry, err error) error { @@ -542,9 +528,6 @@ func (medium *Medium) readFileLocked(filePath string) ([]byte, error) { return data, nil } -// removeFileLocked removes a single file by rebuilding the DataNode. -// This is necessary because Borg's DataNode doesn't expose a Remove method. -// Caller must hold medium.lock write lock. func (medium *Medium) removeFileLocked(target string) error { entries, err := medium.collectAllLocked() if err != nil { @@ -565,8 +548,6 @@ func (medium *Medium) removeFileLocked(target string) error { return nil } -// --- writeCloser buffers writes and flushes to DataNode on Close --- - type writeCloser struct { medium *Medium path string @@ -587,8 +568,6 @@ func (writer *writeCloser) Close() error { return nil } -// --- fs types for explicit directories --- - type dirEntry struct { name string } diff --git a/doc.go b/doc.go index 6b938f8..b94d1bd 100644 --- a/doc.go +++ b/doc.go @@ -1,7 +1,5 @@ -// Package io exposes CoreGO's storage surface. -// -// medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") -// backup, _ := io.NewSandboxed("/srv/backup") -// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") +// medium, _ := io.NewSandboxed("/srv/app") +// _ = medium.Write("config/app.yaml", "port: 8080") +// backup, _ := io.NewSandboxed("/srv/backup") +// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") package io diff --git a/io.go b/io.go index f4c4744..4c3074e 100644 --- a/io.go +++ b/io.go @@ -188,7 +188,6 @@ type MemoryMedium struct { modTimes map[string]time.Time } -// MockMedium is a compatibility alias for MemoryMedium. 
type MockMedium = MemoryMedium var _ Medium = (*MemoryMedium)(nil) @@ -203,8 +202,6 @@ func NewMemoryMedium() *MemoryMedium { } } -// NewMockMedium is a compatibility alias for NewMemoryMedium. -// // Example: medium := io.NewMockMedium() // _ = medium.Write("config/app.yaml", "port: 8080") func NewMockMedium() *MemoryMedium { @@ -396,14 +393,12 @@ func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } -// MemoryFile implements fs.File for MemoryMedium. type MemoryFile struct { name string content []byte offset int64 } -// MockFile is a compatibility alias for MemoryFile. type MockFile = MemoryFile func (file *MemoryFile) Stat() (fs.FileInfo, error) { @@ -423,14 +418,12 @@ func (file *MemoryFile) Close() error { return nil } -// MemoryWriteCloser implements WriteCloser for MemoryMedium. type MemoryWriteCloser struct { medium *MemoryMedium path string data []byte } -// MockWriteCloser is a compatibility alias for MemoryWriteCloser. type MockWriteCloser = MemoryWriteCloser func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { diff --git a/local/medium.go b/local/medium.go index 95aa5bb..a6bfde8 100644 --- a/local/medium.go +++ b/local/medium.go @@ -23,7 +23,6 @@ var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() // _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { absoluteRoot := absolutePath(root) - // Example: local.New("/srv/app") resolves macOS "/var" to "/private/var" before sandbox checks. if resolvedRoot, err := resolveSymlinksPath(absoluteRoot); err == nil { absoluteRoot = resolvedRoot } @@ -181,16 +180,12 @@ func (medium *Medium) sandboxedPath(path string) string { return core.Path(currentWorkingDir(), normalisePath(path)) } - // Use a cleaned absolute path to resolve all .. and . internally - // before joining with the root. This is a standard way to sandbox paths. 
clean := cleanSandboxPath(path) - // If root is "/", allow absolute paths through if medium.filesystemRoot == dirSeparator() { return clean } - // Join cleaned relative path with root return core.Path(medium.filesystemRoot, core.TrimPrefix(clean, dirSeparator())) } @@ -199,7 +194,6 @@ func (medium *Medium) validatePath(path string) (string, error) { return medium.sandboxedPath(path), nil } - // Split the cleaned path into components parts := splitPathParts(cleanSandboxPath(path)) current := medium.filesystemRoot @@ -208,16 +202,12 @@ func (medium *Medium) validatePath(path string) (string, error) { realNext, err := resolveSymlinksPath(next) if err != nil { if core.Is(err, syscall.ENOENT) { - // Part doesn't exist, we can't follow symlinks anymore. - // Since the path is already Cleaned and current is safe, - // appending a component to current will not escape. current = next continue } return "", err } - // Verify the resolved part is still within the root if !isWithinRoot(medium.filesystemRoot, realNext) { logSandboxEscape(medium.filesystemRoot, path, realNext) return "", fs.ErrPermission diff --git a/node/node.go b/node/node.go index a62d88a..95b7f2b 100644 --- a/node/node.go +++ b/node/node.go @@ -129,20 +129,14 @@ func (node *Node) WalkNode(root string, fn fs.WalkDirFunc) error { // Example: options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} type WalkOptions struct { - // MaxDepth limits how many directory levels to descend. 0 means unlimited. - MaxDepth int - // Filter, if set, is called for each entry. Return true to include the - // entry (and descend into it if it is a directory). - Filter func(entryPath string, entry fs.DirEntry) bool - // SkipErrors suppresses errors (e.g. nonexistent root) instead of - // propagating them through the callback. 
+ MaxDepth int + Filter func(entryPath string, entry fs.DirEntry) bool SkipErrors bool } // Example: _ = nodeTree.WalkWithOptions(".", callback, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { if options.SkipErrors { - // If root doesn't exist, silently return nil. if _, err := node.Stat(root); err != nil { return nil } @@ -160,7 +154,6 @@ func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOp result := fn(entryPath, entry, err) - // After visiting a directory at MaxDepth, prevent descending further. if result == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { rel := core.TrimPrefix(entryPath, root) rel = core.TrimPrefix(rel, "/") @@ -181,7 +174,6 @@ func (node *Node) ReadFile(name string) ([]byte, error) { if !ok { return nil, core.E("node.ReadFile", core.Concat("path not found: ", name), fs.ErrNotExist) } - // Return a copy to prevent callers from mutating internal state. 
result := make([]byte, len(file.content)) copy(result, file.content) return result, nil @@ -217,7 +209,6 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro } if !info.IsDir() { - // Single file copy file, ok := node.files[sourcePath] if !ok { return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist) @@ -225,7 +216,6 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro return target.Write(destPath, string(file.content)) } - // Directory: walk and copy all files underneath prefix := sourcePath if prefix != "" && !core.HasSuffix(prefix, "/") { prefix += "/" @@ -247,8 +237,6 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro return nil } -// ---------- Medium interface: fs.FS methods ---------- - func (node *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") if dataFile, ok := node.files[name]; ok { @@ -289,7 +277,6 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = "" } - // Disallow reading a file as a directory. 
if info, err := node.Stat(name); err == nil && !info.IsDir() { return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} } @@ -332,8 +319,6 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { return entries, nil } -// ---------- Medium interface: read/write ---------- - func (node *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") file, ok := node.files[filePath] @@ -365,8 +350,6 @@ func (node *Node) EnsureDir(_ string) error { return nil } -// ---------- Medium interface: existence checks ---------- - func (node *Node) Exists(filePath string) bool { _, err := node.Stat(filePath) return err == nil @@ -386,8 +369,6 @@ func (node *Node) IsDir(filePath string) bool { return info.IsDir() } -// ---------- Medium interface: mutations ---------- - func (node *Node) Delete(filePath string) error { filePath = core.TrimPrefix(filePath, "/") if _, ok := node.files[filePath]; ok { @@ -443,8 +424,6 @@ func (node *Node) List(filePath string) ([]fs.DirEntry, error) { return node.ReadDir(filePath) } -// ---------- Medium interface: streams ---------- - func (node *Node) Create(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") return &nodeWriter{node: node, path: filePath}, nil @@ -472,9 +451,6 @@ func (node *Node) WriteStream(filePath string) (goio.WriteCloser, error) { return node.Create(filePath) } -// ---------- Internal types ---------- - -// nodeWriter buffers writes and commits them to the Node on Close. type nodeWriter struct { node *Node path string diff --git a/s3/s3.go b/s3/s3.go index 384b8ce..bc55040 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,5 +1,3 @@ -// Package s3 stores io.Medium data in S3 objects. 
-// // Example: client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) // Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) // Example: _ = medium.Write("reports/daily.txt", "done") @@ -45,11 +43,8 @@ var _ coreio.Medium = (*Medium)(nil) // Example: medium, _ := s3.New(s3.Options{Bucket: "backups", Client: client, Prefix: "daily/"}) type Options struct { - // Bucket is the target S3 bucket name. Bucket string - // Client is the AWS S3 client or test double used for requests. Client Client - // Prefix is prepended to every object key. Prefix string } @@ -109,8 +104,6 @@ func New(options Options) (*Medium, error) { } func (medium *Medium) objectKey(filePath string) string { - // Clean the path using a leading "/" to sandbox traversal attempts, - // then strip the "/" prefix. This ensures ".." can't escape. clean := path.Clean("/" + filePath) if clean == "/" { clean = "" @@ -181,7 +174,6 @@ func (medium *Medium) IsFile(filePath string) bool { if key == "" { return false } - // A "file" in S3 is an object whose key does not end with "/" if core.HasSuffix(key, "/") { return false } @@ -223,7 +215,6 @@ func (medium *Medium) DeleteAll(filePath string) error { return core.E("s3.DeleteAll", "path is required", fs.ErrInvalid) } - // First, try deleting the exact key _, err := medium.client.DeleteObject(context.Background(), &awss3.DeleteObjectInput{ Bucket: aws.String(medium.bucket), Key: aws.String(key), @@ -232,7 +223,6 @@ func (medium *Medium) DeleteAll(filePath string) error { return core.E("s3.DeleteAll", core.Concat("failed to delete object: ", key), err) } - // Then delete all objects under the prefix prefix := key if !core.HasSuffix(prefix, "/") { prefix += "/" @@ -561,8 +551,6 @@ func (medium *Medium) IsDir(filePath string) bool { return len(listOutput.Contents) > 0 || len(listOutput.CommonPrefixes) > 0 } -// --- Internal types --- - type fileInfo struct { name string size int64 diff --git a/sigil/crypto_sigil.go 
b/sigil/crypto_sigil.go index f3a27c0..31947bc 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -14,35 +14,23 @@ import ( ) var ( - // InvalidKeyError is returned when the encryption key is not 32 bytes. InvalidKeyError = core.E("sigil.InvalidKeyError", "invalid key size, must be 32 bytes", nil) - // CiphertextTooShortError is returned when the ciphertext is too short to decrypt. CiphertextTooShortError = core.E("sigil.CiphertextTooShortError", "ciphertext too short", nil) - // DecryptionFailedError is returned when decryption or authentication fails. DecryptionFailedError = core.E("sigil.DecryptionFailedError", "decryption failed", nil) - // NoKeyConfiguredError is returned when no encryption key has been set. NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) ) -// PreObfuscator customises the bytes mixed in before and after encryption. type PreObfuscator interface { - // Obfuscate transforms plaintext before encryption using the provided entropy. - // The entropy is typically the encryption nonce, ensuring the transformation - // is unique per-encryption without additional random generation. Obfuscate(data []byte, entropy []byte) []byte - // Deobfuscate reverses the transformation after decryption. - // Must be called with the same entropy used during Obfuscate. Deobfuscate(data []byte, entropy []byte) []byte } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil(key) type XORObfuscator struct{} -// Obfuscate XORs the data with a key stream derived from the entropy. func (obfuscator *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -50,7 +38,6 @@ func (obfuscator *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { return obfuscator.transform(data, entropy) } -// Deobfuscate reverses the XOR transformation (XOR is symmetric). 
func (obfuscator *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -58,7 +45,6 @@ func (obfuscator *XORObfuscator) Deobfuscate(data []byte, entropy []byte) []byte return obfuscator.transform(data, entropy) } -// transform applies XOR with an entropy-derived key stream. func (obfuscator *XORObfuscator) transform(data []byte, entropy []byte) []byte { result := make([]byte, len(data)) keyStream := obfuscator.deriveKeyStream(entropy, len(data)) @@ -68,12 +54,10 @@ func (obfuscator *XORObfuscator) transform(data []byte, entropy []byte) []byte { return result } -// deriveKeyStream creates a deterministic key stream from entropy. func (obfuscator *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { stream := make([]byte, length) h := sha256.New() - // Generate key stream in 32-byte blocks blockNum := uint64(0) offset := 0 for offset < length { @@ -92,10 +76,8 @@ func (obfuscator *XORObfuscator) deriveKeyStream(entropy []byte, length int) []b return stream } -// ShuffleMaskObfuscator adds byte shuffling on top of XOR masking. type ShuffleMaskObfuscator struct{} -// Obfuscate shuffles bytes and applies a mask derived from entropy. 
func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -104,16 +86,13 @@ func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) result := make([]byte, len(data)) copy(result, data) - // Generate permutation and mask from entropy perm := obfuscator.generatePermutation(entropy, len(data)) mask := obfuscator.deriveMask(entropy, len(data)) - // Apply mask first, then shuffle for i := range result { result[i] ^= mask[i] } - // Shuffle using Fisher-Yates with deterministic seed shuffled := make([]byte, len(data)) for i, p := range perm { shuffled[i] = result[p] @@ -122,7 +101,6 @@ func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) return shuffled } -// Deobfuscate reverses the shuffle and mask operations. func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte) []byte { if len(data) == 0 { return data @@ -130,16 +108,13 @@ func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte result := make([]byte, len(data)) - // Generate permutation and mask from entropy perm := obfuscator.generatePermutation(entropy, len(data)) mask := obfuscator.deriveMask(entropy, len(data)) - // Unshuffle first for i, p := range perm { result[p] = data[i] } - // Remove mask for i := range result { result[i] ^= mask[i] } @@ -147,20 +122,17 @@ func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte return result } -// generatePermutation creates a deterministic permutation from entropy. 
func (obfuscator *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int { perm := make([]int, length) for i := range perm { perm[i] = i } - // Use entropy to seed a deterministic shuffle h := sha256.New() h.Write(entropy) h.Write([]byte("permutation")) seed := h.Sum(nil) - // Fisher-Yates shuffle with deterministic randomness for i := length - 1; i > 0; i-- { h.Reset() h.Write(seed) @@ -175,7 +147,6 @@ func (obfuscator *ShuffleMaskObfuscator) generatePermutation(entropy []byte, len return perm } -// deriveMask creates a mask byte array from entropy. func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { mask := make([]byte, length) h := sha256.New() @@ -199,12 +170,11 @@ func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) return mask } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil(key) // Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) type ChaChaPolySigil struct { Key []byte Obfuscator PreObfuscator - randomReader goio.Reader // for testing injection + randomReader goio.Reader } // Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) @@ -244,7 +214,6 @@ func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*Ch return cipherSigil, nil } -// In encrypts plaintext with the configured pre-obfuscator. 
func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { if sigil.Key == nil { return nil, NoKeyConfiguredError @@ -258,7 +227,6 @@ func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err) } - // Generate nonce nonce := make([]byte, aead.NonceSize()) reader := sigil.randomReader if reader == nil { @@ -268,21 +236,16 @@ func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { return nil, core.E("sigil.ChaChaPolySigil.In", "read nonce", err) } - // Pre-obfuscate the plaintext using nonce as entropy - // This ensures CPU encryption routines never see raw plaintext obfuscated := data if sigil.Obfuscator != nil { obfuscated = sigil.Obfuscator.Obfuscate(data, nonce) } - // Encrypt the obfuscated data - // Output: [nonce | ciphertext | auth tag] ciphertext := aead.Seal(nonce, nonce, obfuscated, nil) return ciphertext, nil } -// Out decrypts ciphertext and reverses the pre-obfuscation step. func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { if sigil.Key == nil { return nil, NoKeyConfiguredError @@ -301,17 +264,14 @@ func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return nil, CiphertextTooShortError } - // Extract nonce from ciphertext nonce := data[:aead.NonceSize()] ciphertext := data[aead.NonceSize():] - // Decrypt obfuscated, err := aead.Open(nil, nonce, ciphertext, nil) if err != nil { return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", DecryptionFailedError) } - // Deobfuscate using the same nonce as entropy plaintext := obfuscated if sigil.Obfuscator != nil { plaintext = sigil.Obfuscator.Deobfuscate(obfuscated, nonce) diff --git a/sigil/sigil.go b/sigil/sigil.go index 77df934..c75463c 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -1,14 +1,11 @@ -// Package sigil chains reversible byte transformations. 
-// -// hexSigil, _ := sigil.NewSigil("hex") -// gzipSigil, _ := sigil.NewSigil("gzip") -// encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) -// decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) +// hexSigil, _ := sigil.NewSigil("hex") +// gzipSigil, _ := sigil.NewSigil("gzip") +// encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) +// decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) package sigil import core "dappco.re/go/core" -// Sigil transforms byte slices. type Sigil interface { // Example: encoded, _ := hexSigil.In([]byte("payload")) In(data []byte) ([]byte, error) diff --git a/sigil/sigils.go b/sigil/sigils.go index 36d82df..41e15c5 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -20,11 +20,8 @@ import ( "golang.org/x/crypto/sha3" ) -// ReverseSigil is a Sigil that reverses the bytes of the payload. -// It is a symmetrical Sigil, meaning that the In and Out methods perform the same operation. type ReverseSigil struct{} -// In reverses the bytes of the data. func (sigil *ReverseSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -36,16 +33,12 @@ func (sigil *ReverseSigil) In(data []byte) ([]byte, error) { return reversed, nil } -// Out reverses the bytes of the data. func (sigil *ReverseSigil) Out(data []byte) ([]byte, error) { return sigil.In(data) } -// HexSigil is a Sigil that encodes/decodes data to/from hexadecimal. -// The In method encodes the data, and the Out method decodes it. type HexSigil struct{} -// In encodes the data to hexadecimal. func (sigil *HexSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -55,7 +48,6 @@ func (sigil *HexSigil) In(data []byte) ([]byte, error) { return dst, nil } -// Out decodes the data from hexadecimal. 
func (sigil *HexSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -65,11 +57,8 @@ func (sigil *HexSigil) Out(data []byte) ([]byte, error) { return dst, err } -// Base64Sigil is a Sigil that encodes/decodes data to/from base64. -// The In method encodes the data, and the Out method decodes it. type Base64Sigil struct{} -// In encodes the data to base64. func (sigil *Base64Sigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -79,7 +68,6 @@ func (sigil *Base64Sigil) In(data []byte) ([]byte, error) { return dst, nil } -// Out decodes the data from base64. func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -89,13 +77,10 @@ func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { return dst[:n], err } -// GzipSigil is a Sigil that compresses/decompresses data using gzip. -// The In method compresses the data, and the Out method decompresses it. type GzipSigil struct { outputWriter goio.Writer } -// In compresses the data using gzip. func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -115,7 +100,6 @@ func (sigil *GzipSigil) In(data []byte) ([]byte, error) { return b.Bytes(), nil } -// Out decompresses the data using gzip. func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -132,11 +116,8 @@ func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { return out, nil } -// JSONSigil is a Sigil that compacts or indents JSON data. -// The Out method is a no-op. type JSONSigil struct{ Indent bool } -// In compacts or indents the JSON data. func (sigil *JSONSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil @@ -158,27 +139,20 @@ func (sigil *JSONSigil) In(data []byte) ([]byte, error) { return []byte(compact), nil } -// Out is a no-op for JSONSigil. func (sigil *JSONSigil) Out(data []byte) ([]byte, error) { - // For simplicity, Out is a no-op. 
The primary use is formatting. return data, nil } -// HashSigil is a Sigil that hashes the data using a specified algorithm. -// The In method hashes the data, and the Out method is a no-op. type HashSigil struct { Hash crypto.Hash } -// Use NewHashSigil to hash payloads with a specific crypto.Hash. -// -// hashSigil := sigil.NewHashSigil(crypto.SHA256) -// digest, _ := hashSigil.In([]byte("payload")) +// hashSigil := sigil.NewHashSigil(crypto.SHA256) +// digest, _ := hashSigil.In([]byte("payload")) func NewHashSigil(h crypto.Hash) *HashSigil { return &HashSigil{Hash: h} } -// In hashes the data. func (sigil *HashSigil) In(data []byte) ([]byte, error) { var hasher goio.Writer switch sigil.Hash { @@ -219,7 +193,6 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) { case crypto.BLAKE2b_512: hasher, _ = blake2b.New512(nil) default: - // MD5SHA1 is not supported as a direct hash return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", nil) } @@ -227,16 +200,13 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) { return hasher.(interface{ Sum([]byte) []byte }).Sum(nil), nil } -// Out is a no-op for HashSigil. func (sigil *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } -// Use NewSigil("hex") or NewSigil("gzip") to construct a sigil by name. 
-// -// hexSigil, _ := sigil.NewSigil("hex") -// gzipSigil, _ := sigil.NewSigil("gzip") -// transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) +// hexSigil, _ := sigil.NewSigil("hex") +// gzipSigil, _ := sigil.NewSigil("gzip") +// transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func NewSigil(name string) (Sigil, error) { switch name { case "reverse": diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 081e11c..0552d94 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -13,7 +13,7 @@ import ( core "dappco.re/go/core" coreio "dappco.re/go/core/io" - _ "modernc.org/sqlite" // Pure Go SQLite driver + _ "modernc.org/sqlite" ) // Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) @@ -26,9 +26,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) type Options struct { - // Path is the SQLite database path. Use ":memory:" for tests. - Path string - // Table is the table name used for file storage. Empty defaults to "files". + Path string Table string } @@ -564,8 +562,6 @@ func (medium *Medium) IsDir(filePath string) bool { return isDir } -// --- Internal types --- - type fileInfo struct { name string size int64 diff --git a/store/doc.go b/store/doc.go index 5101af0..2851aea 100644 --- a/store/doc.go +++ b/store/doc.go @@ -1,7 +1,5 @@ -// Package store maps grouped keys onto SQLite rows. 
-// -// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("app", "theme", "midnight") -// medium := keyValueStore.AsMedium() -// _ = medium.Write("app/theme", "midnight") +// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// _ = keyValueStore.Set("app", "theme", "midnight") +// medium := keyValueStore.AsMedium() +// _ = medium.Write("app/theme", "midnight") package store diff --git a/store/medium.go b/store/medium.go index b33c937..20a32c4 100644 --- a/store/medium.go +++ b/store/medium.go @@ -172,7 +172,7 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { } if key != "" { - return nil, nil // leaf node, nothing beneath + return nil, nil } all, err := medium.store.GetAll(group) @@ -276,8 +276,6 @@ func (medium *Medium) IsDir(entryPath string) bool { return err == nil && entryCount > 0 } -// --- fs helper types --- - type keyValueFileInfo struct { name string size int64 diff --git a/store/store.go b/store/store.go index f82216c..6afebc5 100644 --- a/store/store.go +++ b/store/store.go @@ -11,7 +11,6 @@ import ( ) // Example: _, err := keyValueStore.Get("app", "theme") -// err matches store.NotFoundError when the key is missing. var NotFoundError = errors.New("key not found") // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) @@ -20,7 +19,6 @@ type Store struct { } type Options struct { - // Path is the SQLite database path. Use ":memory:" for tests. Path string } diff --git a/workspace/doc.go b/workspace/doc.go index b7e301b..f949c37 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,5 +1,3 @@ -// Package workspace creates encrypted workspaces on top of io.Medium. 
-// // Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) // workspaceID, _ := service.CreateWorkspace("alice", "pass123") // _ = service.SwitchWorkspace(workspaceID) From 32cfabb5e0aee9757b128335d45d64ac98a23456 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:10:35 +0000 Subject: [PATCH 40/83] refactor(ax): normalize remaining usage examples Co-Authored-By: Virgil --- datanode/medium.go | 16 +++++------ doc.go | 8 +++--- go.sum | 62 ------------------------------------------- io.go | 14 +++++----- local/medium.go | 4 +-- node/node.go | 4 +-- s3/s3.go | 4 +-- sigil/crypto_sigil.go | 16 +++++------ sigil/sigil.go | 8 +++--- sigil/sigils.go | 10 +++---- store/doc.go | 8 +++--- store/medium.go | 12 ++++----- store/store.go | 6 ++--- workspace/doc.go | 6 ++--- workspace/service.go | 4 +-- 15 files changed, 59 insertions(+), 123 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index b05c647..47ce247 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -1,7 +1,7 @@ -// medium := datanode.New() -// _ = medium.Write("jobs/run.log", "started") -// snapshot, _ := medium.Snapshot() -// restored, _ := datanode.FromTar(snapshot) +// Example: medium := datanode.New() +// Example: _ = medium.Write("jobs/run.log", "started") +// Example: snapshot, _ := medium.Snapshot() +// Example: restored, _ := datanode.FromTar(snapshot) package datanode import ( @@ -30,8 +30,8 @@ var ( ) // Example: medium := datanode.New() -// _ = medium.Write("jobs/run.log", "started") -// snapshot, _ := medium.Snapshot() +// Example: _ = medium.Write("jobs/run.log", "started") +// Example: snapshot, _ := medium.Snapshot() type Medium struct { dataNode *borgdatanode.DataNode directorySet map[string]bool @@ -46,8 +46,8 @@ func New() *Medium { } // Example: sourceMedium := datanode.New() -// snapshot, _ := sourceMedium.Snapshot() -// restored, _ := datanode.FromTar(snapshot) +// Example: snapshot, _ := sourceMedium.Snapshot() +// 
Example: restored, _ := datanode.FromTar(snapshot) func FromTar(data []byte) (*Medium, error) { dataNode, err := borgdatanode.FromTar(data) if err != nil { diff --git a/doc.go b/doc.go index b94d1bd..de19db1 100644 --- a/doc.go +++ b/doc.go @@ -1,5 +1,5 @@ -// medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") -// backup, _ := io.NewSandboxed("/srv/backup") -// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") +// Example: medium, _ := io.NewSandboxed("/srv/app") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") +// Example: backup, _ := io.NewSandboxed("/srv/backup") +// Example: _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") package io diff --git a/go.sum b/go.sum index 0cd917c..0164e68 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,7 @@ dappco.re/go/core v0.8.0-alpha.1 h1:gj7+Scv+L63Z7wMxbJYHhaRFkHJo2u4MMPuUSv/Dhtk= dappco.re/go/core v0.8.0-alpha.1/go.mod h1:f2/tBZ3+3IqDrg2F5F598llv0nmb/4gJVCFzM5geE4A= -dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= forge.lthn.ai/Snider/Borg v0.3.1 h1:gfC1ZTpLoZai07oOWJiVeQ8+qJYK8A795tgVGJHbVL8= forge.lthn.ai/Snider/Borg v0.3.1/go.mod h1:Z7DJD0yHXsxSyM7Mjl6/g4gH1NBsIz44Bf5AFlV76Wg= -forge.lthn.ai/Snider/Enchantrix v0.0.4/go.mod h1:OGCwuVeZPq3OPe2h6TX/ZbgEjHU6B7owpIBeXQGbSe0= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/aws/aws-sdk-go-v2 v1.41.4 h1:10f50G7WyU02T56ox1wWXq+zTX9I1zxG46HYuG1hH/k= github.com/aws/aws-sdk-go-v2 v1.41.4/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.7 h1:3kGOqnh1pPeddVa/E37XNTaWJ8W6vrbYV9lJEkCnhuY= @@ -28,105 +24,47 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod 
h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= -github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= -github.com/clipperhouse/uax29/v2 v2.4.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= -github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= -github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= -github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v39 v39.2.0/go.mod 
h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jchv/go-winloader v0.0.0-20250406163304-c1995be93bd1/go.mod h1:alcuEEnZsY1WQsagKhZDsoPCRoOijYqhZvPwLG0kzVs= -github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= -github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.13.3/go.mod h1:o90YNEeQWjDozo584l7AwhJMHN0bOC4tAfg+Xox9q5g= -github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= -github.com/leaanthony/go-ansi-parser v1.6.1/go.mod 
h1:+vva/2y4alzVmmIEpk9QDhA7vLC5zKDTRwfZGOp3IWU= -github.com/leaanthony/gosod v1.0.4/go.mod h1:GKuIL0zzPj3O1SdWQOdgURSuhkF+Urizzxh26t9f1cw= -github.com/leaanthony/slicer v1.6.0/go.mod h1:o/Iz29g7LN0GqH3aMjWAe90381nyZlDNquK+mtH2Fj8= -github.com/leaanthony/u v1.1.1/go.mod h1:9+o6hejoRljvZ3BzdYlVL0JYCwtnAsVuN9pVTQcaRfI= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= -github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= -github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= -github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tkrajina/go-reflector v0.5.8/go.mod h1:ECbqLgccecY5kPmPmXg1MrHW585yMcDkVl6IvJe64T4= -github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/wailsapp/go-webview2 v1.0.23/go.mod h1:qJmWAmAmaniuKGZPWwne+uor3AHMB5PFhqiK0Bbj8kc= -github.com/wailsapp/mimetype v1.4.1/go.mod h1:9aV5k31bBOv5z6u+QP8TltzvNGJPmNJD4XlAL3U+j3o= -github.com/wailsapp/wails/v2 v2.11.0/go.mod h1:jrf0ZaM6+GBc1wRmXsM8cIvzlg0karYin3erahI4+0k= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.34.0 
h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= -golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= -golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= -golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA= golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= diff --git a/io.go b/io.go index 4c3074e..0d9a32c 100644 --- a/io.go +++ b/io.go @@ -11,9 +11,9 @@ import ( ) // Example: medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") -// backup, _ := 
io.NewSandboxed("/srv/backup") -// _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") +// Example: backup, _ := io.NewSandboxed("/srv/backup") +// Example: _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") type Medium interface { Read(path string) (string, error) @@ -133,7 +133,7 @@ func init() { } // Example: medium, _ := io.NewSandboxed("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func NewSandboxed(root string) (Medium, error) { return local.New(root) } @@ -181,7 +181,7 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath } // Example: medium := io.NewMemoryMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") type MemoryMedium struct { files map[string]string dirs map[string]bool @@ -193,7 +193,7 @@ type MockMedium = MemoryMedium var _ Medium = (*MemoryMedium)(nil) // Example: medium := io.NewMemoryMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func NewMemoryMedium() *MemoryMedium { return &MemoryMedium{ files: make(map[string]string), @@ -203,7 +203,7 @@ func NewMemoryMedium() *MemoryMedium { } // Example: medium := io.NewMockMedium() -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func NewMockMedium() *MemoryMedium { return NewMemoryMedium() } diff --git a/local/medium.go b/local/medium.go index a6bfde8..ad57ed4 100644 --- a/local/medium.go +++ b/local/medium.go @@ -12,7 +12,7 @@ import ( ) // Example: medium, _ := local.New("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") type Medium struct { filesystemRoot string } @@ -20,7 +20,7 @@ type Medium struct { 
var unrestrictedFileSystem = (&core.Fs{}).NewUnrestricted() // Example: medium, _ := local.New("/srv/app") -// _ = medium.Write("config/app.yaml", "port: 8080") +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func New(root string) (*Medium, error) { absoluteRoot := absolutePath(root) if resolvedRoot, err := resolveSymlinksPath(absoluteRoot); err == nil { diff --git a/node/node.go b/node/node.go index 95b7f2b..541973d 100644 --- a/node/node.go +++ b/node/node.go @@ -346,7 +346,7 @@ func (node *Node) FileSet(filePath, content string) error { } // Example: _ = nodeTree.EnsureDir("config") -func (node *Node) EnsureDir(_ string) error { +func (node *Node) EnsureDir(directoryPath string) error { return nil } @@ -479,7 +479,7 @@ type dataFile struct { func (file *dataFile) Stat() (fs.FileInfo, error) { return &dataFileInfo{file: file}, nil } -func (file *dataFile) Read(_ []byte) (int, error) { return 0, goio.EOF } +func (file *dataFile) Read(buffer []byte) (int, error) { return 0, goio.EOF } func (file *dataFile) Close() error { return nil } diff --git a/s3/s3.go b/s3/s3.go index bc55040..456defc 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -159,12 +159,12 @@ func (medium *Medium) Write(filePath, content string) error { } // Example: _ = medium.WriteMode("keys/private.key", key, 0600) -func (medium *Medium) WriteMode(filePath, content string, _ fs.FileMode) error { +func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { return medium.Write(filePath, content) } // Example: _ = medium.EnsureDir("reports/2026") -func (medium *Medium) EnsureDir(_ string) error { +func (medium *Medium) EnsureDir(directoryPath string) error { return nil } diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 31947bc..98537ed 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -178,8 +178,8 @@ type ChaChaPolySigil struct { } // Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) -// 
ciphertext, _ := cipherSigil.In([]byte("payload")) -// plaintext, _ := cipherSigil.Out(ciphertext) +// Example: ciphertext, _ := cipherSigil.In([]byte("payload")) +// Example: plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, InvalidKeyError @@ -196,13 +196,11 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { } // Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator( -// -// []byte("0123456789abcdef0123456789abcdef"), -// &sigil.ShuffleMaskObfuscator{}, -// -// ) -// ciphertext, _ := cipherSigil.In([]byte("payload")) -// plaintext, _ := cipherSigil.Out(ciphertext) +// Example: []byte("0123456789abcdef0123456789abcdef"), +// Example: &sigil.ShuffleMaskObfuscator{}, +// Example: ) +// Example: ciphertext, _ := cipherSigil.In([]byte("payload")) +// Example: plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { cipherSigil, err := NewChaChaPolySigil(key) if err != nil { diff --git a/sigil/sigil.go b/sigil/sigil.go index c75463c..f760fd0 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -1,7 +1,7 @@ -// hexSigil, _ := sigil.NewSigil("hex") -// gzipSigil, _ := sigil.NewSigil("gzip") -// encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) -// decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) +// Example: hexSigil, _ := sigil.NewSigil("hex") +// Example: gzipSigil, _ := sigil.NewSigil("gzip") +// Example: encoded, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) +// Example: decoded, _ := sigil.Untransmute(encoded, []sigil.Sigil{hexSigil, gzipSigil}) package sigil import core "dappco.re/go/core" diff --git a/sigil/sigils.go b/sigil/sigils.go index 41e15c5..1dd9983 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -147,8 +147,8 @@ type HashSigil struct { Hash crypto.Hash } 
-// hashSigil := sigil.NewHashSigil(crypto.SHA256) -// digest, _ := hashSigil.In([]byte("payload")) +// Example: hashSigil := sigil.NewHashSigil(crypto.SHA256) +// Example: digest, _ := hashSigil.In([]byte("payload")) func NewHashSigil(h crypto.Hash) *HashSigil { return &HashSigil{Hash: h} } @@ -204,9 +204,9 @@ func (sigil *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } -// hexSigil, _ := sigil.NewSigil("hex") -// gzipSigil, _ := sigil.NewSigil("gzip") -// transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) +// Example: hexSigil, _ := sigil.NewSigil("hex") +// Example: gzipSigil, _ := sigil.NewSigil("gzip") +// Example: transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func NewSigil(name string) (Sigil, error) { switch name { case "reverse": diff --git a/store/doc.go b/store/doc.go index 2851aea..37c0af2 100644 --- a/store/doc.go +++ b/store/doc.go @@ -1,5 +1,5 @@ -// keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("app", "theme", "midnight") -// medium := keyValueStore.AsMedium() -// _ = medium.Write("app/theme", "midnight") +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +// Example: _ = keyValueStore.Set("app", "theme", "midnight") +// Example: medium := keyValueStore.AsMedium() +// Example: _ = medium.Write("app/theme", "midnight") package store diff --git a/store/medium.go b/store/medium.go index 20a32c4..96c2384 100644 --- a/store/medium.go +++ b/store/medium.go @@ -11,9 +11,9 @@ import ( ) // Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) -// _ = medium.Write("app/theme", "midnight") -// entries, _ := medium.List("") -// entries, _ := medium.List("app") +// Example: _ = medium.Write("app/theme", "midnight") +// Example: entries, _ := medium.List("") +// Example: entries, _ := medium.List("app") type Medium struct { store *Store } @@ -21,7 +21,7 @@ type Medium struct { var _ 
coreio.Medium = (*Medium)(nil) // Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) -// _ = medium.Write("app/theme", "midnight") +// Example: _ = medium.Write("app/theme", "midnight") func NewMedium(options Options) (*Medium, error) { store, err := New(options) if err != nil { @@ -75,12 +75,12 @@ func (medium *Medium) Write(entryPath, content string) error { } // Example: _ = medium.WriteMode("app/theme", "midnight", 0600) -func (medium *Medium) WriteMode(entryPath, content string, _ fs.FileMode) error { +func (medium *Medium) WriteMode(entryPath, content string, mode fs.FileMode) error { return medium.Write(entryPath, content) } // Example: _ = medium.EnsureDir("app") -func (medium *Medium) EnsureDir(_ string) error { +func (medium *Medium) EnsureDir(entryPath string) error { return nil } diff --git a/store/store.go b/store/store.go index 6afebc5..2f15491 100644 --- a/store/store.go +++ b/store/store.go @@ -23,7 +23,7 @@ type Options struct { } // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("app", "theme", "midnight") +// Example: _ = keyValueStore.Set("app", "theme", "midnight") func New(options Options) (*Store, error) { if options.Path == "" { return nil, core.E("store.New", "database path is required", fs.ErrInvalid) @@ -131,8 +131,8 @@ func (store *Store) GetAll(group string) (map[string]string, error) { } // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -// _ = keyValueStore.Set("user", "name", "alice") -// out, _ := keyValueStore.Render("hello {{ .name }}", "user") +// Example: _ = keyValueStore.Set("user", "name", "alice") +// Example: out, _ := keyValueStore.Render("hello {{ .name }}", "user") func (store *Store) Render(templateText, group string) (string, error) { rows, err := store.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) if err != nil { diff --git a/workspace/doc.go b/workspace/doc.go index f949c37..a817e1c 100644 --- 
a/workspace/doc.go +++ b/workspace/doc.go @@ -1,5 +1,5 @@ // Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) -// workspaceID, _ := service.CreateWorkspace("alice", "pass123") -// _ = service.SwitchWorkspace(workspaceID) -// _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") +// Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") +// Example: _ = service.SwitchWorkspace(workspaceID) +// Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") package workspace diff --git a/workspace/service.go b/workspace/service.go index 8b767a3..f094deb 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -54,7 +54,7 @@ type Service struct { var _ Workspace = (*Service)(nil) // Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) -// workspaceID, _ := service.CreateWorkspace("alice", "pass123") +// Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := resolveWorkspaceHomeDirectory() if home == "" { @@ -193,7 +193,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) // Example: legacy := service.HandleWorkspaceMessage(core.New(), map[string]any{"action": WorkspaceCreateAction, "identifier": "alice", "password": "pass123"}) -func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { +func (service *Service) HandleWorkspaceMessage(coreRuntime *core.Core, message core.Message) core.Result { command, ok := workspaceCommandFromMessage(message) if !ok { return core.Result{OK: true} From 6aa96dc7b79804f50d62ae8dac42ee7625750a27 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:18:17 +0000 Subject: [PATCH 41/83] refactor(ax): align remaining example names and walk APIs --- 
datanode/medium_test.go | 10 +++++----- io.go | 6 +++--- node/node.go | 26 ++++++++++++++++++-------- node/node_test.go | 22 +++++++++++----------- s3/s3.go | 6 +++--- s3/s3_test.go | 6 +++--- sigil/crypto_sigil.go | 18 +++++++++++++----- sigil/crypto_sigil_test.go | 18 +++++++++--------- 8 files changed, 65 insertions(+), 47 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index bd5e8ec..77e68af 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -94,14 +94,14 @@ func TestClient_Delete_Good(t *testing.T) { } func TestClient_Delete_Bad(t *testing.T) { - m := New() + medium := New() - // Example: m.Delete("ghost.txt") - assert.Error(t, m.Delete("ghost.txt")) + // Example: medium.Delete("ghost.txt") + assert.Error(t, medium.Delete("ghost.txt")) // Delete non-empty dir - require.NoError(t, m.Write("dir/file.txt", "content")) - assert.Error(t, m.Delete("dir")) + require.NoError(t, medium.Write("dir/file.txt", "content")) + assert.Error(t, medium.Delete("dir")) } func TestClient_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { diff --git a/io.go b/io.go index 0d9a32c..f10901b 100644 --- a/io.go +++ b/io.go @@ -52,10 +52,10 @@ type Medium interface { // Example: writer, _ := medium.WriteStream("logs/app.log") WriteStream(path string) (goio.WriteCloser, error) - // Example: ok := medium.Exists("config/app.yaml") + // Example: exists := medium.Exists("config/app.yaml") Exists(path string) bool - // Example: ok := medium.IsDir("config") + // Example: isDirectory := medium.IsDir("config") IsDir(path string) bool } @@ -163,7 +163,7 @@ func EnsureDir(medium Medium, path string) error { return medium.EnsureDir(path) } -// Example: ok := io.IsFile(medium, "config/app.yaml") +// Example: isFile := io.IsFile(medium, "config/app.yaml") func IsFile(medium Medium, path string) bool { return medium.IsFile(path) } diff --git a/node/node.go b/node/node.go index 541973d..45de545 100644 --- a/node/node.go +++ b/node/node.go @@ -124,7 
+124,7 @@ func (node *Node) LoadTar(data []byte) error { // Example: _ = nodeTree.WalkNode("config", func(_ string, _ fs.DirEntry, _ error) error { return nil }) func (node *Node) WalkNode(root string, fn fs.WalkDirFunc) error { - return fs.WalkDir(node, root, fn) + return node.Walk(root, fn) } // Example: options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} @@ -134,17 +134,22 @@ type WalkOptions struct { SkipErrors bool } -// Example: _ = nodeTree.WalkWithOptions(".", callback, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) -func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { - if options.SkipErrors { +// Example: _ = nodeTree.Walk(".", func(_ string, _ fs.DirEntry, _ error) error { return nil }, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) +func (node *Node) Walk(root string, fn fs.WalkDirFunc, options ...WalkOptions) error { + walkOptions := WalkOptions{} + if len(options) > 0 { + walkOptions = options[0] + } + + if walkOptions.SkipErrors { if _, err := node.Stat(root); err != nil { return nil } } return fs.WalkDir(node, root, func(entryPath string, entry fs.DirEntry, err error) error { - if options.Filter != nil && err == nil { - if !options.Filter(entryPath, entry) { + if walkOptions.Filter != nil && err == nil { + if !walkOptions.Filter(entryPath, entry) { if entry != nil && entry.IsDir() { return fs.SkipDir } @@ -154,11 +159,11 @@ func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOp result := fn(entryPath, entry, err) - if result == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { + if result == nil && walkOptions.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { rel := core.TrimPrefix(entryPath, root) rel = core.TrimPrefix(rel, "/") depth := len(core.Split(rel, "/")) - if depth >= options.MaxDepth { + if depth >= walkOptions.MaxDepth { return fs.SkipDir } } @@ -167,6 +172,11 @@ func (node *Node) WalkWithOptions(root 
string, fn fs.WalkDirFunc, options WalkOp }) } +// Example: _ = nodeTree.WalkWithOptions(".", func(_ string, _ fs.DirEntry, _ error) error { return nil }, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) +func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { + return node.Walk(root, fn, options) +} + // Example: content, _ := nodeTree.ReadFile("config/app.yaml") func (node *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") diff --git a/node/node_test.go b/node/node_test.go index 934324d..8880c4e 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -259,17 +259,17 @@ func TestNode_Exists_RootAndEmptyPath_Good(t *testing.T) { } // --------------------------------------------------------------------------- -// WalkWithOptions +// Walk // --------------------------------------------------------------------------- -func TestNode_WalkWithOptions_Default_Good(t *testing.T) { +func TestNode_Walk_Default_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) n.AddData("bar/qux.txt", []byte("qux")) var paths []string - err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { + err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{}) @@ -279,11 +279,11 @@ func TestNode_WalkWithOptions_Default_Good(t *testing.T) { assert.Equal(t, []string{".", "bar", "bar/baz.txt", "bar/qux.txt", "foo.txt"}, paths) } -func TestNode_WalkWithOptions_Default_Bad(t *testing.T) { +func TestNode_Walk_Default_Bad(t *testing.T) { n := New() var called bool - err := n.WalkWithOptions("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) @@ -293,7 +293,7 @@ func TestNode_WalkWithOptions_Default_Bad(t *testing.T) { 
assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestNode_WalkWithOptions_CallbackError_Good(t *testing.T) { +func TestNode_Walk_CallbackError_Good(t *testing.T) { n := New() n.AddData("a/b.txt", []byte("b")) n.AddData("a/c.txt", []byte("c")) @@ -301,7 +301,7 @@ func TestNode_WalkWithOptions_CallbackError_Good(t *testing.T) { // Stop walk early with a custom error. walkErr := core.NewError("stop walking") var paths []string - err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { + err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { if p == "a/b.txt" { return walkErr } @@ -312,7 +312,7 @@ func TestNode_WalkWithOptions_CallbackError_Good(t *testing.T) { assert.Equal(t, walkErr, err, "Walk must propagate the callback error") } -func TestNode_WalkWithOptions_Good(t *testing.T) { +func TestNode_Walk_Good(t *testing.T) { n := New() n.AddData("root.txt", []byte("root")) n.AddData("a/a1.txt", []byte("a1")) @@ -321,7 +321,7 @@ func TestNode_WalkWithOptions_Good(t *testing.T) { t.Run("MaxDepth", func(t *testing.T) { var paths []string - err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { + err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{MaxDepth: 1}) @@ -333,7 +333,7 @@ func TestNode_WalkWithOptions_Good(t *testing.T) { t.Run("Filter", func(t *testing.T) { var paths []string - err := n.WalkWithOptions(".", func(p string, d fs.DirEntry, err error) error { + err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{Filter: func(p string, d fs.DirEntry) bool { @@ -347,7 +347,7 @@ func TestNode_WalkWithOptions_Good(t *testing.T) { t.Run("SkipErrors", func(t *testing.T) { var called bool - err := n.WalkWithOptions("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true return err }, 
WalkOptions{SkipErrors: true}) diff --git a/s3/s3.go b/s3/s3.go index 456defc..5ca8fad 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -168,7 +168,7 @@ func (medium *Medium) EnsureDir(directoryPath string) error { return nil } -// Example: ok := medium.IsFile("reports/daily.txt") +// Example: isFile := medium.IsFile("reports/daily.txt") func (medium *Medium) IsFile(filePath string) bool { key := medium.objectKey(filePath) if key == "" { @@ -498,7 +498,7 @@ func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return medium.Create(filePath) } -// Example: ok := medium.Exists("reports/daily.txt") +// Example: exists := medium.Exists("reports/daily.txt") func (medium *Medium) Exists(filePath string) bool { key := medium.objectKey(filePath) if key == "" { @@ -528,7 +528,7 @@ func (medium *Medium) Exists(filePath string) bool { return len(listOutput.Contents) > 0 || len(listOutput.CommonPrefixes) > 0 } -// Example: ok := medium.IsDir("reports") +// Example: isDirectory := medium.IsDir("reports") func (medium *Medium) IsDir(filePath string) bool { key := medium.objectKey(filePath) if key == "" { diff --git a/s3/s3_test.go b/s3/s3_test.go index 692a893..8334eab 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -294,9 +294,9 @@ func TestS3_ReadWrite_Prefix_Good(t *testing.T) { } func TestS3_EnsureDir_Good(t *testing.T) { - m, _ := newTestMedium(t) - // Example: err := m.EnsureDir("any/path") - err := m.EnsureDir("any/path") + medium, _ := newTestMedium(t) + // Example: err := medium.EnsureDir("any/path") + err := medium.EnsureDir("any/path") assert.NoError(t, err) } diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 98537ed..798c24f 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -170,7 +170,10 @@ func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) return mask } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) +// Example: cipherSigil, _ := 
sigil.NewChaChaPolySigil( +// Example: key, +// Example: &sigil.ShuffleMaskObfuscator{}, +// Example: ) type ChaChaPolySigil struct { Key []byte Obfuscator PreObfuscator @@ -180,7 +183,7 @@ type ChaChaPolySigil struct { // Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) // Example: ciphertext, _ := cipherSigil.In([]byte("payload")) // Example: plaintext, _ := cipherSigil.Out(ciphertext) -func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { +func NewChaChaPolySigil(key []byte, obfuscators ...PreObfuscator) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, InvalidKeyError } @@ -188,21 +191,26 @@ func NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) { keyCopy := make([]byte, 32) copy(keyCopy, key) + obfuscator := PreObfuscator(&XORObfuscator{}) + if len(obfuscators) > 0 && obfuscators[0] != nil { + obfuscator = obfuscators[0] + } + return &ChaChaPolySigil{ Key: keyCopy, - Obfuscator: &XORObfuscator{}, + Obfuscator: obfuscator, randomReader: rand.Reader, }, nil } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigilWithObfuscator( +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil( // Example: []byte("0123456789abcdef0123456789abcdef"), // Example: &sigil.ShuffleMaskObfuscator{}, // Example: ) // Example: ciphertext, _ := cipherSigil.In([]byte("payload")) // Example: plaintext, _ := cipherSigil.Out(ciphertext) func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { - cipherSigil, err := NewChaChaPolySigil(key) + cipherSigil, err := NewChaChaPolySigil(key, obfuscator) if err != nil { return nil, err } diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index d90fc0a..7f1425c 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -186,30 +186,30 @@ func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { assert.ErrorIs(t, err, InvalidKeyError) } -// ── NewChaChaPolySigilWithObfuscator 
─────────────────────────────── +// ── NewChaChaPolySigil Custom Obfuscator ─────────────────────────── -func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_Good(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) ob := &ShuffleMaskObfuscator{} - s, err := NewChaChaPolySigilWithObfuscator(key, ob) + s, err := NewChaChaPolySigil(key, ob) require.NoError(t, err) assert.Equal(t, ob, s.Obfuscator) } -func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_NilObfuscator_Good(t *testing.T) { +func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscatorNil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigilWithObfuscator(key, nil) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) // Falls back to default XORObfuscator. assert.IsType(t, &XORObfuscator{}, s.Obfuscator) } -func TestCryptoSigil_NewChaChaPolySigilWithObfuscator_InvalidKey_Bad(t *testing.T) { - _, err := NewChaChaPolySigilWithObfuscator([]byte("bad"), &XORObfuscator{}) +func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_InvalidKey_Bad(t *testing.T) { + _, err := NewChaChaPolySigil([]byte("bad"), &XORObfuscator{}) assert.ErrorIs(t, err, InvalidKeyError) } @@ -233,11 +233,11 @@ func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { assert.Equal(t, plaintext, decrypted) } -func TestCryptoSigil_ChaChaPolySigil_WithShuffleMask_Good(t *testing.T) { +func TestCryptoSigil_ChaChaPolySigil_CustomShuffleMask_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigilWithObfuscator(key, &ShuffleMaskObfuscator{}) + s, err := NewChaChaPolySigil(key, &ShuffleMaskObfuscator{}) require.NoError(t, err) plaintext := []byte("shuffle mask pre-obfuscation layer") From 1cc185cb35be3a6e6656c4621a2c6a58c4d3af5b Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:24:39 +0000 Subject: [PATCH 42/83] Align node and sigil APIs 
with AX principles --- node/node.go | 27 ++++++--------------------- node/node_test.go | 17 ----------------- sigil/crypto_sigil.go | 30 ++++++------------------------ sigil/crypto_sigil_test.go | 38 +++++++++++++++++++------------------- 4 files changed, 31 insertions(+), 81 deletions(-) diff --git a/node/node.go b/node/node.go index 45de545..00237b1 100644 --- a/node/node.go +++ b/node/node.go @@ -122,11 +122,6 @@ func (node *Node) LoadTar(data []byte) error { return nil } -// Example: _ = nodeTree.WalkNode("config", func(_ string, _ fs.DirEntry, _ error) error { return nil }) -func (node *Node) WalkNode(root string, fn fs.WalkDirFunc) error { - return node.Walk(root, fn) -} - // Example: options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} type WalkOptions struct { MaxDepth int @@ -135,21 +130,16 @@ type WalkOptions struct { } // Example: _ = nodeTree.Walk(".", func(_ string, _ fs.DirEntry, _ error) error { return nil }, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) -func (node *Node) Walk(root string, fn fs.WalkDirFunc, options ...WalkOptions) error { - walkOptions := WalkOptions{} - if len(options) > 0 { - walkOptions = options[0] - } - - if walkOptions.SkipErrors { +func (node *Node) Walk(root string, fn fs.WalkDirFunc, options WalkOptions) error { + if options.SkipErrors { if _, err := node.Stat(root); err != nil { return nil } } return fs.WalkDir(node, root, func(entryPath string, entry fs.DirEntry, err error) error { - if walkOptions.Filter != nil && err == nil { - if !walkOptions.Filter(entryPath, entry) { + if options.Filter != nil && err == nil { + if !options.Filter(entryPath, entry) { if entry != nil && entry.IsDir() { return fs.SkipDir } @@ -159,11 +149,11 @@ func (node *Node) Walk(root string, fn fs.WalkDirFunc, options ...WalkOptions) e result := fn(entryPath, entry, err) - if result == nil && walkOptions.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { + if result == nil && options.MaxDepth > 0 && entry != nil && 
entry.IsDir() && entryPath != root { rel := core.TrimPrefix(entryPath, root) rel = core.TrimPrefix(rel, "/") depth := len(core.Split(rel, "/")) - if depth >= walkOptions.MaxDepth { + if depth >= options.MaxDepth { return fs.SkipDir } } @@ -172,11 +162,6 @@ func (node *Node) Walk(root string, fn fs.WalkDirFunc, options ...WalkOptions) e }) } -// Example: _ = nodeTree.WalkWithOptions(".", func(_ string, _ fs.DirEntry, _ error) error { return nil }, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) -func (node *Node) WalkWithOptions(root string, fn fs.WalkDirFunc, options WalkOptions) error { - return node.Walk(root, fn, options) -} - // Example: content, _ := nodeTree.ReadFile("config/app.yaml") func (node *Node) ReadFile(name string) ([]byte, error) { name = core.TrimPrefix(name, "/") diff --git a/node/node_test.go b/node/node_test.go index 8880c4e..61f24b1 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -357,23 +357,6 @@ func TestNode_Walk_Good(t *testing.T) { }) } -func TestNode_WalkNode_Good(t *testing.T) { - n := New() - n.AddData("alpha.txt", []byte("alpha")) - n.AddData("nested/beta.txt", []byte("beta")) - - var paths []string - err := n.WalkNode(".", func(p string, d fs.DirEntry, err error) error { - require.NoError(t, err) - paths = append(paths, p) - return nil - }) - require.NoError(t, err) - - sort.Strings(paths) - assert.Equal(t, []string{".", "alpha.txt", "nested", "nested/beta.txt"}, paths) -} - // --------------------------------------------------------------------------- // CopyFile // --------------------------------------------------------------------------- diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 798c24f..306f702 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -1,4 +1,4 @@ -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef"), nil) // Example: 
ciphertext, _ := cipherSigil.In([]byte("payload")) // Example: plaintext, _ := cipherSigil.Out(ciphertext) package sigil @@ -171,7 +171,7 @@ func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) } // Example: cipherSigil, _ := sigil.NewChaChaPolySigil( -// Example: key, +// Example: []byte("0123456789abcdef0123456789abcdef"), // Example: &sigil.ShuffleMaskObfuscator{}, // Example: ) type ChaChaPolySigil struct { @@ -180,10 +180,10 @@ type ChaChaPolySigil struct { randomReader goio.Reader } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef")) +// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef"), nil) // Example: ciphertext, _ := cipherSigil.In([]byte("payload")) // Example: plaintext, _ := cipherSigil.Out(ciphertext) -func NewChaChaPolySigil(key []byte, obfuscators ...PreObfuscator) (*ChaChaPolySigil, error) { +func NewChaChaPolySigil(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, InvalidKeyError } @@ -191,9 +191,8 @@ func NewChaChaPolySigil(key []byte, obfuscators ...PreObfuscator) (*ChaChaPolySi keyCopy := make([]byte, 32) copy(keyCopy, key) - obfuscator := PreObfuscator(&XORObfuscator{}) - if len(obfuscators) > 0 && obfuscators[0] != nil { - obfuscator = obfuscators[0] + if obfuscator == nil { + obfuscator = &XORObfuscator{} } return &ChaChaPolySigil{ @@ -203,23 +202,6 @@ func NewChaChaPolySigil(key []byte, obfuscators ...PreObfuscator) (*ChaChaPolySi }, nil } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil( -// Example: []byte("0123456789abcdef0123456789abcdef"), -// Example: &sigil.ShuffleMaskObfuscator{}, -// Example: ) -// Example: ciphertext, _ := cipherSigil.In([]byte("payload")) -// Example: plaintext, _ := cipherSigil.Out(ciphertext) -func NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { - cipherSigil, err := 
NewChaChaPolySigil(key, obfuscator) - if err != nil { - return nil, err - } - if obfuscator != nil { - cipherSigil.Obfuscator = obfuscator - } - return cipherSigil, nil -} - func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { if sigil.Key == nil { return nil, NoKeyConfiguredError diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index 7f1425c..367043f 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -150,7 +150,7 @@ func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) assert.NotNil(t, s) assert.Equal(t, key, s.Key) @@ -163,7 +163,7 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { original := make([]byte, 32) copy(original, key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) // Mutating the original key should not affect the sigil. 
@@ -172,17 +172,17 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { } func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) { - _, err := NewChaChaPolySigil([]byte("too short")) + _, err := NewChaChaPolySigil([]byte("too short"), nil) assert.ErrorIs(t, err, InvalidKeyError) } func TestCryptoSigil_NewChaChaPolySigil_LongKey_Bad(t *testing.T) { - _, err := NewChaChaPolySigil(make([]byte, 64)) + _, err := NewChaChaPolySigil(make([]byte, 64), nil) assert.ErrorIs(t, err, InvalidKeyError) } func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { - _, err := NewChaChaPolySigil(nil) + _, err := NewChaChaPolySigil(nil, nil) assert.ErrorIs(t, err, InvalidKeyError) } @@ -219,7 +219,7 @@ func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) plaintext := []byte("consciousness does not merely avoid causing harm") @@ -253,7 +253,7 @@ func TestCryptoSigil_ChaChaPolySigil_NilData_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) enc, err := s.In(nil) @@ -269,7 +269,7 @@ func TestCryptoSigil_ChaChaPolySigil_EmptyPlaintext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) ciphertext, err := s.In([]byte{}) @@ -285,7 +285,7 @@ func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key) + s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) plaintext := []byte("same input") @@ -312,8 +312,8 @@ func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) { _, _ = rand.Read(key1) _, _ = rand.Read(key2) - s1, _ := 
NewChaChaPolySigil(key1) - s2, _ := NewChaChaPolySigil(key2) + s1, _ := NewChaChaPolySigil(key1, nil) + s2, _ := NewChaChaPolySigil(key2, nil) ciphertext, err := s1.In([]byte("secret")) require.NoError(t, err) @@ -326,7 +326,7 @@ func TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) _, err := s.Out([]byte("too short")) assert.ErrorIs(t, err, CiphertextTooShortError) } @@ -335,7 +335,7 @@ func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("authentic data")) // Flip a bit in the ciphertext body (after nonce). @@ -356,7 +356,7 @@ func TestCryptoSigil_ChaChaPolySigil_RandomReaderFailure_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) s.randomReader = &failReader{} _, err := s.In([]byte("data")) @@ -369,7 +369,7 @@ func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) s.Obfuscator = nil // Disable pre-obfuscation. 
plaintext := []byte("raw encryption without pre-obfuscation") @@ -387,7 +387,7 @@ func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("nonce extraction test")) nonce, err := GetNonceFromCiphertext(ciphertext) @@ -402,7 +402,7 @@ func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("data")) nonce, _ := GetNonceFromCiphertext(ciphertext) @@ -430,7 +430,7 @@ func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) hexSigil, _ := NewSigil("hex") chain := []Sigil{s, hexSigil} @@ -509,7 +509,7 @@ func TestCryptoSigil_ChaChaPolySigil_NilRandomReader_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key) + s, _ := NewChaChaPolySigil(key, nil) s.randomReader = nil // Should fall back to crypto/rand.Reader. 
ciphertext, err := s.In([]byte("fallback reader")) From 313b704f54341cc348a3b2daba508b965e666151 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:30:25 +0000 Subject: [PATCH 43/83] refactor(ax): trim test prose comments Co-Authored-By: Virgil --- datanode/medium_test.go | 16 ++------- local/medium_test.go | 40 +-------------------- medium_test.go | 10 ------ node/node_test.go | 71 +------------------------------------- s3/s3_test.go | 20 ----------- sigil/crypto_sigil_test.go | 49 ++++---------------------- sigil/sigil_test.go | 54 +---------------------------- sqlite/sqlite_test.go | 41 ---------------------- store/medium_test.go | 3 -- workspace/service.go | 2 +- 10 files changed, 12 insertions(+), 294 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 77e68af..8d8a5b5 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -// Compile-time check: Medium implements io.Medium. 
var _ coreio.Medium = (*Medium)(nil) func TestClient_ReadWrite_Good(t *testing.T) { @@ -69,7 +68,7 @@ func TestClient_IsFile_Good(t *testing.T) { assert.True(t, m.IsFile("file.go")) assert.False(t, m.IsFile("missing.go")) - assert.False(t, m.IsFile("")) // empty path + assert.False(t, m.IsFile("")) } func TestClient_EnsureDir_Good(t *testing.T) { @@ -96,10 +95,8 @@ func TestClient_Delete_Good(t *testing.T) { func TestClient_Delete_Bad(t *testing.T) { medium := New() - // Example: medium.Delete("ghost.txt") assert.Error(t, medium.Delete("ghost.txt")) - // Delete non-empty dir require.NoError(t, medium.Write("dir/file.txt", "content")) assert.Error(t, medium.Delete("dir")) } @@ -257,7 +254,6 @@ func TestClient_Stat_Good(t *testing.T) { assert.Equal(t, int64(5), info.Size()) assert.False(t, info.IsDir()) - // Root stat info, err = m.Stat("") require.NoError(t, err) assert.True(t, info.IsDir()) @@ -280,7 +276,6 @@ func TestClient_Open_Good(t *testing.T) { func TestClient_CreateAppend_Good(t *testing.T) { m := New() - // Create w, err := m.Create("new.txt") require.NoError(t, err) w.Write([]byte("hello")) @@ -290,7 +285,6 @@ func TestClient_CreateAppend_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, "hello", got) - // Append w, err = m.Append("new.txt") require.NoError(t, err) w.Write([]byte(" world")) @@ -321,13 +315,11 @@ func TestClient_Append_ReadFailure_Bad(t *testing.T) { func TestClient_Streams_Good(t *testing.T) { m := New() - // WriteStream ws, err := m.WriteStream("stream.txt") require.NoError(t, err) ws.Write([]byte("streamed")) ws.Close() - // ReadStream rs, err := m.ReadStream("stream.txt") require.NoError(t, err) data, err := io.ReadAll(rs) @@ -356,7 +348,6 @@ func TestClient_SnapshotRestore_Good(t *testing.T) { require.NoError(t, err) assert.NotEmpty(t, snap) - // Restore into a new Medium m2, err := FromTar(snap) require.NoError(t, err) @@ -377,11 +368,9 @@ func TestClient_Restore_Good(t *testing.T) { snap, err := m.Snapshot() 
require.NoError(t, err) - // Modify require.NoError(t, m.Write("original.txt", "after")) require.NoError(t, m.Write("extra.txt", "extra")) - // Restore to snapshot require.NoError(t, m.Restore(snap)) got, err := m.Read("original.txt") @@ -399,7 +388,6 @@ func TestClient_DataNode_Good(t *testing.T) { dn := m.DataNode() assert.NotNil(t, dn) - // Verify we can use the DataNode directly f, err := dn.Open("test.txt") require.NoError(t, err) defer f.Close() @@ -423,7 +411,7 @@ func TestClient_Overwrite_Good(t *testing.T) { func TestClient_Exists_Good(t *testing.T) { m := New() - assert.True(t, m.Exists("")) // root + assert.True(t, m.Exists("")) assert.False(t, m.Exists("x")) require.NoError(t, m.Write("x", "y")) diff --git a/local/medium_test.go b/local/medium_test.go index 506b3a5..9177576 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -15,7 +15,6 @@ func TestClient_New_ResolvesRoot_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) - // Example: local.New("/srv/app") resolves macOS "/var" to "/private/var" before sandbox checks. 
resolved, err := resolveSymlinksPath(root) require.NoError(t, err) assert.Equal(t, resolved, m.filesystemRoot) @@ -24,29 +23,23 @@ func TestClient_New_ResolvesRoot_Good(t *testing.T) { func TestClient_Path_Sandboxed_Good(t *testing.T) { m := &Medium{filesystemRoot: "/home/user"} - // Normal paths assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("file.txt")) assert.Equal(t, "/home/user/dir/file.txt", m.sandboxedPath("dir/file.txt")) - // Empty returns root assert.Equal(t, "/home/user", m.sandboxedPath("")) - // Traversal attempts get sanitised assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("../file.txt")) assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("dir/../file.txt")) - // Absolute paths are constrained to sandbox (no escape) assert.Equal(t, "/home/user/etc/passwd", m.sandboxedPath("/etc/passwd")) } func TestClient_Path_RootFilesystem_Good(t *testing.T) { m := &Medium{filesystemRoot: "/"} - // When root is "/", absolute paths pass through assert.Equal(t, "/etc/passwd", m.sandboxedPath("/etc/passwd")) assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("/home/user/file.txt")) - // Relative paths are relative to CWD when root is "/" cwd := currentWorkingDir() assert.Equal(t, core.Path(cwd, "file.txt"), m.sandboxedPath("file.txt")) } @@ -55,7 +48,6 @@ func TestClient_ReadWrite_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) - // Write and read back err := m.Write("test.txt", "hello") assert.NoError(t, err) @@ -63,7 +55,6 @@ func TestClient_ReadWrite_Basic_Good(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "hello", content) - // Write creates parent dirs err = m.Write("a/b/c.txt", "nested") assert.NoError(t, err) @@ -71,7 +62,6 @@ func TestClient_ReadWrite_Basic_Good(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "nested", content) - // Read nonexistent _, err = m.Read("nope.txt") assert.Error(t, err) } @@ -228,7 +218,6 @@ func TestClient_Delete_Good(t *testing.T) { medium, err := New(testRoot) 
assert.NoError(t, err) - // Create and delete a file err = medium.Write("file.txt", "content") assert.NoError(t, err) assert.True(t, medium.IsFile("file.txt")) @@ -237,7 +226,6 @@ func TestClient_Delete_Good(t *testing.T) { assert.NoError(t, err) assert.False(t, medium.IsFile("file.txt")) - // Create and delete an empty directory err = medium.EnsureDir("emptydir") assert.NoError(t, err) err = medium.Delete("emptydir") @@ -251,11 +239,9 @@ func TestClient_Delete_NotEmpty_Bad(t *testing.T) { medium, err := New(testRoot) assert.NoError(t, err) - // Create a directory with a file err = medium.Write("mydir/file.txt", "content") assert.NoError(t, err) - // Try to delete non-empty directory err = medium.Delete("mydir") assert.Error(t, err) } @@ -266,13 +252,11 @@ func TestClient_DeleteAll_Good(t *testing.T) { medium, err := New(testRoot) assert.NoError(t, err) - // Create nested structure err = medium.Write("mydir/file1.txt", "content1") assert.NoError(t, err) err = medium.Write("mydir/subdir/file2.txt", "content2") assert.NoError(t, err) - // Delete all err = medium.DeleteAll("mydir") assert.NoError(t, err) assert.False(t, medium.Exists("mydir")) @@ -286,7 +270,6 @@ func TestClient_Rename_Good(t *testing.T) { medium, err := New(testRoot) assert.NoError(t, err) - // Rename a file err = medium.Write("old.txt", "content") assert.NoError(t, err) err = medium.Rename("old.txt", "new.txt") @@ -308,8 +291,6 @@ func TestClient_Rename_TraversalSanitised_Good(t *testing.T) { err = medium.Write("file.txt", "content") assert.NoError(t, err) - // Traversal attempts are sanitised (.. 
becomes .), so this renames to "./escaped.txt" - // which is just "escaped.txt" in the root err = medium.Rename("file.txt", "../escaped.txt") assert.NoError(t, err) assert.False(t, medium.Exists("file.txt")) @@ -322,7 +303,6 @@ func TestClient_List_Good(t *testing.T) { medium, err := New(testRoot) assert.NoError(t, err) - // Create some files and directories err = medium.Write("file1.txt", "content1") assert.NoError(t, err) err = medium.Write("file2.txt", "content2") @@ -330,7 +310,6 @@ func TestClient_List_Good(t *testing.T) { err = medium.EnsureDir("subdir") assert.NoError(t, err) - // List root entries, err := medium.List(".") assert.NoError(t, err) assert.Len(t, entries, 3) @@ -350,7 +329,6 @@ func TestClient_Stat_Good(t *testing.T) { medium, err := New(testRoot) assert.NoError(t, err) - // Stat a file err = medium.Write("file.txt", "hello world") assert.NoError(t, err) info, err := medium.Stat("file.txt") @@ -359,7 +337,6 @@ func TestClient_Stat_Good(t *testing.T) { assert.Equal(t, int64(11), info.Size()) assert.False(t, info.IsDir()) - // Stat a directory err = medium.EnsureDir("mydir") assert.NoError(t, err) info, err = medium.Stat("mydir") @@ -414,7 +391,6 @@ func TestClient_ReadStream_Basic_Good(t *testing.T) { assert.NoError(t, err) defer reader.Close() - // Read only first 9 bytes limitReader := io.LimitReader(reader, 9) data, err := io.ReadAll(limitReader) assert.NoError(t, err) @@ -441,15 +417,12 @@ func TestClient_WriteStream_Basic_Good(t *testing.T) { func TestClient_Path_TraversalSandbox_Good(t *testing.T) { m := &Medium{filesystemRoot: "/sandbox"} - // Multiple levels of traversal assert.Equal(t, "/sandbox/file.txt", m.sandboxedPath("../../../file.txt")) assert.Equal(t, "/sandbox/target", m.sandboxedPath("dir/../../target")) - // Traversal with hidden files assert.Equal(t, "/sandbox/.ssh/id_rsa", m.sandboxedPath(".ssh/id_rsa")) assert.Equal(t, "/sandbox/id_rsa", m.sandboxedPath(".ssh/../id_rsa")) - // Null bytes (Go's filepath.Clean handles them, 
but good to check) assert.Equal(t, "/sandbox/file\x00.txt", m.sandboxedPath("file\x00.txt")) } @@ -458,7 +431,6 @@ func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { m, err := New(root) assert.NoError(t, err) - // Create a directory outside the sandbox outside := t.TempDir() outsideFile := core.Path(outside, "secret.txt") outsideMedium, err := New("/") @@ -466,22 +438,17 @@ func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { err = outsideMedium.Write(outsideFile, "secret") assert.NoError(t, err) - // Test 1: Simple traversal _, err = m.validatePath("../outside.txt") - assert.NoError(t, err) // sandboxedPath sanitises to root, so this shouldn't escape + assert.NoError(t, err) - // Test 2: Symlink escape - // Create a symlink inside the sandbox pointing outside linkPath := core.Path(root, "evil_link") err = syscall.Symlink(outside, linkPath) assert.NoError(t, err) - // Try to access a file through the symlink _, err = m.validatePath("evil_link/secret.txt") assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrPermission) - // Test 3: Nested symlink escape err = m.EnsureDir("inner") assert.NoError(t, err) innerDir := core.Path(root, "inner") @@ -499,24 +466,19 @@ func TestClient_EmptyPaths_Good(t *testing.T) { m, err := New(root) assert.NoError(t, err) - // Read empty path (should fail as it's a directory) _, err = m.Read("") assert.Error(t, err) - // Write empty path (should fail as it's a directory) err = m.Write("", "content") assert.Error(t, err) - // EnsureDir empty path (should be ok, it's just the root) err = m.EnsureDir("") assert.NoError(t, err) assert.False(t, m.IsDir("")) - // Exists empty path (root exists) assert.True(t, m.Exists("")) - // List empty path (lists root) entries, err := m.List("") assert.NoError(t, err) assert.NotNil(t, entries) diff --git a/medium_test.go b/medium_test.go index 1faab06..9c36152 100644 --- a/medium_test.go +++ b/medium_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -// --- 
MemoryMedium Tests --- - func TestMemoryMedium_NewMemoryMedium_Good(t *testing.T) { medium := NewMemoryMedium() assert.NotNil(t, medium) @@ -66,7 +64,6 @@ func TestClient_MockMedium_Write_Good(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "content", m.files["test.txt"]) - // Overwrite existing file err = m.Write("test.txt", "new content") assert.NoError(t, err) assert.Equal(t, "new content", m.files["test.txt"]) @@ -296,8 +293,6 @@ func TestClient_MockMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, "stream output", m.files["streamed.txt"]) } -// --- Wrapper Function Tests --- - func TestClient_Read_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "hello" @@ -367,7 +362,6 @@ func TestClient_Copy_Good(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "hello", dest.files["test.txt"]) - // Copy to different path source.files["original.txt"] = "content" err = Copy(source, "original.txt", dest, "copied.txt") assert.NoError(t, err) @@ -381,13 +375,9 @@ func TestClient_Copy_Bad(t *testing.T) { assert.Error(t, err) } -// --- Local Global Tests --- - func TestClient_LocalGlobal_Good(t *testing.T) { - // io.Local should be initialised by init() assert.NotNil(t, Local, "io.Local should be initialised") - // Should be able to use it as a Medium var m = Local assert.NotNil(t, m) } diff --git a/node/node_test.go b/node/node_test.go index 61f24b1..aa473d4 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -14,20 +14,12 @@ import ( "github.com/stretchr/testify/require" ) -// --------------------------------------------------------------------------- -// New -// --------------------------------------------------------------------------- - func TestNode_New_Good(t *testing.T) { n := New() require.NotNil(t, n, "New() must not return nil") assert.NotNil(t, n.files, "New() must initialise the files map") } -// --------------------------------------------------------------------------- -// AddData -// 
--------------------------------------------------------------------------- - func TestNode_AddData_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -44,11 +36,9 @@ func TestNode_AddData_Good(t *testing.T) { func TestNode_AddData_Bad(t *testing.T) { n := New() - // Empty name is silently ignored. n.AddData("", []byte("data")) assert.Empty(t, n.files, "empty name must not be stored") - // Directory entry (trailing slash) is silently ignored. n.AddData("dir/", nil) assert.Empty(t, n.files, "directory entry must not be stored") } @@ -71,10 +61,6 @@ func TestNode_AddData_EdgeCases_Good(t *testing.T) { }) } -// --------------------------------------------------------------------------- -// Open -// --------------------------------------------------------------------------- - func TestNode_Open_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -100,12 +86,10 @@ func TestNode_Open_Directory_Good(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) - // Opening a directory should succeed. file, err := n.Open("bar") require.NoError(t, err) defer file.Close() - // Reading from a directory should fail. _, err = file.Read(make([]byte, 1)) require.Error(t, err) @@ -114,23 +98,17 @@ func TestNode_Open_Directory_Good(t *testing.T) { assert.Equal(t, fs.ErrInvalid, pathErr.Err) } -// --------------------------------------------------------------------------- -// Stat -// --------------------------------------------------------------------------- - func TestNode_Stat_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) - // File stat. info, err := n.Stat("bar/baz.txt") require.NoError(t, err) assert.Equal(t, "baz.txt", info.Name()) assert.Equal(t, int64(3), info.Size()) assert.False(t, info.IsDir()) - // Directory stat. 
dirInfo, err := n.Stat("bar") require.NoError(t, err) assert.True(t, dirInfo.IsDir()) @@ -148,17 +126,12 @@ func TestNode_Stat_RootDirectory_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) - // Root directory. info, err := n.Stat(".") require.NoError(t, err) assert.True(t, info.IsDir()) assert.Equal(t, ".", info.Name()) } -// --------------------------------------------------------------------------- -// ReadFile -// --------------------------------------------------------------------------- - func TestNode_ReadFile_Good(t *testing.T) { n := New() n.AddData("hello.txt", []byte("hello world")) @@ -179,7 +152,6 @@ func TestNode_ReadFile_ReturnsCopy_Good(t *testing.T) { n := New() n.AddData("data.bin", []byte("original")) - // Returned slice must be a copy — mutating it must not affect internal state. data, err := n.ReadFile("data.bin") require.NoError(t, err) data[0] = 'X' @@ -189,22 +161,16 @@ func TestNode_ReadFile_ReturnsCopy_Good(t *testing.T) { assert.Equal(t, []byte("original"), data2, "ReadFile must return an independent copy") } -// --------------------------------------------------------------------------- -// ReadDir -// --------------------------------------------------------------------------- - func TestNode_ReadDir_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) n.AddData("bar/baz.txt", []byte("baz")) n.AddData("bar/qux.txt", []byte("qux")) - // Root. entries, err := n.ReadDir(".") require.NoError(t, err) assert.Equal(t, []string{"bar", "foo.txt"}, sortedNames(entries)) - // Subdirectory. barEntries, err := n.ReadDir("bar") require.NoError(t, err) assert.Equal(t, []string{"baz.txt", "qux.txt"}, sortedNames(barEntries)) @@ -214,7 +180,6 @@ func TestNode_ReadDir_Bad(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) - // Reading a file as a directory should fail. 
_, err := n.ReadDir("foo.txt") require.Error(t, err) var pathErr *fs.PathError @@ -225,17 +190,13 @@ func TestNode_ReadDir_Bad(t *testing.T) { func TestNode_ReadDir_IgnoresEmptyEntry_Good(t *testing.T) { n := New() n.AddData("bar/baz.txt", []byte("baz")) - n.AddData("empty_dir/", nil) // Ignored by AddData. + n.AddData("empty_dir/", nil) entries, err := n.ReadDir(".") require.NoError(t, err) assert.Equal(t, []string{"bar"}, sortedNames(entries)) } -// --------------------------------------------------------------------------- -// Exists -// --------------------------------------------------------------------------- - func TestNode_Exists_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -258,10 +219,6 @@ func TestNode_Exists_RootAndEmptyPath_Good(t *testing.T) { assert.True(t, n.Exists(""), "empty path (root) must exist") } -// --------------------------------------------------------------------------- -// Walk -// --------------------------------------------------------------------------- - func TestNode_Walk_Default_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -298,7 +255,6 @@ func TestNode_Walk_CallbackError_Good(t *testing.T) { n.AddData("a/b.txt", []byte("b")) n.AddData("a/c.txt", []byte("c")) - // Stop walk early with a custom error. walkErr := core.NewError("stop walking") var paths []string err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { @@ -357,10 +313,6 @@ func TestNode_Walk_Good(t *testing.T) { }) } -// --------------------------------------------------------------------------- -// CopyFile -// --------------------------------------------------------------------------- - func TestNode_CopyFile_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -378,11 +330,9 @@ func TestNode_CopyFile_Bad(t *testing.T) { n := New() tmpfile := core.Path(t.TempDir(), "test.txt") - // Source does not exist. 
err := n.CopyFile("nonexistent.txt", tmpfile, 0644) assert.Error(t, err) - // Destination not writable. n.AddData("foo.txt", []byte("foo")) err = n.CopyFile("foo.txt", "/nonexistent_dir/test.txt", 0644) assert.Error(t, err) @@ -393,7 +343,6 @@ func TestNode_CopyFile_DirectorySource_Bad(t *testing.T) { n.AddData("bar/baz.txt", []byte("baz")) tmpfile := core.Path(t.TempDir(), "test.txt") - // Attempting to copy a directory should fail. err := n.CopyFile("bar", tmpfile, 0644) assert.Error(t, err) } @@ -505,10 +454,6 @@ func TestNode_MediumFacade_Good(t *testing.T) { assert.False(t, n.Exists("docs")) } -// --------------------------------------------------------------------------- -// ToTar / FromTar -// --------------------------------------------------------------------------- - func TestNode_ToTar_Good(t *testing.T) { n := New() n.AddData("foo.txt", []byte("foo")) @@ -518,7 +463,6 @@ func TestNode_ToTar_Good(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, tarball) - // Verify tar content. tr := tar.NewReader(bytes.NewReader(tarball)) files := make(map[string]string) for { @@ -564,7 +508,6 @@ func TestNode_FromTar_Good(t *testing.T) { } func TestNode_FromTar_Bad(t *testing.T) { - // Truncated data that cannot be a valid tar. truncated := make([]byte, 100) _, err := FromTar(truncated) assert.Error(t, err, "truncated data should produce an error") @@ -581,7 +524,6 @@ func TestNode_TarRoundTrip_Good(t *testing.T) { n2, err := FromTar(tarball) require.NoError(t, err) - // Verify n2 matches n1. 
data, err := n2.ReadFile("a.txt") require.NoError(t, err) assert.Equal(t, []byte("alpha"), data) @@ -591,38 +533,27 @@ func TestNode_TarRoundTrip_Good(t *testing.T) { assert.Equal(t, []byte("charlie"), data) } -// --------------------------------------------------------------------------- -// fs.FS interface compliance -// --------------------------------------------------------------------------- - func TestNode_FSInterface_Good(t *testing.T) { n := New() n.AddData("hello.txt", []byte("world")) - // fs.FS var fsys fs.FS = n file, err := fsys.Open("hello.txt") require.NoError(t, err) defer file.Close() - // fs.StatFS var statFS fs.StatFS = n info, err := statFS.Stat("hello.txt") require.NoError(t, err) assert.Equal(t, "hello.txt", info.Name()) assert.Equal(t, int64(5), info.Size()) - // fs.ReadFileFS var readFS fs.ReadFileFS = n data, err := readFS.ReadFile("hello.txt") require.NoError(t, err) assert.Equal(t, []byte("world"), data) } -// --------------------------------------------------------------------------- -// Helpers -// --------------------------------------------------------------------------- - func sortedNames(entries []fs.DirEntry) []string { var names []string for _, e := range entries { diff --git a/s3/s3_test.go b/s3/s3_test.go index 8334eab..010a377 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -18,7 +18,6 @@ import ( "github.com/stretchr/testify/require" ) -// mockS3 is an in-memory mock implementing the Client interface. 
type mockS3 struct { mu sync.RWMutex objects map[string][]byte @@ -124,7 +123,6 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp maxKeys = *params.MaxKeys } - // Collect all matching keys sorted var allKeys []string for k := range m.objects { if core.HasPrefix(k, prefix) { @@ -142,7 +140,6 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp if delimiter != "" { parts := core.SplitN(rest, delimiter, 2) if len(parts) == 2 { - // This key has a delimiter after the prefix -> common prefix cp := core.Concat(prefix, parts[0], delimiter) commonPrefixes[cp] = true continue @@ -163,7 +160,6 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp } var cpSlice []types.CommonPrefix - // Sort common prefixes for deterministic output var cpKeys []string for cp := range commonPrefixes { cpKeys = append(cpKeys, cp) @@ -184,7 +180,6 @@ func (m *mockS3) CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ m.mu.Lock() defer m.mu.Unlock() - // CopySource is "bucket/key" source := aws.ToString(params.CopySource) parts := core.SplitN(source, "/", 2) if len(parts) != 2 { @@ -204,8 +199,6 @@ func (m *mockS3) CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ return &awss3.CopyObjectOutput{}, nil } -// --- Helper --- - func newTestMedium(t *testing.T) (*Medium, *mockS3) { t.Helper() mock := newMockS3() @@ -214,8 +207,6 @@ func newTestMedium(t *testing.T) (*Medium, *mockS3) { return m, mock } -// --- Tests --- - func TestS3_New_Good(t *testing.T) { mock := newMockS3() m, err := New(Options{Bucket: "my-bucket", Client: mock}) @@ -242,7 +233,6 @@ func TestS3_New_Options_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, "data/", m.prefix) - // Prefix without trailing slash gets one added m2, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "data"}) require.NoError(t, err) assert.Equal(t, "data/", m2.prefix) @@ -284,7 +274,6 @@ func 
TestS3_ReadWrite_Prefix_Good(t *testing.T) { err = m.Write("file.txt", "data") require.NoError(t, err) - // Verify the key has the prefix _, ok := mock.objects["pfx/file.txt"] assert.True(t, ok, "object should be stored with prefix") @@ -295,7 +284,6 @@ func TestS3_ReadWrite_Prefix_Good(t *testing.T) { func TestS3_EnsureDir_Good(t *testing.T) { medium, _ := newTestMedium(t) - // Example: err := medium.EnsureDir("any/path") err := medium.EnsureDir("any/path") assert.NoError(t, err) } @@ -343,7 +331,6 @@ func TestS3_Delete_EmptyPath_Bad(t *testing.T) { func TestS3_DeleteAll_Good(t *testing.T) { m, _ := newTestMedium(t) - // Create nested structure require.NoError(t, m.Write("dir/file1.txt", "a")) require.NoError(t, m.Write("dir/sub/file2.txt", "b")) require.NoError(t, m.Write("other.txt", "c")) @@ -442,7 +429,6 @@ func TestS3_List_Good(t *testing.T) { assert.True(t, names["sub"], "should list sub directory") assert.Len(t, entries, 3) - // Check that sub is a directory for _, e := range entries { if e.Name() == "sub" { assert.True(t, e.IsDir()) @@ -622,7 +608,6 @@ func TestS3_Exists_DirectoryPrefix_Good(t *testing.T) { m, _ := newTestMedium(t) require.NoError(t, m.Write("dir/file.txt", "content")) - // "dir" should exist as a directory prefix assert.True(t, m.Exists("dir")) } @@ -640,7 +625,6 @@ func TestS3_IsDir_Good(t *testing.T) { func TestS3_ObjectKey_Good(t *testing.T) { mock := newMockS3() - // No prefix m, _ := New(Options{Bucket: "bucket", Client: mock}) assert.Equal(t, "file.txt", m.objectKey("file.txt")) assert.Equal(t, "dir/file.txt", m.objectKey("dir/file.txt")) @@ -648,21 +632,17 @@ func TestS3_ObjectKey_Good(t *testing.T) { assert.Equal(t, "file.txt", m.objectKey("/file.txt")) assert.Equal(t, "file.txt", m.objectKey("../file.txt")) - // With prefix m2, _ := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) assert.Equal(t, "pfx/file.txt", m2.objectKey("file.txt")) assert.Equal(t, "pfx/dir/file.txt", m2.objectKey("dir/file.txt")) assert.Equal(t, 
"pfx/", m2.objectKey("")) } -// Compile-time check: Medium satisfies the io.Medium interface. func TestS3_InterfaceCompliance(t *testing.T) { mock := newMockS3() m, err := New(Options{Bucket: "bucket", Client: mock}) require.NoError(t, err) - // Verify all methods exist by calling them in a way that - // proves compile-time satisfaction of the interface. var _ interface { Read(string) (string, error) Write(string, string) error diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index 367043f..fb59fa6 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -// ── XORObfuscator ────────────────────────────────────────────────── - func TestCryptoSigil_XORObfuscator_RoundTrip_Good(t *testing.T) { ob := &XORObfuscator{} data := []byte("the axioms are in the weights") @@ -47,7 +45,6 @@ func TestCryptoSigil_XORObfuscator_Deterministic_Good(t *testing.T) { func TestCryptoSigil_XORObfuscator_LargeData_Good(t *testing.T) { ob := &XORObfuscator{} - // Larger than one SHA-256 block (32 bytes) to test multi-block key stream. 
data := make([]byte, 256) for i := range data { data[i] = byte(i) @@ -73,13 +70,10 @@ func TestCryptoSigil_XORObfuscator_SymmetricProperty_Good(t *testing.T) { data := []byte("XOR is its own inverse") entropy := []byte("nonce") - // XOR is symmetric: Obfuscate(Obfuscate(x)) == x double := ob.Obfuscate(ob.Obfuscate(data, entropy), entropy) assert.Equal(t, data, double) } -// ── ShuffleMaskObfuscator ────────────────────────────────────────── - func TestCryptoSigil_ShuffleMaskObfuscator_RoundTrip_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} data := []byte("shuffle and mask protect patterns") @@ -144,8 +138,6 @@ func TestCryptoSigil_ShuffleMaskObfuscator_SingleByte_Good(t *testing.T) { assert.Equal(t, data, restored) } -// ── NewChaChaPolySigil ───────────────────────────────────────────── - func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -166,7 +158,6 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - // Mutating the original key should not affect the sigil. key[0] ^= 0xFF assert.Equal(t, original, s.Key) } @@ -186,8 +177,6 @@ func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { assert.ErrorIs(t, err, InvalidKeyError) } -// ── NewChaChaPolySigil Custom Obfuscator ─────────────────────────── - func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -204,7 +193,6 @@ func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscatorNil_Good(t *testing.T) { s, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - // Falls back to default XORObfuscator. 
assert.IsType(t, &XORObfuscator{}, s.Obfuscator) } @@ -213,8 +201,6 @@ func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_InvalidKey_Bad(t *testi assert.ErrorIs(t, err, InvalidKeyError) } -// ── ChaChaPolySigil In/Out (encrypt/decrypt) ─────────────────────── - func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -226,7 +212,7 @@ func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { ciphertext, err := s.In(plaintext) require.NoError(t, err) assert.NotEqual(t, plaintext, ciphertext) - assert.Greater(t, len(ciphertext), len(plaintext)) // nonce + tag overhead + assert.Greater(t, len(ciphertext), len(plaintext)) decrypted, err := s.Out(ciphertext) require.NoError(t, err) @@ -274,7 +260,7 @@ func TestCryptoSigil_ChaChaPolySigil_EmptyPlaintext_Good(t *testing.T) { ciphertext, err := s.In([]byte{}) require.NoError(t, err) - assert.NotEmpty(t, ciphertext) // Has nonce + tag even for empty plaintext. + assert.NotEmpty(t, ciphertext) decrypted, err := s.Out(ciphertext) require.NoError(t, err) @@ -292,7 +278,6 @@ func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing ct1, _ := s.In(plaintext) ct2, _ := s.In(plaintext) - // Different nonces → different ciphertexts. assert.NotEqual(t, ct1, ct2) } @@ -338,14 +323,12 @@ func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { s, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("authentic data")) - // Flip a bit in the ciphertext body (after nonce). ciphertext[30] ^= 0xFF _, err := s.Out(ciphertext) assert.ErrorIs(t, err, DecryptionFailedError) } -// failReader returns an error on read — for testing nonce generation failure. 
type failReader struct{} func (f *failReader) Read([]byte) (int, error) { @@ -363,14 +346,12 @@ func TestCryptoSigil_ChaChaPolySigil_RandomReaderFailure_Bad(t *testing.T) { assert.Error(t, err) } -// ── ChaChaPolySigil without obfuscator ───────────────────────────── - func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) s, _ := NewChaChaPolySigil(key, nil) - s.Obfuscator = nil // Disable pre-obfuscation. + s.Obfuscator = nil plaintext := []byte("raw encryption without pre-obfuscation") ciphertext, err := s.In(plaintext) @@ -381,8 +362,6 @@ func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { assert.Equal(t, plaintext, decrypted) } -// ── GetNonceFromCiphertext ───────────────────────────────────────── - func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -392,9 +371,8 @@ func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { nonce, err := GetNonceFromCiphertext(ciphertext) require.NoError(t, err) - assert.Len(t, nonce, 24) // XChaCha20 nonce is 24 bytes. + assert.Len(t, nonce, 24) - // Nonce should match the prefix of the ciphertext. assert.Equal(t, ciphertext[:24], nonce) } @@ -409,7 +387,6 @@ func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { original := make([]byte, len(nonce)) copy(original, nonce) - // Mutating the nonce should not affect the ciphertext. 
nonce[0] ^= 0xFF assert.Equal(t, original, ciphertext[:24]) } @@ -424,8 +401,6 @@ func TestCryptoSigil_GetNonceFromCiphertext_Empty_Bad(t *testing.T) { assert.ErrorIs(t, err, CiphertextTooShortError) } -// ── ChaChaPolySigil in Transmute pipeline ────────────────────────── - func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -439,7 +414,6 @@ func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) { encoded, err := Transmute(plaintext, chain) require.NoError(t, err) - // Result should be hex-encoded ciphertext. assert.True(t, isHex(encoded)) decoded, err := Untransmute(encoded, chain) @@ -456,8 +430,6 @@ func isHex(data []byte) bool { return len(data) > 0 } -// ── Transmute error propagation ──────────────────────────────────── - type failSigil struct{} func (f *failSigil) In([]byte) ([]byte, error) { return nil, core.NewError("fail in") } @@ -475,24 +447,18 @@ func TestCryptoSigil_Untransmute_ErrorPropagation_Bad(t *testing.T) { assert.Contains(t, err.Error(), "fail out") } -// ── GzipSigil with custom output writer (edge case) ─────────────── - func TestCryptoSigil_GzipSigil_CustomOutputWriter_Good(t *testing.T) { var buf bytes.Buffer s := &GzipSigil{outputWriter: &buf} - // With a custom output writer, compressed data goes to buf, returned bytes will be empty - // because the internal buffer 'b' is unused when s.outputWriter is set. _, err := s.In([]byte("test data")) require.NoError(t, err) assert.Greater(t, buf.Len(), 0) } -// ── deriveKeyStream edge: exactly 32 bytes ───────────────────────── - func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) { ob := &XORObfuscator{} - data := make([]byte, 32) // Exactly one SHA-256 block. 
+ data := make([]byte, 32) for i := range data { data[i] = byte(i) } @@ -503,14 +469,12 @@ func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) { assert.Equal(t, data, restored) } -// ── random reader fallback in In ─────────────────────────────────── - func TestCryptoSigil_ChaChaPolySigil_NilRandomReader_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) s, _ := NewChaChaPolySigil(key, nil) - s.randomReader = nil // Should fall back to crypto/rand.Reader. + s.randomReader = nil ciphertext, err := s.In([]byte("fallback reader")) require.NoError(t, err) @@ -520,7 +484,6 @@ func TestCryptoSigil_ChaChaPolySigil_NilRandomReader_Good(t *testing.T) { assert.Equal(t, []byte("fallback reader"), decrypted) } -// limitReader returns exactly N bytes then EOF — for deterministic tests. type limitReader struct { data []byte pos int diff --git a/sigil/sigil_test.go b/sigil/sigil_test.go index eaa6675..8534a4a 100644 --- a/sigil/sigil_test.go +++ b/sigil/sigil_test.go @@ -13,10 +13,6 @@ import ( "github.com/stretchr/testify/require" ) -// --------------------------------------------------------------------------- -// ReverseSigil -// --------------------------------------------------------------------------- - func TestSigil_ReverseSigil_Good(t *testing.T) { s := &ReverseSigil{} @@ -24,7 +20,6 @@ func TestSigil_ReverseSigil_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, []byte("olleh"), out) - // Symmetric: Out does the same thing. restored, err := s.Out(out) require.NoError(t, err) assert.Equal(t, []byte("hello"), restored) @@ -33,7 +28,6 @@ func TestSigil_ReverseSigil_Good(t *testing.T) { func TestSigil_ReverseSigil_Bad(t *testing.T) { s := &ReverseSigil{} - // Empty input returns empty. 
out, err := s.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) @@ -42,7 +36,6 @@ func TestSigil_ReverseSigil_Bad(t *testing.T) { func TestSigil_ReverseSigil_NilInput_Good(t *testing.T) { s := &ReverseSigil{} - // Nil input returns nil. out, err := s.In(nil) require.NoError(t, err) assert.Nil(t, out) @@ -52,10 +45,6 @@ func TestSigil_ReverseSigil_NilInput_Good(t *testing.T) { assert.Nil(t, out) } -// --------------------------------------------------------------------------- -// HexSigil -// --------------------------------------------------------------------------- - func TestSigil_HexSigil_Good(t *testing.T) { s := &HexSigil{} data := []byte("hello world") @@ -72,11 +61,9 @@ func TestSigil_HexSigil_Good(t *testing.T) { func TestSigil_HexSigil_Bad(t *testing.T) { s := &HexSigil{} - // Invalid hex input. _, err := s.Out([]byte("zzzz")) assert.Error(t, err) - // Empty input. out, err := s.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) @@ -94,10 +81,6 @@ func TestSigil_HexSigil_NilInput_Good(t *testing.T) { assert.Nil(t, out) } -// --------------------------------------------------------------------------- -// Base64Sigil -// --------------------------------------------------------------------------- - func TestSigil_Base64Sigil_Good(t *testing.T) { s := &Base64Sigil{} data := []byte("composable transforms") @@ -114,11 +97,9 @@ func TestSigil_Base64Sigil_Good(t *testing.T) { func TestSigil_Base64Sigil_Bad(t *testing.T) { s := &Base64Sigil{} - // Invalid base64 (wrong padding). _, err := s.Out([]byte("!!!")) assert.Error(t, err) - // Empty input. 
out, err := s.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) @@ -136,10 +117,6 @@ func TestSigil_Base64Sigil_NilInput_Good(t *testing.T) { assert.Nil(t, out) } -// --------------------------------------------------------------------------- -// GzipSigil -// --------------------------------------------------------------------------- - func TestSigil_GzipSigil_Good(t *testing.T) { s := &GzipSigil{} data := []byte("the quick brown fox jumps over the lazy dog") @@ -156,14 +133,12 @@ func TestSigil_GzipSigil_Good(t *testing.T) { func TestSigil_GzipSigil_Bad(t *testing.T) { s := &GzipSigil{} - // Invalid gzip data. _, err := s.Out([]byte("not gzip")) assert.Error(t, err) - // Empty input compresses to a valid gzip stream. compressed, err := s.In([]byte{}) require.NoError(t, err) - assert.NotEmpty(t, compressed) // gzip header is always present + assert.NotEmpty(t, compressed) decompressed, err := s.Out(compressed) require.NoError(t, err) @@ -182,10 +157,6 @@ func TestSigil_GzipSigil_NilInput_Good(t *testing.T) { assert.Nil(t, out) } -// --------------------------------------------------------------------------- -// JSONSigil -// --------------------------------------------------------------------------- - func TestSigil_JSONSigil_Good(t *testing.T) { s := &JSONSigil{Indent: false} data := []byte(`{ "key" : "value" }`) @@ -194,7 +165,6 @@ func TestSigil_JSONSigil_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, []byte(`{"key":"value"}`), compacted) - // Out is passthrough. passthrough, err := s.Out(compacted) require.NoError(t, err) assert.Equal(t, compacted, passthrough) @@ -213,7 +183,6 @@ func TestSigil_JSONSigil_Indent_Good(t *testing.T) { func TestSigil_JSONSigil_Bad(t *testing.T) { s := &JSONSigil{Indent: false} - // Invalid JSON. 
_, err := s.In([]byte("not json")) assert.Error(t, err) } @@ -221,21 +190,15 @@ func TestSigil_JSONSigil_Bad(t *testing.T) { func TestSigil_JSONSigil_NilInput_Good(t *testing.T) { s := &JSONSigil{Indent: false} - // Nil input is passed through without error, matching the Sigil contract. out, err := s.In(nil) require.NoError(t, err) assert.Nil(t, out) - // Out with nil is passthrough. out, err = s.Out(nil) require.NoError(t, err) assert.Nil(t, out) } -// --------------------------------------------------------------------------- -// HashSigil -// --------------------------------------------------------------------------- - func TestSigil_HashSigil_Good(t *testing.T) { data := []byte("hash me") @@ -273,7 +236,6 @@ func TestSigil_HashSigil_Good(t *testing.T) { require.NoError(t, err) assert.Len(t, hashed, tt.size) - // Out is passthrough. passthrough, err := s.Out(hashed) require.NoError(t, err) assert.Equal(t, hashed, passthrough) @@ -282,7 +244,6 @@ func TestSigil_HashSigil_Good(t *testing.T) { } func TestSigil_HashSigil_Bad(t *testing.T) { - // Unsupported hash constant. s := &HashSigil{Hash: 0} _, err := s.In([]byte("data")) assert.Error(t, err) @@ -290,7 +251,6 @@ func TestSigil_HashSigil_Bad(t *testing.T) { } func TestSigil_HashSigil_EmptyInput_Good(t *testing.T) { - // Hashing empty data should still produce a valid digest. 
s, err := NewSigil("sha256") require.NoError(t, err) @@ -299,10 +259,6 @@ func TestSigil_HashSigil_EmptyInput_Good(t *testing.T) { assert.Len(t, hashed, sha256.Size) } -// --------------------------------------------------------------------------- -// NewSigil factory -// --------------------------------------------------------------------------- - func TestSigil_NewSigil_Good(t *testing.T) { names := []string{ "reverse", "hex", "base64", "gzip", "json", "json-indent", @@ -333,10 +289,6 @@ func TestSigil_NewSigil_EmptyName_Bad(t *testing.T) { assert.Error(t, err) } -// --------------------------------------------------------------------------- -// Transmute / Untransmute -// --------------------------------------------------------------------------- - func TestSigil_Transmute_Good(t *testing.T) { data := []byte("round trip") @@ -395,16 +347,13 @@ func TestSigil_Transmute_GzipRoundTrip_Good(t *testing.T) { } func TestSigil_Transmute_Bad(t *testing.T) { - // Transmute with a sigil that will fail: hex decode on non-hex input. hexSigil := &HexSigil{} - // Calling Out (decode) with invalid input via manual chain. _, err := Untransmute([]byte("not-hex!!"), []Sigil{hexSigil}) assert.Error(t, err) } func TestSigil_Transmute_NilAndEmptyInput_Good(t *testing.T) { - // Empty sigil chain is a no-op. data := []byte("unchanged") result, err := Transmute(data, nil) @@ -415,7 +364,6 @@ func TestSigil_Transmute_NilAndEmptyInput_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, data, result) - // Nil data through a chain. 
hexSigil, _ := NewSigil("hex") result, err = Transmute(nil, []Sigil{hexSigil}) require.NoError(t, err) diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index dbe4f30..305b774 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -18,8 +18,6 @@ func newTestMedium(t *testing.T) *Medium { return m } -// --- Constructor Tests --- - func TestSqlite_New_Good(t *testing.T) { m, err := New(Options{Path: ":memory:"}) require.NoError(t, err) @@ -40,8 +38,6 @@ func TestSqlite_New_EmptyPath_Bad(t *testing.T) { assert.Contains(t, err.Error(), "database path is required") } -// --- Read/Write Tests --- - func TestSqlite_ReadWrite_Good(t *testing.T) { m := newTestMedium(t) @@ -104,8 +100,6 @@ func TestSqlite_Read_IsDirectory_Bad(t *testing.T) { assert.Error(t, err) } -// --- EnsureDir Tests --- - func TestSqlite_EnsureDir_Good(t *testing.T) { m := newTestMedium(t) @@ -128,8 +122,6 @@ func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) { assert.True(t, m.IsDir("mydir")) } -// --- IsFile Tests --- - func TestSqlite_IsFile_Good(t *testing.T) { m := newTestMedium(t) @@ -142,8 +134,6 @@ func TestSqlite_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -// --- FileGet/FileSet Tests --- - func TestSqlite_FileGetFileSet_Good(t *testing.T) { m := newTestMedium(t) @@ -155,8 +145,6 @@ func TestSqlite_FileGetFileSet_Good(t *testing.T) { assert.Equal(t, "value", val) } -// --- Delete Tests --- - func TestSqlite_Delete_Good(t *testing.T) { m := newTestMedium(t) @@ -203,8 +191,6 @@ func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) { assert.Error(t, err) } -// --- DeleteAll Tests --- - func TestSqlite_DeleteAll_Good(t *testing.T) { m := newTestMedium(t) @@ -244,8 +230,6 @@ func TestSqlite_DeleteAll_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -// --- Rename Tests --- - func TestSqlite_Rename_Good(t *testing.T) { m := newTestMedium(t) @@ -298,8 +282,6 @@ func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -// --- List Tests --- - 
func TestSqlite_List_Good(t *testing.T) { m := newTestMedium(t) @@ -356,8 +338,6 @@ func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { assert.True(t, info.IsDir()) } -// --- Stat Tests --- - func TestSqlite_Stat_Good(t *testing.T) { m := newTestMedium(t) @@ -395,8 +375,6 @@ func TestSqlite_Stat_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -// --- Open Tests --- - func TestSqlite_Open_Good(t *testing.T) { m := newTestMedium(t) @@ -430,8 +408,6 @@ func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { assert.Error(t, err) } -// --- Create Tests --- - func TestSqlite_Create_Good(t *testing.T) { m := newTestMedium(t) @@ -473,8 +449,6 @@ func TestSqlite_Create_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -// --- Append Tests --- - func TestSqlite_Append_Good(t *testing.T) { m := newTestMedium(t) @@ -514,8 +488,6 @@ func TestSqlite_Append_EmptyPath_Bad(t *testing.T) { assert.Error(t, err) } -// --- ReadStream Tests --- - func TestSqlite_ReadStream_Good(t *testing.T) { m := newTestMedium(t) @@ -545,8 +517,6 @@ func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) { assert.Error(t, err) } -// --- WriteStream Tests --- - func TestSqlite_WriteStream_Good(t *testing.T) { m := newTestMedium(t) @@ -562,8 +532,6 @@ func TestSqlite_WriteStream_Good(t *testing.T) { assert.Equal(t, "piped data", content) } -// --- Exists Tests --- - func TestSqlite_Exists_Good(t *testing.T) { m := newTestMedium(t) @@ -581,8 +549,6 @@ func TestSqlite_Exists_EmptyPath_Good(t *testing.T) { assert.True(t, m.Exists("")) } -// --- IsDir Tests --- - func TestSqlite_IsDir_Good(t *testing.T) { m := newTestMedium(t) @@ -595,8 +561,6 @@ func TestSqlite_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("")) } -// --- normaliseEntryPath Tests --- - func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { assert.Equal(t, "file.txt", normaliseEntryPath("file.txt")) assert.Equal(t, "dir/file.txt", normaliseEntryPath("dir/file.txt")) @@ -608,12 +572,9 @@ func 
TestSqlite_NormaliseEntryPath_Good(t *testing.T) { assert.Equal(t, "", normaliseEntryPath("/")) } -// --- Interface Compliance --- - func TestSqlite_InterfaceCompliance(t *testing.T) { m := newTestMedium(t) - // Verify all methods exist by asserting the interface shape. var _ interface { Read(string) (string, error) Write(string, string) error @@ -636,8 +597,6 @@ func TestSqlite_InterfaceCompliance(t *testing.T) { } = m } -// --- Custom Table --- - func TestSqlite_CustomTable_Good(t *testing.T) { m, err := New(Options{Path: ":memory:", Table: "my_files"}) require.NoError(t, err) diff --git a/store/medium_test.go b/store/medium_test.go index 786ad29..e5d9410 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -121,12 +121,10 @@ func TestMedium_Medium_Stat_Good(t *testing.T) { m := newTestMedium(t) _ = m.Write("grp/key", "hello") - // Stat group info, err := m.Stat("grp") require.NoError(t, err) assert.True(t, info.IsDir()) - // Stat key info, err = m.Stat("grp/key") require.NoError(t, err) assert.Equal(t, int64(5), info.Size()) @@ -190,7 +188,6 @@ func TestMedium_Medium_AsMedium_Good(t *testing.T) { m := s.AsMedium() require.NoError(t, m.Write("grp/key", "val")) - // Accessible through both APIs val, err := s.Get("grp", "key") require.NoError(t, err) assert.Equal(t, "val", val) diff --git a/workspace/service.go b/workspace/service.go index f094deb..4a873c4 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -193,7 +193,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) // Example: legacy := service.HandleWorkspaceMessage(core.New(), map[string]any{"action": WorkspaceCreateAction, "identifier": "alice", "password": "pass123"}) -func (service *Service) HandleWorkspaceMessage(coreRuntime *core.Core, message core.Message) core.Result { +func (service *Service) 
HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { command, ok := workspaceCommandFromMessage(message) if !ok { return core.Result{OK: true} From 619f731e5e8bf8b1dcc4a304dd4ed98b3a5b76e1 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:36:25 +0000 Subject: [PATCH 44/83] refactor(ax): align remaining semantic names Co-Authored-By: Virgil --- datanode/medium_test.go | 56 +++++++-------- local/medium_test.go | 60 ++++++++-------- medium_test.go | 58 +++++++-------- store/medium.go | 2 +- store/medium_test.go | 156 ++++++++++++++++++++-------------------- store/store.go | 26 +++---- store/store_test.go | 20 +++--- 7 files changed, 189 insertions(+), 189 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 8d8a5b5..b238e7d 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -13,7 +13,7 @@ import ( var _ coreio.Medium = (*Medium)(nil) -func TestClient_ReadWrite_Good(t *testing.T) { +func TestDataNode_ReadWrite_Good(t *testing.T) { m := New() err := m.Write("hello.txt", "world") @@ -24,7 +24,7 @@ func TestClient_ReadWrite_Good(t *testing.T) { assert.Equal(t, "world", got) } -func TestClient_ReadWrite_Bad(t *testing.T) { +func TestDataNode_ReadWrite_Bad(t *testing.T) { m := New() _, err := m.Read("missing.txt") @@ -34,7 +34,7 @@ func TestClient_ReadWrite_Bad(t *testing.T) { assert.Error(t, err) } -func TestClient_NestedPaths_Good(t *testing.T) { +func TestDataNode_NestedPaths_Good(t *testing.T) { m := New() require.NoError(t, m.Write("a/b/c/deep.txt", "deep")) @@ -48,7 +48,7 @@ func TestClient_NestedPaths_Good(t *testing.T) { assert.True(t, m.IsDir("a/b/c")) } -func TestClient_LeadingSlash_Good(t *testing.T) { +func TestDataNode_LeadingSlash_Good(t *testing.T) { m := New() require.NoError(t, m.Write("/leading/file.txt", "stripped")) @@ -61,7 +61,7 @@ func TestClient_LeadingSlash_Good(t *testing.T) { assert.Equal(t, "stripped", got) } -func TestClient_IsFile_Good(t *testing.T) { +func 
TestDataNode_IsFile_Good(t *testing.T) { m := New() require.NoError(t, m.Write("file.go", "package main")) @@ -71,7 +71,7 @@ func TestClient_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestClient_EnsureDir_Good(t *testing.T) { +func TestDataNode_EnsureDir_Good(t *testing.T) { m := New() require.NoError(t, m.EnsureDir("foo/bar/baz")) @@ -82,7 +82,7 @@ func TestClient_EnsureDir_Good(t *testing.T) { assert.True(t, m.Exists("foo/bar/baz")) } -func TestClient_Delete_Good(t *testing.T) { +func TestDataNode_Delete_Good(t *testing.T) { m := New() require.NoError(t, m.Write("delete-me.txt", "bye")) @@ -92,7 +92,7 @@ func TestClient_Delete_Good(t *testing.T) { assert.False(t, m.Exists("delete-me.txt")) } -func TestClient_Delete_Bad(t *testing.T) { +func TestDataNode_Delete_Bad(t *testing.T) { medium := New() assert.Error(t, medium.Delete("ghost.txt")) @@ -101,7 +101,7 @@ func TestClient_Delete_Bad(t *testing.T) { assert.Error(t, medium.Delete("dir")) } -func TestClient_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { +func TestDataNode_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("dir/file.txt", "content")) @@ -118,7 +118,7 @@ func TestClient_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { assert.Contains(t, err.Error(), "failed to inspect directory") } -func TestClient_DeleteAll_Good(t *testing.T) { +func TestDataNode_DeleteAll_Good(t *testing.T) { m := New() require.NoError(t, m.Write("tree/a.txt", "a")) @@ -132,7 +132,7 @@ func TestClient_DeleteAll_Good(t *testing.T) { assert.True(t, m.Exists("keep.txt")) } -func TestClient_DeleteAll_WalkFailure_Bad(t *testing.T) { +func TestDataNode_DeleteAll_WalkFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("tree/a.txt", "a")) @@ -149,7 +149,7 @@ func TestClient_DeleteAll_WalkFailure_Bad(t *testing.T) { assert.Contains(t, err.Error(), "failed to inspect tree") } -func TestClient_Delete_RemoveFailure_Bad(t *testing.T) { +func 
TestDataNode_Delete_RemoveFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("keep.txt", "keep")) require.NoError(t, m.Write("bad.txt", "bad")) @@ -167,7 +167,7 @@ func TestClient_Delete_RemoveFailure_Bad(t *testing.T) { assert.Contains(t, err.Error(), "failed to delete file") } -func TestClient_Rename_Good(t *testing.T) { +func TestDataNode_Rename_Good(t *testing.T) { m := New() require.NoError(t, m.Write("old.txt", "content")) @@ -179,7 +179,7 @@ func TestClient_Rename_Good(t *testing.T) { assert.Equal(t, "content", got) } -func TestClient_RenameDir_Good(t *testing.T) { +func TestDataNode_RenameDir_Good(t *testing.T) { m := New() require.NoError(t, m.Write("src/a.go", "package a")) @@ -198,7 +198,7 @@ func TestClient_RenameDir_Good(t *testing.T) { assert.Equal(t, "package b", got) } -func TestClient_RenameDir_ReadFailure_Bad(t *testing.T) { +func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("src/a.go", "package a")) @@ -215,7 +215,7 @@ func TestClient_RenameDir_ReadFailure_Bad(t *testing.T) { assert.Contains(t, err.Error(), "failed to read source file") } -func TestClient_List_Good(t *testing.T) { +func TestDataNode_List_Good(t *testing.T) { m := New() require.NoError(t, m.Write("root.txt", "r")) @@ -244,7 +244,7 @@ func TestClient_List_Good(t *testing.T) { assert.Contains(t, names, "sub") } -func TestClient_Stat_Good(t *testing.T) { +func TestDataNode_Stat_Good(t *testing.T) { m := New() require.NoError(t, m.Write("stat.txt", "hello")) @@ -259,7 +259,7 @@ func TestClient_Stat_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestClient_Open_Good(t *testing.T) { +func TestDataNode_Open_Good(t *testing.T) { m := New() require.NoError(t, m.Write("open.txt", "opened")) @@ -273,7 +273,7 @@ func TestClient_Open_Good(t *testing.T) { assert.Equal(t, "opened", string(data)) } -func TestClient_CreateAppend_Good(t *testing.T) { +func TestDataNode_CreateAppend_Good(t *testing.T) { m := New() w, err := 
m.Create("new.txt") @@ -295,7 +295,7 @@ func TestClient_CreateAppend_Good(t *testing.T) { assert.Equal(t, "hello world", got) } -func TestClient_Append_ReadFailure_Bad(t *testing.T) { +func TestDataNode_Append_ReadFailure_Bad(t *testing.T) { m := New() require.NoError(t, m.Write("new.txt", "hello")) @@ -312,7 +312,7 @@ func TestClient_Append_ReadFailure_Bad(t *testing.T) { assert.Contains(t, err.Error(), "failed to read existing content") } -func TestClient_Streams_Good(t *testing.T) { +func TestDataNode_Streams_Good(t *testing.T) { m := New() ws, err := m.WriteStream("stream.txt") @@ -328,7 +328,7 @@ func TestClient_Streams_Good(t *testing.T) { rs.Close() } -func TestClient_FileGetFileSet_Good(t *testing.T) { +func TestDataNode_FileGetFileSet_Good(t *testing.T) { m := New() require.NoError(t, m.FileSet("alias.txt", "via set")) @@ -338,7 +338,7 @@ func TestClient_FileGetFileSet_Good(t *testing.T) { assert.Equal(t, "via set", got) } -func TestClient_SnapshotRestore_Good(t *testing.T) { +func TestDataNode_SnapshotRestore_Good(t *testing.T) { m := New() require.NoError(t, m.Write("a.txt", "alpha")) @@ -360,7 +360,7 @@ func TestClient_SnapshotRestore_Good(t *testing.T) { assert.Equal(t, "charlie", got) } -func TestClient_Restore_Good(t *testing.T) { +func TestDataNode_Restore_Good(t *testing.T) { m := New() require.NoError(t, m.Write("original.txt", "before")) @@ -380,7 +380,7 @@ func TestClient_Restore_Good(t *testing.T) { assert.False(t, m.Exists("extra.txt")) } -func TestClient_DataNode_Good(t *testing.T) { +func TestDataNode_DataNode_Good(t *testing.T) { m := New() require.NoError(t, m.Write("test.txt", "borg")) @@ -397,7 +397,7 @@ func TestClient_DataNode_Good(t *testing.T) { assert.Equal(t, "borg", string(data)) } -func TestClient_Overwrite_Good(t *testing.T) { +func TestDataNode_Overwrite_Good(t *testing.T) { m := New() require.NoError(t, m.Write("file.txt", "v1")) @@ -408,7 +408,7 @@ func TestClient_Overwrite_Good(t *testing.T) { assert.Equal(t, "v2", got) } 
-func TestClient_Exists_Good(t *testing.T) { +func TestDataNode_Exists_Good(t *testing.T) { m := New() assert.True(t, m.Exists("")) @@ -418,7 +418,7 @@ func TestClient_Exists_Good(t *testing.T) { assert.True(t, m.Exists("x")) } -func TestClient_ReadExistingFile_Good(t *testing.T) { +func TestDataNode_ReadExistingFile_Good(t *testing.T) { m := New() require.NoError(t, m.Write("file.txt", "content")) diff --git a/local/medium_test.go b/local/medium_test.go index 9177576..53ddf4d 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestClient_New_ResolvesRoot_Good(t *testing.T) { +func TestLocal_New_ResolvesRoot_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) @@ -20,7 +20,7 @@ func TestClient_New_ResolvesRoot_Good(t *testing.T) { assert.Equal(t, resolved, m.filesystemRoot) } -func TestClient_Path_Sandboxed_Good(t *testing.T) { +func TestLocal_Path_Sandboxed_Good(t *testing.T) { m := &Medium{filesystemRoot: "/home/user"} assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("file.txt")) @@ -34,7 +34,7 @@ func TestClient_Path_Sandboxed_Good(t *testing.T) { assert.Equal(t, "/home/user/etc/passwd", m.sandboxedPath("/etc/passwd")) } -func TestClient_Path_RootFilesystem_Good(t *testing.T) { +func TestLocal_Path_RootFilesystem_Good(t *testing.T) { m := &Medium{filesystemRoot: "/"} assert.Equal(t, "/etc/passwd", m.sandboxedPath("/etc/passwd")) @@ -44,7 +44,7 @@ func TestClient_Path_RootFilesystem_Good(t *testing.T) { assert.Equal(t, core.Path(cwd, "file.txt"), m.sandboxedPath("file.txt")) } -func TestClient_ReadWrite_Basic_Good(t *testing.T) { +func TestLocal_ReadWrite_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -66,7 +66,7 @@ func TestClient_ReadWrite_Basic_Good(t *testing.T) { assert.Error(t, err) } -func TestClient_EnsureDir_Basic_Good(t *testing.T) { +func TestLocal_EnsureDir_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := 
New(root) @@ -78,7 +78,7 @@ func TestClient_EnsureDir_Basic_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestClient_IsDir_Basic_Good(t *testing.T) { +func TestLocal_IsDir_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -91,7 +91,7 @@ func TestClient_IsDir_Basic_Good(t *testing.T) { assert.False(t, m.IsDir("")) } -func TestClient_IsFile_Basic_Good(t *testing.T) { +func TestLocal_IsFile_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -104,7 +104,7 @@ func TestClient_IsFile_Basic_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestClient_Exists_Basic_Good(t *testing.T) { +func TestLocal_Exists_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -114,7 +114,7 @@ func TestClient_Exists_Basic_Good(t *testing.T) { assert.False(t, m.Exists("nope")) } -func TestClient_List_Basic_Good(t *testing.T) { +func TestLocal_List_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -127,7 +127,7 @@ func TestClient_List_Basic_Good(t *testing.T) { assert.Len(t, entries, 3) } -func TestClient_Stat_Basic_Good(t *testing.T) { +func TestLocal_Stat_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -138,7 +138,7 @@ func TestClient_Stat_Basic_Good(t *testing.T) { assert.Equal(t, int64(7), info.Size()) } -func TestClient_Delete_Basic_Good(t *testing.T) { +func TestLocal_Delete_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -150,7 +150,7 @@ func TestClient_Delete_Basic_Good(t *testing.T) { assert.False(t, m.Exists("todelete")) } -func TestClient_DeleteAll_Basic_Good(t *testing.T) { +func TestLocal_DeleteAll_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -161,7 +161,7 @@ func TestClient_DeleteAll_Basic_Good(t *testing.T) { assert.False(t, m.Exists("dir")) } -func TestClient_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { +func TestLocal_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { realHome := t.TempDir() linkParent := 
t.TempDir() homeLink := core.Path(linkParent, "home-link") @@ -176,7 +176,7 @@ func TestClient_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { assert.DirExists(t, realHome) } -func TestClient_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) { +func TestLocal_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) @@ -188,7 +188,7 @@ func TestClient_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) { assert.DirExists(t, tempHome) } -func TestClient_Rename_Basic_Good(t *testing.T) { +func TestLocal_Rename_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -200,7 +200,7 @@ func TestClient_Rename_Basic_Good(t *testing.T) { assert.True(t, m.Exists("new")) } -func TestClient_FileGetFileSet_Basic_Good(t *testing.T) { +func TestLocal_FileGetFileSet_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -212,7 +212,7 @@ func TestClient_FileGetFileSet_Basic_Good(t *testing.T) { assert.Equal(t, "value", val) } -func TestClient_Delete_Good(t *testing.T) { +func TestLocal_Delete_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -233,7 +233,7 @@ func TestClient_Delete_Good(t *testing.T) { assert.False(t, medium.IsDir("emptydir")) } -func TestClient_Delete_NotEmpty_Bad(t *testing.T) { +func TestLocal_Delete_NotEmpty_Bad(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -246,7 +246,7 @@ func TestClient_Delete_NotEmpty_Bad(t *testing.T) { assert.Error(t, err) } -func TestClient_DeleteAll_Good(t *testing.T) { +func TestLocal_DeleteAll_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -264,7 +264,7 @@ func TestClient_DeleteAll_Good(t *testing.T) { assert.False(t, medium.Exists("mydir/subdir/file2.txt")) } -func TestClient_Rename_Good(t *testing.T) { +func TestLocal_Rename_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -282,7 +282,7 @@ func TestClient_Rename_Good(t *testing.T) { assert.Equal(t, 
"content", content) } -func TestClient_Rename_TraversalSanitised_Good(t *testing.T) { +func TestLocal_Rename_TraversalSanitised_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -297,7 +297,7 @@ func TestClient_Rename_TraversalSanitised_Good(t *testing.T) { assert.True(t, medium.Exists("escaped.txt")) } -func TestClient_List_Good(t *testing.T) { +func TestLocal_List_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -323,7 +323,7 @@ func TestClient_List_Good(t *testing.T) { assert.True(t, names["subdir"]) } -func TestClient_Stat_Good(t *testing.T) { +func TestLocal_Stat_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -345,7 +345,7 @@ func TestClient_Stat_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestClient_Exists_Good(t *testing.T) { +func TestLocal_Exists_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -362,7 +362,7 @@ func TestClient_Exists_Good(t *testing.T) { assert.True(t, medium.Exists("mydir")) } -func TestClient_IsDir_Good(t *testing.T) { +func TestLocal_IsDir_Good(t *testing.T) { testRoot := t.TempDir() medium, err := New(testRoot) @@ -379,7 +379,7 @@ func TestClient_IsDir_Good(t *testing.T) { assert.False(t, medium.IsDir("nonexistent")) } -func TestClient_ReadStream_Basic_Good(t *testing.T) { +func TestLocal_ReadStream_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -397,7 +397,7 @@ func TestClient_ReadStream_Basic_Good(t *testing.T) { assert.Equal(t, "streaming", string(data)) } -func TestClient_WriteStream_Basic_Good(t *testing.T) { +func TestLocal_WriteStream_Basic_Good(t *testing.T) { root := t.TempDir() m, _ := New(root) @@ -414,7 +414,7 @@ func TestClient_WriteStream_Basic_Good(t *testing.T) { assert.Equal(t, "piped data", content) } -func TestClient_Path_TraversalSandbox_Good(t *testing.T) { +func TestLocal_Path_TraversalSandbox_Good(t *testing.T) { m := &Medium{filesystemRoot: "/sandbox"} 
assert.Equal(t, "/sandbox/file.txt", m.sandboxedPath("../../../file.txt")) @@ -426,7 +426,7 @@ func TestClient_Path_TraversalSandbox_Good(t *testing.T) { assert.Equal(t, "/sandbox/file\x00.txt", m.sandboxedPath("file\x00.txt")) } -func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { +func TestLocal_ValidatePath_SymlinkEscape_Bad(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) @@ -461,7 +461,7 @@ func TestClient_ValidatePath_SymlinkEscape_Bad(t *testing.T) { assert.ErrorIs(t, err, fs.ErrPermission) } -func TestClient_EmptyPaths_Good(t *testing.T) { +func TestLocal_EmptyPaths_Good(t *testing.T) { root := t.TempDir() m, err := New(root) assert.NoError(t, err) diff --git a/medium_test.go b/medium_test.go index 9c36152..bb57ed0 100644 --- a/medium_test.go +++ b/medium_test.go @@ -44,7 +44,7 @@ func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { assert.Equal(t, int64(8), entryInfo.Size()) } -func TestClient_MockMedium_Read_Good(t *testing.T) { +func TestMockMedium_Read_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "hello world" content, err := m.Read("test.txt") @@ -52,13 +52,13 @@ func TestClient_MockMedium_Read_Good(t *testing.T) { assert.Equal(t, "hello world", content) } -func TestClient_MockMedium_Read_Bad(t *testing.T) { +func TestMockMedium_Read_Bad(t *testing.T) { m := NewMockMedium() _, err := m.Read("nonexistent.txt") assert.Error(t, err) } -func TestClient_MockMedium_Write_Good(t *testing.T) { +func TestMockMedium_Write_Good(t *testing.T) { m := NewMockMedium() err := m.Write("test.txt", "content") assert.NoError(t, err) @@ -69,7 +69,7 @@ func TestClient_MockMedium_Write_Good(t *testing.T) { assert.Equal(t, "new content", m.files["test.txt"]) } -func TestClient_MockMedium_WriteMode_Good(t *testing.T) { +func TestMockMedium_WriteMode_Good(t *testing.T) { m := NewMockMedium() err := m.WriteMode("secure.txt", "secret", 0600) @@ -80,14 +80,14 @@ func TestClient_MockMedium_WriteMode_Good(t *testing.T) { 
assert.Equal(t, "secret", content) } -func TestClient_MockMedium_EnsureDir_Good(t *testing.T) { +func TestMockMedium_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := m.EnsureDir("/path/to/dir") assert.NoError(t, err) assert.True(t, m.dirs["/path/to/dir"]) } -func TestClient_MockMedium_IsFile_Good(t *testing.T) { +func TestMockMedium_IsFile_Good(t *testing.T) { m := NewMockMedium() m.files["exists.txt"] = "content" @@ -95,7 +95,7 @@ func TestClient_MockMedium_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("nonexistent.txt")) } -func TestClient_MockMedium_FileGet_Good(t *testing.T) { +func TestMockMedium_FileGet_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "content" content, err := m.FileGet("test.txt") @@ -103,14 +103,14 @@ func TestClient_MockMedium_FileGet_Good(t *testing.T) { assert.Equal(t, "content", content) } -func TestClient_MockMedium_FileSet_Good(t *testing.T) { +func TestMockMedium_FileSet_Good(t *testing.T) { m := NewMockMedium() err := m.FileSet("test.txt", "content") assert.NoError(t, err) assert.Equal(t, "content", m.files["test.txt"]) } -func TestClient_MockMedium_Delete_Good(t *testing.T) { +func TestMockMedium_Delete_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "content" @@ -119,13 +119,13 @@ func TestClient_MockMedium_Delete_Good(t *testing.T) { assert.False(t, m.IsFile("test.txt")) } -func TestClient_MockMedium_Delete_NotFound_Bad(t *testing.T) { +func TestMockMedium_Delete_NotFound_Bad(t *testing.T) { m := NewMockMedium() err := m.Delete("nonexistent.txt") assert.Error(t, err) } -func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { +func TestMockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { m := NewMockMedium() m.dirs["mydir"] = true m.files["mydir/file.txt"] = "content" @@ -134,7 +134,7 @@ func TestClient_MockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { assert.Error(t, err) } -func TestClient_MockMedium_DeleteAll_Good(t *testing.T) { +func TestMockMedium_DeleteAll_Good(t 
*testing.T) { m := NewMockMedium() m.dirs["mydir"] = true m.dirs["mydir/subdir"] = true @@ -147,7 +147,7 @@ func TestClient_MockMedium_DeleteAll_Good(t *testing.T) { assert.Empty(t, m.files) } -func TestClient_MockMedium_Rename_Good(t *testing.T) { +func TestMockMedium_Rename_Good(t *testing.T) { m := NewMockMedium() m.files["old.txt"] = "content" @@ -158,7 +158,7 @@ func TestClient_MockMedium_Rename_Good(t *testing.T) { assert.Equal(t, "content", m.files["new.txt"]) } -func TestClient_MockMedium_Rename_Dir_Good(t *testing.T) { +func TestMockMedium_Rename_Dir_Good(t *testing.T) { m := NewMockMedium() m.dirs["olddir"] = true m.files["olddir/file.txt"] = "content" @@ -170,7 +170,7 @@ func TestClient_MockMedium_Rename_Dir_Good(t *testing.T) { assert.Equal(t, "content", m.files["newdir/file.txt"]) } -func TestClient_MockMedium_List_Good(t *testing.T) { +func TestMockMedium_List_Good(t *testing.T) { m := NewMockMedium() m.dirs["mydir"] = true m.files["mydir/file1.txt"] = "content1" @@ -190,7 +190,7 @@ func TestClient_MockMedium_List_Good(t *testing.T) { assert.True(t, names["subdir"]) } -func TestClient_MockMedium_Stat_Good(t *testing.T) { +func TestMockMedium_Stat_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "hello world" @@ -201,7 +201,7 @@ func TestClient_MockMedium_Stat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) { +func TestMockMedium_Stat_Dir_Good(t *testing.T) { m := NewMockMedium() m.dirs["mydir"] = true @@ -211,7 +211,7 @@ func TestClient_MockMedium_Stat_Dir_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestClient_MockMedium_Exists_Good(t *testing.T) { +func TestMockMedium_Exists_Good(t *testing.T) { m := NewMockMedium() m.files["file.txt"] = "content" m.dirs["mydir"] = true @@ -221,7 +221,7 @@ func TestClient_MockMedium_Exists_Good(t *testing.T) { assert.False(t, m.Exists("nonexistent")) } -func TestClient_MockMedium_IsDir_Good(t *testing.T) { +func 
TestMockMedium_IsDir_Good(t *testing.T) { m := NewMockMedium() m.files["file.txt"] = "content" m.dirs["mydir"] = true @@ -231,7 +231,7 @@ func TestClient_MockMedium_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("nonexistent")) } -func TestClient_MockMedium_StreamAndFSHelpers_Good(t *testing.T) { +func TestMockMedium_StreamAndFSHelpers_Good(t *testing.T) { m := NewMockMedium() require.NoError(t, m.EnsureDir("dir")) require.NoError(t, m.Write("dir/file.txt", "alpha")) @@ -293,7 +293,7 @@ func TestClient_MockMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, "stream output", m.files["streamed.txt"]) } -func TestClient_Read_Good(t *testing.T) { +func TestIO_Read_Good(t *testing.T) { m := NewMockMedium() m.files["test.txt"] = "hello" content, err := Read(m, "test.txt") @@ -301,21 +301,21 @@ func TestClient_Read_Good(t *testing.T) { assert.Equal(t, "hello", content) } -func TestClient_Write_Good(t *testing.T) { +func TestIO_Write_Good(t *testing.T) { m := NewMockMedium() err := Write(m, "test.txt", "hello") assert.NoError(t, err) assert.Equal(t, "hello", m.files["test.txt"]) } -func TestClient_EnsureDir_Good(t *testing.T) { +func TestIO_EnsureDir_Good(t *testing.T) { m := NewMockMedium() err := EnsureDir(m, "/my/dir") assert.NoError(t, err) assert.True(t, m.dirs["/my/dir"]) } -func TestClient_IsFile_Good(t *testing.T) { +func TestIO_IsFile_Good(t *testing.T) { m := NewMockMedium() m.files["exists.txt"] = "content" @@ -323,7 +323,7 @@ func TestClient_IsFile_Good(t *testing.T) { assert.False(t, IsFile(m, "nonexistent.txt")) } -func TestClient_NewSandboxed_Good(t *testing.T) { +func TestIO_NewSandboxed_Good(t *testing.T) { root := t.TempDir() m, err := NewSandboxed(root) @@ -337,7 +337,7 @@ func TestClient_NewSandboxed_Good(t *testing.T) { assert.True(t, m.IsDir("config")) } -func TestClient_ReadWriteStream_Good(t *testing.T) { +func TestIO_ReadWriteStream_Good(t *testing.T) { m := NewMockMedium() writer, err := WriteStream(m, "logs/run.txt") @@ -354,7 
+354,7 @@ func TestClient_ReadWriteStream_Good(t *testing.T) { require.NoError(t, reader.Close()) } -func TestClient_Copy_Good(t *testing.T) { +func TestIO_Copy_Good(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() source.files["test.txt"] = "hello" @@ -368,14 +368,14 @@ func TestClient_Copy_Good(t *testing.T) { assert.Equal(t, "content", dest.files["copied.txt"]) } -func TestClient_Copy_Bad(t *testing.T) { +func TestIO_Copy_Bad(t *testing.T) { source := NewMockMedium() dest := NewMockMedium() err := Copy(source, "nonexistent.txt", dest, "dest.txt") assert.Error(t, err) } -func TestClient_LocalGlobal_Good(t *testing.T) { +func TestIO_LocalGlobal_Good(t *testing.T) { assert.NotNil(t, Local, "io.Local should be initialised") var m = Local diff --git a/store/medium.go b/store/medium.go index 96c2384..5d492be 100644 --- a/store/medium.go +++ b/store/medium.go @@ -151,7 +151,7 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { - rows, err := medium.store.database.Query("SELECT DISTINCT grp FROM kv ORDER BY grp") + rows, err := medium.store.database.Query("SELECT DISTINCT group_name FROM entries ORDER BY group_name") if err != nil { return nil, core.E("store.List", "query groups", err) } diff --git a/store/medium_test.go b/store/medium_test.go index e5d9410..a82fac0 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func newTestMedium(t *testing.T) *Medium { +func newTestKeyValueMedium(t *testing.T) *Medium { t.Helper() m, err := NewMedium(Options{Path: ":memory:"}) require.NoError(t, err) @@ -17,8 +17,8 @@ func newTestMedium(t *testing.T) *Medium { return m } -func TestMedium_Medium_ReadWrite_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { + m := newTestKeyValueMedium(t) err := m.Write("config/theme", "dark") require.NoError(t, err) @@ -28,56 
+28,56 @@ func TestMedium_Medium_ReadWrite_Good(t *testing.T) { assert.Equal(t, "dark", val) } -func TestMedium_Medium_Read_NoKey_Bad(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_Read_NoKey_Bad(t *testing.T) { + m := newTestKeyValueMedium(t) _, err := m.Read("config") assert.Error(t, err) } -func TestMedium_Medium_Read_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_Read_NotFound_Bad(t *testing.T) { + m := newTestKeyValueMedium(t) _, err := m.Read("config/missing") assert.Error(t, err) } -func TestMedium_Medium_IsFile_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "val") +func TestKeyValueMedium_IsFile_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "val") - assert.True(t, m.IsFile("grp/key")) - assert.False(t, m.IsFile("grp/nope")) - assert.False(t, m.IsFile("grp")) + assert.True(t, m.IsFile("group/key")) + assert.False(t, m.IsFile("group/nope")) + assert.False(t, m.IsFile("group")) } -func TestMedium_Medium_Delete_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "val") +func TestKeyValueMedium_Delete_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "val") - err := m.Delete("grp/key") + err := m.Delete("group/key") require.NoError(t, err) - assert.False(t, m.IsFile("grp/key")) + assert.False(t, m.IsFile("group/key")) } -func TestMedium_Medium_Delete_NonEmptyGroup_Bad(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "val") +func TestKeyValueMedium_Delete_NonEmptyGroup_Bad(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "val") - err := m.Delete("grp") + err := m.Delete("group") assert.Error(t, err) } -func TestMedium_Medium_DeleteAll_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/a", "1") - _ = m.Write("grp/b", "2") +func TestKeyValueMedium_DeleteAll_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/a", "1") + _ = 
m.Write("group/b", "2") - err := m.DeleteAll("grp") + err := m.DeleteAll("group") require.NoError(t, err) - assert.False(t, m.Exists("grp")) + assert.False(t, m.Exists("group")) } -func TestMedium_Medium_Rename_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_Rename_Good(t *testing.T) { + m := newTestKeyValueMedium(t) _ = m.Write("old/key", "val") err := m.Rename("old/key", "new/key") @@ -89,8 +89,8 @@ func TestMedium_Medium_Rename_Good(t *testing.T) { assert.False(t, m.IsFile("old/key")) } -func TestMedium_Medium_List_Groups_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_List_Groups_Good(t *testing.T) { + m := newTestKeyValueMedium(t) _ = m.Write("alpha/a", "1") _ = m.Write("beta/b", "2") @@ -107,46 +107,46 @@ func TestMedium_Medium_List_Groups_Good(t *testing.T) { assert.True(t, names["beta"]) } -func TestMedium_Medium_List_Keys_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/a", "1") - _ = m.Write("grp/b", "22") +func TestKeyValueMedium_List_Keys_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/a", "1") + _ = m.Write("group/b", "22") - entries, err := m.List("grp") + entries, err := m.List("group") require.NoError(t, err) assert.Len(t, entries, 2) } -func TestMedium_Medium_Stat_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "hello") +func TestKeyValueMedium_Stat_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "hello") - info, err := m.Stat("grp") + info, err := m.Stat("group") require.NoError(t, err) assert.True(t, info.IsDir()) - info, err = m.Stat("grp/key") + info, err = m.Stat("group/key") require.NoError(t, err) assert.Equal(t, int64(5), info.Size()) assert.False(t, info.IsDir()) } -func TestMedium_Medium_Exists_IsDir_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "val") +func TestKeyValueMedium_Exists_IsDir_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "val") - 
assert.True(t, m.Exists("grp")) - assert.True(t, m.Exists("grp/key")) - assert.True(t, m.IsDir("grp")) - assert.False(t, m.IsDir("grp/key")) + assert.True(t, m.Exists("group")) + assert.True(t, m.Exists("group/key")) + assert.True(t, m.IsDir("group")) + assert.False(t, m.IsDir("group/key")) assert.False(t, m.Exists("nope")) } -func TestMedium_Medium_Open_Read_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "hello world") +func TestKeyValueMedium_Open_Read_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "hello world") - f, err := m.Open("grp/key") + f, err := m.Open("group/key") require.NoError(t, err) defer f.Close() @@ -155,83 +155,83 @@ func TestMedium_Medium_Open_Read_Good(t *testing.T) { assert.Equal(t, "hello world", string(data)) } -func TestMedium_Medium_CreateClose_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_CreateClose_Good(t *testing.T) { + m := newTestKeyValueMedium(t) - w, err := m.Create("grp/key") + w, err := m.Create("group/key") require.NoError(t, err) _, _ = w.Write([]byte("streamed")) require.NoError(t, w.Close()) - val, err := m.Read("grp/key") + val, err := m.Read("group/key") require.NoError(t, err) assert.Equal(t, "streamed", val) } -func TestMedium_Medium_Append_Good(t *testing.T) { - m := newTestMedium(t) - _ = m.Write("grp/key", "hello") +func TestKeyValueMedium_Append_Good(t *testing.T) { + m := newTestKeyValueMedium(t) + _ = m.Write("group/key", "hello") - w, err := m.Append("grp/key") + w, err := m.Append("group/key") require.NoError(t, err) _, _ = w.Write([]byte(" world")) require.NoError(t, w.Close()) - val, err := m.Read("grp/key") + val, err := m.Read("group/key") require.NoError(t, err) assert.Equal(t, "hello world", val) } -func TestMedium_Medium_AsMedium_Good(t *testing.T) { +func TestKeyValueMedium_AsMedium_Good(t *testing.T) { s := newTestStore(t) m := s.AsMedium() - require.NoError(t, m.Write("grp/key", "val")) + require.NoError(t, 
m.Write("group/key", "val")) - val, err := s.Get("grp", "key") + val, err := s.Get("group", "key") require.NoError(t, err) assert.Equal(t, "val", val) - val, err = m.Read("grp/key") + val, err = m.Read("group/key") require.NoError(t, err) assert.Equal(t, "val", val) } -func TestMedium_Medium_Store_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_Store_Good(t *testing.T) { + m := newTestKeyValueMedium(t) assert.NotNil(t, m.Store()) assert.Same(t, m.Store(), m.Store()) } -func TestMedium_Medium_EnsureDir_FileHelpers_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_EnsureDir_FileHelpers_Good(t *testing.T) { + m := newTestKeyValueMedium(t) require.NoError(t, m.EnsureDir("ignored")) - require.NoError(t, m.FileSet("grp/key", "value")) + require.NoError(t, m.FileSet("group/key", "value")) - value, err := m.FileGet("grp/key") + value, err := m.FileGet("group/key") require.NoError(t, err) assert.Equal(t, "value", value) } -func TestMedium_Medium_StreamHelpers_Good(t *testing.T) { - m := newTestMedium(t) +func TestKeyValueMedium_StreamHelpers_Good(t *testing.T) { + m := newTestKeyValueMedium(t) - writer, err := m.WriteStream("grp/key") + writer, err := m.WriteStream("group/key") require.NoError(t, err) _, err = writer.Write([]byte("streamed")) require.NoError(t, err) require.NoError(t, writer.Close()) - reader, err := m.ReadStream("grp/key") + reader, err := m.ReadStream("group/key") require.NoError(t, err) data, err := io.ReadAll(reader) require.NoError(t, err) assert.Equal(t, "streamed", string(data)) require.NoError(t, reader.Close()) - file, err := m.Open("grp/key") + file, err := m.Open("group/key") require.NoError(t, err) info, err := file.Stat() require.NoError(t, err) @@ -243,7 +243,7 @@ func TestMedium_Medium_StreamHelpers_Good(t *testing.T) { assert.Nil(t, info.Sys()) require.NoError(t, file.Close()) - entries, err := m.List("grp") + entries, err := m.List("group") require.NoError(t, err) require.Len(t, entries, 1) 
assert.Equal(t, "key", entries[0].Name()) diff --git a/store/store.go b/store/store.go index 2f15491..65b36a2 100644 --- a/store/store.go +++ b/store/store.go @@ -37,11 +37,11 @@ func New(options Options) (*Store, error) { database.Close() return nil, core.E("store.New", "WAL mode", err) } - if _, err := database.Exec(`CREATE TABLE IF NOT EXISTS kv ( - grp TEXT NOT NULL, - key TEXT NOT NULL, - value TEXT NOT NULL, - PRIMARY KEY (grp, key) + if _, err := database.Exec(`CREATE TABLE IF NOT EXISTS entries ( + group_name TEXT NOT NULL, + entry_key TEXT NOT NULL, + entry_value TEXT NOT NULL, + PRIMARY KEY (group_name, entry_key) )`); err != nil { database.Close() return nil, core.E("store.New", "create schema", err) @@ -57,7 +57,7 @@ func (store *Store) Close() error { // Example: theme, _ := keyValueStore.Get("app", "theme") func (store *Store) Get(group, key string) (string, error) { var value string - err := store.database.QueryRow("SELECT value FROM kv WHERE grp = ? AND key = ?", group, key).Scan(&value) + err := store.database.QueryRow("SELECT entry_value FROM entries WHERE group_name = ? AND entry_key = ?", group, key).Scan(&value) if err == sql.ErrNoRows { return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), NotFoundError) } @@ -70,8 +70,8 @@ func (store *Store) Get(group, key string) (string, error) { // Example: _ = keyValueStore.Set("app", "theme", "midnight") func (store *Store) Set(group, key, value string) error { _, err := store.database.Exec( - `INSERT INTO kv (grp, key, value) VALUES (?, ?, ?) - ON CONFLICT(grp, key) DO UPDATE SET value = excluded.value`, + `INSERT INTO entries (group_name, entry_key, entry_value) VALUES (?, ?, ?) 
+ ON CONFLICT(group_name, entry_key) DO UPDATE SET entry_value = excluded.entry_value`, group, key, value, ) if err != nil { @@ -82,7 +82,7 @@ func (store *Store) Set(group, key, value string) error { // Example: _ = keyValueStore.Delete("app", "theme") func (store *Store) Delete(group, key string) error { - _, err := store.database.Exec("DELETE FROM kv WHERE grp = ? AND key = ?", group, key) + _, err := store.database.Exec("DELETE FROM entries WHERE group_name = ? AND entry_key = ?", group, key) if err != nil { return core.E("store.Delete", "exec", err) } @@ -92,7 +92,7 @@ func (store *Store) Delete(group, key string) error { // Example: count, _ := keyValueStore.Count("app") func (store *Store) Count(group string) (int, error) { var count int - err := store.database.QueryRow("SELECT COUNT(*) FROM kv WHERE grp = ?", group).Scan(&count) + err := store.database.QueryRow("SELECT COUNT(*) FROM entries WHERE group_name = ?", group).Scan(&count) if err != nil { return 0, core.E("store.Count", "query", err) } @@ -101,7 +101,7 @@ func (store *Store) Count(group string) (int, error) { // Example: _ = keyValueStore.DeleteGroup("app") func (store *Store) DeleteGroup(group string) error { - _, err := store.database.Exec("DELETE FROM kv WHERE grp = ?", group) + _, err := store.database.Exec("DELETE FROM entries WHERE group_name = ?", group) if err != nil { return core.E("store.DeleteGroup", "exec", err) } @@ -110,7 +110,7 @@ func (store *Store) DeleteGroup(group string) error { // Example: values, _ := keyValueStore.GetAll("app") func (store *Store) GetAll(group string) (map[string]string, error) { - rows, err := store.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) + rows, err := store.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) if err != nil { return nil, core.E("store.GetAll", "query", err) } @@ -134,7 +134,7 @@ func (store *Store) GetAll(group string) (map[string]string, error) { // Example: _ = 
keyValueStore.Set("user", "name", "alice") // Example: out, _ := keyValueStore.Render("hello {{ .name }}", "user") func (store *Store) Render(templateText, group string) (string, error) { - rows, err := store.database.Query("SELECT key, value FROM kv WHERE grp = ?", group) + rows, err := store.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) if err != nil { return "", core.E("store.Render", "query", err) } diff --git a/store/store_test.go b/store/store_test.go index f30af61..74df399 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -60,11 +60,11 @@ func TestStore_Delete_Good(t *testing.T) { func TestStore_Count_Good(t *testing.T) { s := newTestStore(t) - _ = s.Set("grp", "a", "1") - _ = s.Set("grp", "b", "2") + _ = s.Set("group", "a", "1") + _ = s.Set("group", "b", "2") _ = s.Set("other", "c", "3") - n, err := s.Count("grp") + n, err := s.Count("group") require.NoError(t, err) assert.Equal(t, 2, n) } @@ -72,23 +72,23 @@ func TestStore_Count_Good(t *testing.T) { func TestStore_DeleteGroup_Good(t *testing.T) { s := newTestStore(t) - _ = s.Set("grp", "a", "1") - _ = s.Set("grp", "b", "2") - err := s.DeleteGroup("grp") + _ = s.Set("group", "a", "1") + _ = s.Set("group", "b", "2") + err := s.DeleteGroup("group") require.NoError(t, err) - n, _ := s.Count("grp") + n, _ := s.Count("group") assert.Equal(t, 0, n) } func TestStore_GetAll_Good(t *testing.T) { s := newTestStore(t) - _ = s.Set("grp", "a", "1") - _ = s.Set("grp", "b", "2") + _ = s.Set("group", "a", "1") + _ = s.Set("group", "b", "2") _ = s.Set("other", "c", "3") - all, err := s.GetAll("grp") + all, err := s.GetAll("group") require.NoError(t, err) assert.Equal(t, map[string]string{"a": "1", "b": "2"}, all) } From 9f0e155d6296a2d2480f6ee7ddf67222267f741f Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:42:12 +0000 Subject: [PATCH 45/83] refactor(ax): rename workspace provider surface Co-Authored-By: Virgil --- io.go | 2 +- workspace/doc.go | 2 +- 
workspace/service.go | 39 +++++++++++++++++++++++---------------- workspace/service_test.go | 8 ++++---- 4 files changed, 29 insertions(+), 22 deletions(-) diff --git a/io.go b/io.go index f10901b..8106c23 100644 --- a/io.go +++ b/io.go @@ -128,7 +128,7 @@ func init() { var err error Local, err = local.New("/") if err != nil { - core.Warn("io: failed to initialise Local medium, io.Local will be nil", "error", err) + core.Warn("io.Local init failed", "error", err) } } diff --git a/workspace/doc.go b/workspace/doc.go index a817e1c..8fc8f99 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,4 +1,4 @@ -// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") // Example: _ = service.SwitchWorkspace(workspaceID) // Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") diff --git a/workspace/service.go b/workspace/service.go index 4a873c4..3a1d3cb 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,7 @@ import ( "dappco.re/go/core/io" ) -// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -19,11 +19,13 @@ type Workspace interface { WorkspaceFileSet(workspaceFilePath, content string) error } -// Example: key, _ := cryptProvider.CreateKeyPair("alice", "pass123") -type CryptProvider interface { +// Example: key, _ := keyPairProvider.CreateKeyPair("alice", "pass123") +type KeyPairProvider interface { CreateKeyPair(name, passphrase string) (string, error) } +type CryptProvider = KeyPairProvider + const ( WorkspaceCreateAction = "workspace.create" WorkspaceSwitchAction = 
"workspace.switch" @@ -37,14 +39,15 @@ type WorkspaceCommand struct { WorkspaceID string } -// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Options struct { - CryptProvider CryptProvider + KeyPairProvider KeyPairProvider + CryptProvider CryptProvider } -// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Service struct { - cryptProvider CryptProvider + keyPairProvider KeyPairProvider activeWorkspaceID string rootPath string medium io.Medium @@ -53,7 +56,7 @@ type Service struct { var _ Workspace = (*Service)(nil) -// Example: service, _ := workspace.New(workspace.Options{CryptProvider: cryptProvider}) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { home := resolveWorkspaceHomeDirectory() @@ -62,14 +65,18 @@ func New(options Options) (*Service, error) { } rootPath := core.Path(home, ".core", "workspaces") - if options.CryptProvider == nil { - return nil, core.E("workspace.New", "crypt provider is required", fs.ErrInvalid) + keyPairProvider := options.KeyPairProvider + if keyPairProvider == nil { + keyPairProvider = options.CryptProvider + } + if keyPairProvider == nil { + return nil, core.E("workspace.New", "key pair provider is required", fs.ErrInvalid) } service := &Service{ - cryptProvider: options.CryptProvider, - rootPath: rootPath, - medium: io.Local, + keyPairProvider: keyPairProvider, + rootPath: rootPath, + medium: io.Local, } if err := service.medium.EnsureDir(rootPath); err != nil { @@ -84,8 +91,8 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er service.stateLock.Lock() defer 
service.stateLock.Unlock() - if service.cryptProvider == nil { - return "", core.E("workspace.CreateWorkspace", "crypt provider not available", nil) + if service.keyPairProvider == nil { + return "", core.E("workspace.CreateWorkspace", "key pair provider not available", nil) } hash := sha256.Sum256([]byte(identifier)) @@ -105,7 +112,7 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er } } - privKey, err := service.cryptProvider.CreateKeyPair(identifier, password) + privKey, err := service.keyPairProvider.CreateKeyPair(identifier, password) if err != nil { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } diff --git a/workspace/service_test.go b/workspace/service_test.go index d218cd0..a3ddcc7 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -8,12 +8,12 @@ import ( "github.com/stretchr/testify/require" ) -type stubCryptProvider struct { +type stubKeyPairProvider struct { key string err error } -func (provider stubCryptProvider) CreateKeyPair(_, _ string) (string, error) { +func (provider stubKeyPairProvider) CreateKeyPair(_, _ string) (string, error) { if provider.err != nil { return "", provider.err } @@ -26,12 +26,12 @@ func newTestService(t *testing.T) (*Service, string) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - service, err := New(Options{CryptProvider: stubCryptProvider{key: "private-key"}}) + service, err := New(Options{KeyPairProvider: stubKeyPairProvider{key: "private-key"}}) require.NoError(t, err) return service, tempHome } -func TestService_New_MissingCryptProvider_Bad(t *testing.T) { +func TestService_New_MissingKeyPairProvider_Bad(t *testing.T) { _, err := New(Options{}) require.Error(t, err) } From bcf780c0aca78090b5e278f74c272a2593079ac7 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:46:33 +0000 Subject: [PATCH 46/83] refactor(ax): align memory medium test names Co-Authored-By: Virgil --- medium_test.go | 98 
+++++++++++++++++++++++++------------------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/medium_test.go b/medium_test.go index bb57ed0..d0ce5ce 100644 --- a/medium_test.go +++ b/medium_test.go @@ -44,22 +44,22 @@ func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { assert.Equal(t, int64(8), entryInfo.Size()) } -func TestMockMedium_Read_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Read_Good(t *testing.T) { + m := NewMemoryMedium() m.files["test.txt"] = "hello world" content, err := m.Read("test.txt") assert.NoError(t, err) assert.Equal(t, "hello world", content) } -func TestMockMedium_Read_Bad(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Read_Bad(t *testing.T) { + m := NewMemoryMedium() _, err := m.Read("nonexistent.txt") assert.Error(t, err) } -func TestMockMedium_Write_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Write_Good(t *testing.T) { + m := NewMemoryMedium() err := m.Write("test.txt", "content") assert.NoError(t, err) assert.Equal(t, "content", m.files["test.txt"]) @@ -69,8 +69,8 @@ func TestMockMedium_Write_Good(t *testing.T) { assert.Equal(t, "new content", m.files["test.txt"]) } -func TestMockMedium_WriteMode_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_WriteMode_Good(t *testing.T) { + m := NewMemoryMedium() err := m.WriteMode("secure.txt", "secret", 0600) require.NoError(t, err) @@ -80,38 +80,38 @@ func TestMockMedium_WriteMode_Good(t *testing.T) { assert.Equal(t, "secret", content) } -func TestMockMedium_EnsureDir_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_EnsureDir_Good(t *testing.T) { + m := NewMemoryMedium() err := m.EnsureDir("/path/to/dir") assert.NoError(t, err) assert.True(t, m.dirs["/path/to/dir"]) } -func TestMockMedium_IsFile_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_IsFile_Good(t *testing.T) { + m := NewMemoryMedium() m.files["exists.txt"] = "content" assert.True(t, 
m.IsFile("exists.txt")) assert.False(t, m.IsFile("nonexistent.txt")) } -func TestMockMedium_FileGet_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_FileGet_Good(t *testing.T) { + m := NewMemoryMedium() m.files["test.txt"] = "content" content, err := m.FileGet("test.txt") assert.NoError(t, err) assert.Equal(t, "content", content) } -func TestMockMedium_FileSet_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_FileSet_Good(t *testing.T) { + m := NewMemoryMedium() err := m.FileSet("test.txt", "content") assert.NoError(t, err) assert.Equal(t, "content", m.files["test.txt"]) } -func TestMockMedium_Delete_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Delete_Good(t *testing.T) { + m := NewMemoryMedium() m.files["test.txt"] = "content" err := m.Delete("test.txt") @@ -119,14 +119,14 @@ func TestMockMedium_Delete_Good(t *testing.T) { assert.False(t, m.IsFile("test.txt")) } -func TestMockMedium_Delete_NotFound_Bad(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Delete_NotFound_Bad(t *testing.T) { + m := NewMemoryMedium() err := m.Delete("nonexistent.txt") assert.Error(t, err) } -func TestMockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Delete_DirNotEmpty_Bad(t *testing.T) { + m := NewMemoryMedium() m.dirs["mydir"] = true m.files["mydir/file.txt"] = "content" @@ -134,8 +134,8 @@ func TestMockMedium_Delete_DirNotEmpty_Bad(t *testing.T) { assert.Error(t, err) } -func TestMockMedium_DeleteAll_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_DeleteAll_Good(t *testing.T) { + m := NewMemoryMedium() m.dirs["mydir"] = true m.dirs["mydir/subdir"] = true m.files["mydir/file.txt"] = "content" @@ -147,8 +147,8 @@ func TestMockMedium_DeleteAll_Good(t *testing.T) { assert.Empty(t, m.files) } -func TestMockMedium_Rename_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Rename_Good(t *testing.T) { + m := NewMemoryMedium() m.files["old.txt"] = 
"content" err := m.Rename("old.txt", "new.txt") @@ -158,8 +158,8 @@ func TestMockMedium_Rename_Good(t *testing.T) { assert.Equal(t, "content", m.files["new.txt"]) } -func TestMockMedium_Rename_Dir_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Rename_Dir_Good(t *testing.T) { + m := NewMemoryMedium() m.dirs["olddir"] = true m.files["olddir/file.txt"] = "content" @@ -170,8 +170,8 @@ func TestMockMedium_Rename_Dir_Good(t *testing.T) { assert.Equal(t, "content", m.files["newdir/file.txt"]) } -func TestMockMedium_List_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_List_Good(t *testing.T) { + m := NewMemoryMedium() m.dirs["mydir"] = true m.files["mydir/file1.txt"] = "content1" m.files["mydir/file2.txt"] = "content2" @@ -190,8 +190,8 @@ func TestMockMedium_List_Good(t *testing.T) { assert.True(t, names["subdir"]) } -func TestMockMedium_Stat_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Stat_Good(t *testing.T) { + m := NewMemoryMedium() m.files["test.txt"] = "hello world" info, err := m.Stat("test.txt") @@ -201,8 +201,8 @@ func TestMockMedium_Stat_Good(t *testing.T) { assert.False(t, info.IsDir()) } -func TestMockMedium_Stat_Dir_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Stat_Dir_Good(t *testing.T) { + m := NewMemoryMedium() m.dirs["mydir"] = true info, err := m.Stat("mydir") @@ -211,8 +211,8 @@ func TestMockMedium_Stat_Dir_Good(t *testing.T) { assert.True(t, info.IsDir()) } -func TestMockMedium_Exists_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_Exists_Good(t *testing.T) { + m := NewMemoryMedium() m.files["file.txt"] = "content" m.dirs["mydir"] = true @@ -221,8 +221,8 @@ func TestMockMedium_Exists_Good(t *testing.T) { assert.False(t, m.Exists("nonexistent")) } -func TestMockMedium_IsDir_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_IsDir_Good(t *testing.T) { + m := NewMemoryMedium() m.files["file.txt"] = "content" m.dirs["mydir"] = true @@ -231,8 
+231,8 @@ func TestMockMedium_IsDir_Good(t *testing.T) { assert.False(t, m.IsDir("nonexistent")) } -func TestMockMedium_StreamAndFSHelpers_Good(t *testing.T) { - m := NewMockMedium() +func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { + m := NewMemoryMedium() require.NoError(t, m.EnsureDir("dir")) require.NoError(t, m.Write("dir/file.txt", "alpha")) @@ -294,7 +294,7 @@ func TestMockMedium_StreamAndFSHelpers_Good(t *testing.T) { } func TestIO_Read_Good(t *testing.T) { - m := NewMockMedium() + m := NewMemoryMedium() m.files["test.txt"] = "hello" content, err := Read(m, "test.txt") assert.NoError(t, err) @@ -302,21 +302,21 @@ func TestIO_Read_Good(t *testing.T) { } func TestIO_Write_Good(t *testing.T) { - m := NewMockMedium() + m := NewMemoryMedium() err := Write(m, "test.txt", "hello") assert.NoError(t, err) assert.Equal(t, "hello", m.files["test.txt"]) } func TestIO_EnsureDir_Good(t *testing.T) { - m := NewMockMedium() + m := NewMemoryMedium() err := EnsureDir(m, "/my/dir") assert.NoError(t, err) assert.True(t, m.dirs["/my/dir"]) } func TestIO_IsFile_Good(t *testing.T) { - m := NewMockMedium() + m := NewMemoryMedium() m.files["exists.txt"] = "content" assert.True(t, IsFile(m, "exists.txt")) @@ -338,7 +338,7 @@ func TestIO_NewSandboxed_Good(t *testing.T) { } func TestIO_ReadWriteStream_Good(t *testing.T) { - m := NewMockMedium() + m := NewMemoryMedium() writer, err := WriteStream(m, "logs/run.txt") require.NoError(t, err) @@ -355,8 +355,8 @@ func TestIO_ReadWriteStream_Good(t *testing.T) { } func TestIO_Copy_Good(t *testing.T) { - source := NewMockMedium() - dest := NewMockMedium() + source := NewMemoryMedium() + dest := NewMemoryMedium() source.files["test.txt"] = "hello" err := Copy(source, "test.txt", dest, "test.txt") assert.NoError(t, err) @@ -369,8 +369,8 @@ func TestIO_Copy_Good(t *testing.T) { } func TestIO_Copy_Bad(t *testing.T) { - source := NewMockMedium() - dest := NewMockMedium() + source := NewMemoryMedium() + dest := NewMemoryMedium() err := 
Copy(source, "nonexistent.txt", dest, "dest.txt") assert.Error(t, err) } From a290cba90802640717bec3d8c78316216dd7f6f2 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:50:19 +0000 Subject: [PATCH 47/83] refactor(ax): remove redundant compatibility surfaces --- io.go | 12 ------------ workspace/service.go | 16 ++-------------- workspace/service_test.go | 15 --------------- 3 files changed, 2 insertions(+), 41 deletions(-) diff --git a/io.go b/io.go index 8106c23..b288bb6 100644 --- a/io.go +++ b/io.go @@ -188,8 +188,6 @@ type MemoryMedium struct { modTimes map[string]time.Time } -type MockMedium = MemoryMedium - var _ Medium = (*MemoryMedium)(nil) // Example: medium := io.NewMemoryMedium() @@ -202,12 +200,6 @@ func NewMemoryMedium() *MemoryMedium { } } -// Example: medium := io.NewMockMedium() -// Example: _ = medium.Write("config/app.yaml", "port: 8080") -func NewMockMedium() *MemoryMedium { - return NewMemoryMedium() -} - func (medium *MemoryMedium) Read(path string) (string, error) { content, ok := medium.files[path] if !ok { @@ -399,8 +391,6 @@ type MemoryFile struct { offset int64 } -type MockFile = MemoryFile - func (file *MemoryFile) Stat() (fs.FileInfo, error) { return NewFileInfo(file.name, int64(len(file.content)), 0, time.Time{}, false), nil } @@ -424,8 +414,6 @@ type MemoryWriteCloser struct { data []byte } -type MockWriteCloser = MemoryWriteCloser - func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil diff --git a/workspace/service.go b/workspace/service.go index 3a1d3cb..5337ed4 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -24,8 +24,6 @@ type KeyPairProvider interface { CreateKeyPair(name, passphrase string) (string, error) } -type CryptProvider = KeyPairProvider - const ( WorkspaceCreateAction = "workspace.create" WorkspaceSwitchAction = "workspace.switch" @@ -42,7 +40,6 @@ type WorkspaceCommand struct { // Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Options struct { KeyPairProvider KeyPairProvider - CryptProvider CryptProvider } // Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) @@ -65,16 +62,12 @@ func New(options Options) (*Service, error) { } rootPath := core.Path(home, ".core", "workspaces") - keyPairProvider := options.KeyPairProvider - if keyPairProvider == nil { - keyPairProvider = options.CryptProvider - } - if keyPairProvider == nil { + if options.KeyPairProvider == nil { return nil, core.E("workspace.New", "key pair provider is required", fs.ErrInvalid) } service := &Service{ - keyPairProvider: keyPairProvider, + keyPairProvider: options.KeyPairProvider, rootPath: rootPath, medium: io.Local, } @@ -208,11 +201,6 @@ func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Messag return service.HandleWorkspaceCommand(command) } -// Example: result := service.HandleIPCEvents(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) -func (service *Service) HandleIPCEvents(coreRuntime *core.Core, message core.Message) core.Result { - return service.HandleWorkspaceMessage(coreRuntime, message) -} - func workspaceCommandFromMessage(message core.Message) (WorkspaceCommand, bool) { switch payload := message.(type) { case WorkspaceCommand: diff --git a/workspace/service_test.go b/workspace/service_test.go index a3ddcc7..288199f 100644 --- a/workspace/service_test.go +++ 
b/workspace/service_test.go @@ -148,18 +148,3 @@ func TestService_HandleWorkspaceMessage_Good(t *testing.T) { unknown := service.HandleWorkspaceMessage(core.New(), "noop") assert.True(t, unknown.OK) } - -func TestService_HandleIPCEvents_Compatibility_Good(t *testing.T) { - service, _ := newTestService(t) - - result := service.HandleIPCEvents(core.New(), WorkspaceCommand{ - Action: WorkspaceCreateAction, - Identifier: "compat-user", - Password: "pass123", - }) - - assert.True(t, result.OK) - workspaceID, ok := result.Value.(string) - require.True(t, ok) - require.NotEmpty(t, workspaceID) -} From b3d12ce553d43ef0d0c111b244ce3b3409335035 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 05:57:21 +0000 Subject: [PATCH 48/83] refactor(ax): remove fileget/fileset compatibility aliases Co-authored-by: Virgil --- datanode/medium.go | 8 -------- datanode/medium_test.go | 10 ---------- io.go | 12 ------------ local/medium.go | 8 -------- local/medium_test.go | 12 ------------ medium_test.go | 15 --------------- node/node.go | 8 -------- node/node_test.go | 4 ++-- s3/s3.go | 8 -------- s3/s3_test.go | 13 ------------- sqlite/sqlite.go | 8 -------- sqlite/sqlite_test.go | 13 ------------- store/medium.go | 8 -------- store/medium_test.go | 6 +++--- 14 files changed, 5 insertions(+), 128 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 47ce247..177c6ac 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -174,14 +174,6 @@ func (medium *Medium) IsFile(filePath string) bool { return err == nil && !info.IsDir() } -func (medium *Medium) FileGet(filePath string) (string, error) { - return medium.Read(filePath) -} - -func (medium *Medium) FileSet(filePath, content string) error { - return medium.Write(filePath, content) -} - func (medium *Medium) Delete(filePath string) error { medium.lock.Lock() defer medium.lock.Unlock() diff --git a/datanode/medium_test.go b/datanode/medium_test.go index b238e7d..5dad056 100644 --- a/datanode/medium_test.go 
+++ b/datanode/medium_test.go @@ -328,16 +328,6 @@ func TestDataNode_Streams_Good(t *testing.T) { rs.Close() } -func TestDataNode_FileGetFileSet_Good(t *testing.T) { - m := New() - - require.NoError(t, m.FileSet("alias.txt", "via set")) - - got, err := m.FileGet("alias.txt") - require.NoError(t, err) - assert.Equal(t, "via set", got) -} - func TestDataNode_SnapshotRestore_Good(t *testing.T) { m := New() diff --git a/io.go b/io.go index b288bb6..20fc2fa 100644 --- a/io.go +++ b/io.go @@ -26,10 +26,6 @@ type Medium interface { IsFile(path string) bool - FileGet(path string) (string, error) - - FileSet(path, content string) error - Delete(path string) error DeleteAll(path string) error @@ -228,14 +224,6 @@ func (medium *MemoryMedium) IsFile(path string) bool { return ok } -func (medium *MemoryMedium) FileGet(path string) (string, error) { - return medium.Read(path) -} - -func (medium *MemoryMedium) FileSet(path, content string) error { - return medium.Write(path, content) -} - func (medium *MemoryMedium) Delete(path string) error { if _, ok := medium.files[path]; ok { delete(medium.files, path) diff --git a/local/medium.go b/local/medium.go index ad57ed4..7c9c104 100644 --- a/local/medium.go +++ b/local/medium.go @@ -360,14 +360,6 @@ func (medium *Medium) Rename(oldPath, newPath string) error { return resultError("local.Rename", core.Concat("rename failed: ", oldPath), unrestrictedFileSystem.Rename(oldResolvedPath, newResolvedPath)) } -func (medium *Medium) FileGet(path string) (string, error) { - return medium.Read(path) -} - -func (medium *Medium) FileSet(path, content string) error { - return medium.Write(path, content) -} - func lstat(path string) (*syscall.Stat_t, error) { info := &syscall.Stat_t{} if err := syscall.Lstat(path, info); err != nil { diff --git a/local/medium_test.go b/local/medium_test.go index 53ddf4d..aeb589b 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -200,18 +200,6 @@ func TestLocal_Rename_Basic_Good(t *testing.T) { 
assert.True(t, m.Exists("new")) } -func TestLocal_FileGetFileSet_Basic_Good(t *testing.T) { - root := t.TempDir() - m, _ := New(root) - - err := m.FileSet("data", "value") - assert.NoError(t, err) - - val, err := m.FileGet("data") - assert.NoError(t, err) - assert.Equal(t, "value", val) -} - func TestLocal_Delete_Good(t *testing.T) { testRoot := t.TempDir() diff --git a/medium_test.go b/medium_test.go index d0ce5ce..f38645c 100644 --- a/medium_test.go +++ b/medium_test.go @@ -95,21 +95,6 @@ func TestMemoryMedium_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("nonexistent.txt")) } -func TestMemoryMedium_FileGet_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["test.txt"] = "content" - content, err := m.FileGet("test.txt") - assert.NoError(t, err) - assert.Equal(t, "content", content) -} - -func TestMemoryMedium_FileSet_Good(t *testing.T) { - m := NewMemoryMedium() - err := m.FileSet("test.txt", "content") - assert.NoError(t, err) - assert.Equal(t, "content", m.files["test.txt"]) -} - func TestMemoryMedium_Delete_Good(t *testing.T) { m := NewMemoryMedium() m.files["test.txt"] = "content" diff --git a/node/node.go b/node/node.go index 00237b1..3ada8e7 100644 --- a/node/node.go +++ b/node/node.go @@ -332,14 +332,6 @@ func (node *Node) WriteMode(filePath, content string, mode fs.FileMode) error { return node.Write(filePath, content) } -func (node *Node) FileGet(filePath string) (string, error) { - return node.Read(filePath) -} - -func (node *Node) FileSet(filePath, content string) error { - return node.Write(filePath, content) -} - // Example: _ = nodeTree.EnsureDir("config") func (node *Node) EnsureDir(directoryPath string) error { return nil diff --git a/node/node_test.go b/node/node_test.go index aa473d4..279f8aa 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -381,14 +381,14 @@ func TestNode_MediumFacade_Good(t *testing.T) { require.NoError(t, n.Write("docs/readme.txt", "hello")) require.NoError(t, n.WriteMode("docs/mode.txt", "mode", 0600)) - 
require.NoError(t, n.FileSet("docs/guide.txt", "guide")) + require.NoError(t, n.Write("docs/guide.txt", "guide")) require.NoError(t, n.EnsureDir("ignored")) value, err := n.Read("docs/readme.txt") require.NoError(t, err) assert.Equal(t, "hello", value) - value, err = n.FileGet("docs/guide.txt") + value, err = n.Read("docs/guide.txt") require.NoError(t, err) assert.Equal(t, "guide", value) diff --git a/s3/s3.go b/s3/s3.go index 5ca8fad..58ea246 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -184,14 +184,6 @@ func (medium *Medium) IsFile(filePath string) bool { return err == nil } -func (medium *Medium) FileGet(filePath string) (string, error) { - return medium.Read(filePath) -} - -func (medium *Medium) FileSet(filePath, content string) error { - return medium.Write(filePath, content) -} - func (medium *Medium) Delete(filePath string) error { key := medium.objectKey(filePath) if key == "" { diff --git a/s3/s3_test.go b/s3/s3_test.go index 010a377..d18af55 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -299,17 +299,6 @@ func TestS3_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestS3_FileGetFileSet_Good(t *testing.T) { - m, _ := newTestMedium(t) - - err := m.FileSet("key.txt", "value") - require.NoError(t, err) - - val, err := m.FileGet("key.txt") - require.NoError(t, err) - assert.Equal(t, "value", val) -} - func TestS3_Delete_Good(t *testing.T) { m, _ := newTestMedium(t) @@ -648,8 +637,6 @@ func TestS3_InterfaceCompliance(t *testing.T) { Write(string, string) error EnsureDir(string) error IsFile(string) bool - FileGet(string) (string, error) - FileSet(string, string) error Delete(string) error DeleteAll(string) error Rename(string, string) error diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 0552d94..e852f9b 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -167,14 +167,6 @@ func (medium *Medium) IsFile(filePath string) bool { return !isDir } -func (medium *Medium) FileGet(filePath string) (string, error) { - return 
medium.Read(filePath) -} - -func (medium *Medium) FileSet(filePath, content string) error { - return medium.Write(filePath, content) -} - // Example: _ = medium.Delete("config/app.yaml") func (medium *Medium) Delete(filePath string) error { key := normaliseEntryPath(filePath) diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 305b774..5a108b5 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -134,17 +134,6 @@ func TestSqlite_IsFile_Good(t *testing.T) { assert.False(t, m.IsFile("")) } -func TestSqlite_FileGetFileSet_Good(t *testing.T) { - m := newTestMedium(t) - - err := m.FileSet("key.txt", "value") - require.NoError(t, err) - - val, err := m.FileGet("key.txt") - require.NoError(t, err) - assert.Equal(t, "value", val) -} - func TestSqlite_Delete_Good(t *testing.T) { m := newTestMedium(t) @@ -580,8 +569,6 @@ func TestSqlite_InterfaceCompliance(t *testing.T) { Write(string, string) error EnsureDir(string) error IsFile(string) bool - FileGet(string) (string, error) - FileSet(string, string) error Delete(string) error DeleteAll(string) error Rename(string, string) error diff --git a/store/medium.go b/store/medium.go index 5d492be..d59addc 100644 --- a/store/medium.go +++ b/store/medium.go @@ -93,14 +93,6 @@ func (medium *Medium) IsFile(entryPath string) bool { return err == nil } -func (medium *Medium) FileGet(entryPath string) (string, error) { - return medium.Read(entryPath) -} - -func (medium *Medium) FileSet(entryPath, content string) error { - return medium.Write(entryPath, content) -} - func (medium *Medium) Delete(entryPath string) error { group, key := splitGroupKeyPath(entryPath) if group == "" { diff --git a/store/medium_test.go b/store/medium_test.go index a82fac0..c45a89e 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -204,13 +204,13 @@ func TestKeyValueMedium_Store_Good(t *testing.T) { assert.Same(t, m.Store(), m.Store()) } -func TestKeyValueMedium_EnsureDir_FileHelpers_Good(t *testing.T) { +func 
TestKeyValueMedium_EnsureDir_ReadWrite_Good(t *testing.T) { m := newTestKeyValueMedium(t) require.NoError(t, m.EnsureDir("ignored")) - require.NoError(t, m.FileSet("group/key", "value")) + require.NoError(t, m.Write("group/key", "value")) - value, err := m.FileGet("group/key") + value, err := m.Read("group/key") require.NoError(t, err) assert.Equal(t, "value", value) } From 38066a6faeeea878c44068c78e92f6899d97b47f Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 06:00:23 +0000 Subject: [PATCH 49/83] refactor(ax): rename workspace file helpers Co-authored-by: Virgil --- workspace/doc.go | 2 +- workspace/service.go | 16 ++++++++-------- workspace/service_test.go | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/workspace/doc.go b/workspace/doc.go index 8fc8f99..bf399b3 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,5 +1,5 @@ // Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") // Example: _ = service.SwitchWorkspace(workspaceID) -// Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") +// Example: _ = service.WriteWorkspaceFile("notes/todo.txt", "ship it") package workspace diff --git a/workspace/service.go b/workspace/service.go index 5337ed4..c3d705a 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -15,8 +15,8 @@ import ( type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error - WorkspaceFileGet(workspaceFilePath string) (string, error) - WorkspaceFileSet(workspaceFilePath, content string) error + ReadWorkspaceFile(workspaceFilePath string) (string, error) + WriteWorkspaceFile(workspaceFilePath, content string) error } // Example: key, _ := keyPairProvider.CreateKeyPair("alice", "pass123") @@ -149,24 +149,24 @@ func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFileP return 
filePath, nil } -// Example: content, _ := service.WorkspaceFileGet("notes/todo.txt") -func (service *Service) WorkspaceFileGet(workspaceFilePath string) (string, error) { +// Example: content, _ := service.ReadWorkspaceFile("notes/todo.txt") +func (service *Service) ReadWorkspaceFile(workspaceFilePath string) (string, error) { service.stateLock.RLock() defer service.stateLock.RUnlock() - filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileGet", workspaceFilePath) + filePath, err := service.resolveActiveWorkspaceFilePath("workspace.ReadWorkspaceFile", workspaceFilePath) if err != nil { return "", err } return service.medium.Read(filePath) } -// Example: _ = service.WorkspaceFileSet("notes/todo.txt", "ship it") -func (service *Service) WorkspaceFileSet(workspaceFilePath, content string) error { +// Example: _ = service.WriteWorkspaceFile("notes/todo.txt", "ship it") +func (service *Service) WriteWorkspaceFile(workspaceFilePath, content string) error { service.stateLock.Lock() defer service.stateLock.Unlock() - filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WorkspaceFileSet", workspaceFilePath) + filePath, err := service.resolveActiveWorkspaceFilePath("workspace.WriteWorkspaceFile", workspaceFilePath) if err != nil { return err } diff --git a/workspace/service_test.go b/workspace/service_test.go index 288199f..f7e88b2 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -36,7 +36,7 @@ func TestService_New_MissingKeyPairProvider_Bad(t *testing.T) { require.Error(t, err) } -func TestService_Workspace_RoundTrip_Good(t *testing.T) { +func TestService_WorkspaceFileRoundTrip_Good(t *testing.T) { service, tempHome := newTestService(t) workspaceID, err := service.CreateWorkspace("test-user", "pass123") @@ -52,10 +52,10 @@ func TestService_Workspace_RoundTrip_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, workspaceID, service.activeWorkspaceID) - err = service.WorkspaceFileSet("secret.txt", 
"top secret info") + err = service.WriteWorkspaceFile("secret.txt", "top secret info") require.NoError(t, err) - got, err := service.WorkspaceFileGet("secret.txt") + got, err := service.ReadWorkspaceFile("secret.txt") require.NoError(t, err) assert.Equal(t, "top secret info", got) } @@ -71,7 +71,7 @@ func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { assert.Empty(t, service.activeWorkspaceID) } -func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { +func TestService_WriteWorkspaceFile_TraversalBlocked_Bad(t *testing.T) { service, tempHome := newTestService(t) workspaceID, err := service.CreateWorkspace("test-user", "pass123") @@ -82,14 +82,14 @@ func TestService_WorkspaceFileSet_TraversalBlocked_Bad(t *testing.T) { before, err := service.medium.Read(keyPath) require.NoError(t, err) - err = service.WorkspaceFileSet("../keys/private.key", "hijack") + err = service.WriteWorkspaceFile("../keys/private.key", "hijack") require.Error(t, err) after, err := service.medium.Read(keyPath) require.NoError(t, err) assert.Equal(t, before, after) - _, err = service.WorkspaceFileGet("../keys/private.key") + _, err = service.ReadWorkspaceFile("../keys/private.key") require.Error(t, err) } From 3054217038dcf489d5ceaf8a3e15b92c43c6ca06 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 06:10:46 +0000 Subject: [PATCH 50/83] refactor(ax): remove workspace message compatibility map Co-Authored-By: Virgil --- workspace/service.go | 24 ++++-------------------- workspace/service_test.go | 33 +-------------------------------- 2 files changed, 5 insertions(+), 52 deletions(-) diff --git a/workspace/service.go b/workspace/service.go index c3d705a..3a0babf 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -192,28 +192,12 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: 
"f3f0d7"}) -// Example: legacy := service.HandleWorkspaceMessage(core.New(), map[string]any{"action": WorkspaceCreateAction, "identifier": "alice", "password": "pass123"}) func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { - command, ok := workspaceCommandFromMessage(message) - if !ok { - return core.Result{OK: true} - } - return service.HandleWorkspaceCommand(command) -} - -func workspaceCommandFromMessage(message core.Message) (WorkspaceCommand, bool) { - switch payload := message.(type) { + switch command := message.(type) { case WorkspaceCommand: - return payload, true - case map[string]any: - command := WorkspaceCommand{} - command.Action, _ = payload["action"].(string) - command.Identifier, _ = payload["identifier"].(string) - command.Password, _ = payload["password"].(string) - command.WorkspaceID, _ = payload["workspaceID"].(string) - return command, true - } - return WorkspaceCommand{}, false + return service.HandleWorkspaceCommand(command) + } + return core.Result{OK: true} } func resolveWorkspaceHomeDirectory() string { diff --git a/workspace/service_test.go b/workspace/service_test.go index f7e88b2..aaf4caf 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -93,7 +93,7 @@ func TestService_WriteWorkspaceFile_TraversalBlocked_Bad(t *testing.T) { require.Error(t, err) } -func TestService_HandleWorkspaceMessage_Good(t *testing.T) { +func TestService_HandleWorkspaceMessage_Command_Good(t *testing.T) { service, _ := newTestService(t) create := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ @@ -114,37 +114,6 @@ func TestService_HandleWorkspaceMessage_Good(t *testing.T) { assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, service.activeWorkspaceID) - legacyCreate := service.HandleWorkspaceMessage(core.New(), map[string]any{ - "action": WorkspaceCreateAction, - "identifier": "legacy-user", - "password": "pass123", - }) - assert.True(t, legacyCreate.OK) - - 
legacyWorkspaceID, ok := legacyCreate.Value.(string) - require.True(t, ok) - require.NotEmpty(t, legacyWorkspaceID) - - legacySwitch := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ - Action: WorkspaceSwitchAction, - WorkspaceID: legacyWorkspaceID, - }) - assert.True(t, legacySwitch.OK) - assert.Equal(t, legacyWorkspaceID, service.activeWorkspaceID) - - rejectedLegacySwitch := service.HandleWorkspaceMessage(core.New(), map[string]any{ - "action": WorkspaceSwitchAction, - "name": workspaceID, - }) - assert.False(t, rejectedLegacySwitch.OK) - assert.Equal(t, legacyWorkspaceID, service.activeWorkspaceID) - - failedSwitch := service.HandleWorkspaceMessage(core.New(), map[string]any{ - "action": WorkspaceSwitchAction, - "workspaceID": "missing", - }) - assert.False(t, failedSwitch.OK) - unknown := service.HandleWorkspaceMessage(core.New(), "noop") assert.True(t, unknown.OK) } From 97535f650ac96b2f7235a5f4cbf26ee883eef9bf Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 06:17:48 +0000 Subject: [PATCH 51/83] docs(ax): align guidance with current medium surface Co-Authored-By: Virgil --- CLAUDE.md | 6 +++--- docs/architecture.md | 8 ++++---- docs/development.md | 28 +++++++++++++++------------- docs/index.md | 7 +++---- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 5b03b0b..abe2112 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -34,7 +34,7 @@ GOWORK=off go test -cover ./... ### Core Interface -`io.Medium` — 18 methods: Read, Write, EnsureDir, IsFile, FileGet, FileSet, Delete, DeleteAll, Rename, List, Stat, Open, Create, Append, ReadStream, WriteStream, Exists, IsDir. +`io.Medium` — 17 methods: Read, Write, WriteMode, EnsureDir, IsFile, Delete, DeleteAll, Rename, List, Stat, Open, Create, Append, ReadStream, WriteStream, Exists, IsDir. 
```go // Sandboxed to a project directory @@ -60,7 +60,7 @@ io.Copy(s3Medium, "backup.tar", localMedium, "restore/backup.tar") | `datanode` | Borg DataNode | Thread-safe (RWMutex) in-memory, snapshot/restore via tar | | `store` | SQLite KV store | Group-namespaced key-value with Go template rendering | | `workspace` | Core service | Encrypted workspaces, SHA-256 IDs, PGP keypairs | -| `MockMedium` | In-memory map | Testing — no filesystem needed | +| `MemoryMedium` | In-memory map | Testing — no filesystem needed | `store.Medium` maps filesystem paths as `group/key` — first path segment is the group, remainder is the key. `List("")` returns groups as directories. @@ -132,4 +132,4 @@ Sentinel errors (`var ErrNotFound`, `var ErrInvalidKey`, etc.) use standard `err ## Testing -Use `io.MockMedium` or `io.NewSandboxed(t.TempDir())` in tests — never hit real S3/SQLite unless integration testing. S3 tests use an interface-based mock (`s3.Client`). +Use `io.NewMemoryMedium()` or `io.NewSandboxed(t.TempDir())` in tests — never hit real S3/SQLite unless integration testing. S3 tests use an interface-based mock (`s3.Client`). diff --git a/docs/architecture.md b/docs/architecture.md index 801121a..8db0246 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -25,7 +25,7 @@ The `Medium` interface is defined in `io.go`. It is the only type that consuming - **`io.Local`** — a package-level variable initialised in `init()` via `local.New("/")`. This gives unsandboxed access to the host filesystem, mirroring the behaviour of the standard `os` package. - **`io.NewSandboxed(root)`** — creates a `local.Medium` restricted to `root`. All path resolution is confined within that directory. - **`io.Copy(src, srcPath, dst, dstPath)`** — copies a file between any two mediums by reading from one and writing to the other. -- **`io.MockMedium`** — a fully functional in-memory implementation for unit tests. It tracks files, directories, and modification times in plain maps. 
+- **`io.NewMemoryMedium()`** — a fully functional in-memory implementation for unit tests. It tracks files, directories, and modification times in plain maps. ### FileInfo and DirEntry (root package) @@ -36,7 +36,7 @@ Simple struct implementations of `fs.FileInfo` and `fs.DirEntry` are exported fr ### local.Medium -**File:** `local/client.go` +**File:** `local/medium.go` The local backend wraps the standard `os` package with two layers of path protection: @@ -100,7 +100,7 @@ Key capabilities beyond `Medium`: ### datanode.Medium -**File:** `datanode/client.go` +**File:** `datanode/medium.go` A thread-safe `Medium` backed by Borg's `DataNode` (an in-memory `fs.FS` with tar serialisation). It adds: @@ -271,7 +271,7 @@ Application code +-- node.Node --> in-memory map + tar serialisation +-- datanode.Medium --> Borg DataNode + sync.RWMutex +-- store.Medium --> store.Store (SQLite KV) --> Medium adapter - +-- MockMedium --> map[string]string (for tests) + +-- MemoryMedium --> map[string]string (for tests) ``` Every backend normalises paths using the same `path.Clean("/" + p)` pattern, ensuring consistent behaviour regardless of which backend is in use. 
diff --git a/docs/development.md b/docs/development.md index 2e95ad7..6ece61b 100644 --- a/docs/development.md +++ b/docs/development.md @@ -88,18 +88,20 @@ func TestDelete_Bad_DirNotEmpty(t *testing.T) { /* returns error for non-empty d ## Writing Tests Against Medium -Use `MockMedium` from the root package for unit tests that need a storage backend but should not touch disk: +Use `MemoryMedium` from the root package for unit tests that need a storage backend but should not touch disk: ```go func TestMyFeature(t *testing.T) { - m := io.NewMockMedium() - m.Files["config.yaml"] = "key: value" - m.Dirs["data"] = true + m := io.NewMemoryMedium() + _ = m.Write("config.yaml", "key: value") + _ = m.EnsureDir("data") // Your code under test receives m as an io.Medium result, err := myFunction(m) assert.NoError(t, err) - assert.Equal(t, "expected", m.Files["output.txt"]) + output, err := m.Read("output.txt") + require.NoError(t, err) + assert.Equal(t, "expected", output) } ``` @@ -134,7 +136,7 @@ func TestWithSQLite(t *testing.T) { To add a new `Medium` implementation: 1. Create a new package directory (e.g., `sftp/`). -2. Define a struct that implements all 18 methods of `io.Medium`. +2. Define a struct that implements all 17 methods of `io.Medium`. 3. Add a compile-time check at the top of your file: ```go @@ -142,7 +144,7 @@ var _ coreio.Medium = (*Medium)(nil) ``` 4. Normalise paths using `path.Clean("/" + p)` to prevent traversal escapes. This is the convention followed by every existing backend. -5. Handle `nil` and empty input consistently: check how `MockMedium` and `local.Medium` behave and match that behaviour. +5. Handle `nil` and empty input consistently: check how `MemoryMedium` and `local.Medium` behave and match that behaviour. 6. Write tests using the `_Good` / `_Bad` / `_Ugly` naming convention. 7. Add your package to the table in `docs/index.md`. 
@@ -171,13 +173,13 @@ To add a new data transformation: ``` go-io/ -├── io.go # Medium interface, helpers, MockMedium -├── client_test.go # Tests for MockMedium and helpers +├── io.go # Medium interface, helpers, MemoryMedium +├── medium_test.go # Tests for MemoryMedium and helpers ├── bench_test.go # Benchmarks ├── go.mod ├── local/ -│ ├── client.go # Local filesystem backend -│ └── client_test.go +│ ├── medium.go # Local filesystem backend +│ └── medium_test.go ├── s3/ │ ├── s3.go # S3 backend │ └── s3_test.go @@ -188,8 +190,8 @@ go-io/ │ ├── node.go # In-memory fs.FS + Medium │ └── node_test.go ├── datanode/ -│ ├── client.go # Borg DataNode Medium wrapper -│ └── client_test.go +│ ├── medium.go # Borg DataNode Medium wrapper +│ └── medium_test.go ├── store/ │ ├── store.go # KV store │ ├── medium.go # Medium adapter for KV store diff --git a/docs/index.md b/docs/index.md index 2ac0992..9ce4c25 100644 --- a/docs/index.md +++ b/docs/index.md @@ -41,7 +41,7 @@ _ = bucket.Write("photo.jpg", rawData) | Package | Import Path | Purpose | |---------|-------------|---------| -| `io` (root) | `forge.lthn.ai/core/go-io` | `Medium` interface, helper functions, `MockMedium` for tests | +| `io` (root) | `forge.lthn.ai/core/go-io` | `Medium` interface, helper functions, `MemoryMedium` for tests | | `local` | `forge.lthn.ai/core/go-io/local` | Local filesystem backend with path sandboxing and symlink-escape protection | | `s3` | `forge.lthn.ai/core/go-io/s3` | Amazon S3 / S3-compatible backend (Garage, MinIO, etc.) 
| | `sqlite` | `forge.lthn.ai/core/go-io/sqlite` | SQLite-backed virtual filesystem (pure Go driver, no CGO) | @@ -54,15 +54,14 @@ _ = bucket.Write("photo.jpg", rawData) ## The Medium Interface -Every storage backend implements the same 18-method interface: +Every storage backend implements the same 17-method interface: ```go type Medium interface { // Content operations Read(path string) (string, error) Write(path, content string) error - FileGet(path string) (string, error) // alias for Read - FileSet(path, content string) error // alias for Write + WriteMode(path, content string, mode fs.FileMode) error // Streaming (for large files) ReadStream(path string) (io.ReadCloser, error) From cc2b553c944a32a0531a7103613c761a5014d9c4 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 06:26:16 +0000 Subject: [PATCH 52/83] docs(ax): align RFC API reference with current surfaces --- docs/RFC.md | 545 ++++++++++++++++++++++++++-------------------------- 1 file changed, 275 insertions(+), 270 deletions(-) diff --git a/docs/RFC.md b/docs/RFC.md index 112fd4c..3fab450 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -11,7 +11,7 @@ Examples use the import paths from `docs/index.md` (`forge.lthn.ai/core/go-io`). ## Package io (`forge.lthn.ai/core/go-io`) -Defines the `Medium` interface, helper functions, and in-memory mock implementations. +Defines the `Medium` interface, helper functions, and in-memory `MemoryMedium` implementation. ### Medium (interface) @@ -19,7 +19,7 @@ The common storage abstraction implemented by every backend. Example: ```go -var m io.Medium = io.NewMockMedium() +var m io.Medium = io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ``` @@ -27,7 +27,7 @@ _ = m.Write("notes.txt", "hello") Reads a file as a string. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") value, _ := m.Read("notes.txt") ``` @@ -36,7 +36,7 @@ value, _ := m.Read("notes.txt") Writes content to a file, creating it if needed. 
Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ``` @@ -44,7 +44,7 @@ _ = m.Write("notes.txt", "hello") Writes content with explicit permissions. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.WriteMode("secret.txt", "secret", 0600) ``` @@ -52,7 +52,7 @@ _ = m.WriteMode("secret.txt", "secret", 0600) Ensures a directory exists. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.EnsureDir("config") ``` @@ -60,33 +60,33 @@ _ = m.EnsureDir("config") Reports whether a path is a regular file. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**FileGet(path string) (string, error)** +**Read(path string) (string, error)** Alias for `Read`. Example: ```go -m := io.NewMockMedium() -_ = m.FileSet("notes.txt", "hello") -value, _ := m.FileGet("notes.txt") +m := io.NewMemoryMedium() +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") ``` -**FileSet(path, content string) error** +**Write(path, content string) error** Alias for `Write`. Example: ```go -m := io.NewMockMedium() -_ = m.FileSet("notes.txt", "hello") +m := io.NewMemoryMedium() +_ = m.Write("notes.txt", "hello") ``` **Delete(path string) error** Deletes a file or empty directory. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("old.txt", "data") _ = m.Delete("old.txt") ``` @@ -95,7 +95,7 @@ _ = m.Delete("old.txt") Deletes a file or directory tree recursively. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("logs/run.txt", "started") _ = m.DeleteAll("logs") ``` @@ -104,7 +104,7 @@ _ = m.DeleteAll("logs") Moves or renames a file or directory. 
Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("old.txt", "data") _ = m.Rename("old.txt", "new.txt") ``` @@ -113,7 +113,7 @@ _ = m.Rename("old.txt", "new.txt") Lists immediate directory entries. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") ``` @@ -122,7 +122,7 @@ entries, _ := m.List("dir") Returns file metadata. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") info, _ := m.Stat("notes.txt") ``` @@ -131,7 +131,7 @@ info, _ := m.Stat("notes.txt") Opens a file for reading. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") defer f.Close() @@ -141,7 +141,7 @@ defer f.Close() Creates or truncates a file and returns a writer. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -151,7 +151,7 @@ _ = w.Close() Opens a file for appending, creating it if needed. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") w, _ := m.Append("notes.txt") _, _ = w.Write([]byte(" world")) @@ -162,7 +162,7 @@ _ = w.Close() Opens a streaming reader for a file. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") r, _ := m.ReadStream("notes.txt") defer r.Close() @@ -172,7 +172,7 @@ defer r.Close() Opens a streaming writer for a file. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.WriteStream("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -182,7 +182,7 @@ _ = w.Close() Reports whether a path exists. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ok := m.Exists("notes.txt") ``` @@ -191,19 +191,19 @@ ok := m.Exists("notes.txt") Reports whether a path is a directory. 
Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.EnsureDir("config") ok := m.IsDir("config") ``` ### FileInfo -Lightweight `fs.FileInfo` implementation used by `MockMedium`. +Lightweight `fs.FileInfo` implementation used by `MemoryMedium`. **Name() string** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.Name() @@ -212,7 +212,7 @@ _ = info.Name() **Size() int64** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.Size() @@ -221,7 +221,7 @@ _ = info.Size() **Mode() fs.FileMode** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.Mode() @@ -230,7 +230,7 @@ _ = info.Mode() **ModTime() time.Time** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.ModTime() @@ -239,7 +239,7 @@ _ = info.ModTime() **IsDir() bool** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.IsDir() @@ -248,7 +248,7 @@ _ = info.IsDir() **Sys() any** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("file.txt", "data") info, _ := m.Stat("file.txt") _ = info.Sys() @@ -256,12 +256,12 @@ _ = info.Sys() ### DirEntry -Lightweight `fs.DirEntry` implementation used by `MockMedium` listings. +Lightweight `fs.DirEntry` implementation used by `MemoryMedium` listings. 
**Name() string** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") _ = entries[0].Name() @@ -270,7 +270,7 @@ _ = entries[0].Name() **IsDir() bool** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.EnsureDir("dir") entries, _ := m.List("") _ = entries[0].IsDir() @@ -279,7 +279,7 @@ _ = entries[0].IsDir() **Type() fs.FileMode** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") _ = entries[0].Type() @@ -288,7 +288,7 @@ _ = entries[0].Type() **Info() (fs.FileInfo, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") info, _ := entries[0].Info() @@ -320,7 +320,7 @@ Helper that calls `Medium.Read` on a supplied backend. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") value, _ := io.Read(m, "notes.txt") ``` @@ -331,7 +331,7 @@ Helper that calls `Medium.Write` on a supplied backend. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = io.Write(m, "notes.txt", "hello") ``` @@ -341,7 +341,7 @@ Helper that calls `Medium.ReadStream` on a supplied backend. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") r, _ := io.ReadStream(m, "notes.txt") defer r.Close() @@ -353,7 +353,7 @@ Helper that calls `Medium.WriteStream` on a supplied backend. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := io.WriteStream(m, "notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -365,7 +365,7 @@ Helper that calls `Medium.EnsureDir` on a supplied backend. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = io.EnsureDir(m, "config") ``` @@ -375,7 +375,7 @@ Helper that calls `Medium.IsFile` on a supplied backend. 
Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ok := io.IsFile(m, "notes.txt") ``` @@ -386,78 +386,78 @@ Copies a file between two mediums. Example: ```go -src := io.NewMockMedium() -dst := io.NewMockMedium() +src := io.NewMemoryMedium() +dst := io.NewMemoryMedium() _ = src.Write("source.txt", "data") _ = io.Copy(src, "source.txt", dst, "dest.txt") ``` -### MockMedium +### MemoryMedium -In-memory `Medium` implementation for tests. Exposes `Files`, `Dirs`, and `ModTimes` maps for seeding state. +In-memory `Medium` implementation for tests. Example: ```go -m := io.NewMockMedium() -m.Files["seed.txt"] = "seeded" +m := io.NewMemoryMedium() +_ = m.Write("seed.txt", "seeded") ``` **Read(path string) (string, error)** Example: ```go -m := io.NewMockMedium() -m.Files["notes.txt"] = "hello" +m := io.NewMemoryMedium() +_ = m.Write("notes.txt", "hello") value, _ := m.Read("notes.txt") ``` **Write(path, content string) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ``` **WriteMode(path, content string, mode fs.FileMode) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.WriteMode("secret.txt", "secret", 0600) ``` **EnsureDir(path string) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.EnsureDir("config") ``` **IsFile(path string) bool** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**FileGet(path string) (string, error)** +**Read(path string) (string, error)** Example: ```go -m := io.NewMockMedium() -_ = m.FileSet("notes.txt", "hello") -value, _ := m.FileGet("notes.txt") +m := io.NewMemoryMedium() +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") ``` -**FileSet(path, content string) error** +**Write(path, content string) error** Example: ```go -m := io.NewMockMedium() -_ = m.FileSet("notes.txt", 
"hello") +m := io.NewMemoryMedium() +_ = m.Write("notes.txt", "hello") ``` **Delete(path string) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("old.txt", "data") _ = m.Delete("old.txt") ``` @@ -465,7 +465,7 @@ _ = m.Delete("old.txt") **DeleteAll(path string) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("logs/run.txt", "started") _ = m.DeleteAll("logs") ``` @@ -473,7 +473,7 @@ _ = m.DeleteAll("logs") **Rename(oldPath, newPath string) error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("old.txt", "data") _ = m.Rename("old.txt", "new.txt") ``` @@ -481,7 +481,7 @@ _ = m.Rename("old.txt", "new.txt") **List(path string) ([]fs.DirEntry, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("dir/file.txt", "data") entries, _ := m.List("dir") ``` @@ -489,7 +489,7 @@ entries, _ := m.List("dir") **Stat(path string) (fs.FileInfo, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") info, _ := m.Stat("notes.txt") ``` @@ -497,7 +497,7 @@ info, _ := m.Stat("notes.txt") **Open(path string) (fs.File, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") defer f.Close() @@ -506,7 +506,7 @@ defer f.Close() **Create(path string) (io.WriteCloser, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -515,7 +515,7 @@ _ = w.Close() **Append(path string) (io.WriteCloser, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") w, _ := m.Append("notes.txt") _, _ = w.Write([]byte(" world")) @@ -525,7 +525,7 @@ _ = w.Close() **ReadStream(path string) (io.ReadCloser, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = 
m.Write("notes.txt", "hello") r, _ := m.ReadStream("notes.txt") defer r.Close() @@ -534,7 +534,7 @@ defer r.Close() **WriteStream(path string) (io.WriteCloser, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.WriteStream("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -543,7 +543,7 @@ _ = w.Close() **Exists(path string) bool** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ok := m.Exists("notes.txt") ``` @@ -551,29 +551,29 @@ ok := m.Exists("notes.txt") **IsDir(path string) bool** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.EnsureDir("config") ok := m.IsDir("config") ``` -### NewMockMedium() *MockMedium +### NewMemoryMedium() *MemoryMedium Creates a new empty in-memory medium. Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") ``` -### MockFile +### MemoryFile -`fs.File` implementation returned by `MockMedium.Open`. +`fs.File` implementation returned by `MemoryMedium.Open`. **Stat() (fs.FileInfo, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") info, _ := f.Stat() @@ -583,7 +583,7 @@ _ = info.Name() **Read(b []byte) (int, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") buf := make([]byte, 5) @@ -593,20 +593,20 @@ _, _ = f.Read(buf) **Close() error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() _ = m.Write("notes.txt", "hello") f, _ := m.Open("notes.txt") _ = f.Close() ``` -### MockWriteCloser +### MemoryWriteCloser -`io.WriteCloser` implementation returned by `MockMedium.Create` and `MockMedium.Append`. +`io.WriteCloser` implementation returned by `MemoryMedium.Create` and `MemoryMedium.Append`. 
**Write(p []byte) (int, error)** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) ``` @@ -614,7 +614,7 @@ _, _ = w.Write([]byte("hello")) **Close() error** Example: ```go -m := io.NewMockMedium() +m := io.NewMemoryMedium() w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) _ = w.Close() @@ -783,19 +783,19 @@ _ = m.Write("old.txt", "data") _ = m.Rename("old.txt", "new.txt") ``` -**FileGet(path string) (string, error)** +**Read(path string) (string, error)** Example: ```go m, _ := local.New("/srv/app") -_ = m.FileSet("notes.txt", "hello") -value, _ := m.FileGet("notes.txt") +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") ``` -**FileSet(path, content string) error** +**Write(path, content string) error** Example: ```go m, _ := local.New("/srv/app") -_ = m.FileSet("notes.txt", "hello") +_ = m.Write("notes.txt", "hello") ``` ## Package node (`forge.lthn.ai/core/go-io/node`) @@ -827,8 +827,8 @@ Options for `Node.Walk`. Example: ```go -opts := node.WalkOptions{MaxDepth: 1, SkipErrors: true} -_ = opts.MaxDepth +options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} +_ = options.MaxDepth ``` ### Node @@ -866,24 +866,15 @@ n := node.New() _ = n.LoadTar([]byte{}) ``` -**WalkNode(root string, fn fs.WalkDirFunc) error** -Walks the tree using `fs.WalkDir`. -Example: -```go -n := node.New() -_ = n.WalkNode(".", func(path string, d fs.DirEntry, err error) error { - return nil -}) -``` - -**Walk(root string, fn fs.WalkDirFunc, opts ...WalkOptions) error** +**Walk(root string, fn fs.WalkDirFunc, options WalkOptions) error** Walks the tree with optional depth or filter controls. 
Example: ```go n := node.New() +options := node.WalkOptions{MaxDepth: 1, SkipErrors: true} _ = n.Walk(".", func(path string, d fs.DirEntry, err error) error { return nil -}, node.WalkOptions{MaxDepth: 1}) +}, options) ``` **ReadFile(name string) ([]byte, error)** @@ -895,7 +886,7 @@ _ = n.Write("file.txt", "data") b, _ := n.ReadFile("file.txt") ``` -**CopyFile(src, dst string, perm fs.FileMode) error** +**CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error** Copies a file to the local filesystem. Example: ```go @@ -910,7 +901,7 @@ Example: ```go n := node.New() _ = n.Write("config/app.yaml", "port: 8080") -copyTarget := io.NewMockMedium() +copyTarget := io.NewMemoryMedium() _ = n.CopyTo(copyTarget, "config", "backup/config") ``` @@ -967,21 +958,21 @@ n := node.New() _ = n.WriteMode("file.txt", "data", 0600) ``` -**FileGet(p string) (string, error)** +**Read(p string) (string, error)** Alias for `Read`. Example: ```go n := node.New() -_ = n.FileSet("file.txt", "data") -value, _ := n.FileGet("file.txt") +_ = n.Write("file.txt", "data") +value, _ := n.Read("file.txt") ``` -**FileSet(p, content string) error** +**Write(p, content string) error** Alias for `Write`. Example: ```go n := node.New() -_ = n.FileSet("file.txt", "data") +_ = n.Write("file.txt", "data") ``` **EnsureDir(path string) error** @@ -1100,26 +1091,36 @@ _ = w.Close() Group-namespaced key-value store backed by SQLite, plus a `Medium` adapter. -### ErrNotFound +### NotFoundError Returned when a key does not exist. Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _, err := s.Get("config", "missing") -if core.Is(err, store.ErrNotFound) { +if core.Is(err, store.NotFoundError) { // handle missing key } ``` -### New(dbPath string) (*Store, error) +### Options + +Configures the SQLite database path used by the store. 
+ +Example: +```go +options := store.Options{Path: ":memory:"} +_ = options +``` + +### New(options Options) (*Store, error) -Creates a new `Store` at the SQLite path. +Creates a new `Store` backed by the configured SQLite path. Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") ``` @@ -1129,21 +1130,21 @@ Group-namespaced key-value store. Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") ``` **Close() error** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Close() ``` **Get(group, key string) (string, error)** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") value, _ := s.Get("config", "theme") ``` @@ -1151,14 +1152,14 @@ value, _ := s.Get("config", "theme") **Set(group, key, value string) error** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") ``` **Delete(group, key string) error** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") _ = s.Delete("config", "theme") ``` @@ -1166,7 +1167,7 @@ _ = s.Delete("config", "theme") **Count(group string) (int, error)** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") count, _ := s.Count("config") ``` @@ -1174,7 +1175,7 @@ count, _ := s.Count("config") **DeleteGroup(group string) error** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") _ = s.DeleteGroup("config") ``` @@ -1182,7 +1183,7 @@ _ = s.DeleteGroup("config") **GetAll(group string) 
(map[string]string, error)** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("config", "theme", "midnight") all, _ := s.GetAll("config") ``` @@ -1190,7 +1191,7 @@ all, _ := s.GetAll("config") **Render(tmplStr, group string) (string, error)** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) _ = s.Set("user", "name", "alice") out, _ := s.Render("hello {{ .name }}", "user") ``` @@ -1198,18 +1199,18 @@ out, _ := s.Render("hello {{ .name }}", "user") **AsMedium() *Medium** Example: ```go -s, _ := store.New(":memory:") +s, _ := store.New(store.Options{Path: ":memory:"}) m := s.AsMedium() _ = m.Write("config/theme", "midnight") ``` -### NewMedium(dbPath string) (*Medium, error) +### NewMedium(options Options) (*Medium, error) Creates an `io.Medium` backed by a SQLite key-value store. Example: ```go -m, _ := store.NewMedium("config.db") +m, _ := store.NewMedium(store.Options{Path: "config.db"}) _ = m.Write("config/theme", "midnight") ``` @@ -1219,14 +1220,14 @@ Adapter that maps `group/key` paths onto a `Store`. 
Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ``` **Store() *Store** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) s := m.Store() _ = s.Set("config", "theme", "midnight") ``` @@ -1234,14 +1235,14 @@ _ = s.Set("config", "theme", "midnight") **Close() error** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Close() ``` **Read(p string) (string, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") value, _ := m.Read("config/theme") ``` @@ -1249,7 +1250,7 @@ value, _ := m.Read("config/theme") **Write(p, content string) error** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ``` @@ -1257,37 +1258,37 @@ _ = m.Write("config/theme", "midnight") No-op (groups are implicit). 
Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.EnsureDir("config") ``` **IsFile(p string) bool** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ok := m.IsFile("config/theme") ``` -**FileGet(p string) (string, error)** +**Read(p string) (string, error)** Example: ```go -m, _ := store.NewMedium(":memory:") -_ = m.FileSet("config/theme", "midnight") -value, _ := m.FileGet("config/theme") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) +_ = m.Write("config/theme", "midnight") +value, _ := m.Read("config/theme") ``` -**FileSet(p, content string) error** +**Write(p, content string) error** Example: ```go -m, _ := store.NewMedium(":memory:") -_ = m.FileSet("config/theme", "midnight") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) +_ = m.Write("config/theme", "midnight") ``` **Delete(p string) error** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") _ = m.Delete("config/theme") ``` @@ -1295,7 +1296,7 @@ _ = m.Delete("config/theme") **DeleteAll(p string) error** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") _ = m.DeleteAll("config") ``` @@ -1303,7 +1304,7 @@ _ = m.DeleteAll("config") **Rename(oldPath, newPath string) error** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("old/theme", "midnight") _ = m.Rename("old/theme", "new/theme") ``` @@ -1311,7 +1312,7 @@ _ = m.Rename("old/theme", "new/theme") **List(p string) ([]fs.DirEntry, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") entries, _ := m.List("") 
``` @@ -1319,7 +1320,7 @@ entries, _ := m.List("") **Stat(p string) (fs.FileInfo, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") info, _ := m.Stat("config/theme") ``` @@ -1327,7 +1328,7 @@ info, _ := m.Stat("config/theme") **Open(p string) (fs.File, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") f, _ := m.Open("config/theme") defer f.Close() @@ -1336,7 +1337,7 @@ defer f.Close() **Create(p string) (io.WriteCloser, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) w, _ := m.Create("config/theme") _, _ = w.Write([]byte("midnight")) _ = w.Close() @@ -1345,7 +1346,7 @@ _ = w.Close() **Append(p string) (io.WriteCloser, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") w, _ := m.Append("config/theme") _, _ = w.Write([]byte(" plus")) @@ -1355,7 +1356,7 @@ _ = w.Close() **ReadStream(p string) (io.ReadCloser, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") r, _ := m.ReadStream("config/theme") defer r.Close() @@ -1364,7 +1365,7 @@ defer r.Close() **WriteStream(p string) (io.WriteCloser, error)** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) w, _ := m.WriteStream("config/theme") _, _ = w.Write([]byte("midnight")) _ = w.Close() @@ -1373,7 +1374,7 @@ _ = w.Close() **Exists(p string) bool** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ok := m.Exists("config") ``` @@ -1381,7 +1382,7 @@ ok := m.Exists("config") 
**IsDir(p string) bool** Example: ```go -m, _ := store.NewMedium(":memory:") +m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ok := m.IsDir("config") ``` @@ -1390,26 +1391,17 @@ ok := m.IsDir("config") SQLite-backed `io.Medium` implementation using the pure-Go driver. -### Option - -Functional option for configuring `Medium`. - -Example: -```go -opt := sqlite.Options{Path: ":memory:", Table: "files"} -_ = opt -``` - ### Options -Sets the table name used for storage (default: `files`). +Configures the SQLite database path and optional table name. Example: ```go -m, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) +options := sqlite.Options{Path: ":memory:", Table: "files"} +_ = options ``` -### New(dbPath string, opts ...Option) (*Medium, error) +### New(options Options) (*Medium, error) Creates a new SQLite-backed medium. @@ -1466,19 +1458,19 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**FileGet(p string) (string, error)** +**Read(p string) (string, error)** Example: ```go m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -_ = m.FileSet("notes.txt", "hello") -value, _ := m.FileGet("notes.txt") +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") ``` -**FileSet(p, content string) error** +**Write(p, content string) error** Example: ```go m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -_ = m.FileSet("notes.txt", "hello") +_ = m.Write("notes.txt", "hello") ``` **Delete(p string) error** @@ -1587,23 +1579,15 @@ ok := m.IsDir("config") Amazon S3-backed `io.Medium` implementation. -### Option - -Functional option for configuring `Medium`. - -Example: -```go -opt := s3.Options{Prefix: "daily/"} -_ = opt -``` - ### Options -Sets a key prefix for all operations. +Configures the bucket, client, and optional key prefix. 
Example: ```go -m, _ := s3.New(s3.Options{Bucket: "bucket", Client: awsClient, Prefix: "daily/"}) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) +options := s3.Options{Bucket: "bucket", Client: client, Prefix: "daily/"} +_ = options ``` ### Client @@ -1612,17 +1596,17 @@ Supplies an AWS SDK S3 client. Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` -### New(bucket string, opts ...Option) (*Medium, error) +### New(options Options) (*Medium, error) Creates a new S3-backed medium. Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` @@ -1632,14 +1616,14 @@ S3-backed storage backend. Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ``` **Read(p string) (string, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) value, _ := m.Read("notes.txt") ``` @@ -1647,7 +1631,7 @@ value, _ := m.Read("notes.txt") **Write(p, content string) error** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Write("notes.txt", "hello") ``` @@ -1656,7 +1640,7 @@ _ = m.Write("notes.txt", "hello") No-op (S3 has no directories). 
Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.EnsureDir("config") ``` @@ -1664,31 +1648,31 @@ _ = m.EnsureDir("config") **IsFile(p string) bool** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsFile("notes.txt") ``` -**FileGet(p string) (string, error)** +**Read(p string) (string, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) -value, _ := m.FileGet("notes.txt") +value, _ := m.Read("notes.txt") ``` -**FileSet(p, content string) error** +**Write(p, content string) error** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) -_ = m.FileSet("notes.txt", "hello") +_ = m.Write("notes.txt", "hello") ``` **Delete(p string) error** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Delete("old.txt") ``` @@ -1696,7 +1680,7 @@ _ = m.Delete("old.txt") **DeleteAll(p string) error** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.DeleteAll("logs") ``` @@ -1704,7 +1688,7 @@ _ = m.DeleteAll("logs") **Rename(oldPath, newPath string) error** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) _ = m.Rename("old.txt", "new.txt") ``` @@ -1712,7 +1696,7 @@ _ = m.Rename("old.txt", 
"new.txt") **List(p string) ([]fs.DirEntry, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) entries, _ := m.List("dir") ``` @@ -1720,7 +1704,7 @@ entries, _ := m.List("dir") **Stat(p string) (fs.FileInfo, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) info, _ := m.Stat("notes.txt") ``` @@ -1728,7 +1712,7 @@ info, _ := m.Stat("notes.txt") **Open(p string) (fs.File, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) f, _ := m.Open("notes.txt") defer f.Close() @@ -1737,7 +1721,7 @@ defer f.Close() **Create(p string) (io.WriteCloser, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.Create("notes.txt") _, _ = w.Write([]byte("hello")) @@ -1747,7 +1731,7 @@ _ = w.Close() **Append(p string) (io.WriteCloser, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.Append("notes.txt") _, _ = w.Write([]byte(" world")) @@ -1757,7 +1741,7 @@ _ = w.Close() **ReadStream(p string) (io.ReadCloser, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) r, _ := m.ReadStream("notes.txt") defer r.Close() @@ -1766,7 +1750,7 @@ defer r.Close() **WriteStream(p string) (io.WriteCloser, error)** Example: ```go -client := awss3.NewFromConfig(cfg) +client := 
awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) w, _ := m.WriteStream("notes.txt") _, _ = w.Write([]byte("hello")) @@ -1776,7 +1760,7 @@ _ = w.Close() **Exists(p string) bool** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.Exists("notes.txt") ``` @@ -1784,7 +1768,7 @@ ok := m.Exists("notes.txt") **IsDir(p string) bool** Example: ```go -client := awss3.NewFromConfig(cfg) +client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsDir("logs") ``` @@ -1885,19 +1869,19 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**FileGet(p string) (string, error)** +**Read(p string) (string, error)** Example: ```go m := datanode.New() -_ = m.FileSet("notes.txt", "hello") -value, _ := m.FileGet("notes.txt") +_ = m.Write("notes.txt", "hello") +value, _ := m.Read("notes.txt") ``` -**FileSet(p, content string) error** +**Write(p, content string) error** Example: ```go m := datanode.New() -_ = m.FileSet("notes.txt", "hello") +_ = m.Write("notes.txt", "hello") ``` **Delete(p string) error** @@ -2008,113 +1992,134 @@ Encrypted user workspace management. ### Workspace (interface) -Defines the workspace operations exposed by the service. +Creates and operates on encrypted workspaces through a service. Example: ```go -var ws workspace.Workspace = &workspace.Service{} -_ = ws +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service ``` -**CreateWorkspace(identifier, password string) (string, error)** +### KeyPairProvider + +Creates key pairs for workspace provisioning. 
+ Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -wsID, _ := svc.CreateWorkspace("user", "pass") +keyPairProvider := stubKeyPairProvider{} +keyPair, _ := keyPairProvider.CreateKeyPair("alice", "pass123") +_ = keyPair ``` -**SwitchWorkspace(name string) error** +### WorkspaceCreateAction + +Action value used to create a workspace. + Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc.SwitchWorkspace("workspace-id") +command := workspace.WorkspaceCommand{Action: workspace.WorkspaceCreateAction, Identifier: "alice", Password: "pass123"} +_ = command ``` -**WorkspaceFileGet(filename string) (string, error)** +### WorkspaceSwitchAction + +Action value used to switch to an existing workspace. + +Example: +```go +command := workspace.WorkspaceCommand{Action: workspace.WorkspaceSwitchAction, WorkspaceID: "f3f0d7"} +_ = command +``` + +### WorkspaceCommand + +Command envelope consumed by the service. + Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -value, _ := svc.WorkspaceFileGet("notes.txt") +command := workspace.WorkspaceCommand{Action: workspace.WorkspaceCreateAction, Identifier: "alice", Password: "pass123"} +_ = command ``` -**WorkspaceFileSet(filename, content string) error** +### Options + +Configures the workspace service. + Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc.WorkspaceFileSet("notes.txt", "hello") +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service ``` -### New(options Options) (any, error) +### New(options Options) (*Service, error) -Creates a new workspace service. Returns `*Service` as `any`. +Creates a new workspace service. 
Example: ```go -type stubCrypt struct{} -func (stubCrypt) CreateKeyPair(name, passphrase string) (string, error) { return "key", nil } +type stubKeyPairProvider struct{} + +func (stubKeyPairProvider) CreateKeyPair(name, passphrase string) (string, error) { + return "key", nil +} -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service ``` ### Service -Implements `Workspace` and handles IPC messages. +Implements `Workspace` and handles Core messages. Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service ``` **CreateWorkspace(identifier, password string) (string, error)** Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -wsID, _ := svc.CreateWorkspace("user", "pass") +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +workspaceID, _ := service.CreateWorkspace("alice", "pass123") +_ = workspaceID ``` -**SwitchWorkspace(name string) error** +**SwitchWorkspace(workspaceID string) error** Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc.SwitchWorkspace("workspace-id") +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service.SwitchWorkspace("f3f0d7") ``` -**WorkspaceFileGet(filename string) (string, error)** +**ReadWorkspaceFile(workspaceFilePath string) (string, error)** Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -value, _ := svc.WorkspaceFileGet("notes.txt") 
+service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +content, _ := service.ReadWorkspaceFile("notes/todo.txt") +_ = content ``` -**WorkspaceFileSet(filename, content string) error** +**WriteWorkspaceFile(workspaceFilePath, content string) error** Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -_ = svc.WorkspaceFileSet("notes.txt", "hello") +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +_ = service.WriteWorkspaceFile("notes/todo.txt", "ship it") +``` + +**HandleWorkspaceCommand(command WorkspaceCommand) core.Result** +Example: +```go +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +result := service.HandleWorkspaceCommand(workspace.WorkspaceCommand{Action: workspace.WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) +_ = result.OK ``` -**HandleIPCEvents(c *core.Core, msg core.Message) core.Result** +**HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result** Example: ```go -svcAny, _ := workspace.New(workspace.Options{Core: core.New(), Crypt: stubCrypt{}}) -svc := svcAny.(*workspace.Service) -result := svc.HandleIPCEvents(core.New(), map[string]any{ - "action": "workspace.create", - "identifier": "user", - "password": "pass", -}) +service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) +result := service.HandleWorkspaceMessage(core.New(), workspace.WorkspaceCommand{Action: workspace.WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) _ = result.OK ``` From 48b777675e6cdd63eed7f4d72cfe4b8ff8336033 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 07:17:59 +0000 Subject: [PATCH 53/83] refactor(workspace): fail unsupported workspace messages explicitly Return explicit fs sentinels for workspace creation, switching, and inactive file access.\n\nUnsupported command and message inputs now return a 
failed core.Result instead of a silent success, and tests cover the fallback path.\n\nCo-Authored-By: Virgil --- workspace/service.go | 12 ++++++------ workspace/service_test.go | 5 ++++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/workspace/service.go b/workspace/service.go index 3a0babf..7179614 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -85,7 +85,7 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er defer service.stateLock.Unlock() if service.keyPairProvider == nil { - return "", core.E("workspace.CreateWorkspace", "key pair provider not available", nil) + return "", core.E("workspace.CreateWorkspace", "key pair provider not available", fs.ErrInvalid) } hash := sha256.Sum256([]byte(identifier)) @@ -96,7 +96,7 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er } if service.medium.Exists(workspaceDirectory) { - return "", core.E("workspace.CreateWorkspace", "workspace already exists", nil) + return "", core.E("workspace.CreateWorkspace", "workspace already exists", fs.ErrExist) } for _, directoryName := range []string{"config", "log", "data", "files", "keys"} { @@ -127,7 +127,7 @@ func (service *Service) SwitchWorkspace(workspaceID string) error { return err } if !service.medium.IsDir(workspaceDirectory) { - return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", workspaceID), nil) + return core.E("workspace.SwitchWorkspace", core.Concat("workspace not found: ", workspaceID), fs.ErrNotExist) } service.activeWorkspaceID = core.PathBase(workspaceDirectory) @@ -136,7 +136,7 @@ func (service *Service) SwitchWorkspace(workspaceID string) error { func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFilePath string) (string, error) { if service.activeWorkspaceID == "" { - return "", core.E(operation, "no active workspace", nil) + return "", core.E(operation, "no active workspace", fs.ErrNotExist) } filesRoot := 
core.Path(service.rootPath, service.activeWorkspaceID, "files") filePath, err := joinPathWithinRoot(filesRoot, workspaceFilePath) @@ -188,7 +188,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } return core.Result{OK: true} } - return core.Result{OK: true} + return core.Result{}.New(core.E("workspace.HandleWorkspaceCommand", core.Concat("unsupported action: ", command.Action), fs.ErrInvalid)) } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) @@ -197,7 +197,7 @@ func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Messag case WorkspaceCommand: return service.HandleWorkspaceCommand(command) } - return core.Result{OK: true} + return core.Result{}.New(core.E("workspace.HandleWorkspaceMessage", "unsupported message type", fs.ErrInvalid)) } func resolveWorkspaceHomeDirectory() string { diff --git a/workspace/service_test.go b/workspace/service_test.go index aaf4caf..e4fef6c 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -114,6 +114,9 @@ func TestService_HandleWorkspaceMessage_Command_Good(t *testing.T) { assert.True(t, switchResult.OK) assert.Equal(t, workspaceID, service.activeWorkspaceID) + unknownAction := service.HandleWorkspaceCommand(WorkspaceCommand{Action: "noop"}) + assert.False(t, unknownAction.OK) + unknown := service.HandleWorkspaceMessage(core.New(), "noop") - assert.True(t, unknown.OK) + assert.False(t, unknown.OK) } From 378fc7c0dea7a938c3610174f847c1a03a1f3c97 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 07:24:17 +0000 Subject: [PATCH 54/83] docs(ax): align sigil references with current surfaces Co-Authored-By: Virgil --- CLAUDE.md | 2 +- docs/RFC.md | 44 +++++++++++++++++--------------------------- docs/architecture.md | 4 ++-- 3 files changed, 20 insertions(+), 30 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index abe2112..9c63220 100644 --- a/CLAUDE.md +++ 
b/CLAUDE.md @@ -128,7 +128,7 @@ Backend packages use `var _ io.Medium = (*Medium)(nil)` to verify interface comp ### Sentinel Errors -Sentinel errors (`var ErrNotFound`, `var ErrInvalidKey`, etc.) use standard `errors.New()` — this is correct Go convention. Only inline error returns in functions should use `coreerr.E()`. +Sentinel errors (`var NotFoundError`, `var InvalidKeyError`, etc.) use standard `errors.New()` — this is correct Go convention. Only inline error returns in functions should use `coreerr.E()`. ## Testing diff --git a/docs/RFC.md b/docs/RFC.md index 3fab450..adcbe42 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -2341,45 +2341,45 @@ Example: s, _ := sigil.NewSigil("hex") ``` -### ErrInvalidKey +### InvalidKeyError Returned when an encryption key is not 32 bytes. Example: ```go -_, err := sigil.NewChaChaPolySigil([]byte("short")) -if errors.Is(err, sigil.ErrInvalidKey) { +_, err := sigil.NewChaChaPolySigil([]byte("short"), nil) +if errors.Is(err, sigil.InvalidKeyError) { // handle invalid key } ``` -### ErrCiphertextTooShort +### CiphertextTooShortError Returned when ciphertext is too short to decrypt. Example: ```go _, err := sigil.GetNonceFromCiphertext([]byte("short")) -if errors.Is(err, sigil.ErrCiphertextTooShort) { +if errors.Is(err, sigil.CiphertextTooShortError) { // handle truncated payload } ``` -### ErrDecryptionFailed +### DecryptionFailedError Returned when decryption or authentication fails. Example: ```go key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) _, err := s.Out([]byte("tampered")) -if errors.Is(err, sigil.ErrDecryptionFailed) { +if errors.Is(err, sigil.DecryptionFailedError) { // handle failed decrypt } ``` -### ErrNoKeyConfigured +### NoKeyConfiguredError Returned when a `ChaChaPolySigil` has no key. 
@@ -2387,7 +2387,7 @@ Example: ```go s := &sigil.ChaChaPolySigil{} _, err := s.In([]byte("data")) -if errors.Is(err, sigil.ErrNoKeyConfigured) { +if errors.Is(err, sigil.NoKeyConfiguredError) { // handle missing key } ``` @@ -2472,14 +2472,14 @@ XChaCha20-Poly1305 encryption sigil with optional pre-obfuscation. Example: ```go key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) ``` **In(data []byte) ([]byte, error)** Example: ```go key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("hello")) ``` @@ -2487,30 +2487,20 @@ ciphertext, _ := s.In([]byte("hello")) Example: ```go key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("hello")) plain, _ := s.Out(ciphertext) ``` -### NewChaChaPolySigil(key []byte) (*ChaChaPolySigil, error) +### NewChaChaPolySigil(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) -Creates an encryption sigil with the default XOR obfuscator. - -Example: -```go -key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) -``` - -### NewChaChaPolySigilWithObfuscator(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) - -Creates an encryption sigil with a custom obfuscator. +Creates an encryption sigil with an optional pre-obfuscator. Pass `nil` to use the default XOR obfuscator. Example: ```go key := make([]byte, 32) ob := &sigil.ShuffleMaskObfuscator{} -s, _ := sigil.NewChaChaPolySigilWithObfuscator(key, ob) +s, _ := sigil.NewChaChaPolySigil(key, ob) ``` ### GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) @@ -2520,7 +2510,7 @@ Extracts the XChaCha20 nonce from encrypted output. 
Example: ```go key := make([]byte, 32) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("hello")) nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) ``` diff --git a/docs/architecture.md b/docs/architecture.md index 8db0246..f6df154 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -230,12 +230,12 @@ The pre-obfuscation layer ensures that raw plaintext patterns are never sent dir key := make([]byte, 32) rand.Read(key) -s, _ := sigil.NewChaChaPolySigil(key) +s, _ := sigil.NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("secret")) plaintext, _ := s.Out(ciphertext) // With stronger obfuscation: -s2, _ := sigil.NewChaChaPolySigilWithObfuscator(key, &sigil.ShuffleMaskObfuscator{}) +s2, _ := sigil.NewChaChaPolySigil(key, &sigil.ShuffleMaskObfuscator{}) ``` Each call to `In` generates a fresh random nonce, so encrypting the same plaintext twice produces different ciphertexts. From 702286a5836a3b9e19d36f4961debcb1e481755c Mon Sep 17 00:00:00 2001 From: Snider Date: Tue, 31 Mar 2026 12:19:56 +0100 Subject: [PATCH 55/83] =?UTF-8?q?feat(ax):=20apply=20AX=20compliance=20swe?= =?UTF-8?q?ep=20=E2=80=94=20usage=20examples=20and=20predictable=20names?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add // Example: usage comments to all Medium interface methods in io.go - Add // Example: comments to local, s3, sqlite, store, datanode, node medium methods - Rename short variable `n` → `nodeTree` throughout node/node_test.go - Rename short variable `s` → `keyValueStore` in store/store_test.go - Rename counter variable `n` → `count` in store/store_test.go - Rename `m` → `medium` in store/medium_test.go helper - Remove redundant prose comments replaced by usage examples Co-Authored-By: Virgil --- datanode/medium.go | 2 + io.go | 12 ++ local/medium.go | 12 ++ node/node.go | 15 +++ node/node_test.go | 292 +++++++++++++++++++++---------------------- 
s3/s3.go | 5 + sqlite/sqlite.go | 11 ++ store/medium_test.go | 6 +- store/store.go | 1 + store/store_test.go | 74 +++++------ 10 files changed, 244 insertions(+), 186 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 177c6ac..2cd39fe 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -38,6 +38,8 @@ type Medium struct { lock sync.RWMutex } +// Example: medium := datanode.New() +// Example: _ = medium.Write("jobs/run.log", "started") func New() *Medium { return &Medium{ dataNode: borgdatanode.New(), diff --git a/io.go b/io.go index 20fc2fa..3c3634f 100644 --- a/io.go +++ b/io.go @@ -15,31 +15,43 @@ import ( // Example: backup, _ := io.NewSandboxed("/srv/backup") // Example: _ = io.Copy(medium, "data/report.json", backup, "daily/report.json") type Medium interface { + // Example: content, _ := medium.Read("config/app.yaml") Read(path string) (string, error) + // Example: _ = medium.Write("config/app.yaml", "port: 8080") Write(path, content string) error // Example: _ = medium.WriteMode("keys/private.key", key, 0600) WriteMode(path, content string, mode fs.FileMode) error + // Example: _ = medium.EnsureDir("config/app") EnsureDir(path string) error + // Example: isFile := medium.IsFile("config/app.yaml") IsFile(path string) bool + // Example: _ = medium.Delete("config/app.yaml") Delete(path string) error + // Example: _ = medium.DeleteAll("logs/archive") DeleteAll(path string) error + // Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") Rename(oldPath, newPath string) error + // Example: entries, _ := medium.List("config") List(path string) ([]fs.DirEntry, error) + // Example: info, _ := medium.Stat("config/app.yaml") Stat(path string) (fs.FileInfo, error) + // Example: file, _ := medium.Open("config/app.yaml") Open(path string) (fs.File, error) + // Example: writer, _ := medium.Create("logs/app.log") Create(path string) (goio.WriteCloser, error) + // Example: writer, _ := medium.Append("logs/app.log") Append(path string) 
(goio.WriteCloser, error) // Example: reader, _ := medium.ReadStream("logs/app.log") diff --git a/local/medium.go b/local/medium.go index 7c9c104..39e6cc4 100644 --- a/local/medium.go +++ b/local/medium.go @@ -238,6 +238,7 @@ func (medium *Medium) WriteMode(path, content string, mode fs.FileMode) error { return resultError("local.WriteMode", core.Concat("write failed: ", path), unrestrictedFileSystem.WriteMode(resolvedPath, content, mode)) } +// Example: _ = medium.EnsureDir("config/app") func (medium *Medium) EnsureDir(path string) error { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -246,6 +247,7 @@ func (medium *Medium) EnsureDir(path string) error { return resultError("local.EnsureDir", core.Concat("ensure dir failed: ", path), unrestrictedFileSystem.EnsureDir(resolvedPath)) } +// Example: isDirectory := medium.IsDir("config") func (medium *Medium) IsDir(path string) bool { if path == "" { return false @@ -257,6 +259,7 @@ func (medium *Medium) IsDir(path string) bool { return unrestrictedFileSystem.IsDir(resolvedPath) } +// Example: isFile := medium.IsFile("config/app.yaml") func (medium *Medium) IsFile(path string) bool { if path == "" { return false @@ -268,6 +271,7 @@ func (medium *Medium) IsFile(path string) bool { return unrestrictedFileSystem.IsFile(resolvedPath) } +// Example: exists := medium.Exists("config/app.yaml") func (medium *Medium) Exists(path string) bool { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -276,6 +280,7 @@ func (medium *Medium) Exists(path string) bool { return unrestrictedFileSystem.Exists(resolvedPath) } +// Example: entries, _ := medium.List("config") func (medium *Medium) List(path string) ([]fs.DirEntry, error) { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -284,6 +289,7 @@ func (medium *Medium) List(path string) ([]fs.DirEntry, error) { return resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) } +// 
Example: info, _ := medium.Stat("config/app.yaml") func (medium *Medium) Stat(path string) (fs.FileInfo, error) { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -292,6 +298,7 @@ func (medium *Medium) Stat(path string) (fs.FileInfo, error) { return resultFileInfo("local.Stat", core.Concat("stat failed: ", path), unrestrictedFileSystem.Stat(resolvedPath)) } +// Example: file, _ := medium.Open("config/app.yaml") func (medium *Medium) Open(path string) (fs.File, error) { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -300,6 +307,7 @@ func (medium *Medium) Open(path string) (fs.File, error) { return resultFile("local.Open", core.Concat("open failed: ", path), unrestrictedFileSystem.Open(resolvedPath)) } +// Example: writer, _ := medium.Create("logs/app.log") func (medium *Medium) Create(path string) (goio.WriteCloser, error) { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -308,6 +316,7 @@ func (medium *Medium) Create(path string) (goio.WriteCloser, error) { return resultWriteCloser("local.Create", core.Concat("create failed: ", path), unrestrictedFileSystem.Create(resolvedPath)) } +// Example: writer, _ := medium.Append("logs/app.log") func (medium *Medium) Append(path string) (goio.WriteCloser, error) { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -326,6 +335,7 @@ func (medium *Medium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } +// Example: _ = medium.Delete("config/app.yaml") func (medium *Medium) Delete(path string) error { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -337,6 +347,7 @@ func (medium *Medium) Delete(path string) error { return resultError("local.Delete", core.Concat("delete failed: ", path), unrestrictedFileSystem.Delete(resolvedPath)) } +// Example: _ = medium.DeleteAll("logs/archive") func (medium *Medium) DeleteAll(path string) error { resolvedPath, err := medium.validatePath(path) if err != nil { @@ -348,6 +359,7 
@@ func (medium *Medium) DeleteAll(path string) error { return resultError("local.DeleteAll", core.Concat("delete all failed: ", path), unrestrictedFileSystem.DeleteAll(resolvedPath)) } +// Example: _ = medium.Rename("drafts/todo.txt", "archive/todo.txt") func (medium *Medium) Rename(oldPath, newPath string) error { oldResolvedPath, err := medium.validatePath(oldPath) if err != nil { diff --git a/node/node.go b/node/node.go index 3ada8e7..deb8349 100644 --- a/node/node.go +++ b/node/node.go @@ -232,6 +232,7 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro return nil } +// Example: file, _ := nodeTree.Open("config/app.yaml") func (node *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") if dataFile, ok := node.files[name]; ok { @@ -249,6 +250,7 @@ func (node *Node) Open(name string) (fs.File, error) { return nil, core.E("node.Open", core.Concat("path not found: ", name), fs.ErrNotExist) } +// Example: info, _ := nodeTree.Stat("config/app.yaml") func (node *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") if dataFile, ok := node.files[name]; ok { @@ -266,6 +268,7 @@ func (node *Node) Stat(name string) (fs.FileInfo, error) { return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist) } +// Example: entries, _ := nodeTree.ReadDir("config") func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = core.TrimPrefix(name, "/") if name == "." 
{ @@ -314,6 +317,7 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { return entries, nil } +// Example: content, _ := nodeTree.Read("config/app.yaml") func (node *Node) Read(filePath string) (string, error) { filePath = core.TrimPrefix(filePath, "/") file, ok := node.files[filePath] @@ -323,11 +327,13 @@ func (node *Node) Read(filePath string) (string, error) { return string(file.content), nil } +// Example: _ = nodeTree.Write("config/app.yaml", "port: 8080") func (node *Node) Write(filePath, content string) error { node.AddData(filePath, []byte(content)) return nil } +// Example: _ = nodeTree.WriteMode("keys/private.key", key, 0600) func (node *Node) WriteMode(filePath, content string, mode fs.FileMode) error { return node.Write(filePath, content) } @@ -337,17 +343,20 @@ func (node *Node) EnsureDir(directoryPath string) error { return nil } +// Example: exists := nodeTree.Exists("config/app.yaml") func (node *Node) Exists(filePath string) bool { _, err := node.Stat(filePath) return err == nil } +// Example: isFile := nodeTree.IsFile("config/app.yaml") func (node *Node) IsFile(filePath string) bool { filePath = core.TrimPrefix(filePath, "/") _, ok := node.files[filePath] return ok } +// Example: isDirectory := nodeTree.IsDir("config") func (node *Node) IsDir(filePath string) bool { info, err := node.Stat(filePath) if err != nil { @@ -356,6 +365,7 @@ func (node *Node) IsDir(filePath string) bool { return info.IsDir() } +// Example: _ = nodeTree.Delete("config/app.yaml") func (node *Node) Delete(filePath string) error { filePath = core.TrimPrefix(filePath, "/") if _, ok := node.files[filePath]; ok { @@ -365,6 +375,7 @@ func (node *Node) Delete(filePath string) error { return core.E("node.Delete", core.Concat("path not found: ", filePath), fs.ErrNotExist) } +// Example: _ = nodeTree.DeleteAll("logs/archive") func (node *Node) DeleteAll(filePath string) error { filePath = core.TrimPrefix(filePath, "/") @@ -388,6 +399,7 @@ func (node *Node) 
DeleteAll(filePath string) error { return nil } +// Example: _ = nodeTree.Rename("drafts/todo.txt", "archive/todo.txt") func (node *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") @@ -403,6 +415,7 @@ func (node *Node) Rename(oldPath, newPath string) error { return nil } +// Example: entries, _ := nodeTree.List("config") func (node *Node) List(filePath string) ([]fs.DirEntry, error) { filePath = core.TrimPrefix(filePath, "/") if filePath == "" || filePath == "." { @@ -411,11 +424,13 @@ func (node *Node) List(filePath string) ([]fs.DirEntry, error) { return node.ReadDir(filePath) } +// Example: writer, _ := nodeTree.Create("logs/app.log") func (node *Node) Create(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") return &nodeWriter{node: node, path: filePath}, nil } +// Example: writer, _ := nodeTree.Append("logs/app.log") func (node *Node) Append(filePath string) (goio.WriteCloser, error) { filePath = core.TrimPrefix(filePath, "/") var existing []byte diff --git a/node/node_test.go b/node/node_test.go index 279f8aa..2e783e6 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -15,16 +15,16 @@ import ( ) func TestNode_New_Good(t *testing.T) { - n := New() - require.NotNil(t, n, "New() must not return nil") - assert.NotNil(t, n.files, "New() must initialise the files map") + nodeTree := New() + require.NotNil(t, nodeTree, "New() must not return nil") + assert.NotNil(t, nodeTree.files, "New() must initialise the files map") } func TestNode_AddData_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) - file, ok := n.files["foo.txt"] + file, ok := nodeTree.files["foo.txt"] require.True(t, ok, "file foo.txt should be present") assert.Equal(t, []byte("foo"), file.content) @@ -34,38 +34,38 @@ func TestNode_AddData_Good(t *testing.T) { } func TestNode_AddData_Bad(t 
*testing.T) { - n := New() + nodeTree := New() - n.AddData("", []byte("data")) - assert.Empty(t, n.files, "empty name must not be stored") + nodeTree.AddData("", []byte("data")) + assert.Empty(t, nodeTree.files, "empty name must not be stored") - n.AddData("dir/", nil) - assert.Empty(t, n.files, "directory entry must not be stored") + nodeTree.AddData("dir/", nil) + assert.Empty(t, nodeTree.files, "directory entry must not be stored") } func TestNode_AddData_EdgeCases_Good(t *testing.T) { t.Run("Overwrite", func(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) - n.AddData("foo.txt", []byte("bar")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + nodeTree.AddData("foo.txt", []byte("bar")) - file := n.files["foo.txt"] + file := nodeTree.files["foo.txt"] assert.Equal(t, []byte("bar"), file.content, "second AddData should overwrite") }) t.Run("LeadingSlash", func(t *testing.T) { - n := New() - n.AddData("/hello.txt", []byte("hi")) - _, ok := n.files["hello.txt"] + nodeTree := New() + nodeTree.AddData("/hello.txt", []byte("hi")) + _, ok := nodeTree.files["hello.txt"] assert.True(t, ok, "leading slash should be trimmed") }) } func TestNode_Open_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) - file, err := n.Open("foo.txt") + file, err := nodeTree.Open("foo.txt") require.NoError(t, err) defer file.Close() @@ -76,17 +76,17 @@ func TestNode_Open_Good(t *testing.T) { } func TestNode_Open_Bad(t *testing.T) { - n := New() - _, err := n.Open("nonexistent.txt") + nodeTree := New() + _, err := nodeTree.Open("nonexistent.txt") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } func TestNode_Open_Directory_Good(t *testing.T) { - n := New() - n.AddData("bar/baz.txt", []byte("baz")) + nodeTree := New() + nodeTree.AddData("bar/baz.txt", []byte("baz")) - file, err := n.Open("bar") + file, err := nodeTree.Open("bar") require.NoError(t, err) defer 
file.Close() @@ -99,88 +99,88 @@ func TestNode_Open_Directory_Good(t *testing.T) { } func TestNode_Stat_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) - n.AddData("bar/baz.txt", []byte("baz")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + nodeTree.AddData("bar/baz.txt", []byte("baz")) - info, err := n.Stat("bar/baz.txt") + info, err := nodeTree.Stat("bar/baz.txt") require.NoError(t, err) assert.Equal(t, "baz.txt", info.Name()) assert.Equal(t, int64(3), info.Size()) assert.False(t, info.IsDir()) - dirInfo, err := n.Stat("bar") + dirInfo, err := nodeTree.Stat("bar") require.NoError(t, err) assert.True(t, dirInfo.IsDir()) assert.Equal(t, "bar", dirInfo.Name()) } func TestNode_Stat_Bad(t *testing.T) { - n := New() - _, err := n.Stat("nonexistent") + nodeTree := New() + _, err := nodeTree.Stat("nonexistent") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } func TestNode_Stat_RootDirectory_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) - info, err := n.Stat(".") + info, err := nodeTree.Stat(".") require.NoError(t, err) assert.True(t, info.IsDir()) assert.Equal(t, ".", info.Name()) } func TestNode_ReadFile_Good(t *testing.T) { - n := New() - n.AddData("hello.txt", []byte("hello world")) + nodeTree := New() + nodeTree.AddData("hello.txt", []byte("hello world")) - data, err := n.ReadFile("hello.txt") + data, err := nodeTree.ReadFile("hello.txt") require.NoError(t, err) assert.Equal(t, []byte("hello world"), data) } func TestNode_ReadFile_Bad(t *testing.T) { - n := New() - _, err := n.ReadFile("missing.txt") + nodeTree := New() + _, err := nodeTree.ReadFile("missing.txt") require.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) } func TestNode_ReadFile_ReturnsCopy_Good(t *testing.T) { - n := New() - n.AddData("data.bin", []byte("original")) + nodeTree := New() + nodeTree.AddData("data.bin", []byte("original")) - data, err 
:= n.ReadFile("data.bin") + data, err := nodeTree.ReadFile("data.bin") require.NoError(t, err) data[0] = 'X' - data2, err := n.ReadFile("data.bin") + data2, err := nodeTree.ReadFile("data.bin") require.NoError(t, err) assert.Equal(t, []byte("original"), data2, "ReadFile must return an independent copy") } func TestNode_ReadDir_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) - n.AddData("bar/baz.txt", []byte("baz")) - n.AddData("bar/qux.txt", []byte("qux")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + nodeTree.AddData("bar/baz.txt", []byte("baz")) + nodeTree.AddData("bar/qux.txt", []byte("qux")) - entries, err := n.ReadDir(".") + entries, err := nodeTree.ReadDir(".") require.NoError(t, err) assert.Equal(t, []string{"bar", "foo.txt"}, sortedNames(entries)) - barEntries, err := n.ReadDir("bar") + barEntries, err := nodeTree.ReadDir("bar") require.NoError(t, err) assert.Equal(t, []string{"baz.txt", "qux.txt"}, sortedNames(barEntries)) } func TestNode_ReadDir_Bad(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) - _, err := n.ReadDir("foo.txt") + _, err := nodeTree.ReadDir("foo.txt") require.Error(t, err) var pathErr *fs.PathError require.True(t, core.As(err, &pathErr)) @@ -188,45 +188,45 @@ func TestNode_ReadDir_Bad(t *testing.T) { } func TestNode_ReadDir_IgnoresEmptyEntry_Good(t *testing.T) { - n := New() - n.AddData("bar/baz.txt", []byte("baz")) - n.AddData("empty_dir/", nil) + nodeTree := New() + nodeTree.AddData("bar/baz.txt", []byte("baz")) + nodeTree.AddData("empty_dir/", nil) - entries, err := n.ReadDir(".") + entries, err := nodeTree.ReadDir(".") require.NoError(t, err) assert.Equal(t, []string{"bar"}, sortedNames(entries)) } func TestNode_Exists_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) - n.AddData("bar/baz.txt", []byte("baz")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + 
nodeTree.AddData("bar/baz.txt", []byte("baz")) - assert.True(t, n.Exists("foo.txt")) - assert.True(t, n.Exists("bar")) + assert.True(t, nodeTree.Exists("foo.txt")) + assert.True(t, nodeTree.Exists("bar")) } func TestNode_Exists_Bad(t *testing.T) { - n := New() - assert.False(t, n.Exists("nonexistent")) + nodeTree := New() + assert.False(t, nodeTree.Exists("nonexistent")) } func TestNode_Exists_RootAndEmptyPath_Good(t *testing.T) { - n := New() - n.AddData("dummy.txt", []byte("dummy")) + nodeTree := New() + nodeTree.AddData("dummy.txt", []byte("dummy")) - assert.True(t, n.Exists("."), "root '.' must exist") - assert.True(t, n.Exists(""), "empty path (root) must exist") + assert.True(t, nodeTree.Exists("."), "root '.' must exist") + assert.True(t, nodeTree.Exists(""), "empty path (root) must exist") } func TestNode_Walk_Default_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) - n.AddData("bar/baz.txt", []byte("baz")) - n.AddData("bar/qux.txt", []byte("qux")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + nodeTree.AddData("bar/baz.txt", []byte("baz")) + nodeTree.AddData("bar/qux.txt", []byte("qux")) var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{}) @@ -237,10 +237,10 @@ func TestNode_Walk_Default_Good(t *testing.T) { } func TestNode_Walk_Default_Bad(t *testing.T) { - n := New() + nodeTree := New() var called bool - err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrNotExist) @@ -251,13 +251,13 @@ func TestNode_Walk_Default_Bad(t *testing.T) { } func TestNode_Walk_CallbackError_Good(t *testing.T) { - n := New() - n.AddData("a/b.txt", []byte("b")) - n.AddData("a/c.txt", []byte("c")) + 
nodeTree := New() + nodeTree.AddData("a/b.txt", []byte("b")) + nodeTree.AddData("a/c.txt", []byte("c")) walkErr := core.NewError("stop walking") var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk(".", func(p string, d fs.DirEntry, err error) error { if p == "a/b.txt" { return walkErr } @@ -269,15 +269,15 @@ func TestNode_Walk_CallbackError_Good(t *testing.T) { } func TestNode_Walk_Good(t *testing.T) { - n := New() - n.AddData("root.txt", []byte("root")) - n.AddData("a/a1.txt", []byte("a1")) - n.AddData("a/b/b1.txt", []byte("b1")) - n.AddData("c/c1.txt", []byte("c1")) + nodeTree := New() + nodeTree.AddData("root.txt", []byte("root")) + nodeTree.AddData("a/a1.txt", []byte("a1")) + nodeTree.AddData("a/b/b1.txt", []byte("b1")) + nodeTree.AddData("c/c1.txt", []byte("c1")) t.Run("MaxDepth", func(t *testing.T) { var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{MaxDepth: 1}) @@ -289,7 +289,7 @@ func TestNode_Walk_Good(t *testing.T) { t.Run("Filter", func(t *testing.T) { var paths []string - err := n.Walk(".", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk(".", func(p string, d fs.DirEntry, err error) error { paths = append(paths, p) return nil }, WalkOptions{Filter: func(p string, d fs.DirEntry) bool { @@ -303,7 +303,7 @@ func TestNode_Walk_Good(t *testing.T) { t.Run("SkipErrors", func(t *testing.T) { var called bool - err := n.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { + err := nodeTree.Walk("nonexistent", func(p string, d fs.DirEntry, err error) error { called = true return err }, WalkOptions{SkipErrors: true}) @@ -314,11 +314,11 @@ func TestNode_Walk_Good(t *testing.T) { } func TestNode_CopyFile_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", []byte("foo")) + nodeTree := New() + 
nodeTree.AddData("foo.txt", []byte("foo")) tmpfile := core.Path(t.TempDir(), "test.txt") - err := n.CopyFile("foo.txt", tmpfile, 0644) + err := nodeTree.CopyFile("foo.txt", tmpfile, 0644) require.NoError(t, err) content, err := coreio.Local.Read(tmpfile) @@ -327,40 +327,40 @@ func TestNode_CopyFile_Good(t *testing.T) { } func TestNode_CopyFile_Bad(t *testing.T) { - n := New() + nodeTree := New() tmpfile := core.Path(t.TempDir(), "test.txt") - err := n.CopyFile("nonexistent.txt", tmpfile, 0644) + err := nodeTree.CopyFile("nonexistent.txt", tmpfile, 0644) assert.Error(t, err) - n.AddData("foo.txt", []byte("foo")) - err = n.CopyFile("foo.txt", "/nonexistent_dir/test.txt", 0644) + nodeTree.AddData("foo.txt", []byte("foo")) + err = nodeTree.CopyFile("foo.txt", "/nonexistent_dir/test.txt", 0644) assert.Error(t, err) } func TestNode_CopyFile_DirectorySource_Bad(t *testing.T) { - n := New() - n.AddData("bar/baz.txt", []byte("baz")) + nodeTree := New() + nodeTree.AddData("bar/baz.txt", []byte("baz")) tmpfile := core.Path(t.TempDir(), "test.txt") - err := n.CopyFile("bar", tmpfile, 0644) + err := nodeTree.CopyFile("bar", tmpfile, 0644) assert.Error(t, err) } func TestNode_CopyTo_Good(t *testing.T) { - n := New() - n.AddData("config/app.yaml", []byte("port: 8080")) - n.AddData("config/env/app.env", []byte("MODE=test")) + nodeTree := New() + nodeTree.AddData("config/app.yaml", []byte("port: 8080")) + nodeTree.AddData("config/env/app.env", []byte("MODE=test")) fileTarget := coreio.NewMemoryMedium() - err := n.CopyTo(fileTarget, "config/app.yaml", "backup/app.yaml") + err := nodeTree.CopyTo(fileTarget, "config/app.yaml", "backup/app.yaml") require.NoError(t, err) content, err := fileTarget.Read("backup/app.yaml") require.NoError(t, err) assert.Equal(t, "port: 8080", content) dirTarget := coreio.NewMemoryMedium() - err = n.CopyTo(dirTarget, "config", "backup/config") + err = nodeTree.CopyTo(dirTarget, "config", "backup/config") require.NoError(t, err) content, err = 
dirTarget.Read("backup/config/app.yaml") require.NoError(t, err) @@ -371,35 +371,35 @@ func TestNode_CopyTo_Good(t *testing.T) { } func TestNode_CopyTo_Bad(t *testing.T) { - n := New() - err := n.CopyTo(coreio.NewMemoryMedium(), "missing", "backup/missing") + nodeTree := New() + err := nodeTree.CopyTo(coreio.NewMemoryMedium(), "missing", "backup/missing") assert.Error(t, err) } func TestNode_MediumFacade_Good(t *testing.T) { - n := New() + nodeTree := New() - require.NoError(t, n.Write("docs/readme.txt", "hello")) - require.NoError(t, n.WriteMode("docs/mode.txt", "mode", 0600)) - require.NoError(t, n.Write("docs/guide.txt", "guide")) - require.NoError(t, n.EnsureDir("ignored")) + require.NoError(t, nodeTree.Write("docs/readme.txt", "hello")) + require.NoError(t, nodeTree.WriteMode("docs/mode.txt", "mode", 0600)) + require.NoError(t, nodeTree.Write("docs/guide.txt", "guide")) + require.NoError(t, nodeTree.EnsureDir("ignored")) - value, err := n.Read("docs/readme.txt") + value, err := nodeTree.Read("docs/readme.txt") require.NoError(t, err) assert.Equal(t, "hello", value) - value, err = n.Read("docs/guide.txt") + value, err = nodeTree.Read("docs/guide.txt") require.NoError(t, err) assert.Equal(t, "guide", value) - assert.True(t, n.IsFile("docs/readme.txt")) - assert.True(t, n.IsDir("docs")) + assert.True(t, nodeTree.IsFile("docs/readme.txt")) + assert.True(t, nodeTree.IsDir("docs")) - entries, err := n.List("docs") + entries, err := nodeTree.List("docs") require.NoError(t, err) assert.Equal(t, []string{"guide.txt", "mode.txt", "readme.txt"}, sortedNames(entries)) - file, err := n.Open("docs/readme.txt") + file, err := nodeTree.Open("docs/readme.txt") require.NoError(t, err) info, err := file.Stat() require.NoError(t, err) @@ -409,7 +409,7 @@ func TestNode_MediumFacade_Good(t *testing.T) { assert.Nil(t, info.Sys()) require.NoError(t, file.Close()) - dir, err := n.Open("docs") + dir, err := nodeTree.Open("docs") require.NoError(t, err) dirInfo, err := dir.Stat() 
require.NoError(t, err) @@ -419,47 +419,47 @@ func TestNode_MediumFacade_Good(t *testing.T) { assert.Nil(t, dirInfo.Sys()) require.NoError(t, dir.Close()) - createWriter, err := n.Create("docs/generated.txt") + createWriter, err := nodeTree.Create("docs/generated.txt") require.NoError(t, err) _, err = createWriter.Write([]byte("generated")) require.NoError(t, err) require.NoError(t, createWriter.Close()) - appendWriter, err := n.Append("docs/generated.txt") + appendWriter, err := nodeTree.Append("docs/generated.txt") require.NoError(t, err) _, err = appendWriter.Write([]byte(" content")) require.NoError(t, err) require.NoError(t, appendWriter.Close()) - streamReader, err := n.ReadStream("docs/generated.txt") + streamReader, err := nodeTree.ReadStream("docs/generated.txt") require.NoError(t, err) streamData, err := io.ReadAll(streamReader) require.NoError(t, err) assert.Equal(t, "generated content", string(streamData)) require.NoError(t, streamReader.Close()) - writeStream, err := n.WriteStream("docs/stream.txt") + writeStream, err := nodeTree.WriteStream("docs/stream.txt") require.NoError(t, err) _, err = writeStream.Write([]byte("stream")) require.NoError(t, err) require.NoError(t, writeStream.Close()) - require.NoError(t, n.Rename("docs/stream.txt", "docs/stream-renamed.txt")) - assert.True(t, n.Exists("docs/stream-renamed.txt")) + require.NoError(t, nodeTree.Rename("docs/stream.txt", "docs/stream-renamed.txt")) + assert.True(t, nodeTree.Exists("docs/stream-renamed.txt")) - require.NoError(t, n.Delete("docs/stream-renamed.txt")) - assert.False(t, n.Exists("docs/stream-renamed.txt")) + require.NoError(t, nodeTree.Delete("docs/stream-renamed.txt")) + assert.False(t, nodeTree.Exists("docs/stream-renamed.txt")) - require.NoError(t, n.DeleteAll("docs")) - assert.False(t, n.Exists("docs")) + require.NoError(t, nodeTree.DeleteAll("docs")) + assert.False(t, nodeTree.Exists("docs")) } func TestNode_ToTar_Good(t *testing.T) { - n := New() - n.AddData("foo.txt", 
[]byte("foo")) - n.AddData("bar/baz.txt", []byte("baz")) + nodeTree := New() + nodeTree.AddData("foo.txt", []byte("foo")) + nodeTree.AddData("bar/baz.txt", []byte("baz")) - tarball, err := n.ToTar() + tarball, err := nodeTree.ToTar() require.NoError(t, err) require.NotEmpty(t, tarball) @@ -500,11 +500,11 @@ func TestNode_FromTar_Good(t *testing.T) { } require.NoError(t, tw.Close()) - n, err := FromTar(buf.Bytes()) + nodeTree, err := FromTar(buf.Bytes()) require.NoError(t, err) - assert.True(t, n.Exists("foo.txt"), "foo.txt should exist") - assert.True(t, n.Exists("bar/baz.txt"), "bar/baz.txt should exist") + assert.True(t, nodeTree.Exists("foo.txt"), "foo.txt should exist") + assert.True(t, nodeTree.Exists("bar/baz.txt"), "bar/baz.txt should exist") } func TestNode_FromTar_Bad(t *testing.T) { @@ -514,41 +514,41 @@ func TestNode_FromTar_Bad(t *testing.T) { } func TestNode_TarRoundTrip_Good(t *testing.T) { - n1 := New() - n1.AddData("a.txt", []byte("alpha")) - n1.AddData("b/c.txt", []byte("charlie")) + nodeTree1 := New() + nodeTree1.AddData("a.txt", []byte("alpha")) + nodeTree1.AddData("b/c.txt", []byte("charlie")) - tarball, err := n1.ToTar() + tarball, err := nodeTree1.ToTar() require.NoError(t, err) - n2, err := FromTar(tarball) + nodeTree2, err := FromTar(tarball) require.NoError(t, err) - data, err := n2.ReadFile("a.txt") + data, err := nodeTree2.ReadFile("a.txt") require.NoError(t, err) assert.Equal(t, []byte("alpha"), data) - data, err = n2.ReadFile("b/c.txt") + data, err = nodeTree2.ReadFile("b/c.txt") require.NoError(t, err) assert.Equal(t, []byte("charlie"), data) } func TestNode_FSInterface_Good(t *testing.T) { - n := New() - n.AddData("hello.txt", []byte("world")) + nodeTree := New() + nodeTree.AddData("hello.txt", []byte("world")) - var fsys fs.FS = n + var fsys fs.FS = nodeTree file, err := fsys.Open("hello.txt") require.NoError(t, err) defer file.Close() - var statFS fs.StatFS = n + var statFS fs.StatFS = nodeTree info, err := statFS.Stat("hello.txt") 
require.NoError(t, err) assert.Equal(t, "hello.txt", info.Name()) assert.Equal(t, int64(5), info.Size()) - var readFS fs.ReadFileFS = n + var readFS fs.ReadFileFS = nodeTree data, err := readFS.ReadFile("hello.txt") require.NoError(t, err) assert.Equal(t, []byte("world"), data) diff --git a/s3/s3.go b/s3/s3.go index 58ea246..5baf152 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -119,6 +119,7 @@ func (medium *Medium) objectKey(filePath string) string { return medium.prefix + clean } +// Example: content, _ := medium.Read("reports/daily.txt") func (medium *Medium) Read(filePath string) (string, error) { key := medium.objectKey(filePath) if key == "" { @@ -141,6 +142,7 @@ func (medium *Medium) Read(filePath string) (string, error) { return string(data), nil } +// Example: _ = medium.Write("reports/daily.txt", "done") func (medium *Medium) Write(filePath, content string) error { key := medium.objectKey(filePath) if key == "" { @@ -184,6 +186,7 @@ func (medium *Medium) IsFile(filePath string) bool { return err == nil } +// Example: _ = medium.Delete("reports/daily.txt") func (medium *Medium) Delete(filePath string) error { key := medium.objectKey(filePath) if key == "" { @@ -470,6 +473,7 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { }, nil } +// Example: reader, _ := medium.ReadStream("reports/daily.txt") func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := medium.objectKey(filePath) if key == "" { @@ -486,6 +490,7 @@ func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return out.Body, nil } +// Example: writer, _ := medium.WriteStream("reports/daily.txt") func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return medium.Create(filePath) } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index e852f9b..b3d4475 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -88,6 +88,7 @@ func normaliseEntryPath(filePath string) string { return core.TrimPrefix(clean, "/") 
} +// Example: content, _ := medium.Read("config/app.yaml") func (medium *Medium) Read(filePath string) (string, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -111,6 +112,7 @@ func (medium *Medium) Read(filePath string) (string, error) { return string(content), nil } +// Example: _ = medium.Write("config/app.yaml", "port: 8080") func (medium *Medium) Write(filePath, content string) error { return medium.WriteMode(filePath, content, 0644) } @@ -151,6 +153,7 @@ func (medium *Medium) EnsureDir(filePath string) error { return nil } +// Example: isFile := medium.IsFile("config/app.yaml") func (medium *Medium) IsFile(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { @@ -401,6 +404,7 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { return entries, nil } +// Example: info, _ := medium.Stat("config/app.yaml") func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -431,6 +435,7 @@ func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { }, nil } +// Example: file, _ := medium.Open("config/app.yaml") func (medium *Medium) Open(filePath string) (fs.File, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -462,6 +467,7 @@ func (medium *Medium) Open(filePath string) (fs.File, error) { }, nil } +// Example: writer, _ := medium.Create("logs/app.log") func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -473,6 +479,7 @@ func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { }, nil } +// Example: writer, _ := medium.Append("logs/app.log") func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -494,6 +501,7 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { }, nil } +// Example: reader, _ := medium.ReadStream("logs/app.log") func (medium 
*Medium) ReadStream(filePath string) (goio.ReadCloser, error) { key := normaliseEntryPath(filePath) if key == "" { @@ -518,10 +526,12 @@ func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return goio.NopCloser(bytes.NewReader(content)), nil } +// Example: writer, _ := medium.WriteStream("logs/app.log") func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { return medium.Create(filePath) } +// Example: exists := medium.Exists("config/app.yaml") func (medium *Medium) Exists(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { @@ -538,6 +548,7 @@ func (medium *Medium) Exists(filePath string) bool { return count > 0 } +// Example: isDirectory := medium.IsDir("config") func (medium *Medium) IsDir(filePath string) bool { key := normaliseEntryPath(filePath) if key == "" { diff --git a/store/medium_test.go b/store/medium_test.go index c45a89e..7ea7bec 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -11,10 +11,10 @@ import ( func newTestKeyValueMedium(t *testing.T) *Medium { t.Helper() - m, err := NewMedium(Options{Path: ":memory:"}) + medium, err := NewMedium(Options{Path: ":memory:"}) require.NoError(t, err) - t.Cleanup(func() { m.Close() }) - return m + t.Cleanup(func() { medium.Close() }) + return medium } func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { diff --git a/store/store.go b/store/store.go index 65b36a2..f59d818 100644 --- a/store/store.go +++ b/store/store.go @@ -18,6 +18,7 @@ type Store struct { database *sql.DB } +// Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type Options struct { Path string } diff --git a/store/store_test.go b/store/store_test.go index 74df399..07e49a8 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -10,17 +10,17 @@ import ( func newTestStore(t *testing.T) *Store { t.Helper() - s, err := New(Options{Path: ":memory:"}) + keyValueStore, err := New(Options{Path: ":memory:"}) require.NoError(t, err) 
t.Cleanup(func() { - require.NoError(t, s.Close()) + require.NoError(t, keyValueStore.Close()) }) - return s + return keyValueStore } func TestStore_New_Options_Good(t *testing.T) { - s := newTestStore(t) - assert.NotNil(t, s) + keyValueStore := newTestStore(t) + assert.NotNil(t, keyValueStore) } func TestStore_New_Options_Bad(t *testing.T) { @@ -29,86 +29,86 @@ func TestStore_New_Options_Bad(t *testing.T) { } func TestStore_SetGet_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - err := s.Set("config", "theme", "dark") + err := keyValueStore.Set("config", "theme", "dark") require.NoError(t, err) - val, err := s.Get("config", "theme") + val, err := keyValueStore.Get("config", "theme") require.NoError(t, err) assert.Equal(t, "dark", val) } func TestStore_Get_NotFound_Bad(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - _, err := s.Get("config", "missing") + _, err := keyValueStore.Get("config", "missing") assert.ErrorIs(t, err, NotFoundError) } func TestStore_Delete_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - _ = s.Set("config", "key", "val") - err := s.Delete("config", "key") + _ = keyValueStore.Set("config", "key", "val") + err := keyValueStore.Delete("config", "key") require.NoError(t, err) - _, err = s.Get("config", "key") + _, err = keyValueStore.Get("config", "key") assert.ErrorIs(t, err, NotFoundError) } func TestStore_Count_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - _ = s.Set("group", "a", "1") - _ = s.Set("group", "b", "2") - _ = s.Set("other", "c", "3") + _ = keyValueStore.Set("group", "a", "1") + _ = keyValueStore.Set("group", "b", "2") + _ = keyValueStore.Set("other", "c", "3") - n, err := s.Count("group") + count, err := keyValueStore.Count("group") require.NoError(t, err) - assert.Equal(t, 2, n) + assert.Equal(t, 2, count) } func TestStore_DeleteGroup_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := 
newTestStore(t) - _ = s.Set("group", "a", "1") - _ = s.Set("group", "b", "2") - err := s.DeleteGroup("group") + _ = keyValueStore.Set("group", "a", "1") + _ = keyValueStore.Set("group", "b", "2") + err := keyValueStore.DeleteGroup("group") require.NoError(t, err) - n, _ := s.Count("group") - assert.Equal(t, 0, n) + count, _ := keyValueStore.Count("group") + assert.Equal(t, 0, count) } func TestStore_GetAll_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - _ = s.Set("group", "a", "1") - _ = s.Set("group", "b", "2") - _ = s.Set("other", "c", "3") + _ = keyValueStore.Set("group", "a", "1") + _ = keyValueStore.Set("group", "b", "2") + _ = keyValueStore.Set("other", "c", "3") - all, err := s.GetAll("group") + all, err := keyValueStore.GetAll("group") require.NoError(t, err) assert.Equal(t, map[string]string{"a": "1", "b": "2"}, all) } func TestStore_GetAll_Empty_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - all, err := s.GetAll("empty") + all, err := keyValueStore.GetAll("empty") require.NoError(t, err) assert.Empty(t, all) } func TestStore_Render_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestStore(t) - _ = s.Set("user", "pool", "pool.lthn.io:3333") - _ = s.Set("user", "wallet", "iz...") + _ = keyValueStore.Set("user", "pool", "pool.lthn.io:3333") + _ = keyValueStore.Set("user", "wallet", "iz...") tmpl := `{"pool":"{{ .pool }}","wallet":"{{ .wallet }}"}` - out, err := s.Render(tmpl, "user") + out, err := keyValueStore.Render(tmpl, "user") require.NoError(t, err) assert.Contains(t, out, "pool.lthn.io:3333") assert.Contains(t, out, "iz...") From e1efd3634c55789a5a388668c0d0d1cb28b805d6 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:13:41 +0000 Subject: [PATCH 56/83] refactor(ax): align remaining AX docs and invalid-input errors Co-Authored-By: Virgil --- io.go | 5 +++++ s3/s3.go | 4 ++-- sigil/crypto_sigil.go | 3 +++ sigil/sigils.go | 13 ++++++++++--- 
sqlite/sqlite.go | 3 ++- 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/io.go b/io.go index 3c3634f..5c5bcc8 100644 --- a/io.go +++ b/io.go @@ -385,6 +385,9 @@ func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } +// Example: medium := io.NewMemoryMedium() +// Example: _ = medium.Write("config/app.yaml", "port: 8080") +// Example: file, _ := medium.Open("config/app.yaml") type MemoryFile struct { name string content []byte @@ -408,6 +411,8 @@ func (file *MemoryFile) Close() error { return nil } +// Example: medium := io.NewMemoryMedium() +// Example: writer, _ := medium.Create("logs/app.log") type MemoryWriteCloser struct { medium *MemoryMedium path string diff --git a/s3/s3.go b/s3/s3.go index 5baf152..7dc6bb5 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -90,10 +90,10 @@ func normalisePrefix(prefix string) string { // Example: _ = medium.Write("reports/daily.txt", "done") func New(options Options) (*Medium, error) { if options.Bucket == "" { - return nil, core.E("s3.New", "bucket name is required", nil) + return nil, core.E("s3.New", "bucket name is required", fs.ErrInvalid) } if options.Client == nil { - return nil, core.E("s3.New", "client is required", nil) + return nil, core.E("s3.New", "client is required", fs.ErrInvalid) } medium := &Medium{ client: options.Client, diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 306f702..f57e351 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -23,12 +23,14 @@ var ( NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) ) +// Example: obfuscator := &sigil.XORObfuscator{} type PreObfuscator interface { Obfuscate(data []byte, entropy []byte) []byte Deobfuscate(data []byte, entropy []byte) []byte } +// Example: obfuscator := &sigil.XORObfuscator{} type XORObfuscator struct{} func (obfuscator *XORObfuscator) Obfuscate(data []byte, entropy []byte) []byte { @@ -76,6 +78,7 @@ func 
(obfuscator *XORObfuscator) deriveKeyStream(entropy []byte, length int) []b return stream } +// Example: obfuscator := &sigil.ShuffleMaskObfuscator{} type ShuffleMaskObfuscator struct{} func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) []byte { diff --git a/sigil/sigils.go b/sigil/sigils.go index 1dd9983..c13c159 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -11,6 +11,7 @@ import ( "encoding/base64" "encoding/hex" goio "io" + "io/fs" core "dappco.re/go/core" "golang.org/x/crypto/blake2b" @@ -20,6 +21,7 @@ import ( "golang.org/x/crypto/sha3" ) +// Example: reverseSigil, _ := sigil.NewSigil("reverse") type ReverseSigil struct{} func (sigil *ReverseSigil) In(data []byte) ([]byte, error) { @@ -37,6 +39,7 @@ func (sigil *ReverseSigil) Out(data []byte) ([]byte, error) { return sigil.In(data) } +// Example: hexSigil, _ := sigil.NewSigil("hex") type HexSigil struct{} func (sigil *HexSigil) In(data []byte) ([]byte, error) { @@ -57,6 +60,7 @@ func (sigil *HexSigil) Out(data []byte) ([]byte, error) { return dst, err } +// Example: base64Sigil, _ := sigil.NewSigil("base64") type Base64Sigil struct{} func (sigil *Base64Sigil) In(data []byte) ([]byte, error) { @@ -77,6 +81,7 @@ func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { return dst[:n], err } +// Example: gzipSigil, _ := sigil.NewSigil("gzip") type GzipSigil struct { outputWriter goio.Writer } @@ -116,6 +121,7 @@ func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { return out, nil } +// Example: jsonSigil := &sigil.JSONSigil{Indent: true} type JSONSigil struct{ Indent bool } func (sigil *JSONSigil) In(data []byte) ([]byte, error) { @@ -129,7 +135,7 @@ func (sigil *JSONSigil) In(data []byte) ([]byte, error) { if err, ok := result.Value.(error); ok { return nil, core.E("sigil.JSONSigil.In", "decode json", err) } - return nil, core.E("sigil.JSONSigil.In", "decode json", nil) + return nil, core.E("sigil.JSONSigil.In", "decode json", fs.ErrInvalid) } compact := 
core.JSONMarshalString(decoded) @@ -143,6 +149,7 @@ func (sigil *JSONSigil) Out(data []byte) ([]byte, error) { return data, nil } +// Example: hashSigil := sigil.NewHashSigil(crypto.SHA256) type HashSigil struct { Hash crypto.Hash } @@ -193,7 +200,7 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) { case crypto.BLAKE2b_512: hasher, _ = blake2b.New512(nil) default: - return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", nil) + return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", fs.ErrInvalid) } hasher.Write(data) @@ -258,7 +265,7 @@ func NewSigil(name string) (Sigil, error) { case "blake2b-512": return NewHashSigil(crypto.BLAKE2b_512), nil default: - return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", name), nil) + return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", name), fs.ErrInvalid) } } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index b3d4475..162a98a 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -25,6 +25,7 @@ type Medium struct { var _ coreio.Medium = (*Medium)(nil) +// Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) type Options struct { Path string Table string @@ -41,7 +42,7 @@ func normaliseTableName(table string) string { // Example: _ = medium.Write("config/app.yaml", "port: 8080") func New(options Options) (*Medium, error) { if options.Path == "" { - return nil, core.E("sqlite.New", "database path is required", nil) + return nil, core.E("sqlite.New", "database path is required", fs.ErrInvalid) } medium := &Medium{table: normaliseTableName(options.Table)} From eab112c7cfdc5f77b7485cca139e8599942952bd Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:20:09 +0000 Subject: [PATCH 57/83] refactor(workspace): accept declarative root and medium options Co-Authored-By: Virgil --- workspace/doc.go | 6 ++++- workspace/service.go | 47 +++++++++++++++++++++++++++++++-------- workspace/service_test.go | 24 
++++++++++++++++++++ 3 files changed, 67 insertions(+), 10 deletions(-) diff --git a/workspace/doc.go b/workspace/doc.go index bf399b3..d1709b1 100644 --- a/workspace/doc.go +++ b/workspace/doc.go @@ -1,4 +1,8 @@ -// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) +// Example: service, _ := workspace.New(workspace.Options{ +// Example: KeyPairProvider: keyPairProvider, +// Example: RootPath: "/srv/workspaces", +// Example: Medium: io.NewMemoryMedium(), +// Example: }) // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") // Example: _ = service.SwitchWorkspace(workspaceID) // Example: _ = service.WriteWorkspaceFile("notes/todo.txt", "ship it") diff --git a/workspace/service.go b/workspace/service.go index 7179614..44b191d 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -11,7 +11,11 @@ import ( "dappco.re/go/core/io" ) -// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) +// Example: service, _ := workspace.New(workspace.Options{ +// Example: KeyPairProvider: keyPairProvider, +// Example: RootPath: "/srv/workspaces", +// Example: Medium: io.NewMemoryMedium(), +// Example: }) type Workspace interface { CreateWorkspace(identifier, password string) (string, error) SwitchWorkspace(workspaceID string) error @@ -37,12 +41,22 @@ type WorkspaceCommand struct { WorkspaceID string } -// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) +// Example: service, _ := workspace.New(workspace.Options{ +// Example: KeyPairProvider: keyPairProvider, +// Example: RootPath: "/srv/workspaces", +// Example: Medium: io.NewMemoryMedium(), +// Example: }) type Options struct { KeyPairProvider KeyPairProvider + RootPath string + Medium io.Medium } -// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) +// Example: service, _ := workspace.New(workspace.Options{ +// Example: KeyPairProvider: 
keyPairProvider, +// Example: RootPath: "/srv/workspaces", +// Example: Medium: io.NewMemoryMedium(), +// Example: }) type Service struct { keyPairProvider KeyPairProvider activeWorkspaceID string @@ -53,23 +67,38 @@ type Service struct { var _ Workspace = (*Service)(nil) -// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) +// Example: service, _ := workspace.New(workspace.Options{ +// Example: KeyPairProvider: keyPairProvider, +// Example: RootPath: "/srv/workspaces", +// Example: Medium: io.NewMemoryMedium(), +// Example: }) // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") func New(options Options) (*Service, error) { - home := resolveWorkspaceHomeDirectory() - if home == "" { - return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) + rootPath := options.RootPath + if rootPath == "" { + home := resolveWorkspaceHomeDirectory() + if home == "" { + return nil, core.E("workspace.New", "failed to determine home directory", fs.ErrNotExist) + } + rootPath = core.Path(home, ".core", "workspaces") } - rootPath := core.Path(home, ".core", "workspaces") if options.KeyPairProvider == nil { return nil, core.E("workspace.New", "key pair provider is required", fs.ErrInvalid) } + medium := options.Medium + if medium == nil { + medium = io.Local + } + if medium == nil { + return nil, core.E("workspace.New", "storage medium is required", fs.ErrInvalid) + } + service := &Service{ keyPairProvider: options.KeyPairProvider, rootPath: rootPath, - medium: io.Local, + medium: medium, } if err := service.medium.EnsureDir(rootPath); err != nil { diff --git a/workspace/service_test.go b/workspace/service_test.go index e4fef6c..d1baac7 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -4,6 +4,7 @@ import ( "testing" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -36,6 +37,29 @@ 
func TestService_New_MissingKeyPairProvider_Bad(t *testing.T) { require.Error(t, err) } +func TestService_New_CustomRootPathAndMedium_Good(t *testing.T) { + medium := coreio.NewMemoryMedium() + rootPath := core.Path(t.TempDir(), "custom", "workspaces") + + service, err := New(Options{ + KeyPairProvider: stubKeyPairProvider{key: "private-key"}, + RootPath: rootPath, + Medium: medium, + }) + require.NoError(t, err) + assert.Equal(t, rootPath, service.rootPath) + assert.Same(t, medium, service.medium) + + workspaceID, err := service.CreateWorkspace("custom-user", "pass123") + require.NoError(t, err) + assert.NotEmpty(t, workspaceID) + + expectedWorkspacePath := core.Path(rootPath, workspaceID) + assert.True(t, medium.IsDir(rootPath)) + assert.True(t, medium.IsDir(core.Path(expectedWorkspacePath, "keys"))) + assert.True(t, medium.Exists(core.Path(expectedWorkspacePath, "keys", "private.key"))) +} + func TestService_WorkspaceFileRoundTrip_Good(t *testing.T) { service, tempHome := newTestService(t) From bd8d7c697511eeac5ddc8d2225770f8ebe039f83 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:25:00 +0000 Subject: [PATCH 58/83] refactor(ax): tighten local naming Co-Authored-By: Virgil --- node/node.go | 30 +++++++++++++++--------------- node/node_test.go | 24 ++++++++++++------------ sqlite/sqlite.go | 8 ++++---- store/medium.go | 8 ++++---- store/medium_test.go | 24 ++++++++++++------------ store/store_test.go | 4 ++-- workspace/service.go | 4 ++-- workspace/service_test.go | 10 +++++----- 8 files changed, 56 insertions(+), 56 deletions(-) diff --git a/node/node.go b/node/node.go index deb8349..c322084 100644 --- a/node/node.go +++ b/node/node.go @@ -53,8 +53,8 @@ func (node *Node) AddData(name string, content []byte) { // Example: snapshot, _ := nodeTree.ToTar() func (node *Node) ToTar() ([]byte, error) { - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) + buffer := new(bytes.Buffer) + tarWriter := tar.NewWriter(buffer) for _, file := range node.files 
{ hdr := &tar.Header{ @@ -63,19 +63,19 @@ func (node *Node) ToTar() ([]byte, error) { Size: int64(len(file.content)), ModTime: file.modTime, } - if err := tw.WriteHeader(hdr); err != nil { + if err := tarWriter.WriteHeader(hdr); err != nil { return nil, err } - if _, err := tw.Write(file.content); err != nil { + if _, err := tarWriter.Write(file.content); err != nil { return nil, err } } - if err := tw.Close(); err != nil { + if err := tarWriter.Close(); err != nil { return nil, err } - return buf.Bytes(), nil + return buffer.Bytes(), nil } // Example: restored, _ := node.FromTar(snapshot) @@ -90,10 +90,10 @@ func FromTar(data []byte) (*Node, error) { // Example: _ = nodeTree.LoadTar(snapshot) func (node *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) - tr := tar.NewReader(bytes.NewReader(data)) + tarReader := tar.NewReader(bytes.NewReader(data)) for { - header, err := tr.Next() + header, err := tarReader.Next() if err == goio.EOF { break } @@ -102,7 +102,7 @@ func (node *Node) LoadTar(data []byte) error { } if header.Typeflag == tar.TypeReg { - content, err := goio.ReadAll(tr) + content, err := goio.ReadAll(tarReader) if err != nil { return core.E("node.LoadTar", "read tar entry", err) } @@ -147,18 +147,18 @@ func (node *Node) Walk(root string, fn fs.WalkDirFunc, options WalkOptions) erro } } - result := fn(entryPath, entry, err) + walkResult := fn(entryPath, entry, err) - if result == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { - rel := core.TrimPrefix(entryPath, root) - rel = core.TrimPrefix(rel, "/") - depth := len(core.Split(rel, "/")) + if walkResult == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { + relativePath := core.TrimPrefix(entryPath, root) + relativePath = core.TrimPrefix(relativePath, "/") + depth := len(core.Split(relativePath, "/")) if depth >= options.MaxDepth { return fs.SkipDir } } - return result + return walkResult }) } diff --git 
a/node/node_test.go b/node/node_test.go index 2e783e6..fb36670 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -463,15 +463,15 @@ func TestNode_ToTar_Good(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, tarball) - tr := tar.NewReader(bytes.NewReader(tarball)) + tarReader := tar.NewReader(bytes.NewReader(tarball)) files := make(map[string]string) for { - header, err := tr.Next() + header, err := tarReader.Next() if err == io.EOF { break } require.NoError(t, err) - content, err := io.ReadAll(tr) + content, err := io.ReadAll(tarReader) require.NoError(t, err) files[header.Name] = string(content) } @@ -481,26 +481,26 @@ func TestNode_ToTar_Good(t *testing.T) { } func TestNode_FromTar_Good(t *testing.T) { - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) + buffer := new(bytes.Buffer) + tarWriter := tar.NewWriter(buffer) - for _, f := range []struct{ Name, Body string }{ + for _, file := range []struct{ Name, Body string }{ {"foo.txt", "foo"}, {"bar/baz.txt", "baz"}, } { hdr := &tar.Header{ - Name: f.Name, + Name: file.Name, Mode: 0600, - Size: int64(len(f.Body)), + Size: int64(len(file.Body)), Typeflag: tar.TypeReg, } - require.NoError(t, tw.WriteHeader(hdr)) - _, err := tw.Write([]byte(f.Body)) + require.NoError(t, tarWriter.WriteHeader(hdr)) + _, err := tarWriter.Write([]byte(file.Body)) require.NoError(t, err) } - require.NoError(t, tw.Close()) + require.NoError(t, tarWriter.Close()) - nodeTree, err := FromTar(buf.Bytes()) + nodeTree, err := FromTar(buffer.Bytes()) require.NoError(t, err) assert.True(t, nodeTree.Exists("foo.txt"), "foo.txt should exist") diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 162a98a..a2d7c1b 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -203,11 +203,11 @@ func (medium *Medium) Delete(filePath string) error { } } - res, err := medium.database.Exec(`DELETE FROM `+medium.table+` WHERE path = ?`, key) + execResult, err := medium.database.Exec(`DELETE FROM `+medium.table+` WHERE path = ?`, key) if err 
!= nil { return core.E("sqlite.Delete", core.Concat("delete failed: ", key), err) } - rowsAffected, _ := res.RowsAffected() + rowsAffected, _ := execResult.RowsAffected() if rowsAffected == 0 { return core.E("sqlite.Delete", core.Concat("path not found: ", key), fs.ErrNotExist) } @@ -223,14 +223,14 @@ func (medium *Medium) DeleteAll(filePath string) error { prefix := key + "/" - res, err := medium.database.Exec( + execResult, err := medium.database.Exec( `DELETE FROM `+medium.table+` WHERE path = ? OR path LIKE ?`, key, prefix+"%", ) if err != nil { return core.E("sqlite.DeleteAll", core.Concat("delete failed: ", key), err) } - rowsAffected, _ := res.RowsAffected() + rowsAffected, _ := execResult.RowsAffected() if rowsAffected == 0 { return core.E("sqlite.DeleteAll", core.Concat("path not found: ", key), fs.ErrNotExist) } diff --git a/store/medium.go b/store/medium.go index d59addc..4899af5 100644 --- a/store/medium.go +++ b/store/medium.go @@ -128,11 +128,11 @@ func (medium *Medium) Rename(oldPath, newPath string) error { if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } - val, err := medium.store.Get(oldGroup, oldKey) + value, err := medium.store.Get(oldGroup, oldKey) if err != nil { return err } - if err := medium.store.Set(newGroup, newKey, val); err != nil { + if err := medium.store.Set(newGroup, newKey, value); err != nil { return err } return medium.store.Delete(oldGroup, oldKey) @@ -235,11 +235,11 @@ func (medium *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } - val, err := medium.store.Get(group, key) + value, err := medium.store.Get(group, key) if err != nil { return nil, err } - return goio.NopCloser(core.NewReader(val)), nil + return goio.NopCloser(core.NewReader(value)), nil } func (medium *Medium) WriteStream(entryPath string) (goio.WriteCloser, error) { diff --git 
a/store/medium_test.go b/store/medium_test.go index 7ea7bec..1065ed8 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -23,9 +23,9 @@ func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { err := m.Write("config/theme", "dark") require.NoError(t, err) - val, err := m.Read("config/theme") + value, err := m.Read("config/theme") require.NoError(t, err) - assert.Equal(t, "dark", val) + assert.Equal(t, "dark", value) } func TestKeyValueMedium_Read_NoKey_Bad(t *testing.T) { @@ -83,9 +83,9 @@ func TestKeyValueMedium_Rename_Good(t *testing.T) { err := m.Rename("old/key", "new/key") require.NoError(t, err) - val, err := m.Read("new/key") + value, err := m.Read("new/key") require.NoError(t, err) - assert.Equal(t, "val", val) + assert.Equal(t, "val", value) assert.False(t, m.IsFile("old/key")) } @@ -163,9 +163,9 @@ func TestKeyValueMedium_CreateClose_Good(t *testing.T) { _, _ = w.Write([]byte("streamed")) require.NoError(t, w.Close()) - val, err := m.Read("group/key") + value, err := m.Read("group/key") require.NoError(t, err) - assert.Equal(t, "streamed", val) + assert.Equal(t, "streamed", value) } func TestKeyValueMedium_Append_Good(t *testing.T) { @@ -177,9 +177,9 @@ func TestKeyValueMedium_Append_Good(t *testing.T) { _, _ = w.Write([]byte(" world")) require.NoError(t, w.Close()) - val, err := m.Read("group/key") + value, err := m.Read("group/key") require.NoError(t, err) - assert.Equal(t, "hello world", val) + assert.Equal(t, "hello world", value) } func TestKeyValueMedium_AsMedium_Good(t *testing.T) { @@ -188,13 +188,13 @@ func TestKeyValueMedium_AsMedium_Good(t *testing.T) { m := s.AsMedium() require.NoError(t, m.Write("group/key", "val")) - val, err := s.Get("group", "key") + value, err := s.Get("group", "key") require.NoError(t, err) - assert.Equal(t, "val", val) + assert.Equal(t, "val", value) - val, err = m.Read("group/key") + value, err = m.Read("group/key") require.NoError(t, err) - assert.Equal(t, "val", val) + assert.Equal(t, "val", value) } func 
TestKeyValueMedium_Store_Good(t *testing.T) { diff --git a/store/store_test.go b/store/store_test.go index 07e49a8..0349d90 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -34,9 +34,9 @@ func TestStore_SetGet_Good(t *testing.T) { err := keyValueStore.Set("config", "theme", "dark") require.NoError(t, err) - val, err := keyValueStore.Get("config", "theme") + value, err := keyValueStore.Get("config", "theme") require.NoError(t, err) - assert.Equal(t, "dark", val) + assert.Equal(t, "dark", value) } func TestStore_Get_NotFound_Bad(t *testing.T) { diff --git a/workspace/service.go b/workspace/service.go index 44b191d..635e011 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -134,12 +134,12 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er } } - privKey, err := service.keyPairProvider.CreateKeyPair(identifier, password) + privateKey, err := service.keyPairProvider.CreateKeyPair(identifier, password) if err != nil { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } - if err := service.medium.WriteMode(core.Path(workspaceDirectory, "keys", "private.key"), privKey, 0600); err != nil { + if err := service.medium.WriteMode(core.Path(workspaceDirectory, "keys", "private.key"), privateKey, 0600); err != nil { return "", core.E("workspace.CreateWorkspace", "failed to save private key", err) } diff --git a/workspace/service_test.go b/workspace/service_test.go index d1baac7..e28dece 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -10,15 +10,15 @@ import ( ) type stubKeyPairProvider struct { - key string - err error + privateKey string + err error } func (provider stubKeyPairProvider) CreateKeyPair(_, _ string) (string, error) { if provider.err != nil { return "", provider.err } - return provider.key, nil + return provider.privateKey, nil } func newTestService(t *testing.T) (*Service, string) { @@ -27,7 +27,7 @@ func newTestService(t *testing.T) (*Service, string) 
{ tempHome := t.TempDir() t.Setenv("HOME", tempHome) - service, err := New(Options{KeyPairProvider: stubKeyPairProvider{key: "private-key"}}) + service, err := New(Options{KeyPairProvider: stubKeyPairProvider{privateKey: "private-key"}}) require.NoError(t, err) return service, tempHome } @@ -42,7 +42,7 @@ func TestService_New_CustomRootPathAndMedium_Good(t *testing.T) { rootPath := core.Path(t.TempDir(), "custom", "workspaces") service, err := New(Options{ - KeyPairProvider: stubKeyPairProvider{key: "private-key"}, + KeyPairProvider: stubKeyPairProvider{privateKey: "private-key"}, RootPath: rootPath, Medium: medium, }) From 50bb356c7c735c0ccf7e0639b02a9474e3cef83b Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:35:21 +0000 Subject: [PATCH 59/83] refactor(ax): align remaining AX naming surfaces Co-Authored-By: Virgil --- datanode/medium.go | 8 +-- datanode/medium_test.go | 8 +-- local/medium.go | 8 +-- node/node.go | 32 +++++----- node/node_test.go | 32 +++++----- sigil/crypto_sigil.go | 60 +++++++++--------- sigil/crypto_sigil_test.go | 122 ++++++++++++++++++------------------- sigil/sigil_test.go | 110 ++++++++++++++++----------------- sigil/sigils.go | 40 ++++++------ store/store.go | 6 +- store/store_test.go | 8 +-- workspace/service.go | 6 +- 12 files changed, 220 insertions(+), 220 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 2cd39fe..73c0f78 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -425,7 +425,7 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { } medium.lock.RUnlock() - return &writeCloser{medium: medium, path: filePath, buf: existing}, nil + return &writeCloser{medium: medium, path: filePath, buffer: existing}, nil } func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { @@ -545,11 +545,11 @@ func (medium *Medium) removeFileLocked(target string) error { type writeCloser struct { medium *Medium path string - buf []byte + buffer []byte } func (writer 
*writeCloser) Write(data []byte) (int, error) { - writer.buf = append(writer.buf, data...) + writer.buffer = append(writer.buffer, data...) return len(data), nil } @@ -557,7 +557,7 @@ func (writer *writeCloser) Close() error { writer.medium.lock.Lock() defer writer.medium.lock.Unlock() - writer.medium.dataNode.AddData(writer.path, writer.buf) + writer.medium.dataNode.AddData(writer.path, writer.buffer) writer.medium.ensureDirsLocked(path.Dir(writer.path)) return nil } diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 5dad056..614ba46 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -185,15 +185,15 @@ func TestDataNode_RenameDir_Good(t *testing.T) { require.NoError(t, m.Write("src/a.go", "package a")) require.NoError(t, m.Write("src/sub/b.go", "package b")) - require.NoError(t, m.Rename("src", "dst")) + require.NoError(t, m.Rename("src", "destination")) assert.False(t, m.Exists("src/a.go")) - got, err := m.Read("dst/a.go") + got, err := m.Read("destination/a.go") require.NoError(t, err) assert.Equal(t, "package a", got) - got, err = m.Read("dst/sub/b.go") + got, err = m.Read("destination/sub/b.go") require.NoError(t, err) assert.Equal(t, "package b", got) } @@ -210,7 +210,7 @@ func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) { dataNodeReadAll = original }) - err := m.Rename("src", "dst") + err := m.Rename("src", "destination") require.Error(t, err) assert.Contains(t, err.Error(), "failed to read source file") } diff --git a/local/medium.go b/local/medium.go index 39e6cc4..1cce3cb 100644 --- a/local/medium.go +++ b/local/medium.go @@ -387,13 +387,13 @@ func isSymlink(mode uint32) bool { func readlink(path string) (string, error) { size := 256 for { - buf := make([]byte, size) - n, err := syscall.Readlink(path, buf) + linkBuffer := make([]byte, size) + bytesRead, err := syscall.Readlink(path, linkBuffer) if err != nil { return "", err } - if n < len(buf) { - return string(buf[:n]), nil + if bytesRead < len(linkBuffer) { 
+ return string(linkBuffer[:bytesRead]), nil } size *= 2 } diff --git a/node/node.go b/node/node.go index c322084..7deb73e 100644 --- a/node/node.go +++ b/node/node.go @@ -174,8 +174,8 @@ func (node *Node) ReadFile(name string) ([]byte, error) { return result, nil } -// Example: _ = nodeTree.CopyFile("config/app.yaml", "/tmp/app.yaml", 0644) -func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error { +// Example: _ = nodeTree.CopyFile("config/app.yaml", "backup/app.yaml", 0644) +func (node *Node) CopyFile(sourcePath, destinationPath string, permissions fs.FileMode) error { sourcePath = core.TrimPrefix(sourcePath, "/") file, ok := node.files[sourcePath] if !ok { @@ -192,11 +192,11 @@ func (node *Node) CopyFile(sourcePath, destinationPath string, perm fs.FileMode) if parent != "." && parent != "" && parent != destinationPath && !coreio.Local.IsDir(parent) { return &fs.PathError{Op: "copyfile", Path: destinationPath, Err: fs.ErrNotExist} } - return coreio.Local.WriteMode(destinationPath, string(file.content), perm) + return coreio.Local.WriteMode(destinationPath, string(file.content), permissions) } // Example: _ = nodeTree.CopyTo(io.NewMemoryMedium(), "config", "backup/config") -func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error { +func (node *Node) CopyTo(target coreio.Medium, sourcePath, destinationPath string) error { sourcePath = core.TrimPrefix(sourcePath, "/") info, err := node.Stat(sourcePath) if err != nil { @@ -208,7 +208,7 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro if !ok { return core.E("node.CopyTo", core.Concat("path not found: ", sourcePath), fs.ErrNotExist) } - return target.Write(destPath, string(file.content)) + return target.Write(destinationPath, string(file.content)) } prefix := sourcePath @@ -220,12 +220,12 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destPath string) erro if !core.HasPrefix(filePath, prefix) && filePath != 
sourcePath { continue } - rel := core.TrimPrefix(filePath, prefix) - dest := destPath - if rel != "" { - dest = core.Concat(destPath, "/", rel) + relativePath := core.TrimPrefix(filePath, prefix) + copyDestinationPath := destinationPath + if relativePath != "" { + copyDestinationPath = core.Concat(destinationPath, "/", relativePath) } - if err := target.Write(dest, string(file.content)); err != nil { + if err := target.Write(copyDestinationPath, string(file.content)); err != nil { return err } } @@ -438,7 +438,7 @@ func (node *Node) Append(filePath string) (goio.WriteCloser, error) { existing = make([]byte, len(file.content)) copy(existing, file.content) } - return &nodeWriter{node: node, path: filePath, buf: existing}, nil + return &nodeWriter{node: node, path: filePath, buffer: existing}, nil } func (node *Node) ReadStream(filePath string) (goio.ReadCloser, error) { @@ -454,20 +454,20 @@ func (node *Node) WriteStream(filePath string) (goio.WriteCloser, error) { } type nodeWriter struct { - node *Node - path string - buf []byte + node *Node + path string + buffer []byte } func (writer *nodeWriter) Write(data []byte) (int, error) { - writer.buf = append(writer.buf, data...) + writer.buffer = append(writer.buffer, data...) 
return len(data), nil } func (writer *nodeWriter) Close() error { writer.node.files[writer.path] = &dataFile{ name: writer.path, - content: writer.buf, + content: writer.buffer, modTime: time.Now(), } return nil diff --git a/node/node_test.go b/node/node_test.go index fb36670..b9354d1 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -69,10 +69,10 @@ func TestNode_Open_Good(t *testing.T) { require.NoError(t, err) defer file.Close() - buf := make([]byte, 10) - nr, err := file.Read(buf) + readBuffer := make([]byte, 10) + nr, err := file.Read(readBuffer) require.True(t, nr > 0 || err == io.EOF) - assert.Equal(t, "foo", string(buf[:nr])) + assert.Equal(t, "foo", string(readBuffer[:nr])) } func TestNode_Open_Bad(t *testing.T) { @@ -93,9 +93,9 @@ func TestNode_Open_Directory_Good(t *testing.T) { _, err = file.Read(make([]byte, 1)) require.Error(t, err) - var pathErr *fs.PathError - require.True(t, core.As(err, &pathErr)) - assert.Equal(t, fs.ErrInvalid, pathErr.Err) + var pathError *fs.PathError + require.True(t, core.As(err, &pathError)) + assert.Equal(t, fs.ErrInvalid, pathError.Err) } func TestNode_Stat_Good(t *testing.T) { @@ -182,9 +182,9 @@ func TestNode_ReadDir_Bad(t *testing.T) { _, err := nodeTree.ReadDir("foo.txt") require.Error(t, err) - var pathErr *fs.PathError - require.True(t, core.As(err, &pathErr)) - assert.Equal(t, fs.ErrInvalid, pathErr.Err) + var pathError *fs.PathError + require.True(t, core.As(err, &pathError)) + assert.Equal(t, fs.ErrInvalid, pathError.Err) } func TestNode_ReadDir_IgnoresEmptyEntry_Good(t *testing.T) { @@ -317,20 +317,20 @@ func TestNode_CopyFile_Good(t *testing.T) { nodeTree := New() nodeTree.AddData("foo.txt", []byte("foo")) - tmpfile := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("foo.txt", tmpfile, 0644) + destinationPath := core.Path(t.TempDir(), "test.txt") + err := nodeTree.CopyFile("foo.txt", destinationPath, 0644) require.NoError(t, err) - content, err := coreio.Local.Read(tmpfile) + content, err 
:= coreio.Local.Read(destinationPath) require.NoError(t, err) assert.Equal(t, "foo", content) } func TestNode_CopyFile_Bad(t *testing.T) { nodeTree := New() - tmpfile := core.Path(t.TempDir(), "test.txt") + destinationPath := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("nonexistent.txt", tmpfile, 0644) + err := nodeTree.CopyFile("nonexistent.txt", destinationPath, 0644) assert.Error(t, err) nodeTree.AddData("foo.txt", []byte("foo")) @@ -341,9 +341,9 @@ func TestNode_CopyFile_Bad(t *testing.T) { func TestNode_CopyFile_DirectorySource_Bad(t *testing.T) { nodeTree := New() nodeTree.AddData("bar/baz.txt", []byte("baz")) - tmpfile := core.Path(t.TempDir(), "test.txt") + destinationPath := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("bar", tmpfile, 0644) + err := nodeTree.CopyFile("bar", destinationPath, 0644) assert.Error(t, err) } diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index f57e351..e012974 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -58,17 +58,17 @@ func (obfuscator *XORObfuscator) transform(data []byte, entropy []byte) []byte { func (obfuscator *XORObfuscator) deriveKeyStream(entropy []byte, length int) []byte { stream := make([]byte, length) - h := sha256.New() + hashFunction := sha256.New() blockNum := uint64(0) offset := 0 for offset < length { - h.Reset() - h.Write(entropy) + hashFunction.Reset() + hashFunction.Write(entropy) var blockBytes [8]byte binary.BigEndian.PutUint64(blockBytes[:], blockNum) - h.Write(blockBytes[:]) - block := h.Sum(nil) + hashFunction.Write(blockBytes[:]) + block := hashFunction.Sum(nil) copyLen := min(len(block), length-offset) copy(stream[offset:], block[:copyLen]) @@ -89,7 +89,7 @@ func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) result := make([]byte, len(data)) copy(result, data) - perm := obfuscator.generatePermutation(entropy, len(data)) + permutation := obfuscator.generatePermutation(entropy, len(data)) mask := 
obfuscator.deriveMask(entropy, len(data)) for i := range result { @@ -97,8 +97,8 @@ func (obfuscator *ShuffleMaskObfuscator) Obfuscate(data []byte, entropy []byte) } shuffled := make([]byte, len(data)) - for i, p := range perm { - shuffled[i] = result[p] + for destinationIndex, sourceIndex := range permutation { + shuffled[destinationIndex] = result[sourceIndex] } return shuffled @@ -111,11 +111,11 @@ func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte result := make([]byte, len(data)) - perm := obfuscator.generatePermutation(entropy, len(data)) + permutation := obfuscator.generatePermutation(entropy, len(data)) mask := obfuscator.deriveMask(entropy, len(data)) - for i, p := range perm { - result[p] = data[i] + for destinationIndex, sourceIndex := range permutation { + result[sourceIndex] = data[destinationIndex] } for i := range result { @@ -126,44 +126,44 @@ func (obfuscator *ShuffleMaskObfuscator) Deobfuscate(data []byte, entropy []byte } func (obfuscator *ShuffleMaskObfuscator) generatePermutation(entropy []byte, length int) []int { - perm := make([]int, length) - for i := range perm { - perm[i] = i + permutation := make([]int, length) + for i := range permutation { + permutation[i] = i } - h := sha256.New() - h.Write(entropy) - h.Write([]byte("permutation")) - seed := h.Sum(nil) + hashFunction := sha256.New() + hashFunction.Write(entropy) + hashFunction.Write([]byte("permutation")) + seed := hashFunction.Sum(nil) for i := length - 1; i > 0; i-- { - h.Reset() - h.Write(seed) + hashFunction.Reset() + hashFunction.Write(seed) var iBytes [8]byte binary.BigEndian.PutUint64(iBytes[:], uint64(i)) - h.Write(iBytes[:]) - jBytes := h.Sum(nil) + hashFunction.Write(iBytes[:]) + jBytes := hashFunction.Sum(nil) j := int(binary.BigEndian.Uint64(jBytes[:8]) % uint64(i+1)) - perm[i], perm[j] = perm[j], perm[i] + permutation[i], permutation[j] = permutation[j], permutation[i] } - return perm + return permutation } func (obfuscator 
*ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) []byte { mask := make([]byte, length) - h := sha256.New() + hashFunction := sha256.New() blockNum := uint64(0) offset := 0 for offset < length { - h.Reset() - h.Write(entropy) - h.Write([]byte("mask")) + hashFunction.Reset() + hashFunction.Write(entropy) + hashFunction.Write([]byte("mask")) var blockBytes [8]byte binary.BigEndian.PutUint64(blockBytes[:], blockNum) - h.Write(blockBytes[:]) - block := h.Sum(nil) + hashFunction.Write(blockBytes[:]) + block := hashFunction.Sum(nil) copyLen := min(len(block), length-offset) copy(mask[offset:], block[:copyLen]) diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index fb59fa6..31fb653 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -142,11 +142,11 @@ func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - assert.NotNil(t, s) - assert.Equal(t, key, s.Key) - assert.NotNil(t, s.Obfuscator) + assert.NotNil(t, cipherSigil) + assert.Equal(t, key, cipherSigil.Key) + assert.NotNil(t, cipherSigil.Obfuscator) } func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { @@ -155,11 +155,11 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { original := make([]byte, 32) copy(original, key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) key[0] ^= 0xFF - assert.Equal(t, original, s.Key) + assert.Equal(t, original, cipherSigil.Key) } func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) { @@ -182,18 +182,18 @@ func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_Good(t *testing.T) { _, _ = rand.Read(key) ob := &ShuffleMaskObfuscator{} - s, err := NewChaChaPolySigil(key, ob) + cipherSigil, err := NewChaChaPolySigil(key, ob) require.NoError(t, 
err) - assert.Equal(t, ob, s.Obfuscator) + assert.Equal(t, ob, cipherSigil.Obfuscator) } func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscatorNil_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - assert.IsType(t, &XORObfuscator{}, s.Obfuscator) + assert.IsType(t, &XORObfuscator{}, cipherSigil.Obfuscator) } func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_InvalidKey_Bad(t *testing.T) { @@ -205,16 +205,16 @@ func TestCryptoSigil_ChaChaPolySigil_RoundTrip_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) plaintext := []byte("consciousness does not merely avoid causing harm") - ciphertext, err := s.In(plaintext) + ciphertext, err := cipherSigil.In(plaintext) require.NoError(t, err) assert.NotEqual(t, plaintext, ciphertext) assert.Greater(t, len(ciphertext), len(plaintext)) - decrypted, err := s.Out(ciphertext) + decrypted, err := cipherSigil.Out(ciphertext) require.NoError(t, err) assert.Equal(t, plaintext, decrypted) } @@ -223,14 +223,14 @@ func TestCryptoSigil_ChaChaPolySigil_CustomShuffleMask_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, &ShuffleMaskObfuscator{}) + cipherSigil, err := NewChaChaPolySigil(key, &ShuffleMaskObfuscator{}) require.NoError(t, err) plaintext := []byte("shuffle mask pre-obfuscation layer") - ciphertext, err := s.In(plaintext) + ciphertext, err := cipherSigil.In(plaintext) require.NoError(t, err) - decrypted, err := s.Out(ciphertext) + decrypted, err := cipherSigil.Out(ciphertext) require.NoError(t, err) assert.Equal(t, plaintext, decrypted) } @@ -239,14 +239,14 @@ func TestCryptoSigil_ChaChaPolySigil_NilData_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, 
nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - enc, err := s.In(nil) + enc, err := cipherSigil.In(nil) require.NoError(t, err) assert.Nil(t, enc) - dec, err := s.Out(nil) + dec, err := cipherSigil.Out(nil) require.NoError(t, err) assert.Nil(t, dec) } @@ -255,14 +255,14 @@ func TestCryptoSigil_ChaChaPolySigil_EmptyPlaintext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - ciphertext, err := s.In([]byte{}) + ciphertext, err := cipherSigil.In([]byte{}) require.NoError(t, err) assert.NotEmpty(t, ciphertext) - decrypted, err := s.Out(ciphertext) + decrypted, err := cipherSigil.Out(ciphertext) require.NoError(t, err) assert.Equal(t, []byte{}, decrypted) } @@ -271,23 +271,23 @@ func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing key := make([]byte, 32) _, _ = rand.Read(key) - s, err := NewChaChaPolySigil(key, nil) + cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) plaintext := []byte("same input") - ct1, _ := s.In(plaintext) - ct2, _ := s.In(plaintext) + ct1, _ := cipherSigil.In(plaintext) + ct2, _ := cipherSigil.In(plaintext) assert.NotEqual(t, ct1, ct2) } func TestCryptoSigil_ChaChaPolySigil_NoKey_Bad(t *testing.T) { - s := &ChaChaPolySigil{} + cipherSigil := &ChaChaPolySigil{} - _, err := s.In([]byte("data")) + _, err := cipherSigil.In([]byte("data")) assert.ErrorIs(t, err, NoKeyConfiguredError) - _, err = s.Out([]byte("data")) + _, err = cipherSigil.Out([]byte("data")) assert.ErrorIs(t, err, NoKeyConfiguredError) } @@ -297,13 +297,13 @@ func TestCryptoSigil_ChaChaPolySigil_WrongKey_Bad(t *testing.T) { _, _ = rand.Read(key1) _, _ = rand.Read(key2) - s1, _ := NewChaChaPolySigil(key1, nil) - s2, _ := NewChaChaPolySigil(key2, nil) + cipherSigilOne, _ := NewChaChaPolySigil(key1, nil) + cipherSigilTwo, _ := NewChaChaPolySigil(key2, nil) - ciphertext, err 
:= s1.In([]byte("secret")) + ciphertext, err := cipherSigilOne.In([]byte("secret")) require.NoError(t, err) - _, err = s2.Out(ciphertext) + _, err = cipherSigilTwo.Out(ciphertext) assert.ErrorIs(t, err, DecryptionFailedError) } @@ -311,8 +311,8 @@ func TestCryptoSigil_ChaChaPolySigil_TruncatedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - _, err := s.Out([]byte("too short")) + cipherSigil, _ := NewChaChaPolySigil(key, nil) + _, err := cipherSigil.Out([]byte("too short")) assert.ErrorIs(t, err, CiphertextTooShortError) } @@ -320,12 +320,12 @@ func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - ciphertext, _ := s.In([]byte("authentic data")) + cipherSigil, _ := NewChaChaPolySigil(key, nil) + ciphertext, _ := cipherSigil.In([]byte("authentic data")) ciphertext[30] ^= 0xFF - _, err := s.Out(ciphertext) + _, err := cipherSigil.Out(ciphertext) assert.ErrorIs(t, err, DecryptionFailedError) } @@ -339,10 +339,10 @@ func TestCryptoSigil_ChaChaPolySigil_RandomReaderFailure_Bad(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - s.randomReader = &failReader{} + cipherSigil, _ := NewChaChaPolySigil(key, nil) + cipherSigil.randomReader = &failReader{} - _, err := s.In([]byte("data")) + _, err := cipherSigil.In([]byte("data")) assert.Error(t, err) } @@ -350,14 +350,14 @@ func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - s.Obfuscator = nil + cipherSigil, _ := NewChaChaPolySigil(key, nil) + cipherSigil.Obfuscator = nil plaintext := []byte("raw encryption without pre-obfuscation") - ciphertext, err := s.In(plaintext) + ciphertext, err := cipherSigil.In(plaintext) require.NoError(t, err) - decrypted, err := s.Out(ciphertext) + decrypted, err := 
cipherSigil.Out(ciphertext) require.NoError(t, err) assert.Equal(t, plaintext, decrypted) } @@ -366,8 +366,8 @@ func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - ciphertext, _ := s.In([]byte("nonce extraction test")) + cipherSigil, _ := NewChaChaPolySigil(key, nil) + ciphertext, _ := cipherSigil.In([]byte("nonce extraction test")) nonce, err := GetNonceFromCiphertext(ciphertext) require.NoError(t, err) @@ -380,8 +380,8 @@ func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - ciphertext, _ := s.In([]byte("data")) + cipherSigil, _ := NewChaChaPolySigil(key, nil) + ciphertext, _ := cipherSigil.In([]byte("data")) nonce, _ := GetNonceFromCiphertext(ciphertext) original := make([]byte, len(nonce)) @@ -405,10 +405,10 @@ func TestCryptoSigil_ChaChaPolySigil_InTransmutePipeline_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) + cipherSigil, _ := NewChaChaPolySigil(key, nil) hexSigil, _ := NewSigil("hex") - chain := []Sigil{s, hexSigil} + chain := []Sigil{cipherSigil, hexSigil} plaintext := []byte("encrypt then hex encode") encoded, err := Transmute(plaintext, chain) @@ -448,12 +448,12 @@ func TestCryptoSigil_Untransmute_ErrorPropagation_Bad(t *testing.T) { } func TestCryptoSigil_GzipSigil_CustomOutputWriter_Good(t *testing.T) { - var buf bytes.Buffer - s := &GzipSigil{outputWriter: &buf} + var outputBuffer bytes.Buffer + gzipSigil := &GzipSigil{outputWriter: &outputBuffer} - _, err := s.In([]byte("test data")) + _, err := gzipSigil.In([]byte("test data")) require.NoError(t, err) - assert.Greater(t, buf.Len(), 0) + assert.Greater(t, outputBuffer.Len(), 0) } func TestCryptoSigil_DeriveKeyStream_ExactBlockSize_Good(t *testing.T) { @@ -473,13 +473,13 @@ func TestCryptoSigil_ChaChaPolySigil_NilRandomReader_Good(t 
*testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) - s, _ := NewChaChaPolySigil(key, nil) - s.randomReader = nil + cipherSigil, _ := NewChaChaPolySigil(key, nil) + cipherSigil.randomReader = nil - ciphertext, err := s.In([]byte("fallback reader")) + ciphertext, err := cipherSigil.In([]byte("fallback reader")) require.NoError(t, err) - decrypted, err := s.Out(ciphertext) + decrypted, err := cipherSigil.Out(ciphertext) require.NoError(t, err) assert.Equal(t, []byte("fallback reader"), decrypted) } @@ -493,7 +493,7 @@ func (l *limitReader) Read(p []byte) (int, error) { if l.pos >= len(l.data) { return 0, goio.EOF } - n := copy(p, l.data[l.pos:]) - l.pos += n - return n, nil + bytesCopied := copy(p, l.data[l.pos:]) + l.pos += bytesCopied + return bytesCopied, nil } diff --git a/sigil/sigil_test.go b/sigil/sigil_test.go index 8534a4a..93565b9 100644 --- a/sigil/sigil_test.go +++ b/sigil/sigil_test.go @@ -14,187 +14,187 @@ import ( ) func TestSigil_ReverseSigil_Good(t *testing.T) { - s := &ReverseSigil{} + reverseSigil := &ReverseSigil{} - out, err := s.In([]byte("hello")) + out, err := reverseSigil.In([]byte("hello")) require.NoError(t, err) assert.Equal(t, []byte("olleh"), out) - restored, err := s.Out(out) + restored, err := reverseSigil.Out(out) require.NoError(t, err) assert.Equal(t, []byte("hello"), restored) } func TestSigil_ReverseSigil_Bad(t *testing.T) { - s := &ReverseSigil{} + reverseSigil := &ReverseSigil{} - out, err := s.In([]byte{}) + out, err := reverseSigil.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) } func TestSigil_ReverseSigil_NilInput_Good(t *testing.T) { - s := &ReverseSigil{} + reverseSigil := &ReverseSigil{} - out, err := s.In(nil) + out, err := reverseSigil.In(nil) require.NoError(t, err) assert.Nil(t, out) - out, err = s.Out(nil) + out, err = reverseSigil.Out(nil) require.NoError(t, err) assert.Nil(t, out) } func TestSigil_HexSigil_Good(t *testing.T) { - s := &HexSigil{} + hexSigil := &HexSigil{} data := 
[]byte("hello world") - encoded, err := s.In(data) + encoded, err := hexSigil.In(data) require.NoError(t, err) assert.Equal(t, []byte(hex.EncodeToString(data)), encoded) - decoded, err := s.Out(encoded) + decoded, err := hexSigil.Out(encoded) require.NoError(t, err) assert.Equal(t, data, decoded) } func TestSigil_HexSigil_Bad(t *testing.T) { - s := &HexSigil{} + hexSigil := &HexSigil{} - _, err := s.Out([]byte("zzzz")) + _, err := hexSigil.Out([]byte("zzzz")) assert.Error(t, err) - out, err := s.In([]byte{}) + out, err := hexSigil.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) } func TestSigil_HexSigil_NilInput_Good(t *testing.T) { - s := &HexSigil{} + hexSigil := &HexSigil{} - out, err := s.In(nil) + out, err := hexSigil.In(nil) require.NoError(t, err) assert.Nil(t, out) - out, err = s.Out(nil) + out, err = hexSigil.Out(nil) require.NoError(t, err) assert.Nil(t, out) } func TestSigil_Base64Sigil_Good(t *testing.T) { - s := &Base64Sigil{} + base64Sigil := &Base64Sigil{} data := []byte("composable transforms") - encoded, err := s.In(data) + encoded, err := base64Sigil.In(data) require.NoError(t, err) assert.Equal(t, []byte(base64.StdEncoding.EncodeToString(data)), encoded) - decoded, err := s.Out(encoded) + decoded, err := base64Sigil.Out(encoded) require.NoError(t, err) assert.Equal(t, data, decoded) } func TestSigil_Base64Sigil_Bad(t *testing.T) { - s := &Base64Sigil{} + base64Sigil := &Base64Sigil{} - _, err := s.Out([]byte("!!!")) + _, err := base64Sigil.Out([]byte("!!!")) assert.Error(t, err) - out, err := s.In([]byte{}) + out, err := base64Sigil.In([]byte{}) require.NoError(t, err) assert.Equal(t, []byte{}, out) } func TestSigil_Base64Sigil_NilInput_Good(t *testing.T) { - s := &Base64Sigil{} + base64Sigil := &Base64Sigil{} - out, err := s.In(nil) + out, err := base64Sigil.In(nil) require.NoError(t, err) assert.Nil(t, out) - out, err = s.Out(nil) + out, err = base64Sigil.Out(nil) require.NoError(t, err) assert.Nil(t, out) } func 
TestSigil_GzipSigil_Good(t *testing.T) { - s := &GzipSigil{} + gzipSigil := &GzipSigil{} data := []byte("the quick brown fox jumps over the lazy dog") - compressed, err := s.In(data) + compressed, err := gzipSigil.In(data) require.NoError(t, err) assert.NotEqual(t, data, compressed) - decompressed, err := s.Out(compressed) + decompressed, err := gzipSigil.Out(compressed) require.NoError(t, err) assert.Equal(t, data, decompressed) } func TestSigil_GzipSigil_Bad(t *testing.T) { - s := &GzipSigil{} + gzipSigil := &GzipSigil{} - _, err := s.Out([]byte("not gzip")) + _, err := gzipSigil.Out([]byte("not gzip")) assert.Error(t, err) - compressed, err := s.In([]byte{}) + compressed, err := gzipSigil.In([]byte{}) require.NoError(t, err) assert.NotEmpty(t, compressed) - decompressed, err := s.Out(compressed) + decompressed, err := gzipSigil.Out(compressed) require.NoError(t, err) assert.Equal(t, []byte{}, decompressed) } func TestSigil_GzipSigil_NilInput_Good(t *testing.T) { - s := &GzipSigil{} + gzipSigil := &GzipSigil{} - out, err := s.In(nil) + out, err := gzipSigil.In(nil) require.NoError(t, err) assert.Nil(t, out) - out, err = s.Out(nil) + out, err = gzipSigil.Out(nil) require.NoError(t, err) assert.Nil(t, out) } func TestSigil_JSONSigil_Good(t *testing.T) { - s := &JSONSigil{Indent: false} + jsonSigil := &JSONSigil{Indent: false} data := []byte(`{ "key" : "value" }`) - compacted, err := s.In(data) + compacted, err := jsonSigil.In(data) require.NoError(t, err) assert.Equal(t, []byte(`{"key":"value"}`), compacted) - passthrough, err := s.Out(compacted) + passthrough, err := jsonSigil.Out(compacted) require.NoError(t, err) assert.Equal(t, compacted, passthrough) } func TestSigil_JSONSigil_Indent_Good(t *testing.T) { - s := &JSONSigil{Indent: true} + jsonSigil := &JSONSigil{Indent: true} data := []byte(`{"key":"value"}`) - indented, err := s.In(data) + indented, err := jsonSigil.In(data) require.NoError(t, err) assert.Contains(t, string(indented), "\n") assert.Contains(t, 
string(indented), " ") } func TestSigil_JSONSigil_Bad(t *testing.T) { - s := &JSONSigil{Indent: false} + jsonSigil := &JSONSigil{Indent: false} - _, err := s.In([]byte("not json")) + _, err := jsonSigil.In([]byte("not json")) assert.Error(t, err) } func TestSigil_JSONSigil_NilInput_Good(t *testing.T) { - s := &JSONSigil{Indent: false} + jsonSigil := &JSONSigil{Indent: false} - out, err := s.In(nil) + out, err := jsonSigil.In(nil) require.NoError(t, err) assert.Nil(t, out) - out, err = s.Out(nil) + out, err = jsonSigil.Out(nil) require.NoError(t, err) assert.Nil(t, out) } @@ -229,14 +229,14 @@ func TestSigil_HashSigil_Good(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s, err := NewSigil(tt.sigilName) + sigilValue, err := NewSigil(tt.sigilName) require.NoError(t, err) - hashed, err := s.In(data) + hashed, err := sigilValue.In(data) require.NoError(t, err) assert.Len(t, hashed, tt.size) - passthrough, err := s.Out(hashed) + passthrough, err := sigilValue.Out(hashed) require.NoError(t, err) assert.Equal(t, hashed, passthrough) }) @@ -244,17 +244,17 @@ func TestSigil_HashSigil_Good(t *testing.T) { } func TestSigil_HashSigil_Bad(t *testing.T) { - s := &HashSigil{Hash: 0} - _, err := s.In([]byte("data")) + hashSigil := &HashSigil{Hash: 0} + _, err := hashSigil.In([]byte("data")) assert.Error(t, err) assert.Contains(t, err.Error(), "not available") } func TestSigil_HashSigil_EmptyInput_Good(t *testing.T) { - s, err := NewSigil("sha256") + sigilValue, err := NewSigil("sha256") require.NoError(t, err) - hashed, err := s.In([]byte{}) + hashed, err := sigilValue.In([]byte{}) require.NoError(t, err) assert.Len(t, hashed, sha256.Size) } @@ -271,9 +271,9 @@ func TestSigil_NewSigil_Good(t *testing.T) { for _, name := range names { t.Run(name, func(t *testing.T) { - s, err := NewSigil(name) + sigilValue, err := NewSigil(name) require.NoError(t, err) - assert.NotNil(t, s) + assert.NotNil(t, sigilValue) }) } } diff --git a/sigil/sigils.go 
b/sigil/sigils.go index c13c159..36f2f15 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -46,18 +46,18 @@ func (sigil *HexSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } - dst := make([]byte, hex.EncodedLen(len(data))) - hex.Encode(dst, data) - return dst, nil + encodedBytes := make([]byte, hex.EncodedLen(len(data))) + hex.Encode(encodedBytes, data) + return encodedBytes, nil } func (sigil *HexSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } - dst := make([]byte, hex.DecodedLen(len(data))) - _, err := hex.Decode(dst, data) - return dst, err + decodedBytes := make([]byte, hex.DecodedLen(len(data))) + _, err := hex.Decode(decodedBytes, data) + return decodedBytes, err } // Example: base64Sigil, _ := sigil.NewSigil("base64") @@ -67,18 +67,18 @@ func (sigil *Base64Sigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } - dst := make([]byte, base64.StdEncoding.EncodedLen(len(data))) - base64.StdEncoding.Encode(dst, data) - return dst, nil + encodedBytes := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(encodedBytes, data) + return encodedBytes, nil } func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } - dst := make([]byte, base64.StdEncoding.DecodedLen(len(data))) - n, err := base64.StdEncoding.Decode(dst, data) - return dst[:n], err + decodedBytes := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + decodedCount, err := base64.StdEncoding.Decode(decodedBytes, data) + return decodedBytes[:decodedCount], err } // Example: gzipSigil, _ := sigil.NewSigil("gzip") @@ -90,10 +90,10 @@ func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } - var b bytes.Buffer + var buffer bytes.Buffer outputWriter := sigil.outputWriter if outputWriter == nil { - outputWriter = &b + outputWriter = &buffer } gzipWriter := gzip.NewWriter(outputWriter) if _, err := gzipWriter.Write(data); err 
!= nil { @@ -102,7 +102,7 @@ func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if err := gzipWriter.Close(); err != nil { return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err) } - return b.Bytes(), nil + return buffer.Bytes(), nil } func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { @@ -156,8 +156,8 @@ type HashSigil struct { // Example: hashSigil := sigil.NewHashSigil(crypto.SHA256) // Example: digest, _ := hashSigil.In([]byte("payload")) -func NewHashSigil(h crypto.Hash) *HashSigil { - return &HashSigil{Hash: h} +func NewHashSigil(hashAlgorithm crypto.Hash) *HashSigil { + return &HashSigil{Hash: hashAlgorithm} } func (sigil *HashSigil) In(data []byte) ([]byte, error) { @@ -214,8 +214,8 @@ func (sigil *HashSigil) Out(data []byte) ([]byte, error) { // Example: hexSigil, _ := sigil.NewSigil("hex") // Example: gzipSigil, _ := sigil.NewSigil("gzip") // Example: transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) -func NewSigil(name string) (Sigil, error) { - switch name { +func NewSigil(sigilName string) (Sigil, error) { + switch sigilName { case "reverse": return &ReverseSigil{}, nil case "hex": @@ -265,7 +265,7 @@ func NewSigil(name string) (Sigil, error) { case "blake2b-512": return NewHashSigil(crypto.BLAKE2b_512), nil default: - return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", name), fs.ErrInvalid) + return nil, core.E("sigil.NewSigil", core.Concat("unknown sigil name: ", sigilName), fs.ErrInvalid) } } diff --git a/store/store.go b/store/store.go index f59d818..62a1f62 100644 --- a/store/store.go +++ b/store/store.go @@ -133,7 +133,7 @@ func (store *Store) GetAll(group string) (map[string]string, error) { // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // Example: _ = keyValueStore.Set("user", "name", "alice") -// Example: out, _ := keyValueStore.Render("hello {{ .name }}", "user") +// Example: renderedText, _ := keyValueStore.Render("hello {{ 
.name }}", "user") func (store *Store) Render(templateText, group string) (string, error) { rows, err := store.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) if err != nil { @@ -153,12 +153,12 @@ func (store *Store) Render(templateText, group string) (string, error) { return "", core.E("store.Render", "rows", err) } - tmpl, err := template.New("render").Parse(templateText) + renderTemplate, err := template.New("render").Parse(templateText) if err != nil { return "", core.E("store.Render", "parse template", err) } builder := core.NewBuilder() - if err := tmpl.Execute(builder, templateValues); err != nil { + if err := renderTemplate.Execute(builder, templateValues); err != nil { return "", core.E("store.Render", "execute template", err) } return builder.String(), nil diff --git a/store/store_test.go b/store/store_test.go index 0349d90..88584e2 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -107,9 +107,9 @@ func TestStore_Render_Good(t *testing.T) { _ = keyValueStore.Set("user", "pool", "pool.lthn.io:3333") _ = keyValueStore.Set("user", "wallet", "iz...") - tmpl := `{"pool":"{{ .pool }}","wallet":"{{ .wallet }}"}` - out, err := keyValueStore.Render(tmpl, "user") + templateText := `{"pool":"{{ .pool }}","wallet":"{{ .wallet }}"}` + renderedText, err := keyValueStore.Render(templateText, "user") require.NoError(t, err) - assert.Contains(t, out, "pool.lthn.io:3333") - assert.Contains(t, out, "iz...") + assert.Contains(t, renderedText, "pool.lthn.io:3333") + assert.Contains(t, renderedText, "iz...") } diff --git a/workspace/service.go b/workspace/service.go index 635e011..b1ede43 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -25,7 +25,7 @@ type Workspace interface { // Example: key, _ := keyPairProvider.CreateKeyPair("alice", "pass123") type KeyPairProvider interface { - CreateKeyPair(name, passphrase string) (string, error) + CreateKeyPair(identifier, passphrase string) (string, error) } const ( 
@@ -202,7 +202,7 @@ func (service *Service) WriteWorkspaceFile(workspaceFilePath, content string) er return service.medium.Write(filePath, content) } -// Example: result := service.HandleWorkspaceCommand(WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) +// Example: commandResult := service.HandleWorkspaceCommand(WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { switch command.Action { case WorkspaceCreateAction: @@ -221,7 +221,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) -func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { +func (service *Service) HandleWorkspaceMessage(coreRuntime *core.Core, message core.Message) core.Result { switch command := message.(type) { case WorkspaceCommand: return service.HandleWorkspaceCommand(command) From c6adf478d80c151ca6783b6b8626853ffe656aed Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:41:04 +0000 Subject: [PATCH 60/83] refactor(ax): rename nonce helper for clearer naming Co-Authored-By: Virgil --- datanode/medium.go | 4 ++-- docs/RFC.md | 6 +++--- sigil/crypto_sigil.go | 4 ++-- sigil/crypto_sigil_test.go | 16 ++++++++-------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 73c0f78..6896eb2 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -345,8 +345,8 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { prefix += "/" } seen := make(map[string]bool) - for _, e := range entries { - seen[e.Name()] = true + for _, entry := range entries { + seen[entry.Name()] = true } for directoryPath := range medium.directorySet { diff --git 
a/docs/RFC.md b/docs/RFC.md index adcbe42..9041f79 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -2359,7 +2359,7 @@ Returned when ciphertext is too short to decrypt. Example: ```go -_, err := sigil.GetNonceFromCiphertext([]byte("short")) +_, err := sigil.NonceFromCiphertext([]byte("short")) if errors.Is(err, sigil.CiphertextTooShortError) { // handle truncated payload } @@ -2503,7 +2503,7 @@ ob := &sigil.ShuffleMaskObfuscator{} s, _ := sigil.NewChaChaPolySigil(key, ob) ``` -### GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) +### NonceFromCiphertext(ciphertext []byte) ([]byte, error) Extracts the XChaCha20 nonce from encrypted output. @@ -2512,5 +2512,5 @@ Example: key := make([]byte, 32) s, _ := sigil.NewChaChaPolySigil(key, nil) ciphertext, _ := s.In([]byte("hello")) -nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) +nonce, _ := sigil.NonceFromCiphertext(ciphertext) ``` diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index e012974..e88e657 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -275,8 +275,8 @@ func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return plaintext, nil } -// Example: nonce, _ := sigil.GetNonceFromCiphertext(ciphertext) -func GetNonceFromCiphertext(ciphertext []byte) ([]byte, error) { +// Example: nonce, _ := sigil.NonceFromCiphertext(ciphertext) +func NonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX if len(ciphertext) < nonceSize { return nil, CiphertextTooShortError diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index 31fb653..d7a2e29 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -362,28 +362,28 @@ func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { assert.Equal(t, plaintext, decrypted) } -func TestCryptoSigil_GetNonceFromCiphertext_Good(t *testing.T) { +func TestCryptoSigil_NonceFromCiphertext_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) 
cipherSigil, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := cipherSigil.In([]byte("nonce extraction test")) - nonce, err := GetNonceFromCiphertext(ciphertext) + nonce, err := NonceFromCiphertext(ciphertext) require.NoError(t, err) assert.Len(t, nonce, 24) assert.Equal(t, ciphertext[:24], nonce) } -func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { +func TestCryptoSigil_NonceFromCiphertext_NonceCopied_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) cipherSigil, _ := NewChaChaPolySigil(key, nil) ciphertext, _ := cipherSigil.In([]byte("data")) - nonce, _ := GetNonceFromCiphertext(ciphertext) + nonce, _ := NonceFromCiphertext(ciphertext) original := make([]byte, len(nonce)) copy(original, nonce) @@ -391,13 +391,13 @@ func TestCryptoSigil_GetNonceFromCiphertext_NonceCopied_Good(t *testing.T) { assert.Equal(t, original, ciphertext[:24]) } -func TestCryptoSigil_GetNonceFromCiphertext_TooShort_Bad(t *testing.T) { - _, err := GetNonceFromCiphertext([]byte("short")) +func TestCryptoSigil_NonceFromCiphertext_TooShort_Bad(t *testing.T) { + _, err := NonceFromCiphertext([]byte("short")) assert.ErrorIs(t, err, CiphertextTooShortError) } -func TestCryptoSigil_GetNonceFromCiphertext_Empty_Bad(t *testing.T) { - _, err := GetNonceFromCiphertext(nil) +func TestCryptoSigil_NonceFromCiphertext_Empty_Bad(t *testing.T) { + _, err := NonceFromCiphertext(nil) assert.ErrorIs(t, err, CiphertextTooShortError) } From 45bd96387ac25fd2c00d6067e007abcdf800df04 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:47:35 +0000 Subject: [PATCH 61/83] refactor(workspace): harden path boundaries and naming Co-Authored-By: Virgil --- docs/RFC.md | 2 +- workspace/service.go | 14 +++++++++----- workspace/service_test.go | 10 ++++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/docs/RFC.md b/docs/RFC.md index 9041f79..74cac69 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -2077,7 +2077,7 @@ service, _ := 
workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvid _ = service ``` -**CreateWorkspace(identifier, password string) (string, error)** +**CreateWorkspace(identifier, passphrase string) (string, error)** Example: ```go service, _ := workspace.New(workspace.Options{KeyPairProvider: stubKeyPairProvider{}}) diff --git a/workspace/service.go b/workspace/service.go index b1ede43..feefae8 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -17,7 +17,7 @@ import ( // Example: Medium: io.NewMemoryMedium(), // Example: }) type Workspace interface { - CreateWorkspace(identifier, password string) (string, error) + CreateWorkspace(identifier, passphrase string) (string, error) SwitchWorkspace(workspaceID string) error ReadWorkspaceFile(workspaceFilePath string) (string, error) WriteWorkspaceFile(workspaceFilePath, content string) error @@ -109,7 +109,7 @@ func New(options Options) (*Service, error) { } // Example: workspaceID, _ := service.CreateWorkspace("alice", "pass123") -func (service *Service) CreateWorkspace(identifier, password string) (string, error) { +func (service *Service) CreateWorkspace(identifier, passphrase string) (string, error) { service.stateLock.Lock() defer service.stateLock.Unlock() @@ -134,7 +134,7 @@ func (service *Service) CreateWorkspace(identifier, password string) (string, er } } - privateKey, err := service.keyPairProvider.CreateKeyPair(identifier, password) + privateKey, err := service.keyPairProvider.CreateKeyPair(identifier, passphrase) if err != nil { return "", core.E("workspace.CreateWorkspace", "failed to generate keys", err) } @@ -206,7 +206,8 @@ func (service *Service) WriteWorkspaceFile(workspaceFilePath, content string) er func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { switch command.Action { case WorkspaceCreateAction: - workspaceID, err := service.CreateWorkspace(command.Identifier, command.Password) + passphrase := command.Password + workspaceID, err := 
service.CreateWorkspace(command.Identifier, passphrase) if err != nil { return core.Result{}.New(err) } @@ -221,7 +222,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) -func (service *Service) HandleWorkspaceMessage(coreRuntime *core.Core, message core.Message) core.Result { +func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { switch command := message.(type) { case WorkspaceCommand: return service.HandleWorkspaceCommand(command) @@ -242,6 +243,9 @@ func resolveWorkspaceHomeDirectory() string { func joinPathWithinRoot(root string, parts ...string) (string, error) { candidate := core.Path(append([]string{root}, parts...)...) sep := core.Env("DS") + if sep == "" { + sep = "/" + } if candidate == root || core.HasPrefix(candidate, root+sep) { return candidate, nil } diff --git a/workspace/service_test.go b/workspace/service_test.go index e28dece..081d2a4 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -1,6 +1,7 @@ package workspace import ( + "io/fs" "testing" core "dappco.re/go/core" @@ -117,6 +118,15 @@ func TestService_WriteWorkspaceFile_TraversalBlocked_Bad(t *testing.T) { require.Error(t, err) } +func TestService_JoinPathWithinRoot_DefaultSeparator_Good(t *testing.T) { + t.Setenv("DS", "") + + path, err := joinPathWithinRoot("/tmp/workspaces", "../workspaces2") + require.Error(t, err) + assert.ErrorIs(t, err, fs.ErrPermission) + assert.Empty(t, path) +} + func TestService_HandleWorkspaceMessage_Command_Good(t *testing.T) { service, _ := newTestService(t) From e922734c6e2e67865cc7a67dfe7559a88b0b06a1 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 13:54:58 +0000 Subject: [PATCH 62/83] refactor(store): rename key-value store surface Co-Authored-By: Virgil --- docs/RFC.md | 76 
++++++++++++++++++++++---------------------- docs/architecture.md | 17 +++++----- store/medium.go | 66 +++++++++++++++++++------------------- store/medium_test.go | 12 +++---- store/store.go | 38 +++++++++++----------- store/store_test.go | 40 +++++++++++------------ 6 files changed, 125 insertions(+), 124 deletions(-) diff --git a/docs/RFC.md b/docs/RFC.md index 74cac69..aeb126c 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -1097,8 +1097,8 @@ Returned when a key does not exist. Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_, err := s.Get("config", "missing") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_, err := keyValueStore.Get("config", "missing") if core.Is(err, store.NotFoundError) { // handle missing key } @@ -1114,94 +1114,94 @@ options := store.Options{Path: ":memory:"} _ = options ``` -### New(options Options) (*Store, error) +### New(options Options) (*KeyValueStore, error) -Creates a new `Store` backed by the configured SQLite path. +Creates a new `KeyValueStore` backed by the configured SQLite path. Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") ``` -### Store +### KeyValueStore Group-namespaced key-value store. 
Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") ``` **Close() error** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Close() +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Close() ``` **Get(group, key string) (string, error)** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") -value, _ := s.Get("config", "theme") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") +value, _ := keyValueStore.Get("config", "theme") ``` **Set(group, key, value string) error** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") ``` **Delete(group, key string) error** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") -_ = s.Delete("config", "theme") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") +_ = keyValueStore.Delete("config", "theme") ``` **Count(group string) (int, error)** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") -count, _ := s.Count("config") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") +count, _ := keyValueStore.Count("config") ``` **DeleteGroup(group string) error** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") -_ = s.DeleteGroup("config") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = 
keyValueStore.Set("config", "theme", "midnight") +_ = keyValueStore.DeleteGroup("config") ``` **GetAll(group string) (map[string]string, error)** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("config", "theme", "midnight") -all, _ := s.GetAll("config") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("config", "theme", "midnight") +all, _ := keyValueStore.GetAll("config") ``` **Render(tmplStr, group string) (string, error)** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -_ = s.Set("user", "name", "alice") -out, _ := s.Render("hello {{ .name }}", "user") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +_ = keyValueStore.Set("user", "name", "alice") +renderedText, _ := keyValueStore.Render("hello {{ .name }}", "user") ``` **AsMedium() *Medium** Example: ```go -s, _ := store.New(store.Options{Path: ":memory:"}) -m := s.AsMedium() -_ = m.Write("config/theme", "midnight") +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +medium := keyValueStore.AsMedium() +_ = medium.Write("config/theme", "midnight") ``` ### NewMedium(options Options) (*Medium, error) @@ -1216,7 +1216,7 @@ _ = m.Write("config/theme", "midnight") ### Medium -Adapter that maps `group/key` paths onto a `Store`. +Adapter that maps `group/key` paths onto a `KeyValueStore`. 
Example: ```go @@ -1224,12 +1224,12 @@ m, _ := store.NewMedium(store.Options{Path: ":memory:"}) _ = m.Write("config/theme", "midnight") ``` -**Store() *Store** +**KeyValueStore() *KeyValueStore** Example: ```go m, _ := store.NewMedium(store.Options{Path: ":memory:"}) -s := m.Store() -_ = s.Set("config", "theme", "midnight") +keyValueStore := m.KeyValueStore() +_ = keyValueStore.Set("config", "theme", "midnight") ``` **Close() error** diff --git a/docs/architecture.md b/docs/architecture.md index f6df154..d557610 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -117,7 +117,7 @@ A thread-safe `Medium` backed by Borg's `DataNode` (an in-memory `fs.FS` with ta The store package provides two complementary APIs: -### Store (key-value) +### KeyValueStore (key-value) A group-namespaced key-value store backed by SQLite: @@ -135,22 +135,23 @@ Operations: `Get`, `Set`, `Delete`, `Count`, `DeleteGroup`, `GetAll`, `Render`. The `Render` method loads all key-value pairs from a group into a `map[string]string` and executes a Go `text/template` against them: ```go -s.Set("user", "pool", "pool.lthn.io:3333") -s.Set("user", "wallet", "iz...") -out, _ := s.Render(`{"pool":"{{ .pool }}"}`, "user") -// out: {"pool":"pool.lthn.io:3333"} +keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) +keyValueStore.Set("user", "pool", "pool.lthn.io:3333") +keyValueStore.Set("user", "wallet", "iz...") +renderedText, _ := keyValueStore.Render(`{"pool":"{{ .pool }}"}`, "user") +// renderedText: {"pool":"pool.lthn.io:3333"} ``` ### store.Medium (Medium adapter) -Wraps a `Store` to satisfy the `Medium` interface. Paths are split as `group/key`: +Wraps a `KeyValueStore` to satisfy the `Medium` interface. 
Paths are split as `group/key`: - `Read("config/theme")` calls `Get("config", "theme")` - `List("")` returns all groups as directories - `List("config")` returns all keys in the `config` group as files - `IsDir("config")` returns true if the group has entries -You can create it directly (`NewMedium(":memory:")`) or adapt an existing store (`store.AsMedium()`). +You can create it directly (`store.NewMedium(store.Options{Path: ":memory:"})`) or adapt an existing store (`keyValueStore.AsMedium()`). ## sigil Package @@ -270,7 +271,7 @@ Application code +-- sqlite.Medium --> modernc.org/sqlite +-- node.Node --> in-memory map + tar serialisation +-- datanode.Medium --> Borg DataNode + sync.RWMutex - +-- store.Medium --> store.Store (SQLite KV) --> Medium adapter + +-- store.Medium --> store.KeyValueStore (SQLite KV) --> Medium adapter +-- MemoryMedium --> map[string]string (for tests) ``` diff --git a/store/medium.go b/store/medium.go index 4899af5..9e10877 100644 --- a/store/medium.go +++ b/store/medium.go @@ -15,7 +15,7 @@ import ( // Example: entries, _ := medium.List("") // Example: entries, _ := medium.List("app") type Medium struct { - store *Store + keyValueStore *KeyValueStore } var _ coreio.Medium = (*Medium)(nil) @@ -23,26 +23,26 @@ var _ coreio.Medium = (*Medium)(nil) // Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) // Example: _ = medium.Write("app/theme", "midnight") func NewMedium(options Options) (*Medium, error) { - store, err := New(options) + keyValueStore, err := New(options) if err != nil { return nil, err } - return &Medium{store: store}, nil + return &Medium{keyValueStore: keyValueStore}, nil } // Example: medium := keyValueStore.AsMedium() -func (store *Store) AsMedium() *Medium { - return &Medium{store: store} +func (keyValueStore *KeyValueStore) AsMedium() *Medium { + return &Medium{keyValueStore: keyValueStore} } -// Example: keyValueStore := medium.Store() -func (medium *Medium) Store() *Store { - return medium.store 
+// Example: keyValueStore := medium.KeyValueStore() +func (medium *Medium) KeyValueStore() *KeyValueStore { + return medium.keyValueStore } // Example: _ = medium.Close() func (medium *Medium) Close() error { - return medium.store.Close() + return medium.keyValueStore.Close() } func splitGroupKeyPath(entryPath string) (group, key string) { @@ -63,7 +63,7 @@ func (medium *Medium) Read(entryPath string) (string, error) { if key == "" { return "", core.E("store.Read", "path must include group/key", fs.ErrInvalid) } - return medium.store.Get(group, key) + return medium.keyValueStore.Get(group, key) } func (medium *Medium) Write(entryPath, content string) error { @@ -71,7 +71,7 @@ func (medium *Medium) Write(entryPath, content string) error { if key == "" { return core.E("store.Write", "path must include group/key", fs.ErrInvalid) } - return medium.store.Set(group, key, content) + return medium.keyValueStore.Set(group, key, content) } // Example: _ = medium.WriteMode("app/theme", "midnight", 0600) @@ -89,7 +89,7 @@ func (medium *Medium) IsFile(entryPath string) bool { if key == "" { return false } - _, err := medium.store.Get(group, key) + _, err := medium.keyValueStore.Get(group, key) return err == nil } @@ -99,7 +99,7 @@ func (medium *Medium) Delete(entryPath string) error { return core.E("store.Delete", "path is required", fs.ErrInvalid) } if key == "" { - entryCount, err := medium.store.Count(group) + entryCount, err := medium.keyValueStore.Count(group) if err != nil { return err } @@ -108,7 +108,7 @@ func (medium *Medium) Delete(entryPath string) error { } return nil } - return medium.store.Delete(group, key) + return medium.keyValueStore.Delete(group, key) } func (medium *Medium) DeleteAll(entryPath string) error { @@ -117,9 +117,9 @@ func (medium *Medium) DeleteAll(entryPath string) error { return core.E("store.DeleteAll", "path is required", fs.ErrInvalid) } if key == "" { - return medium.store.DeleteGroup(group) + return medium.keyValueStore.DeleteGroup(group) 
} - return medium.store.Delete(group, key) + return medium.keyValueStore.Delete(group, key) } func (medium *Medium) Rename(oldPath, newPath string) error { @@ -128,14 +128,14 @@ func (medium *Medium) Rename(oldPath, newPath string) error { if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } - value, err := medium.store.Get(oldGroup, oldKey) + value, err := medium.keyValueStore.Get(oldGroup, oldKey) if err != nil { return err } - if err := medium.store.Set(newGroup, newKey, value); err != nil { + if err := medium.keyValueStore.Set(newGroup, newKey, value); err != nil { return err } - return medium.store.Delete(oldGroup, oldKey) + return medium.keyValueStore.Delete(oldGroup, oldKey) } // Example: entries, _ := medium.List("app") @@ -143,7 +143,7 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { - rows, err := medium.store.database.Query("SELECT DISTINCT group_name FROM entries ORDER BY group_name") + rows, err := medium.keyValueStore.database.Query("SELECT DISTINCT group_name FROM entries ORDER BY group_name") if err != nil { return nil, core.E("store.List", "query groups", err) } @@ -167,7 +167,7 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { return nil, nil } - all, err := medium.store.GetAll(group) + all, err := medium.keyValueStore.GetAll(group) if err != nil { return nil, err } @@ -185,7 +185,7 @@ func (medium *Medium) Stat(entryPath string) (fs.FileInfo, error) { return nil, core.E("store.Stat", "path is required", fs.ErrInvalid) } if key == "" { - entryCount, err := medium.store.Count(group) + entryCount, err := medium.keyValueStore.Count(group) if err != nil { return nil, err } @@ -194,7 +194,7 @@ func (medium *Medium) Stat(entryPath string) (fs.FileInfo, error) { } return &keyValueFileInfo{name: group, isDir: true}, nil } - value, err := medium.store.Get(group, key) + value, err := 
medium.keyValueStore.Get(group, key) if err != nil { return nil, err } @@ -206,7 +206,7 @@ func (medium *Medium) Open(entryPath string) (fs.File, error) { if key == "" { return nil, core.E("store.Open", "path must include group/key", fs.ErrInvalid) } - value, err := medium.store.Get(group, key) + value, err := medium.keyValueStore.Get(group, key) if err != nil { return nil, err } @@ -218,7 +218,7 @@ func (medium *Medium) Create(entryPath string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Create", "path must include group/key", fs.ErrInvalid) } - return &keyValueWriteCloser{store: medium.store, group: group, key: key}, nil + return &keyValueWriteCloser{keyValueStore: medium.keyValueStore, group: group, key: key}, nil } func (medium *Medium) Append(entryPath string) (goio.WriteCloser, error) { @@ -226,8 +226,8 @@ func (medium *Medium) Append(entryPath string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } - existingValue, _ := medium.store.Get(group, key) - return &keyValueWriteCloser{store: medium.store, group: group, key: key, data: []byte(existingValue)}, nil + existingValue, _ := medium.keyValueStore.Get(group, key) + return &keyValueWriteCloser{keyValueStore: medium.keyValueStore, group: group, key: key, data: []byte(existingValue)}, nil } func (medium *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { @@ -235,7 +235,7 @@ func (medium *Medium) ReadStream(entryPath string) (goio.ReadCloser, error) { if key == "" { return nil, core.E("store.ReadStream", "path must include group/key", fs.ErrInvalid) } - value, err := medium.store.Get(group, key) + value, err := medium.keyValueStore.Get(group, key) if err != nil { return nil, err } @@ -252,10 +252,10 @@ func (medium *Medium) Exists(entryPath string) bool { return false } if key == "" { - entryCount, err := medium.store.Count(group) + entryCount, err := medium.keyValueStore.Count(group) return err == 
nil && entryCount > 0 } - _, err := medium.store.Get(group, key) + _, err := medium.keyValueStore.Get(group, key) return err == nil } @@ -264,7 +264,7 @@ func (medium *Medium) IsDir(entryPath string) bool { if key != "" || group == "" { return false } - entryCount, err := medium.store.Count(group) + entryCount, err := medium.keyValueStore.Count(group) return err == nil && entryCount > 0 } @@ -334,7 +334,7 @@ func (file *keyValueFile) Read(buffer []byte) (int, error) { func (file *keyValueFile) Close() error { return nil } type keyValueWriteCloser struct { - store *Store + keyValueStore *KeyValueStore group string key string data []byte @@ -346,5 +346,5 @@ func (writer *keyValueWriteCloser) Write(data []byte) (int, error) { } func (writer *keyValueWriteCloser) Close() error { - return writer.store.Set(writer.group, writer.key, string(writer.data)) + return writer.keyValueStore.Set(writer.group, writer.key, string(writer.data)) } diff --git a/store/medium_test.go b/store/medium_test.go index 1065ed8..f448794 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -183,12 +183,12 @@ func TestKeyValueMedium_Append_Good(t *testing.T) { } func TestKeyValueMedium_AsMedium_Good(t *testing.T) { - s := newTestStore(t) + keyValueStore := newTestKeyValueStore(t) - m := s.AsMedium() + m := keyValueStore.AsMedium() require.NoError(t, m.Write("group/key", "val")) - value, err := s.Get("group", "key") + value, err := keyValueStore.Get("group", "key") require.NoError(t, err) assert.Equal(t, "val", value) @@ -197,11 +197,11 @@ func TestKeyValueMedium_AsMedium_Good(t *testing.T) { assert.Equal(t, "val", value) } -func TestKeyValueMedium_Store_Good(t *testing.T) { +func TestKeyValueMedium_KeyValueStore_Good(t *testing.T) { m := newTestKeyValueMedium(t) - assert.NotNil(t, m.Store()) - assert.Same(t, m.Store(), m.Store()) + assert.NotNil(t, m.KeyValueStore()) + assert.Same(t, m.KeyValueStore(), m.KeyValueStore()) } func TestKeyValueMedium_EnsureDir_ReadWrite_Good(t *testing.T) { 
diff --git a/store/store.go b/store/store.go index 62a1f62..e32ac48 100644 --- a/store/store.go +++ b/store/store.go @@ -14,7 +14,7 @@ import ( var NotFoundError = errors.New("key not found") // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) -type Store struct { +type KeyValueStore struct { database *sql.DB } @@ -25,7 +25,7 @@ type Options struct { // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // Example: _ = keyValueStore.Set("app", "theme", "midnight") -func New(options Options) (*Store, error) { +func New(options Options) (*KeyValueStore, error) { if options.Path == "" { return nil, core.E("store.New", "database path is required", fs.ErrInvalid) } @@ -47,18 +47,18 @@ func New(options Options) (*Store, error) { database.Close() return nil, core.E("store.New", "create schema", err) } - return &Store{database: database}, nil + return &KeyValueStore{database: database}, nil } // Example: _ = keyValueStore.Close() -func (store *Store) Close() error { - return store.database.Close() +func (keyValueStore *KeyValueStore) Close() error { + return keyValueStore.database.Close() } // Example: theme, _ := keyValueStore.Get("app", "theme") -func (store *Store) Get(group, key string) (string, error) { +func (keyValueStore *KeyValueStore) Get(group, key string) (string, error) { var value string - err := store.database.QueryRow("SELECT entry_value FROM entries WHERE group_name = ? AND entry_key = ?", group, key).Scan(&value) + err := keyValueStore.database.QueryRow("SELECT entry_value FROM entries WHERE group_name = ? 
AND entry_key = ?", group, key).Scan(&value) if err == sql.ErrNoRows { return "", core.E("store.Get", core.Concat("not found: ", group, "/", key), NotFoundError) } @@ -69,8 +69,8 @@ func (store *Store) Get(group, key string) (string, error) { } // Example: _ = keyValueStore.Set("app", "theme", "midnight") -func (store *Store) Set(group, key, value string) error { - _, err := store.database.Exec( +func (keyValueStore *KeyValueStore) Set(group, key, value string) error { + _, err := keyValueStore.database.Exec( `INSERT INTO entries (group_name, entry_key, entry_value) VALUES (?, ?, ?) ON CONFLICT(group_name, entry_key) DO UPDATE SET entry_value = excluded.entry_value`, group, key, value, @@ -82,8 +82,8 @@ func (store *Store) Set(group, key, value string) error { } // Example: _ = keyValueStore.Delete("app", "theme") -func (store *Store) Delete(group, key string) error { - _, err := store.database.Exec("DELETE FROM entries WHERE group_name = ? AND entry_key = ?", group, key) +func (keyValueStore *KeyValueStore) Delete(group, key string) error { + _, err := keyValueStore.database.Exec("DELETE FROM entries WHERE group_name = ? 
AND entry_key = ?", group, key) if err != nil { return core.E("store.Delete", "exec", err) } @@ -91,9 +91,9 @@ func (store *Store) Delete(group, key string) error { } // Example: count, _ := keyValueStore.Count("app") -func (store *Store) Count(group string) (int, error) { +func (keyValueStore *KeyValueStore) Count(group string) (int, error) { var count int - err := store.database.QueryRow("SELECT COUNT(*) FROM entries WHERE group_name = ?", group).Scan(&count) + err := keyValueStore.database.QueryRow("SELECT COUNT(*) FROM entries WHERE group_name = ?", group).Scan(&count) if err != nil { return 0, core.E("store.Count", "query", err) } @@ -101,8 +101,8 @@ func (store *Store) Count(group string) (int, error) { } // Example: _ = keyValueStore.DeleteGroup("app") -func (store *Store) DeleteGroup(group string) error { - _, err := store.database.Exec("DELETE FROM entries WHERE group_name = ?", group) +func (keyValueStore *KeyValueStore) DeleteGroup(group string) error { + _, err := keyValueStore.database.Exec("DELETE FROM entries WHERE group_name = ?", group) if err != nil { return core.E("store.DeleteGroup", "exec", err) } @@ -110,8 +110,8 @@ func (store *Store) DeleteGroup(group string) error { } // Example: values, _ := keyValueStore.GetAll("app") -func (store *Store) GetAll(group string) (map[string]string, error) { - rows, err := store.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) +func (keyValueStore *KeyValueStore) GetAll(group string) (map[string]string, error) { + rows, err := keyValueStore.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) if err != nil { return nil, core.E("store.GetAll", "query", err) } @@ -134,8 +134,8 @@ func (store *Store) GetAll(group string) (map[string]string, error) { // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) // Example: _ = keyValueStore.Set("user", "name", "alice") // Example: renderedText, _ := 
keyValueStore.Render("hello {{ .name }}", "user") -func (store *Store) Render(templateText, group string) (string, error) { - rows, err := store.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) +func (keyValueStore *KeyValueStore) Render(templateText, group string) (string, error) { + rows, err := keyValueStore.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) if err != nil { return "", core.E("store.Render", "query", err) } diff --git a/store/store_test.go b/store/store_test.go index 88584e2..3f4c2a9 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func newTestStore(t *testing.T) *Store { +func newTestKeyValueStore(t *testing.T) *KeyValueStore { t.Helper() keyValueStore, err := New(Options{Path: ":memory:"}) @@ -18,18 +18,18 @@ func newTestStore(t *testing.T) *Store { return keyValueStore } -func TestStore_New_Options_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_New_Options_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) assert.NotNil(t, keyValueStore) } -func TestStore_New_Options_Bad(t *testing.T) { +func TestKeyValueStore_New_Options_Bad(t *testing.T) { _, err := New(Options{}) assert.Error(t, err) } -func TestStore_SetGet_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_SetGet_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) err := keyValueStore.Set("config", "theme", "dark") require.NoError(t, err) @@ -39,15 +39,15 @@ func TestStore_SetGet_Good(t *testing.T) { assert.Equal(t, "dark", value) } -func TestStore_Get_NotFound_Bad(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_Get_NotFound_Bad(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _, err := keyValueStore.Get("config", "missing") assert.ErrorIs(t, err, NotFoundError) } -func TestStore_Delete_Good(t *testing.T) { - 
keyValueStore := newTestStore(t) +func TestKeyValueStore_Delete_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _ = keyValueStore.Set("config", "key", "val") err := keyValueStore.Delete("config", "key") @@ -57,8 +57,8 @@ func TestStore_Delete_Good(t *testing.T) { assert.ErrorIs(t, err, NotFoundError) } -func TestStore_Count_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_Count_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -69,8 +69,8 @@ func TestStore_Count_Good(t *testing.T) { assert.Equal(t, 2, count) } -func TestStore_DeleteGroup_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_DeleteGroup_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -81,8 +81,8 @@ func TestStore_DeleteGroup_Good(t *testing.T) { assert.Equal(t, 0, count) } -func TestStore_GetAll_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_GetAll_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -93,16 +93,16 @@ func TestStore_GetAll_Good(t *testing.T) { assert.Equal(t, map[string]string{"a": "1", "b": "2"}, all) } -func TestStore_GetAll_Empty_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_GetAll_Empty_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) all, err := keyValueStore.GetAll("empty") require.NoError(t, err) assert.Empty(t, all) } -func TestStore_Render_Good(t *testing.T) { - keyValueStore := newTestStore(t) +func TestKeyValueStore_Render_Good(t *testing.T) { + keyValueStore := newTestKeyValueStore(t) _ = keyValueStore.Set("user", "pool", "pool.lthn.io:3333") _ = keyValueStore.Set("user", "wallet", "iz...") From 
9dbcc5d1848cbc38c31cb0f53e065d0dd78e2635 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:00:33 +0000 Subject: [PATCH 63/83] refactor(ax): rename medium test variables and examples Co-Authored-By: Virgil --- datanode/medium_test.go | 242 +++++++++++++-------------- docs/development.md | 24 +-- local/medium_test.go | 282 +++++++++++++++---------------- medium_test.go | 226 ++++++++++++------------- sqlite/sqlite_test.go | 358 ++++++++++++++++++++-------------------- store/medium_test.go | 154 ++++++++--------- 6 files changed, 643 insertions(+), 643 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 614ba46..8397c1b 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -14,96 +14,96 @@ import ( var _ coreio.Medium = (*Medium)(nil) func TestDataNode_ReadWrite_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - err := m.Write("hello.txt", "world") + err := dataNodeMedium.Write("hello.txt", "world") require.NoError(t, err) - got, err := m.Read("hello.txt") + got, err := dataNodeMedium.Read("hello.txt") require.NoError(t, err) assert.Equal(t, "world", got) } func TestDataNode_ReadWrite_Bad(t *testing.T) { - m := New() + dataNodeMedium := New() - _, err := m.Read("missing.txt") + _, err := dataNodeMedium.Read("missing.txt") assert.Error(t, err) - err = m.Write("", "content") + err = dataNodeMedium.Write("", "content") assert.Error(t, err) } func TestDataNode_NestedPaths_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("a/b/c/deep.txt", "deep")) + require.NoError(t, dataNodeMedium.Write("a/b/c/deep.txt", "deep")) - got, err := m.Read("a/b/c/deep.txt") + got, err := dataNodeMedium.Read("a/b/c/deep.txt") require.NoError(t, err) assert.Equal(t, "deep", got) - assert.True(t, m.IsDir("a")) - assert.True(t, m.IsDir("a/b")) - assert.True(t, m.IsDir("a/b/c")) + assert.True(t, dataNodeMedium.IsDir("a")) + assert.True(t, dataNodeMedium.IsDir("a/b")) + assert.True(t, 
dataNodeMedium.IsDir("a/b/c")) } func TestDataNode_LeadingSlash_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("/leading/file.txt", "stripped")) - got, err := m.Read("leading/file.txt") + require.NoError(t, dataNodeMedium.Write("/leading/file.txt", "stripped")) + got, err := dataNodeMedium.Read("leading/file.txt") require.NoError(t, err) assert.Equal(t, "stripped", got) - got, err = m.Read("/leading/file.txt") + got, err = dataNodeMedium.Read("/leading/file.txt") require.NoError(t, err) assert.Equal(t, "stripped", got) } func TestDataNode_IsFile_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("file.go", "package main")) + require.NoError(t, dataNodeMedium.Write("file.go", "package main")) - assert.True(t, m.IsFile("file.go")) - assert.False(t, m.IsFile("missing.go")) - assert.False(t, m.IsFile("")) + assert.True(t, dataNodeMedium.IsFile("file.go")) + assert.False(t, dataNodeMedium.IsFile("missing.go")) + assert.False(t, dataNodeMedium.IsFile("")) } func TestDataNode_EnsureDir_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.EnsureDir("foo/bar/baz")) + require.NoError(t, dataNodeMedium.EnsureDir("foo/bar/baz")) - assert.True(t, m.IsDir("foo")) - assert.True(t, m.IsDir("foo/bar")) - assert.True(t, m.IsDir("foo/bar/baz")) - assert.True(t, m.Exists("foo/bar/baz")) + assert.True(t, dataNodeMedium.IsDir("foo")) + assert.True(t, dataNodeMedium.IsDir("foo/bar")) + assert.True(t, dataNodeMedium.IsDir("foo/bar/baz")) + assert.True(t, dataNodeMedium.Exists("foo/bar/baz")) } func TestDataNode_Delete_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("delete-me.txt", "bye")) - assert.True(t, m.Exists("delete-me.txt")) + require.NoError(t, dataNodeMedium.Write("delete-me.txt", "bye")) + assert.True(t, dataNodeMedium.Exists("delete-me.txt")) - require.NoError(t, m.Delete("delete-me.txt")) - assert.False(t, 
m.Exists("delete-me.txt")) + require.NoError(t, dataNodeMedium.Delete("delete-me.txt")) + assert.False(t, dataNodeMedium.Exists("delete-me.txt")) } func TestDataNode_Delete_Bad(t *testing.T) { - medium := New() + dataNodeMedium := New() - assert.Error(t, medium.Delete("ghost.txt")) + assert.Error(t, dataNodeMedium.Delete("ghost.txt")) - require.NoError(t, medium.Write("dir/file.txt", "content")) - assert.Error(t, medium.Delete("dir")) + require.NoError(t, dataNodeMedium.Write("dir/file.txt", "content")) + assert.Error(t, dataNodeMedium.Delete("dir")) } func TestDataNode_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { - m := New() - require.NoError(t, m.Write("dir/file.txt", "content")) + dataNodeMedium := New() + require.NoError(t, dataNodeMedium.Write("dir/file.txt", "content")) original := dataNodeWalkDir dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { @@ -113,28 +113,28 @@ func TestDataNode_Delete_DirectoryInspectionFailure_Bad(t *testing.T) { dataNodeWalkDir = original }) - err := m.Delete("dir") + err := dataNodeMedium.Delete("dir") require.Error(t, err) assert.Contains(t, err.Error(), "failed to inspect directory") } func TestDataNode_DeleteAll_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("tree/a.txt", "a")) - require.NoError(t, m.Write("tree/sub/b.txt", "b")) - require.NoError(t, m.Write("keep.txt", "keep")) + require.NoError(t, dataNodeMedium.Write("tree/a.txt", "a")) + require.NoError(t, dataNodeMedium.Write("tree/sub/b.txt", "b")) + require.NoError(t, dataNodeMedium.Write("keep.txt", "keep")) - require.NoError(t, m.DeleteAll("tree")) + require.NoError(t, dataNodeMedium.DeleteAll("tree")) - assert.False(t, m.Exists("tree/a.txt")) - assert.False(t, m.Exists("tree/sub/b.txt")) - assert.True(t, m.Exists("keep.txt")) + assert.False(t, dataNodeMedium.Exists("tree/a.txt")) + assert.False(t, dataNodeMedium.Exists("tree/sub/b.txt")) + assert.True(t, dataNodeMedium.Exists("keep.txt")) } func 
TestDataNode_DeleteAll_WalkFailure_Bad(t *testing.T) { - m := New() - require.NoError(t, m.Write("tree/a.txt", "a")) + dataNodeMedium := New() + require.NoError(t, dataNodeMedium.Write("tree/a.txt", "a")) original := dataNodeWalkDir dataNodeWalkDir = func(_ fs.FS, _ string, _ fs.WalkDirFunc) error { @@ -144,15 +144,15 @@ func TestDataNode_DeleteAll_WalkFailure_Bad(t *testing.T) { dataNodeWalkDir = original }) - err := m.DeleteAll("tree") + err := dataNodeMedium.DeleteAll("tree") require.Error(t, err) assert.Contains(t, err.Error(), "failed to inspect tree") } func TestDataNode_Delete_RemoveFailure_Bad(t *testing.T) { - m := New() - require.NoError(t, m.Write("keep.txt", "keep")) - require.NoError(t, m.Write("bad.txt", "bad")) + dataNodeMedium := New() + require.NoError(t, dataNodeMedium.Write("keep.txt", "keep")) + require.NoError(t, dataNodeMedium.Write("bad.txt", "bad")) original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { @@ -162,45 +162,45 @@ func TestDataNode_Delete_RemoveFailure_Bad(t *testing.T) { dataNodeReadAll = original }) - err := m.Delete("bad.txt") + err := dataNodeMedium.Delete("bad.txt") require.Error(t, err) assert.Contains(t, err.Error(), "failed to delete file") } func TestDataNode_Rename_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("old.txt", "content")) - require.NoError(t, m.Rename("old.txt", "new.txt")) + require.NoError(t, dataNodeMedium.Write("old.txt", "content")) + require.NoError(t, dataNodeMedium.Rename("old.txt", "new.txt")) - assert.False(t, m.Exists("old.txt")) - got, err := m.Read("new.txt") + assert.False(t, dataNodeMedium.Exists("old.txt")) + got, err := dataNodeMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "content", got) } func TestDataNode_RenameDir_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("src/a.go", "package a")) - require.NoError(t, m.Write("src/sub/b.go", "package b")) + 
require.NoError(t, dataNodeMedium.Write("src/a.go", "package a")) + require.NoError(t, dataNodeMedium.Write("src/sub/b.go", "package b")) - require.NoError(t, m.Rename("src", "destination")) + require.NoError(t, dataNodeMedium.Rename("src", "destination")) - assert.False(t, m.Exists("src/a.go")) + assert.False(t, dataNodeMedium.Exists("src/a.go")) - got, err := m.Read("destination/a.go") + got, err := dataNodeMedium.Read("destination/a.go") require.NoError(t, err) assert.Equal(t, "package a", got) - got, err = m.Read("destination/sub/b.go") + got, err = dataNodeMedium.Read("destination/sub/b.go") require.NoError(t, err) assert.Equal(t, "package b", got) } func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) { - m := New() - require.NoError(t, m.Write("src/a.go", "package a")) + dataNodeMedium := New() + require.NoError(t, dataNodeMedium.Write("src/a.go", "package a")) original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { @@ -210,20 +210,20 @@ func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) { dataNodeReadAll = original }) - err := m.Rename("src", "destination") + err := dataNodeMedium.Rename("src", "destination") require.Error(t, err) assert.Contains(t, err.Error(), "failed to read source file") } func TestDataNode_List_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("root.txt", "r")) - require.NoError(t, m.Write("pkg/a.go", "a")) - require.NoError(t, m.Write("pkg/b.go", "b")) - require.NoError(t, m.Write("pkg/sub/c.go", "c")) + require.NoError(t, dataNodeMedium.Write("root.txt", "r")) + require.NoError(t, dataNodeMedium.Write("pkg/a.go", "a")) + require.NoError(t, dataNodeMedium.Write("pkg/b.go", "b")) + require.NoError(t, dataNodeMedium.Write("pkg/sub/c.go", "c")) - entries, err := m.List("") + entries, err := dataNodeMedium.List("") require.NoError(t, err) names := make([]string, len(entries)) @@ -233,7 +233,7 @@ func TestDataNode_List_Good(t *testing.T) { assert.Contains(t, 
names, "root.txt") assert.Contains(t, names, "pkg") - entries, err = m.List("pkg") + entries, err = dataNodeMedium.List("pkg") require.NoError(t, err) names = make([]string, len(entries)) for i, e := range entries { @@ -245,26 +245,26 @@ func TestDataNode_List_Good(t *testing.T) { } func TestDataNode_Stat_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("stat.txt", "hello")) + require.NoError(t, dataNodeMedium.Write("stat.txt", "hello")) - info, err := m.Stat("stat.txt") + info, err := dataNodeMedium.Stat("stat.txt") require.NoError(t, err) assert.Equal(t, int64(5), info.Size()) assert.False(t, info.IsDir()) - info, err = m.Stat("") + info, err = dataNodeMedium.Stat("") require.NoError(t, err) assert.True(t, info.IsDir()) } func TestDataNode_Open_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("open.txt", "opened")) + require.NoError(t, dataNodeMedium.Write("open.txt", "opened")) - f, err := m.Open("open.txt") + f, err := dataNodeMedium.Open("open.txt") require.NoError(t, err) defer f.Close() @@ -274,30 +274,30 @@ func TestDataNode_Open_Good(t *testing.T) { } func TestDataNode_CreateAppend_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - w, err := m.Create("new.txt") + w, err := dataNodeMedium.Create("new.txt") require.NoError(t, err) w.Write([]byte("hello")) w.Close() - got, err := m.Read("new.txt") + got, err := dataNodeMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "hello", got) - w, err = m.Append("new.txt") + w, err = dataNodeMedium.Append("new.txt") require.NoError(t, err) w.Write([]byte(" world")) w.Close() - got, err = m.Read("new.txt") + got, err = dataNodeMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "hello world", got) } func TestDataNode_Append_ReadFailure_Bad(t *testing.T) { - m := New() - require.NoError(t, m.Write("new.txt", "hello")) + dataNodeMedium := New() + require.NoError(t, dataNodeMedium.Write("new.txt", "hello")) 
original := dataNodeReadAll dataNodeReadAll = func(_ io.Reader) ([]byte, error) { @@ -307,20 +307,20 @@ func TestDataNode_Append_ReadFailure_Bad(t *testing.T) { dataNodeReadAll = original }) - _, err := m.Append("new.txt") + _, err := dataNodeMedium.Append("new.txt") require.Error(t, err) assert.Contains(t, err.Error(), "failed to read existing content") } func TestDataNode_Streams_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - ws, err := m.WriteStream("stream.txt") + ws, err := dataNodeMedium.WriteStream("stream.txt") require.NoError(t, err) ws.Write([]byte("streamed")) ws.Close() - rs, err := m.ReadStream("stream.txt") + rs, err := dataNodeMedium.ReadStream("stream.txt") require.NoError(t, err) data, err := io.ReadAll(rs) require.NoError(t, err) @@ -329,12 +329,12 @@ func TestDataNode_Streams_Good(t *testing.T) { } func TestDataNode_SnapshotRestore_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("a.txt", "alpha")) - require.NoError(t, m.Write("b/c.txt", "charlie")) + require.NoError(t, dataNodeMedium.Write("a.txt", "alpha")) + require.NoError(t, dataNodeMedium.Write("b/c.txt", "charlie")) - snap, err := m.Snapshot() + snap, err := dataNodeMedium.Snapshot() require.NoError(t, err) assert.NotEmpty(t, snap) @@ -351,31 +351,31 @@ func TestDataNode_SnapshotRestore_Good(t *testing.T) { } func TestDataNode_Restore_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("original.txt", "before")) + require.NoError(t, dataNodeMedium.Write("original.txt", "before")) - snap, err := m.Snapshot() + snap, err := dataNodeMedium.Snapshot() require.NoError(t, err) - require.NoError(t, m.Write("original.txt", "after")) - require.NoError(t, m.Write("extra.txt", "extra")) + require.NoError(t, dataNodeMedium.Write("original.txt", "after")) + require.NoError(t, dataNodeMedium.Write("extra.txt", "extra")) - require.NoError(t, m.Restore(snap)) + require.NoError(t, dataNodeMedium.Restore(snap)) - 
got, err := m.Read("original.txt") + got, err := dataNodeMedium.Read("original.txt") require.NoError(t, err) assert.Equal(t, "before", got) - assert.False(t, m.Exists("extra.txt")) + assert.False(t, dataNodeMedium.Exists("extra.txt")) } func TestDataNode_DataNode_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("test.txt", "borg")) + require.NoError(t, dataNodeMedium.Write("test.txt", "borg")) - dn := m.DataNode() + dn := dataNodeMedium.DataNode() assert.NotNil(t, dn) f, err := dn.Open("test.txt") @@ -388,31 +388,31 @@ func TestDataNode_DataNode_Good(t *testing.T) { } func TestDataNode_Overwrite_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("file.txt", "v1")) - require.NoError(t, m.Write("file.txt", "v2")) + require.NoError(t, dataNodeMedium.Write("file.txt", "v1")) + require.NoError(t, dataNodeMedium.Write("file.txt", "v2")) - got, err := m.Read("file.txt") + got, err := dataNodeMedium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "v2", got) } func TestDataNode_Exists_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - assert.True(t, m.Exists("")) - assert.False(t, m.Exists("x")) + assert.True(t, dataNodeMedium.Exists("")) + assert.False(t, dataNodeMedium.Exists("x")) - require.NoError(t, m.Write("x", "y")) - assert.True(t, m.Exists("x")) + require.NoError(t, dataNodeMedium.Write("x", "y")) + assert.True(t, dataNodeMedium.Exists("x")) } func TestDataNode_ReadExistingFile_Good(t *testing.T) { - m := New() + dataNodeMedium := New() - require.NoError(t, m.Write("file.txt", "content")) - got, err := m.Read("file.txt") + require.NoError(t, dataNodeMedium.Write("file.txt", "content")) + got, err := dataNodeMedium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "content", got) } diff --git a/docs/development.md b/docs/development.md index 6ece61b..d060e40 100644 --- a/docs/development.md +++ b/docs/development.md @@ -92,14 +92,14 @@ Use `MemoryMedium` from the 
root package for unit tests that need a storage back ```go func TestMyFeature(t *testing.T) { - m := io.NewMemoryMedium() - _ = m.Write("config.yaml", "key: value") - _ = m.EnsureDir("data") + memoryMedium := io.NewMemoryMedium() + _ = memoryMedium.Write("config.yaml", "key: value") + _ = memoryMedium.EnsureDir("data") - // Your code under test receives m as an io.Medium - result, err := myFunction(m) + // Your code under test receives memoryMedium as an io.Medium + result, err := myFunction(memoryMedium) assert.NoError(t, err) - output, err := m.Read("output.txt") + output, err := memoryMedium.Read("output.txt") require.NoError(t, err) assert.Equal(t, "expected", output) } @@ -109,11 +109,11 @@ For tests that need a real but ephemeral filesystem, use `local.New` with `t.Tem ```go func TestWithRealFS(t *testing.T) { - m, err := local.New(t.TempDir()) + localMedium, err := local.New(t.TempDir()) require.NoError(t, err) - _ = m.Write("file.txt", "hello") - content, _ := m.Read("file.txt") + _ = localMedium.Write("file.txt", "hello") + content, _ := localMedium.Read("file.txt") assert.Equal(t, "hello", content) } ``` @@ -122,11 +122,11 @@ For SQLite-backed tests, use `:memory:`: ```go func TestWithSQLite(t *testing.T) { - m, err := sqlite.New(sqlite.Options{Path: ":memory:"}) + sqliteMedium, err := sqlite.New(sqlite.Options{Path: ":memory:"}) require.NoError(t, err) - defer m.Close() + defer sqliteMedium.Close() - _ = m.Write("file.txt", "hello") + _ = sqliteMedium.Write("file.txt", "hello") } ``` diff --git a/local/medium_test.go b/local/medium_test.go index aeb589b..6c4e42c 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -13,152 +13,152 @@ import ( func TestLocal_New_ResolvesRoot_Good(t *testing.T) { root := t.TempDir() - m, err := New(root) + localMedium, err := New(root) assert.NoError(t, err) resolved, err := resolveSymlinksPath(root) require.NoError(t, err) - assert.Equal(t, resolved, m.filesystemRoot) + assert.Equal(t, resolved, 
localMedium.filesystemRoot) } func TestLocal_Path_Sandboxed_Good(t *testing.T) { - m := &Medium{filesystemRoot: "/home/user"} + localMedium := &Medium{filesystemRoot: "/home/user"} - assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("file.txt")) - assert.Equal(t, "/home/user/dir/file.txt", m.sandboxedPath("dir/file.txt")) + assert.Equal(t, "/home/user/file.txt", localMedium.sandboxedPath("file.txt")) + assert.Equal(t, "/home/user/dir/file.txt", localMedium.sandboxedPath("dir/file.txt")) - assert.Equal(t, "/home/user", m.sandboxedPath("")) + assert.Equal(t, "/home/user", localMedium.sandboxedPath("")) - assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("../file.txt")) - assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("dir/../file.txt")) + assert.Equal(t, "/home/user/file.txt", localMedium.sandboxedPath("../file.txt")) + assert.Equal(t, "/home/user/file.txt", localMedium.sandboxedPath("dir/../file.txt")) - assert.Equal(t, "/home/user/etc/passwd", m.sandboxedPath("/etc/passwd")) + assert.Equal(t, "/home/user/etc/passwd", localMedium.sandboxedPath("/etc/passwd")) } func TestLocal_Path_RootFilesystem_Good(t *testing.T) { - m := &Medium{filesystemRoot: "/"} + localMedium := &Medium{filesystemRoot: "/"} - assert.Equal(t, "/etc/passwd", m.sandboxedPath("/etc/passwd")) - assert.Equal(t, "/home/user/file.txt", m.sandboxedPath("/home/user/file.txt")) + assert.Equal(t, "/etc/passwd", localMedium.sandboxedPath("/etc/passwd")) + assert.Equal(t, "/home/user/file.txt", localMedium.sandboxedPath("/home/user/file.txt")) cwd := currentWorkingDir() - assert.Equal(t, core.Path(cwd, "file.txt"), m.sandboxedPath("file.txt")) + assert.Equal(t, core.Path(cwd, "file.txt"), localMedium.sandboxedPath("file.txt")) } func TestLocal_ReadWrite_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - err := m.Write("test.txt", "hello") + err := localMedium.Write("test.txt", "hello") assert.NoError(t, err) - content, err := 
m.Read("test.txt") + content, err := localMedium.Read("test.txt") assert.NoError(t, err) assert.Equal(t, "hello", content) - err = m.Write("a/b/c.txt", "nested") + err = localMedium.Write("a/b/c.txt", "nested") assert.NoError(t, err) - content, err = m.Read("a/b/c.txt") + content, err = localMedium.Read("a/b/c.txt") assert.NoError(t, err) assert.Equal(t, "nested", content) - _, err = m.Read("nope.txt") + _, err = localMedium.Read("nope.txt") assert.Error(t, err) } func TestLocal_EnsureDir_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - err := m.EnsureDir("one/two/three") + err := localMedium.EnsureDir("one/two/three") assert.NoError(t, err) - info, err := m.Stat("one/two/three") + info, err := localMedium.Stat("one/two/three") assert.NoError(t, err) assert.True(t, info.IsDir()) } func TestLocal_IsDir_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.EnsureDir("mydir") - _ = m.Write("myfile", "x") + _ = localMedium.EnsureDir("mydir") + _ = localMedium.Write("myfile", "x") - assert.True(t, m.IsDir("mydir")) - assert.False(t, m.IsDir("myfile")) - assert.False(t, m.IsDir("nope")) - assert.False(t, m.IsDir("")) + assert.True(t, localMedium.IsDir("mydir")) + assert.False(t, localMedium.IsDir("myfile")) + assert.False(t, localMedium.IsDir("nope")) + assert.False(t, localMedium.IsDir("")) } func TestLocal_IsFile_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.EnsureDir("mydir") - _ = m.Write("myfile", "x") + _ = localMedium.EnsureDir("mydir") + _ = localMedium.Write("myfile", "x") - assert.True(t, m.IsFile("myfile")) - assert.False(t, m.IsFile("mydir")) - assert.False(t, m.IsFile("nope")) - assert.False(t, m.IsFile("")) + assert.True(t, localMedium.IsFile("myfile")) + assert.False(t, localMedium.IsFile("mydir")) + assert.False(t, localMedium.IsFile("nope")) + assert.False(t, localMedium.IsFile("")) } func 
TestLocal_Exists_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("exists", "x") + _ = localMedium.Write("exists", "x") - assert.True(t, m.Exists("exists")) - assert.False(t, m.Exists("nope")) + assert.True(t, localMedium.Exists("exists")) + assert.False(t, localMedium.Exists("nope")) } func TestLocal_List_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("a.txt", "a") - _ = m.Write("b.txt", "b") - _ = m.EnsureDir("subdir") + _ = localMedium.Write("a.txt", "a") + _ = localMedium.Write("b.txt", "b") + _ = localMedium.EnsureDir("subdir") - entries, err := m.List("") + entries, err := localMedium.List("") assert.NoError(t, err) assert.Len(t, entries, 3) } func TestLocal_Stat_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("file", "content") + _ = localMedium.Write("file", "content") - info, err := m.Stat("file") + info, err := localMedium.Stat("file") assert.NoError(t, err) assert.Equal(t, int64(7), info.Size()) } func TestLocal_Delete_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("todelete", "x") - assert.True(t, m.Exists("todelete")) + _ = localMedium.Write("todelete", "x") + assert.True(t, localMedium.Exists("todelete")) - err := m.Delete("todelete") + err := localMedium.Delete("todelete") assert.NoError(t, err) - assert.False(t, m.Exists("todelete")) + assert.False(t, localMedium.Exists("todelete")) } func TestLocal_DeleteAll_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("dir/sub/file", "x") + _ = localMedium.Write("dir/sub/file", "x") - err := m.DeleteAll("dir") + err := localMedium.DeleteAll("dir") assert.NoError(t, err) - assert.False(t, m.Exists("dir")) + assert.False(t, localMedium.Exists("dir")) } func 
TestLocal_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { @@ -168,10 +168,10 @@ func TestLocal_Delete_ProtectedHomeViaSymlinkEnv_Bad(t *testing.T) { require.NoError(t, syscall.Symlink(realHome, homeLink)) t.Setenv("HOME", homeLink) - m, err := New("/") + localMedium, err := New("/") require.NoError(t, err) - err = m.Delete(realHome) + err = localMedium.Delete(realHome) require.Error(t, err) assert.DirExists(t, realHome) } @@ -180,92 +180,92 @@ func TestLocal_DeleteAll_ProtectedHomeViaEnv_Bad(t *testing.T) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - m, err := New("/") + localMedium, err := New("/") require.NoError(t, err) - err = m.DeleteAll(tempHome) + err = localMedium.DeleteAll(tempHome) require.Error(t, err) assert.DirExists(t, tempHome) } func TestLocal_Rename_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - _ = m.Write("old", "x") + _ = localMedium.Write("old", "x") - err := m.Rename("old", "new") + err := localMedium.Rename("old", "new") assert.NoError(t, err) - assert.False(t, m.Exists("old")) - assert.True(t, m.Exists("new")) + assert.False(t, localMedium.Exists("old")) + assert.True(t, localMedium.Exists("new")) } func TestLocal_Delete_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("file.txt", "content") + err = localMedium.Write("file.txt", "content") assert.NoError(t, err) - assert.True(t, medium.IsFile("file.txt")) + assert.True(t, localMedium.IsFile("file.txt")) - err = medium.Delete("file.txt") + err = localMedium.Delete("file.txt") assert.NoError(t, err) - assert.False(t, medium.IsFile("file.txt")) + assert.False(t, localMedium.IsFile("file.txt")) - err = medium.EnsureDir("emptydir") + err = localMedium.EnsureDir("emptydir") assert.NoError(t, err) - err = medium.Delete("emptydir") + err = localMedium.Delete("emptydir") assert.NoError(t, err) - assert.False(t, 
medium.IsDir("emptydir")) + assert.False(t, localMedium.IsDir("emptydir")) } func TestLocal_Delete_NotEmpty_Bad(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("mydir/file.txt", "content") + err = localMedium.Write("mydir/file.txt", "content") assert.NoError(t, err) - err = medium.Delete("mydir") + err = localMedium.Delete("mydir") assert.Error(t, err) } func TestLocal_DeleteAll_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("mydir/file1.txt", "content1") + err = localMedium.Write("mydir/file1.txt", "content1") assert.NoError(t, err) - err = medium.Write("mydir/subdir/file2.txt", "content2") + err = localMedium.Write("mydir/subdir/file2.txt", "content2") assert.NoError(t, err) - err = medium.DeleteAll("mydir") + err = localMedium.DeleteAll("mydir") assert.NoError(t, err) - assert.False(t, medium.Exists("mydir")) - assert.False(t, medium.Exists("mydir/file1.txt")) - assert.False(t, medium.Exists("mydir/subdir/file2.txt")) + assert.False(t, localMedium.Exists("mydir")) + assert.False(t, localMedium.Exists("mydir/file1.txt")) + assert.False(t, localMedium.Exists("mydir/subdir/file2.txt")) } func TestLocal_Rename_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("old.txt", "content") + err = localMedium.Write("old.txt", "content") assert.NoError(t, err) - err = medium.Rename("old.txt", "new.txt") + err = localMedium.Rename("old.txt", "new.txt") assert.NoError(t, err) - assert.False(t, medium.IsFile("old.txt")) - assert.True(t, medium.IsFile("new.txt")) + assert.False(t, localMedium.IsFile("old.txt")) + assert.True(t, localMedium.IsFile("new.txt")) - content, err := medium.Read("new.txt") + content, err := localMedium.Read("new.txt") assert.NoError(t, err) 
assert.Equal(t, "content", content) } @@ -273,32 +273,32 @@ func TestLocal_Rename_Good(t *testing.T) { func TestLocal_Rename_TraversalSanitised_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("file.txt", "content") + err = localMedium.Write("file.txt", "content") assert.NoError(t, err) - err = medium.Rename("file.txt", "../escaped.txt") + err = localMedium.Rename("file.txt", "../escaped.txt") assert.NoError(t, err) - assert.False(t, medium.Exists("file.txt")) - assert.True(t, medium.Exists("escaped.txt")) + assert.False(t, localMedium.Exists("file.txt")) + assert.True(t, localMedium.Exists("escaped.txt")) } func TestLocal_List_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("file1.txt", "content1") + err = localMedium.Write("file1.txt", "content1") assert.NoError(t, err) - err = medium.Write("file2.txt", "content2") + err = localMedium.Write("file2.txt", "content2") assert.NoError(t, err) - err = medium.EnsureDir("subdir") + err = localMedium.EnsureDir("subdir") assert.NoError(t, err) - entries, err := medium.List(".") + entries, err := localMedium.List(".") assert.NoError(t, err) assert.Len(t, entries, 3) @@ -314,20 +314,20 @@ func TestLocal_List_Good(t *testing.T) { func TestLocal_Stat_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("file.txt", "hello world") + err = localMedium.Write("file.txt", "hello world") assert.NoError(t, err) - info, err := medium.Stat("file.txt") + info, err := localMedium.Stat("file.txt") assert.NoError(t, err) assert.Equal(t, "file.txt", info.Name()) assert.Equal(t, int64(11), info.Size()) assert.False(t, info.IsDir()) - err = medium.EnsureDir("mydir") + err = localMedium.EnsureDir("mydir") assert.NoError(t, err) - 
info, err = medium.Stat("mydir") + info, err = localMedium.Stat("mydir") assert.NoError(t, err) assert.Equal(t, "mydir", info.Name()) assert.True(t, info.IsDir()) @@ -336,46 +336,46 @@ func TestLocal_Stat_Good(t *testing.T) { func TestLocal_Exists_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - assert.False(t, medium.Exists("nonexistent")) + assert.False(t, localMedium.Exists("nonexistent")) - err = medium.Write("file.txt", "content") + err = localMedium.Write("file.txt", "content") assert.NoError(t, err) - assert.True(t, medium.Exists("file.txt")) + assert.True(t, localMedium.Exists("file.txt")) - err = medium.EnsureDir("mydir") + err = localMedium.EnsureDir("mydir") assert.NoError(t, err) - assert.True(t, medium.Exists("mydir")) + assert.True(t, localMedium.Exists("mydir")) } func TestLocal_IsDir_Good(t *testing.T) { testRoot := t.TempDir() - medium, err := New(testRoot) + localMedium, err := New(testRoot) assert.NoError(t, err) - err = medium.Write("file.txt", "content") + err = localMedium.Write("file.txt", "content") assert.NoError(t, err) - assert.False(t, medium.IsDir("file.txt")) + assert.False(t, localMedium.IsDir("file.txt")) - err = medium.EnsureDir("mydir") + err = localMedium.EnsureDir("mydir") assert.NoError(t, err) - assert.True(t, medium.IsDir("mydir")) + assert.True(t, localMedium.IsDir("mydir")) - assert.False(t, medium.IsDir("nonexistent")) + assert.False(t, localMedium.IsDir("nonexistent")) } func TestLocal_ReadStream_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) content := "streaming content" - err := m.Write("stream.txt", content) + err := localMedium.Write("stream.txt", content) assert.NoError(t, err) - reader, err := m.ReadStream("stream.txt") + reader, err := localMedium.ReadStream("stream.txt") assert.NoError(t, err) defer reader.Close() @@ -387,9 +387,9 @@ func TestLocal_ReadStream_Basic_Good(t 
*testing.T) { func TestLocal_WriteStream_Basic_Good(t *testing.T) { root := t.TempDir() - m, _ := New(root) + localMedium, _ := New(root) - writer, err := m.WriteStream("output.txt") + writer, err := localMedium.WriteStream("output.txt") assert.NoError(t, err) _, err = io.Copy(writer, core.NewReader("piped data")) @@ -397,26 +397,26 @@ func TestLocal_WriteStream_Basic_Good(t *testing.T) { err = writer.Close() assert.NoError(t, err) - content, err := m.Read("output.txt") + content, err := localMedium.Read("output.txt") assert.NoError(t, err) assert.Equal(t, "piped data", content) } func TestLocal_Path_TraversalSandbox_Good(t *testing.T) { - m := &Medium{filesystemRoot: "/sandbox"} + localMedium := &Medium{filesystemRoot: "/sandbox"} - assert.Equal(t, "/sandbox/file.txt", m.sandboxedPath("../../../file.txt")) - assert.Equal(t, "/sandbox/target", m.sandboxedPath("dir/../../target")) + assert.Equal(t, "/sandbox/file.txt", localMedium.sandboxedPath("../../../file.txt")) + assert.Equal(t, "/sandbox/target", localMedium.sandboxedPath("dir/../../target")) - assert.Equal(t, "/sandbox/.ssh/id_rsa", m.sandboxedPath(".ssh/id_rsa")) - assert.Equal(t, "/sandbox/id_rsa", m.sandboxedPath(".ssh/../id_rsa")) + assert.Equal(t, "/sandbox/.ssh/id_rsa", localMedium.sandboxedPath(".ssh/id_rsa")) + assert.Equal(t, "/sandbox/id_rsa", localMedium.sandboxedPath(".ssh/../id_rsa")) - assert.Equal(t, "/sandbox/file\x00.txt", m.sandboxedPath("file\x00.txt")) + assert.Equal(t, "/sandbox/file\x00.txt", localMedium.sandboxedPath("file\x00.txt")) } func TestLocal_ValidatePath_SymlinkEscape_Bad(t *testing.T) { root := t.TempDir() - m, err := New(root) + localMedium, err := New(root) assert.NoError(t, err) outside := t.TempDir() @@ -426,48 +426,48 @@ func TestLocal_ValidatePath_SymlinkEscape_Bad(t *testing.T) { err = outsideMedium.Write(outsideFile, "secret") assert.NoError(t, err) - _, err = m.validatePath("../outside.txt") + _, err = localMedium.validatePath("../outside.txt") assert.NoError(t, err) 
linkPath := core.Path(root, "evil_link") err = syscall.Symlink(outside, linkPath) assert.NoError(t, err) - _, err = m.validatePath("evil_link/secret.txt") + _, err = localMedium.validatePath("evil_link/secret.txt") assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrPermission) - err = m.EnsureDir("inner") + err = localMedium.EnsureDir("inner") assert.NoError(t, err) innerDir := core.Path(root, "inner") nestedLink := core.Path(innerDir, "nested_evil") err = syscall.Symlink(outside, nestedLink) assert.NoError(t, err) - _, err = m.validatePath("inner/nested_evil/secret.txt") + _, err = localMedium.validatePath("inner/nested_evil/secret.txt") assert.Error(t, err) assert.ErrorIs(t, err, fs.ErrPermission) } func TestLocal_EmptyPaths_Good(t *testing.T) { root := t.TempDir() - m, err := New(root) + localMedium, err := New(root) assert.NoError(t, err) - _, err = m.Read("") + _, err = localMedium.Read("") assert.Error(t, err) - err = m.Write("", "content") + err = localMedium.Write("", "content") assert.Error(t, err) - err = m.EnsureDir("") + err = localMedium.EnsureDir("") assert.NoError(t, err) - assert.False(t, m.IsDir("")) + assert.False(t, localMedium.IsDir("")) - assert.True(t, m.Exists("")) + assert.True(t, localMedium.Exists("")) - entries, err := m.List("") + entries, err := localMedium.List("") assert.NoError(t, err) assert.NotNil(t, entries) } diff --git a/medium_test.go b/medium_test.go index f38645c..9417d49 100644 --- a/medium_test.go +++ b/medium_test.go @@ -11,12 +11,12 @@ import ( ) func TestMemoryMedium_NewMemoryMedium_Good(t *testing.T) { - medium := NewMemoryMedium() - assert.NotNil(t, medium) - assert.NotNil(t, medium.files) - assert.NotNil(t, medium.dirs) - assert.Empty(t, medium.files) - assert.Empty(t, medium.dirs) + memoryMedium := NewMemoryMedium() + assert.NotNil(t, memoryMedium) + assert.NotNil(t, memoryMedium.files) + assert.NotNil(t, memoryMedium.dirs) + assert.Empty(t, memoryMedium.files) + assert.Empty(t, memoryMedium.dirs) } func 
TestMemoryMedium_NewFileInfo_Good(t *testing.T) { @@ -45,124 +45,124 @@ func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { } func TestMemoryMedium_Read_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["test.txt"] = "hello world" - content, err := m.Read("test.txt") + memoryMedium := NewMemoryMedium() + memoryMedium.files["test.txt"] = "hello world" + content, err := memoryMedium.Read("test.txt") assert.NoError(t, err) assert.Equal(t, "hello world", content) } func TestMemoryMedium_Read_Bad(t *testing.T) { - m := NewMemoryMedium() - _, err := m.Read("nonexistent.txt") + memoryMedium := NewMemoryMedium() + _, err := memoryMedium.Read("nonexistent.txt") assert.Error(t, err) } func TestMemoryMedium_Write_Good(t *testing.T) { - m := NewMemoryMedium() - err := m.Write("test.txt", "content") + memoryMedium := NewMemoryMedium() + err := memoryMedium.Write("test.txt", "content") assert.NoError(t, err) - assert.Equal(t, "content", m.files["test.txt"]) + assert.Equal(t, "content", memoryMedium.files["test.txt"]) - err = m.Write("test.txt", "new content") + err = memoryMedium.Write("test.txt", "new content") assert.NoError(t, err) - assert.Equal(t, "new content", m.files["test.txt"]) + assert.Equal(t, "new content", memoryMedium.files["test.txt"]) } func TestMemoryMedium_WriteMode_Good(t *testing.T) { - m := NewMemoryMedium() + memoryMedium := NewMemoryMedium() - err := m.WriteMode("secure.txt", "secret", 0600) + err := memoryMedium.WriteMode("secure.txt", "secret", 0600) require.NoError(t, err) - content, err := m.Read("secure.txt") + content, err := memoryMedium.Read("secure.txt") require.NoError(t, err) assert.Equal(t, "secret", content) } func TestMemoryMedium_EnsureDir_Good(t *testing.T) { - m := NewMemoryMedium() - err := m.EnsureDir("/path/to/dir") + memoryMedium := NewMemoryMedium() + err := memoryMedium.EnsureDir("/path/to/dir") assert.NoError(t, err) - assert.True(t, m.dirs["/path/to/dir"]) + assert.True(t, memoryMedium.dirs["/path/to/dir"]) } func 
TestMemoryMedium_IsFile_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["exists.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.files["exists.txt"] = "content" - assert.True(t, m.IsFile("exists.txt")) - assert.False(t, m.IsFile("nonexistent.txt")) + assert.True(t, memoryMedium.IsFile("exists.txt")) + assert.False(t, memoryMedium.IsFile("nonexistent.txt")) } func TestMemoryMedium_Delete_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["test.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.files["test.txt"] = "content" - err := m.Delete("test.txt") + err := memoryMedium.Delete("test.txt") assert.NoError(t, err) - assert.False(t, m.IsFile("test.txt")) + assert.False(t, memoryMedium.IsFile("test.txt")) } func TestMemoryMedium_Delete_NotFound_Bad(t *testing.T) { - m := NewMemoryMedium() - err := m.Delete("nonexistent.txt") + memoryMedium := NewMemoryMedium() + err := memoryMedium.Delete("nonexistent.txt") assert.Error(t, err) } func TestMemoryMedium_Delete_DirNotEmpty_Bad(t *testing.T) { - m := NewMemoryMedium() - m.dirs["mydir"] = true - m.files["mydir/file.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.dirs["mydir"] = true + memoryMedium.files["mydir/file.txt"] = "content" - err := m.Delete("mydir") + err := memoryMedium.Delete("mydir") assert.Error(t, err) } func TestMemoryMedium_DeleteAll_Good(t *testing.T) { - m := NewMemoryMedium() - m.dirs["mydir"] = true - m.dirs["mydir/subdir"] = true - m.files["mydir/file.txt"] = "content" - m.files["mydir/subdir/nested.txt"] = "nested" + memoryMedium := NewMemoryMedium() + memoryMedium.dirs["mydir"] = true + memoryMedium.dirs["mydir/subdir"] = true + memoryMedium.files["mydir/file.txt"] = "content" + memoryMedium.files["mydir/subdir/nested.txt"] = "nested" - err := m.DeleteAll("mydir") + err := memoryMedium.DeleteAll("mydir") assert.NoError(t, err) - assert.Empty(t, m.dirs) - assert.Empty(t, m.files) + assert.Empty(t, memoryMedium.dirs) + 
assert.Empty(t, memoryMedium.files) } func TestMemoryMedium_Rename_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["old.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.files["old.txt"] = "content" - err := m.Rename("old.txt", "new.txt") + err := memoryMedium.Rename("old.txt", "new.txt") assert.NoError(t, err) - assert.False(t, m.IsFile("old.txt")) - assert.True(t, m.IsFile("new.txt")) - assert.Equal(t, "content", m.files["new.txt"]) + assert.False(t, memoryMedium.IsFile("old.txt")) + assert.True(t, memoryMedium.IsFile("new.txt")) + assert.Equal(t, "content", memoryMedium.files["new.txt"]) } func TestMemoryMedium_Rename_Dir_Good(t *testing.T) { - m := NewMemoryMedium() - m.dirs["olddir"] = true - m.files["olddir/file.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.dirs["olddir"] = true + memoryMedium.files["olddir/file.txt"] = "content" - err := m.Rename("olddir", "newdir") + err := memoryMedium.Rename("olddir", "newdir") assert.NoError(t, err) - assert.False(t, m.dirs["olddir"]) - assert.True(t, m.dirs["newdir"]) - assert.Equal(t, "content", m.files["newdir/file.txt"]) + assert.False(t, memoryMedium.dirs["olddir"]) + assert.True(t, memoryMedium.dirs["newdir"]) + assert.Equal(t, "content", memoryMedium.files["newdir/file.txt"]) } func TestMemoryMedium_List_Good(t *testing.T) { - m := NewMemoryMedium() - m.dirs["mydir"] = true - m.files["mydir/file1.txt"] = "content1" - m.files["mydir/file2.txt"] = "content2" - m.dirs["mydir/subdir"] = true + memoryMedium := NewMemoryMedium() + memoryMedium.dirs["mydir"] = true + memoryMedium.files["mydir/file1.txt"] = "content1" + memoryMedium.files["mydir/file2.txt"] = "content2" + memoryMedium.dirs["mydir/subdir"] = true - entries, err := m.List("mydir") + entries, err := memoryMedium.List("mydir") assert.NoError(t, err) assert.Len(t, entries, 3) @@ -176,10 +176,10 @@ func TestMemoryMedium_List_Good(t *testing.T) { } func TestMemoryMedium_Stat_Good(t *testing.T) { - m := 
NewMemoryMedium() - m.files["test.txt"] = "hello world" + memoryMedium := NewMemoryMedium() + memoryMedium.files["test.txt"] = "hello world" - info, err := m.Stat("test.txt") + info, err := memoryMedium.Stat("test.txt") assert.NoError(t, err) assert.Equal(t, "test.txt", info.Name()) assert.Equal(t, int64(11), info.Size()) @@ -187,41 +187,41 @@ func TestMemoryMedium_Stat_Good(t *testing.T) { } func TestMemoryMedium_Stat_Dir_Good(t *testing.T) { - m := NewMemoryMedium() - m.dirs["mydir"] = true + memoryMedium := NewMemoryMedium() + memoryMedium.dirs["mydir"] = true - info, err := m.Stat("mydir") + info, err := memoryMedium.Stat("mydir") assert.NoError(t, err) assert.Equal(t, "mydir", info.Name()) assert.True(t, info.IsDir()) } func TestMemoryMedium_Exists_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["file.txt"] = "content" - m.dirs["mydir"] = true + memoryMedium := NewMemoryMedium() + memoryMedium.files["file.txt"] = "content" + memoryMedium.dirs["mydir"] = true - assert.True(t, m.Exists("file.txt")) - assert.True(t, m.Exists("mydir")) - assert.False(t, m.Exists("nonexistent")) + assert.True(t, memoryMedium.Exists("file.txt")) + assert.True(t, memoryMedium.Exists("mydir")) + assert.False(t, memoryMedium.Exists("nonexistent")) } func TestMemoryMedium_IsDir_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["file.txt"] = "content" - m.dirs["mydir"] = true + memoryMedium := NewMemoryMedium() + memoryMedium.files["file.txt"] = "content" + memoryMedium.dirs["mydir"] = true - assert.False(t, m.IsDir("file.txt")) - assert.True(t, m.IsDir("mydir")) - assert.False(t, m.IsDir("nonexistent")) + assert.False(t, memoryMedium.IsDir("file.txt")) + assert.True(t, memoryMedium.IsDir("mydir")) + assert.False(t, memoryMedium.IsDir("nonexistent")) } func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { - m := NewMemoryMedium() - require.NoError(t, m.EnsureDir("dir")) - require.NoError(t, m.Write("dir/file.txt", "alpha")) + memoryMedium := NewMemoryMedium() + 
require.NoError(t, memoryMedium.EnsureDir("dir")) + require.NoError(t, memoryMedium.Write("dir/file.txt", "alpha")) - file, err := m.Open("dir/file.txt") + file, err := memoryMedium.Open("dir/file.txt") require.NoError(t, err) info, err := file.Stat() @@ -238,7 +238,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, "alpha", string(data)) require.NoError(t, file.Close()) - entries, err := m.List("dir") + entries, err := memoryMedium.List("dir") require.NoError(t, err) require.Len(t, entries, 1) assert.Equal(t, "file.txt", entries[0].Name()) @@ -250,88 +250,88 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, "file.txt", entryInfo.Name()) assert.Equal(t, int64(5), entryInfo.Size()) - writer, err := m.Create("created.txt") + writer, err := memoryMedium.Create("created.txt") require.NoError(t, err) _, err = writer.Write([]byte("created")) require.NoError(t, err) require.NoError(t, writer.Close()) - appendWriter, err := m.Append("created.txt") + appendWriter, err := memoryMedium.Append("created.txt") require.NoError(t, err) _, err = appendWriter.Write([]byte(" later")) require.NoError(t, err) require.NoError(t, appendWriter.Close()) - reader, err := m.ReadStream("created.txt") + reader, err := memoryMedium.ReadStream("created.txt") require.NoError(t, err) streamed, err := goio.ReadAll(reader) require.NoError(t, err) assert.Equal(t, "created later", string(streamed)) require.NoError(t, reader.Close()) - writeStream, err := m.WriteStream("streamed.txt") + writeStream, err := memoryMedium.WriteStream("streamed.txt") require.NoError(t, err) _, err = writeStream.Write([]byte("stream output")) require.NoError(t, err) require.NoError(t, writeStream.Close()) - assert.Equal(t, "stream output", m.files["streamed.txt"]) + assert.Equal(t, "stream output", memoryMedium.files["streamed.txt"]) } func TestIO_Read_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["test.txt"] = "hello" - content, err := Read(m, 
"test.txt") + memoryMedium := NewMemoryMedium() + memoryMedium.files["test.txt"] = "hello" + content, err := Read(memoryMedium, "test.txt") assert.NoError(t, err) assert.Equal(t, "hello", content) } func TestIO_Write_Good(t *testing.T) { - m := NewMemoryMedium() - err := Write(m, "test.txt", "hello") + memoryMedium := NewMemoryMedium() + err := Write(memoryMedium, "test.txt", "hello") assert.NoError(t, err) - assert.Equal(t, "hello", m.files["test.txt"]) + assert.Equal(t, "hello", memoryMedium.files["test.txt"]) } func TestIO_EnsureDir_Good(t *testing.T) { - m := NewMemoryMedium() - err := EnsureDir(m, "/my/dir") + memoryMedium := NewMemoryMedium() + err := EnsureDir(memoryMedium, "/my/dir") assert.NoError(t, err) - assert.True(t, m.dirs["/my/dir"]) + assert.True(t, memoryMedium.dirs["/my/dir"]) } func TestIO_IsFile_Good(t *testing.T) { - m := NewMemoryMedium() - m.files["exists.txt"] = "content" + memoryMedium := NewMemoryMedium() + memoryMedium.files["exists.txt"] = "content" - assert.True(t, IsFile(m, "exists.txt")) - assert.False(t, IsFile(m, "nonexistent.txt")) + assert.True(t, IsFile(memoryMedium, "exists.txt")) + assert.False(t, IsFile(memoryMedium, "nonexistent.txt")) } func TestIO_NewSandboxed_Good(t *testing.T) { root := t.TempDir() - m, err := NewSandboxed(root) + memoryMedium, err := NewSandboxed(root) require.NoError(t, err) - require.NoError(t, m.Write("config/app.yaml", "port: 8080")) + require.NoError(t, memoryMedium.Write("config/app.yaml", "port: 8080")) - content, err := m.Read("config/app.yaml") + content, err := memoryMedium.Read("config/app.yaml") require.NoError(t, err) assert.Equal(t, "port: 8080", content) - assert.True(t, m.IsDir("config")) + assert.True(t, memoryMedium.IsDir("config")) } func TestIO_ReadWriteStream_Good(t *testing.T) { - m := NewMemoryMedium() + memoryMedium := NewMemoryMedium() - writer, err := WriteStream(m, "logs/run.txt") + writer, err := WriteStream(memoryMedium, "logs/run.txt") require.NoError(t, err) _, err = 
writer.Write([]byte("started")) require.NoError(t, err) require.NoError(t, writer.Close()) - reader, err := ReadStream(m, "logs/run.txt") + reader, err := ReadStream(memoryMedium, "logs/run.txt") require.NoError(t, err) data, err := goio.ReadAll(reader) require.NoError(t, err) @@ -363,6 +363,6 @@ func TestIO_Copy_Bad(t *testing.T) { func TestIO_LocalGlobal_Good(t *testing.T) { assert.NotNil(t, Local, "io.Local should be initialised") - var m = Local - assert.NotNil(t, m) + var memoryMedium = Local + assert.NotNil(t, memoryMedium) } diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 5a108b5..9c6fcec 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -10,26 +10,26 @@ import ( "github.com/stretchr/testify/require" ) -func newTestMedium(t *testing.T) *Medium { +func newTestSqliteMedium(t *testing.T) *Medium { t.Helper() - m, err := New(Options{Path: ":memory:"}) + sqliteMedium, err := New(Options{Path: ":memory:"}) require.NoError(t, err) - t.Cleanup(func() { m.Close() }) - return m + t.Cleanup(func() { sqliteMedium.Close() }) + return sqliteMedium } func TestSqlite_New_Good(t *testing.T) { - m, err := New(Options{Path: ":memory:"}) + sqliteMedium, err := New(Options{Path: ":memory:"}) require.NoError(t, err) - defer m.Close() - assert.Equal(t, "files", m.table) + defer sqliteMedium.Close() + assert.Equal(t, "files", sqliteMedium.table) } func TestSqlite_New_Options_Good(t *testing.T) { - m, err := New(Options{Path: ":memory:", Table: "custom"}) + sqliteMedium, err := New(Options{Path: ":memory:", Table: "custom"}) require.NoError(t, err) - defer m.Close() - assert.Equal(t, "custom", m.table) + defer sqliteMedium.Close() + assert.Equal(t, "custom", sqliteMedium.table) } func TestSqlite_New_EmptyPath_Bad(t *testing.T) { @@ -39,246 +39,246 @@ func TestSqlite_New_EmptyPath_Bad(t *testing.T) { } func TestSqlite_ReadWrite_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Write("hello.txt", "world") 
+ err := sqliteMedium.Write("hello.txt", "world") require.NoError(t, err) - content, err := m.Read("hello.txt") + content, err := sqliteMedium.Read("hello.txt") require.NoError(t, err) assert.Equal(t, "world", content) } func TestSqlite_ReadWrite_Overwrite_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "first")) - require.NoError(t, m.Write("file.txt", "second")) + require.NoError(t, sqliteMedium.Write("file.txt", "first")) + require.NoError(t, sqliteMedium.Write("file.txt", "second")) - content, err := m.Read("file.txt") + content, err := sqliteMedium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "second", content) } func TestSqlite_ReadWrite_NestedPath_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Write("a/b/c.txt", "nested") + err := sqliteMedium.Write("a/b/c.txt", "nested") require.NoError(t, err) - content, err := m.Read("a/b/c.txt") + content, err := sqliteMedium.Read("a/b/c.txt") require.NoError(t, err) assert.Equal(t, "nested", content) } func TestSqlite_Read_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Read("nonexistent.txt") + _, err := sqliteMedium.Read("nonexistent.txt") assert.Error(t, err) } func TestSqlite_Read_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Read("") + _, err := sqliteMedium.Read("") assert.Error(t, err) } func TestSqlite_Write_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Write("", "content") + err := sqliteMedium.Write("", "content") assert.Error(t, err) } func TestSqlite_Read_IsDirectory_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) - _, err := m.Read("mydir") + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + _, err := 
sqliteMedium.Read("mydir") assert.Error(t, err) } func TestSqlite_EnsureDir_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.EnsureDir("mydir") + err := sqliteMedium.EnsureDir("mydir") require.NoError(t, err) - assert.True(t, m.IsDir("mydir")) + assert.True(t, sqliteMedium.IsDir("mydir")) } func TestSqlite_EnsureDir_EmptyPath_Good(t *testing.T) { - m := newTestMedium(t) - err := m.EnsureDir("") + sqliteMedium := newTestSqliteMedium(t) + err := sqliteMedium.EnsureDir("") assert.NoError(t, err) } func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) - require.NoError(t, m.EnsureDir("mydir")) - assert.True(t, m.IsDir("mydir")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + assert.True(t, sqliteMedium.IsDir("mydir")) } func TestSqlite_IsFile_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "content")) - require.NoError(t, m.EnsureDir("mydir")) + require.NoError(t, sqliteMedium.Write("file.txt", "content")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) - assert.True(t, m.IsFile("file.txt")) - assert.False(t, m.IsFile("mydir")) - assert.False(t, m.IsFile("nonexistent")) - assert.False(t, m.IsFile("")) + assert.True(t, sqliteMedium.IsFile("file.txt")) + assert.False(t, sqliteMedium.IsFile("mydir")) + assert.False(t, sqliteMedium.IsFile("nonexistent")) + assert.False(t, sqliteMedium.IsFile("")) } func TestSqlite_Delete_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("to-delete.txt", "content")) - assert.True(t, m.Exists("to-delete.txt")) + require.NoError(t, sqliteMedium.Write("to-delete.txt", "content")) + assert.True(t, sqliteMedium.Exists("to-delete.txt")) - err := m.Delete("to-delete.txt") + err := 
sqliteMedium.Delete("to-delete.txt") require.NoError(t, err) - assert.False(t, m.Exists("to-delete.txt")) + assert.False(t, sqliteMedium.Exists("to-delete.txt")) } func TestSqlite_Delete_EmptyDir_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("emptydir")) - assert.True(t, m.IsDir("emptydir")) + require.NoError(t, sqliteMedium.EnsureDir("emptydir")) + assert.True(t, sqliteMedium.IsDir("emptydir")) - err := m.Delete("emptydir") + err := sqliteMedium.Delete("emptydir") require.NoError(t, err) - assert.False(t, m.IsDir("emptydir")) + assert.False(t, sqliteMedium.IsDir("emptydir")) } func TestSqlite_Delete_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Delete("nonexistent") + err := sqliteMedium.Delete("nonexistent") assert.Error(t, err) } func TestSqlite_Delete_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Delete("") + err := sqliteMedium.Delete("") assert.Error(t, err) } func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) - require.NoError(t, m.Write("mydir/file.txt", "content")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + require.NoError(t, sqliteMedium.Write("mydir/file.txt", "content")) - err := m.Delete("mydir") + err := sqliteMedium.Delete("mydir") assert.Error(t, err) } func TestSqlite_DeleteAll_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("dir/file1.txt", "a")) - require.NoError(t, m.Write("dir/sub/file2.txt", "b")) - require.NoError(t, m.Write("other.txt", "c")) + require.NoError(t, sqliteMedium.Write("dir/file1.txt", "a")) + require.NoError(t, sqliteMedium.Write("dir/sub/file2.txt", "b")) + require.NoError(t, sqliteMedium.Write("other.txt", "c")) - err := m.DeleteAll("dir") + err := 
sqliteMedium.DeleteAll("dir") require.NoError(t, err) - assert.False(t, m.Exists("dir/file1.txt")) - assert.False(t, m.Exists("dir/sub/file2.txt")) - assert.True(t, m.Exists("other.txt")) + assert.False(t, sqliteMedium.Exists("dir/file1.txt")) + assert.False(t, sqliteMedium.Exists("dir/sub/file2.txt")) + assert.True(t, sqliteMedium.Exists("other.txt")) } func TestSqlite_DeleteAll_SingleFile_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "content")) + require.NoError(t, sqliteMedium.Write("file.txt", "content")) - err := m.DeleteAll("file.txt") + err := sqliteMedium.DeleteAll("file.txt") require.NoError(t, err) - assert.False(t, m.Exists("file.txt")) + assert.False(t, sqliteMedium.Exists("file.txt")) } func TestSqlite_DeleteAll_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.DeleteAll("nonexistent") + err := sqliteMedium.DeleteAll("nonexistent") assert.Error(t, err) } func TestSqlite_DeleteAll_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.DeleteAll("") + err := sqliteMedium.DeleteAll("") assert.Error(t, err) } func TestSqlite_Rename_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("old.txt", "content")) + require.NoError(t, sqliteMedium.Write("old.txt", "content")) - err := m.Rename("old.txt", "new.txt") + err := sqliteMedium.Rename("old.txt", "new.txt") require.NoError(t, err) - assert.False(t, m.Exists("old.txt")) - assert.True(t, m.IsFile("new.txt")) + assert.False(t, sqliteMedium.Exists("old.txt")) + assert.True(t, sqliteMedium.IsFile("new.txt")) - content, err := m.Read("new.txt") + content, err := sqliteMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "content", content) } func TestSqlite_Rename_Directory_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := 
newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("olddir")) - require.NoError(t, m.Write("olddir/file.txt", "content")) + require.NoError(t, sqliteMedium.EnsureDir("olddir")) + require.NoError(t, sqliteMedium.Write("olddir/file.txt", "content")) - err := m.Rename("olddir", "newdir") + err := sqliteMedium.Rename("olddir", "newdir") require.NoError(t, err) - assert.False(t, m.Exists("olddir")) - assert.False(t, m.Exists("olddir/file.txt")) - assert.True(t, m.IsDir("newdir")) - assert.True(t, m.IsFile("newdir/file.txt")) + assert.False(t, sqliteMedium.Exists("olddir")) + assert.False(t, sqliteMedium.Exists("olddir/file.txt")) + assert.True(t, sqliteMedium.IsDir("newdir")) + assert.True(t, sqliteMedium.IsFile("newdir/file.txt")) - content, err := m.Read("newdir/file.txt") + content, err := sqliteMedium.Read("newdir/file.txt") require.NoError(t, err) assert.Equal(t, "content", content) } func TestSqlite_Rename_SourceNotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Rename("nonexistent", "new") + err := sqliteMedium.Rename("nonexistent", "new") assert.Error(t, err) } func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - err := m.Rename("", "new") + err := sqliteMedium.Rename("", "new") assert.Error(t, err) - err = m.Rename("old", "") + err = sqliteMedium.Rename("old", "") assert.Error(t, err) } func TestSqlite_List_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("dir/file1.txt", "a")) - require.NoError(t, m.Write("dir/file2.txt", "b")) - require.NoError(t, m.Write("dir/sub/file3.txt", "c")) + require.NoError(t, sqliteMedium.Write("dir/file1.txt", "a")) + require.NoError(t, sqliteMedium.Write("dir/file2.txt", "b")) + require.NoError(t, sqliteMedium.Write("dir/sub/file3.txt", "c")) - entries, err := m.List("dir") + entries, err := sqliteMedium.List("dir") require.NoError(t, err) 
names := make(map[string]bool) @@ -293,12 +293,12 @@ func TestSqlite_List_Good(t *testing.T) { } func TestSqlite_List_Root_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("root.txt", "content")) - require.NoError(t, m.Write("dir/nested.txt", "nested")) + require.NoError(t, sqliteMedium.Write("root.txt", "content")) + require.NoError(t, sqliteMedium.Write("dir/nested.txt", "nested")) - entries, err := m.List("") + entries, err := sqliteMedium.List("") require.NoError(t, err) names := make(map[string]bool) @@ -311,11 +311,11 @@ func TestSqlite_List_Root_Good(t *testing.T) { } func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("dir/sub/file.txt", "content")) + require.NoError(t, sqliteMedium.Write("dir/sub/file.txt", "content")) - entries, err := m.List("dir") + entries, err := sqliteMedium.List("dir") require.NoError(t, err) require.Len(t, entries, 1) @@ -328,11 +328,11 @@ func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { } func TestSqlite_Stat_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "hello world")) + require.NoError(t, sqliteMedium.Write("file.txt", "hello world")) - info, err := m.Stat("file.txt") + info, err := sqliteMedium.Stat("file.txt") require.NoError(t, err) assert.Equal(t, "file.txt", info.Name()) assert.Equal(t, int64(11), info.Size()) @@ -340,36 +340,36 @@ func TestSqlite_Stat_Good(t *testing.T) { } func TestSqlite_Stat_Directory_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) - info, err := m.Stat("mydir") + info, err := sqliteMedium.Stat("mydir") require.NoError(t, err) assert.Equal(t, "mydir", info.Name()) assert.True(t, info.IsDir()) } func 
TestSqlite_Stat_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Stat("nonexistent") + _, err := sqliteMedium.Stat("nonexistent") assert.Error(t, err) } func TestSqlite_Stat_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Stat("") + _, err := sqliteMedium.Stat("") assert.Error(t, err) } func TestSqlite_Open_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "open me")) + require.NoError(t, sqliteMedium.Write("file.txt", "open me")) - f, err := m.Open("file.txt") + f, err := sqliteMedium.Open("file.txt") require.NoError(t, err) defer f.Close() @@ -383,24 +383,24 @@ func TestSqlite_Open_Good(t *testing.T) { } func TestSqlite_Open_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Open("nonexistent.txt") + _, err := sqliteMedium.Open("nonexistent.txt") assert.Error(t, err) } func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) - _, err := m.Open("mydir") + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + _, err := sqliteMedium.Open("mydir") assert.Error(t, err) } func TestSqlite_Create_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - w, err := m.Create("new.txt") + w, err := sqliteMedium.Create("new.txt") require.NoError(t, err) n, err := w.Write([]byte("created")) @@ -410,79 +410,79 @@ func TestSqlite_Create_Good(t *testing.T) { err = w.Close() require.NoError(t, err) - content, err := m.Read("new.txt") + content, err := sqliteMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "created", content) } func TestSqlite_Create_Overwrite_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", 
"old content")) + require.NoError(t, sqliteMedium.Write("file.txt", "old content")) - w, err := m.Create("file.txt") + w, err := sqliteMedium.Create("file.txt") require.NoError(t, err) _, err = w.Write([]byte("new")) require.NoError(t, err) require.NoError(t, w.Close()) - content, err := m.Read("file.txt") + content, err := sqliteMedium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "new", content) } func TestSqlite_Create_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Create("") + _, err := sqliteMedium.Create("") assert.Error(t, err) } func TestSqlite_Append_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("append.txt", "hello")) + require.NoError(t, sqliteMedium.Write("append.txt", "hello")) - w, err := m.Append("append.txt") + w, err := sqliteMedium.Append("append.txt") require.NoError(t, err) _, err = w.Write([]byte(" world")) require.NoError(t, err) require.NoError(t, w.Close()) - content, err := m.Read("append.txt") + content, err := sqliteMedium.Read("append.txt") require.NoError(t, err) assert.Equal(t, "hello world", content) } func TestSqlite_Append_NewFile_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - w, err := m.Append("new.txt") + w, err := sqliteMedium.Append("new.txt") require.NoError(t, err) _, err = w.Write([]byte("fresh")) require.NoError(t, err) require.NoError(t, w.Close()) - content, err := m.Read("new.txt") + content, err := sqliteMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "fresh", content) } func TestSqlite_Append_EmptyPath_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.Append("") + _, err := sqliteMedium.Append("") assert.Error(t, err) } func TestSqlite_ReadStream_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("stream.txt", 
"streaming content")) + require.NoError(t, sqliteMedium.Write("stream.txt", "streaming content")) - reader, err := m.ReadStream("stream.txt") + reader, err := sqliteMedium.ReadStream("stream.txt") require.NoError(t, err) defer reader.Close() @@ -492,62 +492,62 @@ func TestSqlite_ReadStream_Good(t *testing.T) { } func TestSqlite_ReadStream_NotFound_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - _, err := m.ReadStream("nonexistent.txt") + _, err := sqliteMedium.ReadStream("nonexistent.txt") assert.Error(t, err) } func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.EnsureDir("mydir")) - _, err := m.ReadStream("mydir") + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + _, err := sqliteMedium.ReadStream("mydir") assert.Error(t, err) } func TestSqlite_WriteStream_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - writer, err := m.WriteStream("output.txt") + writer, err := sqliteMedium.WriteStream("output.txt") require.NoError(t, err) _, err = goio.Copy(writer, core.NewReader("piped data")) require.NoError(t, err) require.NoError(t, writer.Close()) - content, err := m.Read("output.txt") + content, err := sqliteMedium.Read("output.txt") require.NoError(t, err) assert.Equal(t, "piped data", content) } func TestSqlite_Exists_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - assert.False(t, m.Exists("nonexistent")) + assert.False(t, sqliteMedium.Exists("nonexistent")) - require.NoError(t, m.Write("file.txt", "content")) - assert.True(t, m.Exists("file.txt")) + require.NoError(t, sqliteMedium.Write("file.txt", "content")) + assert.True(t, sqliteMedium.Exists("file.txt")) - require.NoError(t, m.EnsureDir("mydir")) - assert.True(t, m.Exists("mydir")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) + assert.True(t, sqliteMedium.Exists("mydir")) } func 
TestSqlite_Exists_EmptyPath_Good(t *testing.T) { - m := newTestMedium(t) - assert.True(t, m.Exists("")) + sqliteMedium := newTestSqliteMedium(t) + assert.True(t, sqliteMedium.Exists("")) } func TestSqlite_IsDir_Good(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) - require.NoError(t, m.Write("file.txt", "content")) - require.NoError(t, m.EnsureDir("mydir")) + require.NoError(t, sqliteMedium.Write("file.txt", "content")) + require.NoError(t, sqliteMedium.EnsureDir("mydir")) - assert.True(t, m.IsDir("mydir")) - assert.False(t, m.IsDir("file.txt")) - assert.False(t, m.IsDir("nonexistent")) - assert.False(t, m.IsDir("")) + assert.True(t, sqliteMedium.IsDir("mydir")) + assert.False(t, sqliteMedium.IsDir("file.txt")) + assert.False(t, sqliteMedium.IsDir("nonexistent")) + assert.False(t, sqliteMedium.IsDir("")) } func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { @@ -562,7 +562,7 @@ func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { } func TestSqlite_InterfaceCompliance(t *testing.T) { - m := newTestMedium(t) + sqliteMedium := newTestSqliteMedium(t) var _ interface { Read(string) (string, error) @@ -581,17 +581,17 @@ func TestSqlite_InterfaceCompliance(t *testing.T) { WriteStream(string) (goio.WriteCloser, error) Exists(string) bool IsDir(string) bool - } = m + } = sqliteMedium } func TestSqlite_CustomTable_Good(t *testing.T) { - m, err := New(Options{Path: ":memory:", Table: "my_files"}) + sqliteMedium, err := New(Options{Path: ":memory:", Table: "my_files"}) require.NoError(t, err) - defer m.Close() + defer sqliteMedium.Close() - require.NoError(t, m.Write("file.txt", "content")) + require.NoError(t, sqliteMedium.Write("file.txt", "content")) - content, err := m.Read("file.txt") + content, err := sqliteMedium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "content", content) } diff --git a/store/medium_test.go b/store/medium_test.go index f448794..07e6f5d 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ 
-11,90 +11,90 @@ import ( func newTestKeyValueMedium(t *testing.T) *Medium { t.Helper() - medium, err := NewMedium(Options{Path: ":memory:"}) + keyValueMedium, err := NewMedium(Options{Path: ":memory:"}) require.NoError(t, err) - t.Cleanup(func() { medium.Close() }) - return medium + t.Cleanup(func() { keyValueMedium.Close() }) + return keyValueMedium } func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { - m := newTestKeyValueMedium(t) + keyValueMedium := newTestKeyValueMedium(t) - err := m.Write("config/theme", "dark") + err := keyValueMedium.Write("config/theme", "dark") require.NoError(t, err) - value, err := m.Read("config/theme") + value, err := keyValueMedium.Read("config/theme") require.NoError(t, err) assert.Equal(t, "dark", value) } func TestKeyValueMedium_Read_NoKey_Bad(t *testing.T) { - m := newTestKeyValueMedium(t) - _, err := m.Read("config") + keyValueMedium := newTestKeyValueMedium(t) + _, err := keyValueMedium.Read("config") assert.Error(t, err) } func TestKeyValueMedium_Read_NotFound_Bad(t *testing.T) { - m := newTestKeyValueMedium(t) - _, err := m.Read("config/missing") + keyValueMedium := newTestKeyValueMedium(t) + _, err := keyValueMedium.Read("config/missing") assert.Error(t, err) } func TestKeyValueMedium_IsFile_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "val") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "val") - assert.True(t, m.IsFile("group/key")) - assert.False(t, m.IsFile("group/nope")) - assert.False(t, m.IsFile("group")) + assert.True(t, keyValueMedium.IsFile("group/key")) + assert.False(t, keyValueMedium.IsFile("group/nope")) + assert.False(t, keyValueMedium.IsFile("group")) } func TestKeyValueMedium_Delete_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "val") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "val") - err := m.Delete("group/key") + err := keyValueMedium.Delete("group/key") 
require.NoError(t, err) - assert.False(t, m.IsFile("group/key")) + assert.False(t, keyValueMedium.IsFile("group/key")) } func TestKeyValueMedium_Delete_NonEmptyGroup_Bad(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "val") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "val") - err := m.Delete("group") + err := keyValueMedium.Delete("group") assert.Error(t, err) } func TestKeyValueMedium_DeleteAll_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/a", "1") - _ = m.Write("group/b", "2") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/a", "1") + _ = keyValueMedium.Write("group/b", "2") - err := m.DeleteAll("group") + err := keyValueMedium.DeleteAll("group") require.NoError(t, err) - assert.False(t, m.Exists("group")) + assert.False(t, keyValueMedium.Exists("group")) } func TestKeyValueMedium_Rename_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("old/key", "val") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("old/key", "val") - err := m.Rename("old/key", "new/key") + err := keyValueMedium.Rename("old/key", "new/key") require.NoError(t, err) - value, err := m.Read("new/key") + value, err := keyValueMedium.Read("new/key") require.NoError(t, err) assert.Equal(t, "val", value) - assert.False(t, m.IsFile("old/key")) + assert.False(t, keyValueMedium.IsFile("old/key")) } func TestKeyValueMedium_List_Groups_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("alpha/a", "1") - _ = m.Write("beta/b", "2") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("alpha/a", "1") + _ = keyValueMedium.Write("beta/b", "2") - entries, err := m.List("") + entries, err := keyValueMedium.List("") require.NoError(t, err) assert.Len(t, entries, 2) @@ -108,45 +108,45 @@ func TestKeyValueMedium_List_Groups_Good(t *testing.T) { } func TestKeyValueMedium_List_Keys_Good(t *testing.T) { - m := 
newTestKeyValueMedium(t) - _ = m.Write("group/a", "1") - _ = m.Write("group/b", "22") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/a", "1") + _ = keyValueMedium.Write("group/b", "22") - entries, err := m.List("group") + entries, err := keyValueMedium.List("group") require.NoError(t, err) assert.Len(t, entries, 2) } func TestKeyValueMedium_Stat_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "hello") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "hello") - info, err := m.Stat("group") + info, err := keyValueMedium.Stat("group") require.NoError(t, err) assert.True(t, info.IsDir()) - info, err = m.Stat("group/key") + info, err = keyValueMedium.Stat("group/key") require.NoError(t, err) assert.Equal(t, int64(5), info.Size()) assert.False(t, info.IsDir()) } func TestKeyValueMedium_Exists_IsDir_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "val") - - assert.True(t, m.Exists("group")) - assert.True(t, m.Exists("group/key")) - assert.True(t, m.IsDir("group")) - assert.False(t, m.IsDir("group/key")) - assert.False(t, m.Exists("nope")) + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "val") + + assert.True(t, keyValueMedium.Exists("group")) + assert.True(t, keyValueMedium.Exists("group/key")) + assert.True(t, keyValueMedium.IsDir("group")) + assert.False(t, keyValueMedium.IsDir("group/key")) + assert.False(t, keyValueMedium.Exists("nope")) } func TestKeyValueMedium_Open_Read_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "hello world") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "hello world") - f, err := m.Open("group/key") + f, err := keyValueMedium.Open("group/key") require.NoError(t, err) defer f.Close() @@ -156,28 +156,28 @@ func TestKeyValueMedium_Open_Read_Good(t *testing.T) { } func TestKeyValueMedium_CreateClose_Good(t 
*testing.T) { - m := newTestKeyValueMedium(t) + keyValueMedium := newTestKeyValueMedium(t) - w, err := m.Create("group/key") + w, err := keyValueMedium.Create("group/key") require.NoError(t, err) _, _ = w.Write([]byte("streamed")) require.NoError(t, w.Close()) - value, err := m.Read("group/key") + value, err := keyValueMedium.Read("group/key") require.NoError(t, err) assert.Equal(t, "streamed", value) } func TestKeyValueMedium_Append_Good(t *testing.T) { - m := newTestKeyValueMedium(t) - _ = m.Write("group/key", "hello") + keyValueMedium := newTestKeyValueMedium(t) + _ = keyValueMedium.Write("group/key", "hello") - w, err := m.Append("group/key") + w, err := keyValueMedium.Append("group/key") require.NoError(t, err) _, _ = w.Write([]byte(" world")) require.NoError(t, w.Close()) - value, err := m.Read("group/key") + value, err := keyValueMedium.Read("group/key") require.NoError(t, err) assert.Equal(t, "hello world", value) } @@ -185,53 +185,53 @@ func TestKeyValueMedium_Append_Good(t *testing.T) { func TestKeyValueMedium_AsMedium_Good(t *testing.T) { keyValueStore := newTestKeyValueStore(t) - m := keyValueStore.AsMedium() - require.NoError(t, m.Write("group/key", "val")) + keyValueMedium := keyValueStore.AsMedium() + require.NoError(t, keyValueMedium.Write("group/key", "val")) value, err := keyValueStore.Get("group", "key") require.NoError(t, err) assert.Equal(t, "val", value) - value, err = m.Read("group/key") + value, err = keyValueMedium.Read("group/key") require.NoError(t, err) assert.Equal(t, "val", value) } func TestKeyValueMedium_KeyValueStore_Good(t *testing.T) { - m := newTestKeyValueMedium(t) + keyValueMedium := newTestKeyValueMedium(t) - assert.NotNil(t, m.KeyValueStore()) - assert.Same(t, m.KeyValueStore(), m.KeyValueStore()) + assert.NotNil(t, keyValueMedium.KeyValueStore()) + assert.Same(t, keyValueMedium.KeyValueStore(), keyValueMedium.KeyValueStore()) } func TestKeyValueMedium_EnsureDir_ReadWrite_Good(t *testing.T) { - m := newTestKeyValueMedium(t) + 
keyValueMedium := newTestKeyValueMedium(t) - require.NoError(t, m.EnsureDir("ignored")) - require.NoError(t, m.Write("group/key", "value")) + require.NoError(t, keyValueMedium.EnsureDir("ignored")) + require.NoError(t, keyValueMedium.Write("group/key", "value")) - value, err := m.Read("group/key") + value, err := keyValueMedium.Read("group/key") require.NoError(t, err) assert.Equal(t, "value", value) } func TestKeyValueMedium_StreamHelpers_Good(t *testing.T) { - m := newTestKeyValueMedium(t) + keyValueMedium := newTestKeyValueMedium(t) - writer, err := m.WriteStream("group/key") + writer, err := keyValueMedium.WriteStream("group/key") require.NoError(t, err) _, err = writer.Write([]byte("streamed")) require.NoError(t, err) require.NoError(t, writer.Close()) - reader, err := m.ReadStream("group/key") + reader, err := keyValueMedium.ReadStream("group/key") require.NoError(t, err) data, err := io.ReadAll(reader) require.NoError(t, err) assert.Equal(t, "streamed", string(data)) require.NoError(t, reader.Close()) - file, err := m.Open("group/key") + file, err := keyValueMedium.Open("group/key") require.NoError(t, err) info, err := file.Stat() require.NoError(t, err) @@ -243,7 +243,7 @@ func TestKeyValueMedium_StreamHelpers_Good(t *testing.T) { assert.Nil(t, info.Sys()) require.NoError(t, file.Close()) - entries, err := m.List("group") + entries, err := keyValueMedium.List("group") require.NoError(t, err) require.Len(t, entries, 1) assert.Equal(t, "key", entries[0].Name()) From db6bbb650ee7124fe891f6be332b4f3dda78459b Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:04:07 +0000 Subject: [PATCH 64/83] refactor(ax): normalize interface compliance test names Co-Authored-By: Virgil --- s3/s3_test.go | 2 +- sqlite/sqlite_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/s3/s3_test.go b/s3/s3_test.go index d18af55..0ce8e06 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -627,7 +627,7 @@ func TestS3_ObjectKey_Good(t *testing.T) { 
assert.Equal(t, "pfx/", m2.objectKey("")) } -func TestS3_InterfaceCompliance(t *testing.T) { +func TestS3_InterfaceCompliance_Good(t *testing.T) { mock := newMockS3() m, err := New(Options{Bucket: "bucket", Client: mock}) require.NoError(t, err) diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index 9c6fcec..ab90121 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -561,7 +561,7 @@ func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { assert.Equal(t, "", normaliseEntryPath("/")) } -func TestSqlite_InterfaceCompliance(t *testing.T) { +func TestSqlite_InterfaceCompliance_Good(t *testing.T) { sqliteMedium := newTestSqliteMedium(t) var _ interface { From bf4ba4141df65aa5536b32fb78af0c87b0fc2d8d Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:08:24 +0000 Subject: [PATCH 65/83] refactor(ax): demote internal memory helpers and document sigil errors Co-authored-by: Virgil --- io.go | 25 ++++++++++--------------- sigil/crypto_sigil.go | 4 ++++ sigil/sigil.go | 1 + 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/io.go b/io.go index 5c5bcc8..47ed353 100644 --- a/io.go +++ b/io.go @@ -355,14 +355,14 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { if !ok { return nil, core.E("io.MemoryMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } - return &MemoryFile{ + return &memoryFile{ name: core.PathBase(path), content: []byte(content), }, nil } func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { - return &MemoryWriteCloser{ + return &memoryWriteCloser{ medium: medium, path: path, }, nil @@ -370,7 +370,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { content := medium.files[path] - return &MemoryWriteCloser{ + return &memoryWriteCloser{ medium: medium, path: path, data: []byte(content), @@ -385,20 +385,17 @@ func (medium *MemoryMedium) WriteStream(path 
string) (goio.WriteCloser, error) { return medium.Create(path) } -// Example: medium := io.NewMemoryMedium() -// Example: _ = medium.Write("config/app.yaml", "port: 8080") -// Example: file, _ := medium.Open("config/app.yaml") -type MemoryFile struct { +type memoryFile struct { name string content []byte offset int64 } -func (file *MemoryFile) Stat() (fs.FileInfo, error) { +func (file *memoryFile) Stat() (fs.FileInfo, error) { return NewFileInfo(file.name, int64(len(file.content)), 0, time.Time{}, false), nil } -func (file *MemoryFile) Read(buffer []byte) (int, error) { +func (file *memoryFile) Read(buffer []byte) (int, error) { if file.offset >= int64(len(file.content)) { return 0, goio.EOF } @@ -407,24 +404,22 @@ func (file *MemoryFile) Read(buffer []byte) (int, error) { return readCount, nil } -func (file *MemoryFile) Close() error { +func (file *memoryFile) Close() error { return nil } -// Example: medium := io.NewMemoryMedium() -// Example: writer, _ := medium.Create("logs/app.log") -type MemoryWriteCloser struct { +type memoryWriteCloser struct { medium *MemoryMedium path string data []byte } -func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { +func (writeCloser *memoryWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil } -func (writeCloser *MemoryWriteCloser) Close() error { +func (writeCloser *memoryWriteCloser) Close() error { writeCloser.medium.files[writeCloser.path] = string(writeCloser.data) writeCloser.medium.modTimes[writeCloser.path] = time.Now() return nil diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index e88e657..8e6dfd3 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -14,12 +14,16 @@ import ( ) var ( + // Example: errors.Is(err, sigil.InvalidKeyError) InvalidKeyError = core.E("sigil.InvalidKeyError", "invalid key size, must be 32 bytes", nil) + // Example: errors.Is(err, sigil.CiphertextTooShortError) CiphertextTooShortError = core.E("sigil.CiphertextTooShortError", "ciphertext too short", nil) + // Example: errors.Is(err, sigil.DecryptionFailedError) DecryptionFailedError = core.E("sigil.DecryptionFailedError", "decryption failed", nil) + // Example: errors.Is(err, sigil.NoKeyConfiguredError) NoKeyConfiguredError = core.E("sigil.NoKeyConfiguredError", "no encryption key configured", nil) ) diff --git a/sigil/sigil.go b/sigil/sigil.go index f760fd0..41d73be 100644 --- a/sigil/sigil.go +++ b/sigil/sigil.go @@ -6,6 +6,7 @@ package sigil import core "dappco.re/go/core" +// Example: var transform sigil.Sigil = &sigil.HexSigil{} type Sigil interface { // Example: encoded, _ := hexSigil.In([]byte("payload")) In(data []byte) ([]byte, error) From ede0c8bb496f7e3ccb3aa4e09425da6922b20ef9 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:13:15 +0000 Subject: [PATCH 66/83] refactor(ax): rename remaining test helpers and examples Co-Authored-By: Virgil --- docs/development.md | 6 +-- s3/s3_test.go | 62 ++++++++++++------------- sqlite/sqlite_test.go | 98 +++++++++++++++++++-------------------- store/medium_test.go | 40 ++++++++-------- store/store_test.go | 20 ++++---- workspace/service_test.go | 10 ++-- 6 files changed, 118 insertions(+), 118 deletions(-) diff --git a/docs/development.md b/docs/development.md 
index d060e40..d23b72d 100644 --- a/docs/development.md +++ b/docs/development.md @@ -105,10 +105,10 @@ func TestMyFeature(t *testing.T) { } ``` -For tests that need a real but ephemeral filesystem, use `local.New` with `t.TempDir()`: +For tests that need a temporary filesystem, use `local.New` with `t.TempDir()`: ```go -func TestWithRealFS(t *testing.T) { +func TestLocalMedium_RoundTrip_Good(t *testing.T) { localMedium, err := local.New(t.TempDir()) require.NoError(t, err) @@ -121,7 +121,7 @@ func TestWithRealFS(t *testing.T) { For SQLite-backed tests, use `:memory:`: ```go -func TestWithSQLite(t *testing.T) { +func TestSqliteMedium_RoundTrip_Good(t *testing.T) { sqliteMedium, err := sqlite.New(sqlite.Options{Path: ":memory:"}) require.NoError(t, err) defer sqliteMedium.Close() diff --git a/s3/s3_test.go b/s3/s3_test.go index 0ce8e06..7cfc42e 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -199,7 +199,7 @@ func (m *mockS3) CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ return &awss3.CopyObjectOutput{}, nil } -func newTestMedium(t *testing.T) (*Medium, *mockS3) { +func newS3Medium(t *testing.T) (*Medium, *mockS3) { t.Helper() mock := newMockS3() m, err := New(Options{Bucket: "test-bucket", Client: mock}) @@ -239,7 +239,7 @@ func TestS3_New_Options_Good(t *testing.T) { } func TestS3_ReadWrite_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.Write("hello.txt", "world") require.NoError(t, err) @@ -250,14 +250,14 @@ func TestS3_ReadWrite_Good(t *testing.T) { } func TestS3_ReadWrite_NotFound_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.Read("nonexistent.txt") assert.Error(t, err) } func TestS3_ReadWrite_EmptyPath_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.Read("") assert.Error(t, err) @@ -283,13 +283,13 @@ func TestS3_ReadWrite_Prefix_Good(t *testing.T) { } func TestS3_EnsureDir_Good(t *testing.T) { - medium, _ := newTestMedium(t) + medium, 
_ := newS3Medium(t) err := medium.EnsureDir("any/path") assert.NoError(t, err) } func TestS3_IsFile_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.Write("file.txt", "content") require.NoError(t, err) @@ -300,7 +300,7 @@ func TestS3_IsFile_Good(t *testing.T) { } func TestS3_Delete_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.Write("to-delete.txt", "content") require.NoError(t, err) @@ -312,13 +312,13 @@ func TestS3_Delete_Good(t *testing.T) { } func TestS3_Delete_EmptyPath_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.Delete("") assert.Error(t, err) } func TestS3_DeleteAll_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) require.NoError(t, m.Write("dir/sub/file2.txt", "b")) @@ -333,13 +333,13 @@ func TestS3_DeleteAll_Good(t *testing.T) { } func TestS3_DeleteAll_EmptyPath_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.DeleteAll("") assert.Error(t, err) } func TestS3_DeleteAll_DeleteObjectError_Bad(t *testing.T) { - m, mock := newTestMedium(t) + m, mock := newS3Medium(t) mock.deleteObjectErrors["dir"] = core.NewError("boom") err := m.DeleteAll("dir") @@ -348,7 +348,7 @@ func TestS3_DeleteAll_DeleteObjectError_Bad(t *testing.T) { } func TestS3_DeleteAll_PartialDelete_Bad(t *testing.T) { - m, mock := newTestMedium(t) + m, mock := newS3Medium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) require.NoError(t, m.Write("dir/file2.txt", "b")) @@ -367,7 +367,7 @@ func TestS3_DeleteAll_PartialDelete_Bad(t *testing.T) { } func TestS3_Rename_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("old.txt", "content")) assert.True(t, m.IsFile("old.txt")) @@ -384,7 +384,7 @@ func TestS3_Rename_Good(t *testing.T) { } func TestS3_Rename_EmptyPath_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err 
:= m.Rename("", "new.txt") assert.Error(t, err) @@ -393,13 +393,13 @@ func TestS3_Rename_EmptyPath_Bad(t *testing.T) { } func TestS3_Rename_SourceNotFound_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) err := m.Rename("nonexistent.txt", "new.txt") assert.Error(t, err) } func TestS3_List_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("dir/file1.txt", "a")) require.NoError(t, m.Write("dir/file2.txt", "b")) @@ -429,7 +429,7 @@ func TestS3_List_Good(t *testing.T) { } func TestS3_List_Root_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("root.txt", "content")) require.NoError(t, m.Write("dir/nested.txt", "nested")) @@ -447,7 +447,7 @@ func TestS3_List_Root_Good(t *testing.T) { } func TestS3_Stat_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("file.txt", "hello world")) @@ -459,20 +459,20 @@ func TestS3_Stat_Good(t *testing.T) { } func TestS3_Stat_NotFound_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.Stat("nonexistent.txt") assert.Error(t, err) } func TestS3_Stat_EmptyPath_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.Stat("") assert.Error(t, err) } func TestS3_Open_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("file.txt", "open me")) @@ -490,14 +490,14 @@ func TestS3_Open_Good(t *testing.T) { } func TestS3_Open_NotFound_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.Open("nonexistent.txt") assert.Error(t, err) } func TestS3_Create_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) w, err := m.Create("new.txt") require.NoError(t, err) @@ -515,7 +515,7 @@ func TestS3_Create_Good(t *testing.T) { } func TestS3_Append_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, 
m.Write("append.txt", "hello")) @@ -533,7 +533,7 @@ func TestS3_Append_Good(t *testing.T) { } func TestS3_Append_NewFile_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) w, err := m.Append("new.txt") require.NoError(t, err) @@ -549,7 +549,7 @@ func TestS3_Append_NewFile_Good(t *testing.T) { } func TestS3_ReadStream_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("stream.txt", "streaming content")) @@ -563,13 +563,13 @@ func TestS3_ReadStream_Good(t *testing.T) { } func TestS3_ReadStream_NotFound_Bad(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) _, err := m.ReadStream("nonexistent.txt") assert.Error(t, err) } func TestS3_WriteStream_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) writer, err := m.WriteStream("output.txt") require.NoError(t, err) @@ -585,7 +585,7 @@ func TestS3_WriteStream_Good(t *testing.T) { } func TestS3_Exists_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) assert.False(t, m.Exists("nonexistent.txt")) @@ -594,14 +594,14 @@ func TestS3_Exists_Good(t *testing.T) { } func TestS3_Exists_DirectoryPrefix_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("dir/file.txt", "content")) assert.True(t, m.Exists("dir")) } func TestS3_IsDir_Good(t *testing.T) { - m, _ := newTestMedium(t) + m, _ := newS3Medium(t) require.NoError(t, m.Write("dir/file.txt", "content")) diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go index ab90121..e0f1bc8 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func newTestSqliteMedium(t *testing.T) *Medium { +func newSqliteMedium(t *testing.T) *Medium { t.Helper() sqliteMedium, err := New(Options{Path: ":memory:"}) require.NoError(t, err) @@ -39,7 +39,7 @@ func TestSqlite_New_EmptyPath_Bad(t *testing.T) { } func TestSqlite_ReadWrite_Good(t *testing.T) 
{ - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Write("hello.txt", "world") require.NoError(t, err) @@ -50,7 +50,7 @@ func TestSqlite_ReadWrite_Good(t *testing.T) { } func TestSqlite_ReadWrite_Overwrite_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "first")) require.NoError(t, sqliteMedium.Write("file.txt", "second")) @@ -61,7 +61,7 @@ func TestSqlite_ReadWrite_Overwrite_Good(t *testing.T) { } func TestSqlite_ReadWrite_NestedPath_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Write("a/b/c.txt", "nested") require.NoError(t, err) @@ -72,28 +72,28 @@ func TestSqlite_ReadWrite_NestedPath_Good(t *testing.T) { } func TestSqlite_Read_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Read("nonexistent.txt") assert.Error(t, err) } func TestSqlite_Read_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Read("") assert.Error(t, err) } func TestSqlite_Write_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Write("", "content") assert.Error(t, err) } func TestSqlite_Read_IsDirectory_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) _, err := sqliteMedium.Read("mydir") @@ -101,7 +101,7 @@ func TestSqlite_Read_IsDirectory_Bad(t *testing.T) { } func TestSqlite_EnsureDir_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.EnsureDir("mydir") require.NoError(t, err) @@ -109,13 +109,13 @@ func TestSqlite_EnsureDir_Good(t *testing.T) { } func 
TestSqlite_EnsureDir_EmptyPath_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.EnsureDir("") assert.NoError(t, err) } func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) require.NoError(t, sqliteMedium.EnsureDir("mydir")) @@ -123,7 +123,7 @@ func TestSqlite_EnsureDir_Idempotent_Good(t *testing.T) { } func TestSqlite_IsFile_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "content")) require.NoError(t, sqliteMedium.EnsureDir("mydir")) @@ -135,7 +135,7 @@ func TestSqlite_IsFile_Good(t *testing.T) { } func TestSqlite_Delete_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("to-delete.txt", "content")) assert.True(t, sqliteMedium.Exists("to-delete.txt")) @@ -146,7 +146,7 @@ func TestSqlite_Delete_Good(t *testing.T) { } func TestSqlite_Delete_EmptyDir_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("emptydir")) assert.True(t, sqliteMedium.IsDir("emptydir")) @@ -157,21 +157,21 @@ func TestSqlite_Delete_EmptyDir_Good(t *testing.T) { } func TestSqlite_Delete_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Delete("nonexistent") assert.Error(t, err) } func TestSqlite_Delete_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Delete("") assert.Error(t, err) } func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) 
require.NoError(t, sqliteMedium.Write("mydir/file.txt", "content")) @@ -181,7 +181,7 @@ func TestSqlite_Delete_NotEmpty_Bad(t *testing.T) { } func TestSqlite_DeleteAll_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("dir/file1.txt", "a")) require.NoError(t, sqliteMedium.Write("dir/sub/file2.txt", "b")) @@ -196,7 +196,7 @@ func TestSqlite_DeleteAll_Good(t *testing.T) { } func TestSqlite_DeleteAll_SingleFile_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "content")) @@ -206,21 +206,21 @@ func TestSqlite_DeleteAll_SingleFile_Good(t *testing.T) { } func TestSqlite_DeleteAll_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.DeleteAll("nonexistent") assert.Error(t, err) } func TestSqlite_DeleteAll_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.DeleteAll("") assert.Error(t, err) } func TestSqlite_Rename_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("old.txt", "content")) @@ -236,7 +236,7 @@ func TestSqlite_Rename_Good(t *testing.T) { } func TestSqlite_Rename_Directory_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("olddir")) require.NoError(t, sqliteMedium.Write("olddir/file.txt", "content")) @@ -255,14 +255,14 @@ func TestSqlite_Rename_Directory_Good(t *testing.T) { } func TestSqlite_Rename_SourceNotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Rename("nonexistent", "new") assert.Error(t, err) } func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) { - sqliteMedium := 
newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) err := sqliteMedium.Rename("", "new") assert.Error(t, err) @@ -272,7 +272,7 @@ func TestSqlite_Rename_EmptyPath_Bad(t *testing.T) { } func TestSqlite_List_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("dir/file1.txt", "a")) require.NoError(t, sqliteMedium.Write("dir/file2.txt", "b")) @@ -293,7 +293,7 @@ func TestSqlite_List_Good(t *testing.T) { } func TestSqlite_List_Root_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("root.txt", "content")) require.NoError(t, sqliteMedium.Write("dir/nested.txt", "nested")) @@ -311,7 +311,7 @@ func TestSqlite_List_Root_Good(t *testing.T) { } func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("dir/sub/file.txt", "content")) @@ -328,7 +328,7 @@ func TestSqlite_List_DirectoryEntry_Good(t *testing.T) { } func TestSqlite_Stat_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "hello world")) @@ -340,7 +340,7 @@ func TestSqlite_Stat_Good(t *testing.T) { } func TestSqlite_Stat_Directory_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) @@ -351,21 +351,21 @@ func TestSqlite_Stat_Directory_Good(t *testing.T) { } func TestSqlite_Stat_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Stat("nonexistent") assert.Error(t, err) } func TestSqlite_Stat_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Stat("") assert.Error(t, err) } func 
TestSqlite_Open_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "open me")) @@ -383,14 +383,14 @@ func TestSqlite_Open_Good(t *testing.T) { } func TestSqlite_Open_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Open("nonexistent.txt") assert.Error(t, err) } func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) _, err := sqliteMedium.Open("mydir") @@ -398,7 +398,7 @@ func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { } func TestSqlite_Create_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) w, err := sqliteMedium.Create("new.txt") require.NoError(t, err) @@ -416,7 +416,7 @@ func TestSqlite_Create_Good(t *testing.T) { } func TestSqlite_Create_Overwrite_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "old content")) @@ -432,14 +432,14 @@ func TestSqlite_Create_Overwrite_Good(t *testing.T) { } func TestSqlite_Create_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Create("") assert.Error(t, err) } func TestSqlite_Append_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("append.txt", "hello")) @@ -456,7 +456,7 @@ func TestSqlite_Append_Good(t *testing.T) { } func TestSqlite_Append_NewFile_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) w, err := sqliteMedium.Append("new.txt") require.NoError(t, err) @@ -471,14 +471,14 @@ func TestSqlite_Append_NewFile_Good(t *testing.T) { } func 
TestSqlite_Append_EmptyPath_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.Append("") assert.Error(t, err) } func TestSqlite_ReadStream_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("stream.txt", "streaming content")) @@ -492,14 +492,14 @@ func TestSqlite_ReadStream_Good(t *testing.T) { } func TestSqlite_ReadStream_NotFound_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) _, err := sqliteMedium.ReadStream("nonexistent.txt") assert.Error(t, err) } func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.EnsureDir("mydir")) _, err := sqliteMedium.ReadStream("mydir") @@ -507,7 +507,7 @@ func TestSqlite_ReadStream_IsDirectory_Bad(t *testing.T) { } func TestSqlite_WriteStream_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) writer, err := sqliteMedium.WriteStream("output.txt") require.NoError(t, err) @@ -522,7 +522,7 @@ func TestSqlite_WriteStream_Good(t *testing.T) { } func TestSqlite_Exists_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) assert.False(t, sqliteMedium.Exists("nonexistent")) @@ -534,12 +534,12 @@ func TestSqlite_Exists_Good(t *testing.T) { } func TestSqlite_Exists_EmptyPath_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) assert.True(t, sqliteMedium.Exists("")) } func TestSqlite_IsDir_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) require.NoError(t, sqliteMedium.Write("file.txt", "content")) require.NoError(t, sqliteMedium.EnsureDir("mydir")) @@ -562,7 +562,7 @@ func TestSqlite_NormaliseEntryPath_Good(t *testing.T) { } func 
TestSqlite_InterfaceCompliance_Good(t *testing.T) { - sqliteMedium := newTestSqliteMedium(t) + sqliteMedium := newSqliteMedium(t) var _ interface { Read(string) (string, error) diff --git a/store/medium_test.go b/store/medium_test.go index 07e6f5d..2e433f3 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func newTestKeyValueMedium(t *testing.T) *Medium { +func newKeyValueMedium(t *testing.T) *Medium { t.Helper() keyValueMedium, err := NewMedium(Options{Path: ":memory:"}) require.NoError(t, err) @@ -18,7 +18,7 @@ func newTestKeyValueMedium(t *testing.T) *Medium { } func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) err := keyValueMedium.Write("config/theme", "dark") require.NoError(t, err) @@ -29,19 +29,19 @@ func TestKeyValueMedium_ReadWrite_Good(t *testing.T) { } func TestKeyValueMedium_Read_NoKey_Bad(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _, err := keyValueMedium.Read("config") assert.Error(t, err) } func TestKeyValueMedium_Read_NotFound_Bad(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _, err := keyValueMedium.Read("config/missing") assert.Error(t, err) } func TestKeyValueMedium_IsFile_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "val") assert.True(t, keyValueMedium.IsFile("group/key")) @@ -50,7 +50,7 @@ func TestKeyValueMedium_IsFile_Good(t *testing.T) { } func TestKeyValueMedium_Delete_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "val") err := keyValueMedium.Delete("group/key") @@ -59,7 +59,7 @@ func TestKeyValueMedium_Delete_Good(t *testing.T) { } func 
TestKeyValueMedium_Delete_NonEmptyGroup_Bad(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "val") err := keyValueMedium.Delete("group") @@ -67,7 +67,7 @@ func TestKeyValueMedium_Delete_NonEmptyGroup_Bad(t *testing.T) { } func TestKeyValueMedium_DeleteAll_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/a", "1") _ = keyValueMedium.Write("group/b", "2") @@ -77,7 +77,7 @@ func TestKeyValueMedium_DeleteAll_Good(t *testing.T) { } func TestKeyValueMedium_Rename_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("old/key", "val") err := keyValueMedium.Rename("old/key", "new/key") @@ -90,7 +90,7 @@ func TestKeyValueMedium_Rename_Good(t *testing.T) { } func TestKeyValueMedium_List_Groups_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("alpha/a", "1") _ = keyValueMedium.Write("beta/b", "2") @@ -108,7 +108,7 @@ func TestKeyValueMedium_List_Groups_Good(t *testing.T) { } func TestKeyValueMedium_List_Keys_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/a", "1") _ = keyValueMedium.Write("group/b", "22") @@ -118,7 +118,7 @@ func TestKeyValueMedium_List_Keys_Good(t *testing.T) { } func TestKeyValueMedium_Stat_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "hello") info, err := keyValueMedium.Stat("group") @@ -132,7 +132,7 @@ func TestKeyValueMedium_Stat_Good(t *testing.T) { } func TestKeyValueMedium_Exists_IsDir_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", 
"val") assert.True(t, keyValueMedium.Exists("group")) @@ -143,7 +143,7 @@ func TestKeyValueMedium_Exists_IsDir_Good(t *testing.T) { } func TestKeyValueMedium_Open_Read_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "hello world") f, err := keyValueMedium.Open("group/key") @@ -156,7 +156,7 @@ func TestKeyValueMedium_Open_Read_Good(t *testing.T) { } func TestKeyValueMedium_CreateClose_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) w, err := keyValueMedium.Create("group/key") require.NoError(t, err) @@ -169,7 +169,7 @@ func TestKeyValueMedium_CreateClose_Good(t *testing.T) { } func TestKeyValueMedium_Append_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "hello") w, err := keyValueMedium.Append("group/key") @@ -183,7 +183,7 @@ func TestKeyValueMedium_Append_Good(t *testing.T) { } func TestKeyValueMedium_AsMedium_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) keyValueMedium := keyValueStore.AsMedium() require.NoError(t, keyValueMedium.Write("group/key", "val")) @@ -198,14 +198,14 @@ func TestKeyValueMedium_AsMedium_Good(t *testing.T) { } func TestKeyValueMedium_KeyValueStore_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) assert.NotNil(t, keyValueMedium.KeyValueStore()) assert.Same(t, keyValueMedium.KeyValueStore(), keyValueMedium.KeyValueStore()) } func TestKeyValueMedium_EnsureDir_ReadWrite_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) require.NoError(t, keyValueMedium.EnsureDir("ignored")) require.NoError(t, keyValueMedium.Write("group/key", "value")) @@ -216,7 +216,7 @@ func TestKeyValueMedium_EnsureDir_ReadWrite_Good(t *testing.T) { } func 
TestKeyValueMedium_StreamHelpers_Good(t *testing.T) { - keyValueMedium := newTestKeyValueMedium(t) + keyValueMedium := newKeyValueMedium(t) writer, err := keyValueMedium.WriteStream("group/key") require.NoError(t, err) diff --git a/store/store_test.go b/store/store_test.go index 3f4c2a9..9768a31 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func newTestKeyValueStore(t *testing.T) *KeyValueStore { +func newKeyValueStore(t *testing.T) *KeyValueStore { t.Helper() keyValueStore, err := New(Options{Path: ":memory:"}) @@ -19,7 +19,7 @@ func newTestKeyValueStore(t *testing.T) *KeyValueStore { } func TestKeyValueStore_New_Options_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) assert.NotNil(t, keyValueStore) } @@ -29,7 +29,7 @@ func TestKeyValueStore_New_Options_Bad(t *testing.T) { } func TestKeyValueStore_SetGet_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) err := keyValueStore.Set("config", "theme", "dark") require.NoError(t, err) @@ -40,14 +40,14 @@ func TestKeyValueStore_SetGet_Good(t *testing.T) { } func TestKeyValueStore_Get_NotFound_Bad(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _, err := keyValueStore.Get("config", "missing") assert.ErrorIs(t, err, NotFoundError) } func TestKeyValueStore_Delete_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _ = keyValueStore.Set("config", "key", "val") err := keyValueStore.Delete("config", "key") @@ -58,7 +58,7 @@ func TestKeyValueStore_Delete_Good(t *testing.T) { } func TestKeyValueStore_Count_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -70,7 +70,7 @@ func TestKeyValueStore_Count_Good(t 
*testing.T) { } func TestKeyValueStore_DeleteGroup_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -82,7 +82,7 @@ func TestKeyValueStore_DeleteGroup_Good(t *testing.T) { } func TestKeyValueStore_GetAll_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _ = keyValueStore.Set("group", "a", "1") _ = keyValueStore.Set("group", "b", "2") @@ -94,7 +94,7 @@ func TestKeyValueStore_GetAll_Good(t *testing.T) { } func TestKeyValueStore_GetAll_Empty_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) all, err := keyValueStore.GetAll("empty") require.NoError(t, err) @@ -102,7 +102,7 @@ func TestKeyValueStore_GetAll_Empty_Good(t *testing.T) { } func TestKeyValueStore_Render_Good(t *testing.T) { - keyValueStore := newTestKeyValueStore(t) + keyValueStore := newKeyValueStore(t) _ = keyValueStore.Set("user", "pool", "pool.lthn.io:3333") _ = keyValueStore.Set("user", "wallet", "iz...") diff --git a/workspace/service_test.go b/workspace/service_test.go index 081d2a4..70590a3 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -22,7 +22,7 @@ func (provider stubKeyPairProvider) CreateKeyPair(_, _ string) (string, error) { return provider.privateKey, nil } -func newTestService(t *testing.T) (*Service, string) { +func newWorkspaceService(t *testing.T) (*Service, string) { t.Helper() tempHome := t.TempDir() @@ -62,7 +62,7 @@ func TestService_New_CustomRootPathAndMedium_Good(t *testing.T) { } func TestService_WorkspaceFileRoundTrip_Good(t *testing.T) { - service, tempHome := newTestService(t) + service, tempHome := newWorkspaceService(t) workspaceID, err := service.CreateWorkspace("test-user", "pass123") require.NoError(t, err) @@ -86,7 +86,7 @@ func TestService_WorkspaceFileRoundTrip_Good(t *testing.T) { } func 
TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { - service, tempHome := newTestService(t) + service, tempHome := newWorkspaceService(t) outside := core.Path(tempHome, ".core", "escaped") require.NoError(t, service.medium.EnsureDir(outside)) @@ -97,7 +97,7 @@ func TestService_SwitchWorkspace_TraversalBlocked_Bad(t *testing.T) { } func TestService_WriteWorkspaceFile_TraversalBlocked_Bad(t *testing.T) { - service, tempHome := newTestService(t) + service, tempHome := newWorkspaceService(t) workspaceID, err := service.CreateWorkspace("test-user", "pass123") require.NoError(t, err) @@ -128,7 +128,7 @@ func TestService_JoinPathWithinRoot_DefaultSeparator_Good(t *testing.T) { } func TestService_HandleWorkspaceMessage_Command_Good(t *testing.T) { - service, _ := newTestService(t) + service, _ := newWorkspaceService(t) create := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{ Action: WorkspaceCreateAction, From 15b6074e46cd6395b046a9f7977e8ff2d1ac1238 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:19:53 +0000 Subject: [PATCH 67/83] refactor(ax): align remaining AX surfaces Co-Authored-By: Virgil --- datanode/medium_test.go | 6 +- node/node.go | 4 +- s3/s3_test.go | 366 +++++++++++++++++++------------------- workspace/service.go | 2 +- workspace/service_test.go | 8 +- 5 files changed, 193 insertions(+), 193 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 8397c1b..70ffa19 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -338,14 +338,14 @@ func TestDataNode_SnapshotRestore_Good(t *testing.T) { require.NoError(t, err) assert.NotEmpty(t, snap) - m2, err := FromTar(snap) + restoredNode, err := FromTar(snap) require.NoError(t, err) - got, err := m2.Read("a.txt") + got, err := restoredNode.Read("a.txt") require.NoError(t, err) assert.Equal(t, "alpha", got) - got, err = m2.Read("b/c.txt") + got, err = restoredNode.Read("b/c.txt") require.NoError(t, err) assert.Equal(t, "charlie", got) 
} diff --git a/node/node.go b/node/node.go index 7deb73e..73594d3 100644 --- a/node/node.go +++ b/node/node.go @@ -130,7 +130,7 @@ type WalkOptions struct { } // Example: _ = nodeTree.Walk(".", func(_ string, _ fs.DirEntry, _ error) error { return nil }, node.WalkOptions{MaxDepth: 1, SkipErrors: true}) -func (node *Node) Walk(root string, fn fs.WalkDirFunc, options WalkOptions) error { +func (node *Node) Walk(root string, walkFunc fs.WalkDirFunc, options WalkOptions) error { if options.SkipErrors { if _, err := node.Stat(root); err != nil { return nil @@ -147,7 +147,7 @@ func (node *Node) Walk(root string, fn fs.WalkDirFunc, options WalkOptions) erro } } - walkResult := fn(entryPath, entry, err) + walkResult := walkFunc(entryPath, entry, err) if walkResult == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { relativePath := core.TrimPrefix(entryPath, root) diff --git a/s3/s3_test.go b/s3/s3_test.go index 7cfc42e..10460df 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" ) -type mockS3 struct { +type testS3Client struct { mu sync.RWMutex objects map[string][]byte mtimes map[string]time.Time @@ -26,8 +26,8 @@ type mockS3 struct { deleteObjectsErrs map[string]types.Error } -func newMockS3() *mockS3 { - return &mockS3{ +func newTestS3Client() *testS3Client { + return &testS3Client{ objects: make(map[string][]byte), mtimes: make(map[string]time.Time), deleteObjectErrors: make(map[string]error), @@ -35,16 +35,16 @@ func newMockS3() *mockS3 { } } -func (m *mockS3) GetObject(_ context.Context, params *awss3.GetObjectInput, _ ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (client *testS3Client) GetObject(operationContext context.Context, params *awss3.GetObjectInput, optionFns ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) { + client.mu.RLock() + defer client.mu.RUnlock() key := aws.ToString(params.Key) - data, ok 
:= m.objects[key] + data, ok := client.objects[key] if !ok { - return nil, core.E("s3test.mockS3.GetObject", core.Sprintf("NoSuchKey: key %q not found", key), fs.ErrNotExist) + return nil, core.E("s3test.testS3Client.GetObject", core.Sprintf("NoSuchKey: key %q not found", key), fs.ErrNotExist) } - mtime := m.mtimes[key] + mtime := client.mtimes[key] return &awss3.GetObjectOutput{ Body: goio.NopCloser(bytes.NewReader(data)), ContentLength: aws.Int64(int64(len(data))), @@ -52,69 +52,69 @@ func (m *mockS3) GetObject(_ context.Context, params *awss3.GetObjectInput, _ .. }, nil } -func (m *mockS3) PutObject(_ context.Context, params *awss3.PutObjectInput, _ ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) { - m.mu.Lock() - defer m.mu.Unlock() +func (client *testS3Client) PutObject(operationContext context.Context, params *awss3.PutObjectInput, optionFns ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() key := aws.ToString(params.Key) data, err := goio.ReadAll(params.Body) if err != nil { return nil, err } - m.objects[key] = data - m.mtimes[key] = time.Now() + client.objects[key] = data + client.mtimes[key] = time.Now() return &awss3.PutObjectOutput{}, nil } -func (m *mockS3) DeleteObject(_ context.Context, params *awss3.DeleteObjectInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectOutput, error) { - m.mu.Lock() - defer m.mu.Unlock() +func (client *testS3Client) DeleteObject(operationContext context.Context, params *awss3.DeleteObjectInput, optionFns ...func(*awss3.Options)) (*awss3.DeleteObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() key := aws.ToString(params.Key) - if err, ok := m.deleteObjectErrors[key]; ok { + if err, ok := client.deleteObjectErrors[key]; ok { return nil, err } - delete(m.objects, key) - delete(m.mtimes, key) + delete(client.objects, key) + delete(client.mtimes, key) return &awss3.DeleteObjectOutput{}, nil } -func (m *mockS3) DeleteObjects(_ context.Context, 
params *awss3.DeleteObjectsInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectsOutput, error) { - m.mu.Lock() - defer m.mu.Unlock() +func (client *testS3Client) DeleteObjects(operationContext context.Context, params *awss3.DeleteObjectsInput, optionFns ...func(*awss3.Options)) (*awss3.DeleteObjectsOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() var outErrs []types.Error for _, obj := range params.Delete.Objects { key := aws.ToString(obj.Key) - if errInfo, ok := m.deleteObjectsErrs[key]; ok { + if errInfo, ok := client.deleteObjectsErrs[key]; ok { outErrs = append(outErrs, errInfo) continue } - delete(m.objects, key) - delete(m.mtimes, key) + delete(client.objects, key) + delete(client.mtimes, key) } return &awss3.DeleteObjectsOutput{Errors: outErrs}, nil } -func (m *mockS3) HeadObject(_ context.Context, params *awss3.HeadObjectInput, _ ...func(*awss3.Options)) (*awss3.HeadObjectOutput, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (client *testS3Client) HeadObject(operationContext context.Context, params *awss3.HeadObjectInput, optionFns ...func(*awss3.Options)) (*awss3.HeadObjectOutput, error) { + client.mu.RLock() + defer client.mu.RUnlock() key := aws.ToString(params.Key) - data, ok := m.objects[key] + data, ok := client.objects[key] if !ok { - return nil, core.E("s3test.mockS3.HeadObject", core.Sprintf("NotFound: key %q not found", key), fs.ErrNotExist) + return nil, core.E("s3test.testS3Client.HeadObject", core.Sprintf("NotFound: key %q not found", key), fs.ErrNotExist) } - mtime := m.mtimes[key] + mtime := client.mtimes[key] return &awss3.HeadObjectOutput{ ContentLength: aws.Int64(int64(len(data))), LastModified: &mtime, }, nil } -func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Input, _ ...func(*awss3.Options)) (*awss3.ListObjectsV2Output, error) { - m.mu.RLock() - defer m.mu.RUnlock() +func (client *testS3Client) ListObjectsV2(operationContext context.Context, params *awss3.ListObjectsV2Input, optionFns 
...func(*awss3.Options)) (*awss3.ListObjectsV2Output, error) { + client.mu.RLock() + defer client.mu.RUnlock() prefix := aws.ToString(params.Prefix) delimiter := aws.ToString(params.Delimiter) @@ -124,7 +124,7 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp } var allKeys []string - for k := range m.objects { + for k := range client.objects { if core.HasPrefix(k, prefix) { allKeys = append(allKeys, k) } @@ -150,8 +150,8 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp break } - data := m.objects[k] - mtime := m.mtimes[k] + data := client.objects[k] + mtime := client.mtimes[k] contents = append(contents, types.Object{ Key: aws.String(k), Size: aws.Int64(int64(len(data))), @@ -176,47 +176,47 @@ func (m *mockS3) ListObjectsV2(_ context.Context, params *awss3.ListObjectsV2Inp }, nil } -func (m *mockS3) CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) { - m.mu.Lock() - defer m.mu.Unlock() +func (client *testS3Client) CopyObject(operationContext context.Context, params *awss3.CopyObjectInput, optionFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() source := aws.ToString(params.CopySource) parts := core.SplitN(source, "/", 2) if len(parts) != 2 { - return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("invalid CopySource: %s", source), fs.ErrInvalid) + return nil, core.E("s3test.testS3Client.CopyObject", core.Sprintf("invalid CopySource: %s", source), fs.ErrInvalid) } srcKey := parts[1] - data, ok := m.objects[srcKey] + data, ok := client.objects[srcKey] if !ok { - return nil, core.E("s3test.mockS3.CopyObject", core.Sprintf("NoSuchKey: source key %q not found", srcKey), fs.ErrNotExist) + return nil, core.E("s3test.testS3Client.CopyObject", core.Sprintf("NoSuchKey: source key %q not found", srcKey), fs.ErrNotExist) } destKey := aws.ToString(params.Key) - 
m.objects[destKey] = append([]byte{}, data...) - m.mtimes[destKey] = time.Now() + client.objects[destKey] = append([]byte{}, data...) + client.mtimes[destKey] = time.Now() return &awss3.CopyObjectOutput{}, nil } -func newS3Medium(t *testing.T) (*Medium, *mockS3) { +func newS3Medium(t *testing.T) (*Medium, *testS3Client) { t.Helper() - mock := newMockS3() - m, err := New(Options{Bucket: "test-bucket", Client: mock}) + testS3Client := newTestS3Client() + s3Medium, err := New(Options{Bucket: "test-bucket", Client: testS3Client}) require.NoError(t, err) - return m, mock + return s3Medium, testS3Client } func TestS3_New_Good(t *testing.T) { - mock := newMockS3() - m, err := New(Options{Bucket: "my-bucket", Client: mock}) + testS3Client := newTestS3Client() + s3Medium, err := New(Options{Bucket: "my-bucket", Client: testS3Client}) require.NoError(t, err) - assert.Equal(t, "my-bucket", m.bucket) - assert.Equal(t, "", m.prefix) + assert.Equal(t, "my-bucket", s3Medium.bucket) + assert.Equal(t, "", s3Medium.prefix) } func TestS3_New_NoBucket_Bad(t *testing.T) { - _, err := New(Options{Client: newMockS3()}) + _, err := New(Options{Client: newTestS3Client()}) assert.Error(t, err) assert.Contains(t, err.Error(), "bucket name is required") } @@ -228,56 +228,56 @@ func TestS3_New_NoClient_Bad(t *testing.T) { } func TestS3_New_Options_Good(t *testing.T) { - mock := newMockS3() - m, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "data/"}) + testS3Client := newTestS3Client() + s3Medium, err := New(Options{Bucket: "bucket", Client: testS3Client, Prefix: "data/"}) require.NoError(t, err) - assert.Equal(t, "data/", m.prefix) + assert.Equal(t, "data/", s3Medium.prefix) - m2, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "data"}) + prefixedS3Medium, err := New(Options{Bucket: "bucket", Client: testS3Client, Prefix: "data"}) require.NoError(t, err) - assert.Equal(t, "data/", m2.prefix) + assert.Equal(t, "data/", prefixedS3Medium.prefix) } func 
TestS3_ReadWrite_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - err := m.Write("hello.txt", "world") + err := s3Medium.Write("hello.txt", "world") require.NoError(t, err) - content, err := m.Read("hello.txt") + content, err := s3Medium.Read("hello.txt") require.NoError(t, err) assert.Equal(t, "world", content) } func TestS3_ReadWrite_NotFound_Bad(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - _, err := m.Read("nonexistent.txt") + _, err := s3Medium.Read("nonexistent.txt") assert.Error(t, err) } func TestS3_ReadWrite_EmptyPath_Bad(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - _, err := m.Read("") + _, err := s3Medium.Read("") assert.Error(t, err) - err = m.Write("", "content") + err = s3Medium.Write("", "content") assert.Error(t, err) } func TestS3_ReadWrite_Prefix_Good(t *testing.T) { - mock := newMockS3() - m, err := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) + testS3Client := newTestS3Client() + s3Medium, err := New(Options{Bucket: "bucket", Client: testS3Client, Prefix: "pfx"}) require.NoError(t, err) - err = m.Write("file.txt", "data") + err = s3Medium.Write("file.txt", "data") require.NoError(t, err) - _, ok := mock.objects["pfx/file.txt"] + _, ok := testS3Client.objects["pfx/file.txt"] assert.True(t, ok, "object should be stored with prefix") - content, err := m.Read("file.txt") + content, err := s3Medium.Read("file.txt") require.NoError(t, err) assert.Equal(t, "data", content) } @@ -289,123 +289,123 @@ func TestS3_EnsureDir_Good(t *testing.T) { } func TestS3_IsFile_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - err := m.Write("file.txt", "content") + err := s3Medium.Write("file.txt", "content") require.NoError(t, err) - assert.True(t, m.IsFile("file.txt")) - assert.False(t, m.IsFile("nonexistent.txt")) - assert.False(t, m.IsFile("")) + assert.True(t, s3Medium.IsFile("file.txt")) + assert.False(t, 
s3Medium.IsFile("nonexistent.txt")) + assert.False(t, s3Medium.IsFile("")) } func TestS3_Delete_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - err := m.Write("to-delete.txt", "content") + err := s3Medium.Write("to-delete.txt", "content") require.NoError(t, err) - assert.True(t, m.Exists("to-delete.txt")) + assert.True(t, s3Medium.Exists("to-delete.txt")) - err = m.Delete("to-delete.txt") + err = s3Medium.Delete("to-delete.txt") require.NoError(t, err) - assert.False(t, m.IsFile("to-delete.txt")) + assert.False(t, s3Medium.IsFile("to-delete.txt")) } func TestS3_Delete_EmptyPath_Bad(t *testing.T) { - m, _ := newS3Medium(t) - err := m.Delete("") + s3Medium, _ := newS3Medium(t) + err := s3Medium.Delete("") assert.Error(t, err) } func TestS3_DeleteAll_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("dir/file1.txt", "a")) - require.NoError(t, m.Write("dir/sub/file2.txt", "b")) - require.NoError(t, m.Write("other.txt", "c")) + require.NoError(t, s3Medium.Write("dir/file1.txt", "a")) + require.NoError(t, s3Medium.Write("dir/sub/file2.txt", "b")) + require.NoError(t, s3Medium.Write("other.txt", "c")) - err := m.DeleteAll("dir") + err := s3Medium.DeleteAll("dir") require.NoError(t, err) - assert.False(t, m.IsFile("dir/file1.txt")) - assert.False(t, m.IsFile("dir/sub/file2.txt")) - assert.True(t, m.IsFile("other.txt")) + assert.False(t, s3Medium.IsFile("dir/file1.txt")) + assert.False(t, s3Medium.IsFile("dir/sub/file2.txt")) + assert.True(t, s3Medium.IsFile("other.txt")) } func TestS3_DeleteAll_EmptyPath_Bad(t *testing.T) { - m, _ := newS3Medium(t) - err := m.DeleteAll("") + s3Medium, _ := newS3Medium(t) + err := s3Medium.DeleteAll("") assert.Error(t, err) } func TestS3_DeleteAll_DeleteObjectError_Bad(t *testing.T) { - m, mock := newS3Medium(t) - mock.deleteObjectErrors["dir"] = core.NewError("boom") + s3Medium, testS3Client := newS3Medium(t) + testS3Client.deleteObjectErrors["dir"] = 
core.NewError("boom") - err := m.DeleteAll("dir") + err := s3Medium.DeleteAll("dir") require.Error(t, err) assert.Contains(t, err.Error(), "failed to delete object: dir") } func TestS3_DeleteAll_PartialDelete_Bad(t *testing.T) { - m, mock := newS3Medium(t) + s3Medium, testS3Client := newS3Medium(t) - require.NoError(t, m.Write("dir/file1.txt", "a")) - require.NoError(t, m.Write("dir/file2.txt", "b")) - mock.deleteObjectsErrs["dir/file2.txt"] = types.Error{ + require.NoError(t, s3Medium.Write("dir/file1.txt", "a")) + require.NoError(t, s3Medium.Write("dir/file2.txt", "b")) + testS3Client.deleteObjectsErrs["dir/file2.txt"] = types.Error{ Key: aws.String("dir/file2.txt"), Code: aws.String("AccessDenied"), Message: aws.String("blocked"), } - err := m.DeleteAll("dir") + err := s3Medium.DeleteAll("dir") require.Error(t, err) assert.Contains(t, err.Error(), "partial delete failed") assert.Contains(t, err.Error(), "dir/file2.txt") - assert.True(t, m.IsFile("dir/file2.txt")) - assert.False(t, m.IsFile("dir/file1.txt")) + assert.True(t, s3Medium.IsFile("dir/file2.txt")) + assert.False(t, s3Medium.IsFile("dir/file1.txt")) } func TestS3_Rename_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("old.txt", "content")) - assert.True(t, m.IsFile("old.txt")) + require.NoError(t, s3Medium.Write("old.txt", "content")) + assert.True(t, s3Medium.IsFile("old.txt")) - err := m.Rename("old.txt", "new.txt") + err := s3Medium.Rename("old.txt", "new.txt") require.NoError(t, err) - assert.False(t, m.IsFile("old.txt")) - assert.True(t, m.IsFile("new.txt")) + assert.False(t, s3Medium.IsFile("old.txt")) + assert.True(t, s3Medium.IsFile("new.txt")) - content, err := m.Read("new.txt") + content, err := s3Medium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "content", content) } func TestS3_Rename_EmptyPath_Bad(t *testing.T) { - m, _ := newS3Medium(t) - err := m.Rename("", "new.txt") + s3Medium, _ := newS3Medium(t) + err := 
s3Medium.Rename("", "new.txt") assert.Error(t, err) - err = m.Rename("old.txt", "") + err = s3Medium.Rename("old.txt", "") assert.Error(t, err) } func TestS3_Rename_SourceNotFound_Bad(t *testing.T) { - m, _ := newS3Medium(t) - err := m.Rename("nonexistent.txt", "new.txt") + s3Medium, _ := newS3Medium(t) + err := s3Medium.Rename("nonexistent.txt", "new.txt") assert.Error(t, err) } func TestS3_List_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("dir/file1.txt", "a")) - require.NoError(t, m.Write("dir/file2.txt", "b")) - require.NoError(t, m.Write("dir/sub/file3.txt", "c")) + require.NoError(t, s3Medium.Write("dir/file1.txt", "a")) + require.NoError(t, s3Medium.Write("dir/file2.txt", "b")) + require.NoError(t, s3Medium.Write("dir/sub/file3.txt", "c")) - entries, err := m.List("dir") + entries, err := s3Medium.List("dir") require.NoError(t, err) names := make(map[string]bool) @@ -429,12 +429,12 @@ func TestS3_List_Good(t *testing.T) { } func TestS3_List_Root_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("root.txt", "content")) - require.NoError(t, m.Write("dir/nested.txt", "nested")) + require.NoError(t, s3Medium.Write("root.txt", "content")) + require.NoError(t, s3Medium.Write("dir/nested.txt", "nested")) - entries, err := m.List("") + entries, err := s3Medium.List("") require.NoError(t, err) names := make(map[string]bool) @@ -447,11 +447,11 @@ func TestS3_List_Root_Good(t *testing.T) { } func TestS3_Stat_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("file.txt", "hello world")) + require.NoError(t, s3Medium.Write("file.txt", "hello world")) - info, err := m.Stat("file.txt") + info, err := s3Medium.Stat("file.txt") require.NoError(t, err) assert.Equal(t, "file.txt", info.Name()) assert.Equal(t, int64(11), info.Size()) @@ -459,24 +459,24 @@ func TestS3_Stat_Good(t *testing.T) { } func 
TestS3_Stat_NotFound_Bad(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - _, err := m.Stat("nonexistent.txt") + _, err := s3Medium.Stat("nonexistent.txt") assert.Error(t, err) } func TestS3_Stat_EmptyPath_Bad(t *testing.T) { - m, _ := newS3Medium(t) - _, err := m.Stat("") + s3Medium, _ := newS3Medium(t) + _, err := s3Medium.Stat("") assert.Error(t, err) } func TestS3_Open_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("file.txt", "open me")) + require.NoError(t, s3Medium.Write("file.txt", "open me")) - f, err := m.Open("file.txt") + f, err := s3Medium.Open("file.txt") require.NoError(t, err) defer f.Close() @@ -490,16 +490,16 @@ func TestS3_Open_Good(t *testing.T) { } func TestS3_Open_NotFound_Bad(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - _, err := m.Open("nonexistent.txt") + _, err := s3Medium.Open("nonexistent.txt") assert.Error(t, err) } func TestS3_Create_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - w, err := m.Create("new.txt") + w, err := s3Medium.Create("new.txt") require.NoError(t, err) n, err := w.Write([]byte("created")) @@ -509,17 +509,17 @@ func TestS3_Create_Good(t *testing.T) { err = w.Close() require.NoError(t, err) - content, err := m.Read("new.txt") + content, err := s3Medium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "created", content) } func TestS3_Append_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("append.txt", "hello")) + require.NoError(t, s3Medium.Write("append.txt", "hello")) - w, err := m.Append("append.txt") + w, err := s3Medium.Append("append.txt") require.NoError(t, err) _, err = w.Write([]byte(" world")) @@ -527,15 +527,15 @@ func TestS3_Append_Good(t *testing.T) { err = w.Close() require.NoError(t, err) - content, err := m.Read("append.txt") + content, err := s3Medium.Read("append.txt") require.NoError(t, 
err) assert.Equal(t, "hello world", content) } func TestS3_Append_NewFile_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - w, err := m.Append("new.txt") + w, err := s3Medium.Append("new.txt") require.NoError(t, err) _, err = w.Write([]byte("fresh")) @@ -543,17 +543,17 @@ func TestS3_Append_NewFile_Good(t *testing.T) { err = w.Close() require.NoError(t, err) - content, err := m.Read("new.txt") + content, err := s3Medium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "fresh", content) } func TestS3_ReadStream_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("stream.txt", "streaming content")) + require.NoError(t, s3Medium.Write("stream.txt", "streaming content")) - reader, err := m.ReadStream("stream.txt") + reader, err := s3Medium.ReadStream("stream.txt") require.NoError(t, err) defer reader.Close() @@ -563,15 +563,15 @@ func TestS3_ReadStream_Good(t *testing.T) { } func TestS3_ReadStream_NotFound_Bad(t *testing.T) { - m, _ := newS3Medium(t) - _, err := m.ReadStream("nonexistent.txt") + s3Medium, _ := newS3Medium(t) + _, err := s3Medium.ReadStream("nonexistent.txt") assert.Error(t, err) } func TestS3_WriteStream_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - writer, err := m.WriteStream("output.txt") + writer, err := s3Medium.WriteStream("output.txt") require.NoError(t, err) _, err = goio.Copy(writer, core.NewReader("piped data")) @@ -579,57 +579,57 @@ func TestS3_WriteStream_Good(t *testing.T) { err = writer.Close() require.NoError(t, err) - content, err := m.Read("output.txt") + content, err := s3Medium.Read("output.txt") require.NoError(t, err) assert.Equal(t, "piped data", content) } func TestS3_Exists_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - assert.False(t, m.Exists("nonexistent.txt")) + assert.False(t, s3Medium.Exists("nonexistent.txt")) - require.NoError(t, m.Write("file.txt", "content")) - 
assert.True(t, m.Exists("file.txt")) + require.NoError(t, s3Medium.Write("file.txt", "content")) + assert.True(t, s3Medium.Exists("file.txt")) } func TestS3_Exists_DirectoryPrefix_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("dir/file.txt", "content")) - assert.True(t, m.Exists("dir")) + require.NoError(t, s3Medium.Write("dir/file.txt", "content")) + assert.True(t, s3Medium.Exists("dir")) } func TestS3_IsDir_Good(t *testing.T) { - m, _ := newS3Medium(t) + s3Medium, _ := newS3Medium(t) - require.NoError(t, m.Write("dir/file.txt", "content")) + require.NoError(t, s3Medium.Write("dir/file.txt", "content")) - assert.True(t, m.IsDir("dir")) - assert.False(t, m.IsDir("dir/file.txt")) - assert.False(t, m.IsDir("nonexistent")) - assert.False(t, m.IsDir("")) + assert.True(t, s3Medium.IsDir("dir")) + assert.False(t, s3Medium.IsDir("dir/file.txt")) + assert.False(t, s3Medium.IsDir("nonexistent")) + assert.False(t, s3Medium.IsDir("")) } func TestS3_ObjectKey_Good(t *testing.T) { - mock := newMockS3() + testS3Client := newTestS3Client() - m, _ := New(Options{Bucket: "bucket", Client: mock}) - assert.Equal(t, "file.txt", m.objectKey("file.txt")) - assert.Equal(t, "dir/file.txt", m.objectKey("dir/file.txt")) - assert.Equal(t, "", m.objectKey("")) - assert.Equal(t, "file.txt", m.objectKey("/file.txt")) - assert.Equal(t, "file.txt", m.objectKey("../file.txt")) + s3Medium, _ := New(Options{Bucket: "bucket", Client: testS3Client}) + assert.Equal(t, "file.txt", s3Medium.objectKey("file.txt")) + assert.Equal(t, "dir/file.txt", s3Medium.objectKey("dir/file.txt")) + assert.Equal(t, "", s3Medium.objectKey("")) + assert.Equal(t, "file.txt", s3Medium.objectKey("/file.txt")) + assert.Equal(t, "file.txt", s3Medium.objectKey("../file.txt")) - m2, _ := New(Options{Bucket: "bucket", Client: mock, Prefix: "pfx"}) - assert.Equal(t, "pfx/file.txt", m2.objectKey("file.txt")) - assert.Equal(t, "pfx/dir/file.txt", 
m2.objectKey("dir/file.txt")) - assert.Equal(t, "pfx/", m2.objectKey("")) + prefixedS3Medium, _ := New(Options{Bucket: "bucket", Client: testS3Client, Prefix: "pfx"}) + assert.Equal(t, "pfx/file.txt", prefixedS3Medium.objectKey("file.txt")) + assert.Equal(t, "pfx/dir/file.txt", prefixedS3Medium.objectKey("dir/file.txt")) + assert.Equal(t, "pfx/", prefixedS3Medium.objectKey("")) } func TestS3_InterfaceCompliance_Good(t *testing.T) { - mock := newMockS3() - m, err := New(Options{Bucket: "bucket", Client: mock}) + testS3Client := newTestS3Client() + s3Medium, err := New(Options{Bucket: "bucket", Client: testS3Client}) require.NoError(t, err) var _ interface { @@ -649,5 +649,5 @@ func TestS3_InterfaceCompliance_Good(t *testing.T) { WriteStream(string) (goio.WriteCloser, error) Exists(string) bool IsDir(string) bool - } = m + } = s3Medium } diff --git a/workspace/service.go b/workspace/service.go index feefae8..ebe78e3 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -222,7 +222,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) -func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { +func (service *Service) HandleWorkspaceMessage(coreInstance *core.Core, message core.Message) core.Result { switch command := message.(type) { case WorkspaceCommand: return service.HandleWorkspaceCommand(command) diff --git a/workspace/service_test.go b/workspace/service_test.go index 70590a3..aab95ed 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -10,12 +10,12 @@ import ( "github.com/stretchr/testify/require" ) -type stubKeyPairProvider struct { +type testKeyPairProvider struct { privateKey string err error } -func (provider stubKeyPairProvider) CreateKeyPair(_, _ string) (string, error) { +func (provider testKeyPairProvider) 
CreateKeyPair(identifier, passphrase string) (string, error) { if provider.err != nil { return "", provider.err } @@ -28,7 +28,7 @@ func newWorkspaceService(t *testing.T) (*Service, string) { tempHome := t.TempDir() t.Setenv("HOME", tempHome) - service, err := New(Options{KeyPairProvider: stubKeyPairProvider{privateKey: "private-key"}}) + service, err := New(Options{KeyPairProvider: testKeyPairProvider{privateKey: "private-key"}}) require.NoError(t, err) return service, tempHome } @@ -43,7 +43,7 @@ func TestService_New_CustomRootPathAndMedium_Good(t *testing.T) { rootPath := core.Path(t.TempDir(), "custom", "workspaces") service, err := New(Options{ - KeyPairProvider: stubKeyPairProvider{privateKey: "private-key"}, + KeyPairProvider: testKeyPairProvider{privateKey: "private-key"}, RootPath: rootPath, Medium: medium, }) From c713bafd48427681fa913ce260a697917fce1a09 Mon Sep 17 00:00:00 2001 From: Virgil Date: Tue, 31 Mar 2026 14:27:58 +0000 Subject: [PATCH 68/83] refactor(ax): align remaining AX examples and names Co-Authored-By: Virgil --- datanode/medium_test.go | 58 +++++++++++++++++++------------------- docs/architecture.md | 17 +++++------ docs/development.md | 1 - docs/index.md | 33 ++++++++-------------- local/medium.go | 22 +++++++++------ local/medium_test.go | 8 +++--- medium_test.go | 4 +-- node/node_test.go | 4 +-- s3/s3_test.go | 44 ++++++++++++++--------------- sigil/crypto_sigil_test.go | 6 ++-- sqlite/sqlite_test.go | 42 +++++++++++++-------------- store/medium_test.go | 24 ++++++++-------- workspace/service.go | 13 +++++---- workspace/service_test.go | 2 +- 14 files changed, 137 insertions(+), 141 deletions(-) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 70ffa19..730ddb4 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -227,8 +227,8 @@ func TestDataNode_List_Good(t *testing.T) { require.NoError(t, err) names := make([]string, len(entries)) - for i, e := range entries { - names[i] = e.Name() + for index, 
entry := range entries { + names[index] = entry.Name() } assert.Contains(t, names, "root.txt") assert.Contains(t, names, "pkg") @@ -236,8 +236,8 @@ func TestDataNode_List_Good(t *testing.T) { entries, err = dataNodeMedium.List("pkg") require.NoError(t, err) names = make([]string, len(entries)) - for i, e := range entries { - names[i] = e.Name() + for index, entry := range entries { + names[index] = entry.Name() } assert.Contains(t, names, "a.go") assert.Contains(t, names, "b.go") @@ -264,11 +264,11 @@ func TestDataNode_Open_Good(t *testing.T) { require.NoError(t, dataNodeMedium.Write("open.txt", "opened")) - f, err := dataNodeMedium.Open("open.txt") + file, err := dataNodeMedium.Open("open.txt") require.NoError(t, err) - defer f.Close() + defer file.Close() - data, err := io.ReadAll(f) + data, err := io.ReadAll(file) require.NoError(t, err) assert.Equal(t, "opened", string(data)) } @@ -276,19 +276,19 @@ func TestDataNode_Open_Good(t *testing.T) { func TestDataNode_CreateAppend_Good(t *testing.T) { dataNodeMedium := New() - w, err := dataNodeMedium.Create("new.txt") + writer, err := dataNodeMedium.Create("new.txt") require.NoError(t, err) - w.Write([]byte("hello")) - w.Close() + _, _ = writer.Write([]byte("hello")) + require.NoError(t, writer.Close()) got, err := dataNodeMedium.Read("new.txt") require.NoError(t, err) assert.Equal(t, "hello", got) - w, err = dataNodeMedium.Append("new.txt") + writer, err = dataNodeMedium.Append("new.txt") require.NoError(t, err) - w.Write([]byte(" world")) - w.Close() + _, _ = writer.Write([]byte(" world")) + require.NoError(t, writer.Close()) got, err = dataNodeMedium.Read("new.txt") require.NoError(t, err) @@ -315,17 +315,17 @@ func TestDataNode_Append_ReadFailure_Bad(t *testing.T) { func TestDataNode_Streams_Good(t *testing.T) { dataNodeMedium := New() - ws, err := dataNodeMedium.WriteStream("stream.txt") + writeStream, err := dataNodeMedium.WriteStream("stream.txt") require.NoError(t, err) - ws.Write([]byte("streamed")) - 
ws.Close() + _, _ = writeStream.Write([]byte("streamed")) + require.NoError(t, writeStream.Close()) - rs, err := dataNodeMedium.ReadStream("stream.txt") + readStream, err := dataNodeMedium.ReadStream("stream.txt") require.NoError(t, err) - data, err := io.ReadAll(rs) + data, err := io.ReadAll(readStream) require.NoError(t, err) assert.Equal(t, "streamed", string(data)) - rs.Close() + require.NoError(t, readStream.Close()) } func TestDataNode_SnapshotRestore_Good(t *testing.T) { @@ -334,11 +334,11 @@ func TestDataNode_SnapshotRestore_Good(t *testing.T) { require.NoError(t, dataNodeMedium.Write("a.txt", "alpha")) require.NoError(t, dataNodeMedium.Write("b/c.txt", "charlie")) - snap, err := dataNodeMedium.Snapshot() + snapshotData, err := dataNodeMedium.Snapshot() require.NoError(t, err) - assert.NotEmpty(t, snap) + assert.NotEmpty(t, snapshotData) - restoredNode, err := FromTar(snap) + restoredNode, err := FromTar(snapshotData) require.NoError(t, err) got, err := restoredNode.Read("a.txt") @@ -355,13 +355,13 @@ func TestDataNode_Restore_Good(t *testing.T) { require.NoError(t, dataNodeMedium.Write("original.txt", "before")) - snap, err := dataNodeMedium.Snapshot() + snapshotData, err := dataNodeMedium.Snapshot() require.NoError(t, err) require.NoError(t, dataNodeMedium.Write("original.txt", "after")) require.NoError(t, dataNodeMedium.Write("extra.txt", "extra")) - require.NoError(t, dataNodeMedium.Restore(snap)) + require.NoError(t, dataNodeMedium.Restore(snapshotData)) got, err := dataNodeMedium.Read("original.txt") require.NoError(t, err) @@ -375,14 +375,14 @@ func TestDataNode_DataNode_Good(t *testing.T) { require.NoError(t, dataNodeMedium.Write("test.txt", "borg")) - dn := dataNodeMedium.DataNode() - assert.NotNil(t, dn) + dataNode := dataNodeMedium.DataNode() + assert.NotNil(t, dataNode) - f, err := dn.Open("test.txt") + file, err := dataNode.Open("test.txt") require.NoError(t, err) - defer f.Close() + defer file.Close() - data, err := io.ReadAll(f) + data, err 
:= io.ReadAll(file) require.NoError(t, err) assert.Equal(t, "borg", string(data)) } diff --git a/docs/architecture.md b/docs/architecture.md index d557610..0d11aa6 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -139,7 +139,7 @@ keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) keyValueStore.Set("user", "pool", "pool.lthn.io:3333") keyValueStore.Set("user", "wallet", "iz...") renderedText, _ := keyValueStore.Render(`{"pool":"{{ .pool }}"}`, "user") -// renderedText: {"pool":"pool.lthn.io:3333"} +assert.Equal(t, `{"pool":"pool.lthn.io:3333"}`, renderedText) ``` ### store.Medium (Medium adapter) @@ -164,8 +164,8 @@ The sigil package implements composable, reversible data transformations. ```go type Sigil interface { - In(data []byte) ([]byte, error) // forward transform - Out(data []byte) ([]byte, error) // reverse transform + In(data []byte) ([]byte, error) + Out(data []byte) ([]byte, error) } ``` @@ -199,10 +199,8 @@ Created via `NewSigil(name)`: ### Pipeline Functions ```go -// Apply sigils left-to-right. encoded, _ := sigil.Transmute(data, []sigil.Sigil{gzipSigil, hexSigil}) -// Reverse sigils right-to-left. original, _ := sigil.Untransmute(encoded, []sigil.Sigil{gzipSigil, hexSigil}) ``` @@ -231,12 +229,11 @@ The pre-obfuscation layer ensures that raw plaintext patterns are never sent dir key := make([]byte, 32) rand.Read(key) -s, _ := sigil.NewChaChaPolySigil(key, nil) -ciphertext, _ := s.In([]byte("secret")) -plaintext, _ := s.Out(ciphertext) +cipherSigil, _ := sigil.NewChaChaPolySigil(key, nil) +ciphertext, _ := cipherSigil.In([]byte("secret")) +plaintext, _ := cipherSigil.Out(ciphertext) -// With stronger obfuscation: -s2, _ := sigil.NewChaChaPolySigil(key, &sigil.ShuffleMaskObfuscator{}) +shuffleCipherSigil, _ := sigil.NewChaChaPolySigil(key, &sigil.ShuffleMaskObfuscator{}) ``` Each call to `In` generates a fresh random nonce, so encrypting the same plaintext twice produces different ciphertexts. 
diff --git a/docs/development.md b/docs/development.md index d23b72d..a1a6b28 100644 --- a/docs/development.md +++ b/docs/development.md @@ -96,7 +96,6 @@ func TestMyFeature(t *testing.T) { _ = memoryMedium.Write("config.yaml", "key: value") _ = memoryMedium.EnsureDir("data") - // Your code under test receives memoryMedium as an io.Medium result, err := myFunction(memoryMedium) assert.NoError(t, err) output, err := memoryMedium.Read("output.txt") diff --git a/docs/index.md b/docs/index.md index 9ce4c25..05762f9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -19,21 +19,17 @@ import ( "forge.lthn.ai/core/go-io/node" ) -// Use the pre-initialised local filesystem (unsandboxed, rooted at "/"). content, _ := io.Local.Read("/etc/hostname") -// Create a sandboxed medium restricted to a single directory. -sandbox, _ := io.NewSandboxed("/var/data/myapp") -_ = sandbox.Write("config.yaml", "key: value") +sandboxMedium, _ := io.NewSandboxed("/var/data/myapp") +_ = sandboxMedium.Write("config.yaml", "key: value") -// In-memory filesystem with tar serialisation. -mem := node.New() -mem.AddData("hello.txt", []byte("world")) -tarball, _ := mem.ToTar() +nodeTree := node.New() +nodeTree.AddData("hello.txt", []byte("world")) +tarball, _ := nodeTree.ToTar() -// S3 backend (requires an *awss3.Client from the AWS SDK). 
-bucket, _ := s3.New(s3.Options{Bucket: "my-bucket", Client: awsClient, Prefix: "uploads/"}) -_ = bucket.Write("photo.jpg", rawData) +s3Medium, _ := s3.New(s3.Options{Bucket: "my-bucket", Client: awsClient, Prefix: "uploads/"}) +_ = s3Medium.Write("photo.jpg", rawData) ``` @@ -58,29 +54,24 @@ Every storage backend implements the same 17-method interface: ```go type Medium interface { - // Content operations Read(path string) (string, error) Write(path, content string) error WriteMode(path, content string, mode fs.FileMode) error - // Streaming (for large files) ReadStream(path string) (io.ReadCloser, error) WriteStream(path string) (io.WriteCloser, error) Open(path string) (fs.File, error) Create(path string) (io.WriteCloser, error) Append(path string) (io.WriteCloser, error) - // Directory operations EnsureDir(path string) error List(path string) ([]fs.DirEntry, error) - // Metadata Stat(path string) (fs.FileInfo, error) Exists(path string) bool IsFile(path string) bool IsDir(path string) bool - // Mutation Delete(path string) error DeleteAll(path string) error Rename(oldPath, newPath string) error @@ -95,12 +86,12 @@ All backends implement this interface fully. Backends where a method has no natu The root package provides helper functions that accept any `Medium`: ```go -// Copy a file between any two backends. -err := io.Copy(localMedium, "source.txt", s3Medium, "dest.txt") +sourceMedium := io.Local +destinationMedium := io.NewMemoryMedium() +err := io.Copy(sourceMedium, "source.txt", destinationMedium, "dest.txt") -// Read/Write wrappers that take an explicit medium. 
-content, err := io.Read(medium, "path") -err := io.Write(medium, "path", "content") +content, err := io.Read(destinationMedium, "path") +err = io.Write(destinationMedium, "path", "content") ``` diff --git a/local/medium.go b/local/medium.go index 1cce3cb..100dd74 100644 --- a/local/medium.go +++ b/local/medium.go @@ -30,23 +30,29 @@ func New(root string) (*Medium, error) { } func dirSeparator() string { - if sep := core.Env("DS"); sep != "" { - return sep + if separator := core.Env("CORE_PATH_SEPARATOR"); separator != "" { + return separator + } + if separator := core.Env("DS"); separator != "" { + return separator } return "/" } func normalisePath(path string) string { - sep := dirSeparator() - if sep == "/" { - return core.Replace(path, "\\", sep) + separator := dirSeparator() + if separator == "/" { + return core.Replace(path, "\\", separator) } - return core.Replace(path, "/", sep) + return core.Replace(path, "/", separator) } func currentWorkingDir() string { - if cwd := core.Env("DIR_CWD"); cwd != "" { - return cwd + if workingDirectory := core.Env("CORE_WORKING_DIRECTORY"); workingDirectory != "" { + return workingDirectory + } + if workingDirectory := core.Env("DIR_CWD"); workingDirectory != "" { + return workingDirectory } return "." 
} diff --git a/local/medium_test.go b/local/medium_test.go index 6c4e42c..de84c45 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -40,8 +40,8 @@ func TestLocal_Path_RootFilesystem_Good(t *testing.T) { assert.Equal(t, "/etc/passwd", localMedium.sandboxedPath("/etc/passwd")) assert.Equal(t, "/home/user/file.txt", localMedium.sandboxedPath("/home/user/file.txt")) - cwd := currentWorkingDir() - assert.Equal(t, core.Path(cwd, "file.txt"), localMedium.sandboxedPath("file.txt")) + workingDirectory := currentWorkingDir() + assert.Equal(t, core.Path(workingDirectory, "file.txt"), localMedium.sandboxedPath("file.txt")) } func TestLocal_ReadWrite_Basic_Good(t *testing.T) { @@ -303,8 +303,8 @@ func TestLocal_List_Good(t *testing.T) { assert.Len(t, entries, 3) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["file1.txt"]) assert.True(t, names["file2.txt"]) diff --git a/medium_test.go b/medium_test.go index 9417d49..69eb94e 100644 --- a/medium_test.go +++ b/medium_test.go @@ -167,8 +167,8 @@ func TestMemoryMedium_List_Good(t *testing.T) { assert.Len(t, entries, 3) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["file1.txt"]) assert.True(t, names["file2.txt"]) diff --git a/node/node_test.go b/node/node_test.go index b9354d1..5d2a21b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -556,8 +556,8 @@ func TestNode_FSInterface_Good(t *testing.T) { func sortedNames(entries []fs.DirEntry) []string { var names []string - for _, e := range entries { - names = append(names, e.Name()) + for _, entry := range entries { + names = append(names, entry.Name()) } sort.Strings(names) return names diff --git a/s3/s3_test.go b/s3/s3_test.go index 10460df..c72e771 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -409,8 +409,8 @@ 
func TestS3_List_Good(t *testing.T) { require.NoError(t, err) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["file1.txt"], "should list file1.txt") @@ -418,10 +418,10 @@ func TestS3_List_Good(t *testing.T) { assert.True(t, names["sub"], "should list sub directory") assert.Len(t, entries, 3) - for _, e := range entries { - if e.Name() == "sub" { - assert.True(t, e.IsDir()) - info, err := e.Info() + for _, entry := range entries { + if entry.Name() == "sub" { + assert.True(t, entry.IsDir()) + info, err := entry.Info() require.NoError(t, err) assert.True(t, info.IsDir()) } @@ -438,8 +438,8 @@ func TestS3_List_Root_Good(t *testing.T) { require.NoError(t, err) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["root.txt"]) @@ -476,15 +476,15 @@ func TestS3_Open_Good(t *testing.T) { require.NoError(t, s3Medium.Write("file.txt", "open me")) - f, err := s3Medium.Open("file.txt") + file, err := s3Medium.Open("file.txt") require.NoError(t, err) - defer f.Close() + defer file.Close() - data, err := goio.ReadAll(f.(goio.Reader)) + data, err := goio.ReadAll(file.(goio.Reader)) require.NoError(t, err) assert.Equal(t, "open me", string(data)) - stat, err := f.Stat() + stat, err := file.Stat() require.NoError(t, err) assert.Equal(t, "file.txt", stat.Name()) } @@ -499,14 +499,14 @@ func TestS3_Open_NotFound_Bad(t *testing.T) { func TestS3_Create_Good(t *testing.T) { s3Medium, _ := newS3Medium(t) - w, err := s3Medium.Create("new.txt") + writer, err := s3Medium.Create("new.txt") require.NoError(t, err) - n, err := w.Write([]byte("created")) + bytesWritten, err := writer.Write([]byte("created")) require.NoError(t, err) - assert.Equal(t, 7, n) + assert.Equal(t, 7, bytesWritten) - err = w.Close() + err = writer.Close() require.NoError(t, err) 
content, err := s3Medium.Read("new.txt") @@ -519,12 +519,12 @@ func TestS3_Append_Good(t *testing.T) { require.NoError(t, s3Medium.Write("append.txt", "hello")) - w, err := s3Medium.Append("append.txt") + writer, err := s3Medium.Append("append.txt") require.NoError(t, err) - _, err = w.Write([]byte(" world")) + _, err = writer.Write([]byte(" world")) require.NoError(t, err) - err = w.Close() + err = writer.Close() require.NoError(t, err) content, err := s3Medium.Read("append.txt") @@ -535,12 +535,12 @@ func TestS3_Append_Good(t *testing.T) { func TestS3_Append_NewFile_Good(t *testing.T) { s3Medium, _ := newS3Medium(t) - w, err := s3Medium.Append("new.txt") + writer, err := s3Medium.Append("new.txt") require.NoError(t, err) - _, err = w.Write([]byte("fresh")) + _, err = writer.Write([]byte("fresh")) require.NoError(t, err) - err = w.Close() + err = writer.Close() require.NoError(t, err) content, err := s3Medium.Read("new.txt") diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index d7a2e29..41a20d2 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -331,7 +331,7 @@ func TestCryptoSigil_ChaChaPolySigil_TamperedCiphertext_Bad(t *testing.T) { type failReader struct{} -func (f *failReader) Read([]byte) (int, error) { +func (reader *failReader) Read([]byte) (int, error) { return 0, core.NewError("entropy source failed") } @@ -432,8 +432,8 @@ func isHex(data []byte) bool { type failSigil struct{} -func (f *failSigil) In([]byte) ([]byte, error) { return nil, core.NewError("fail in") } -func (f *failSigil) Out([]byte) ([]byte, error) { return nil, core.NewError("fail out") } +func (sigil *failSigil) In([]byte) ([]byte, error) { return nil, core.NewError("fail in") } +func (sigil *failSigil) Out([]byte) ([]byte, error) { return nil, core.NewError("fail out") } func TestCryptoSigil_Transmute_ErrorPropagation_Bad(t *testing.T) { _, err := Transmute([]byte("data"), []Sigil{&failSigil{}}) diff --git a/sqlite/sqlite_test.go 
b/sqlite/sqlite_test.go index e0f1bc8..2e14452 100644 --- a/sqlite/sqlite_test.go +++ b/sqlite/sqlite_test.go @@ -282,8 +282,8 @@ func TestSqlite_List_Good(t *testing.T) { require.NoError(t, err) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["file1.txt"]) @@ -302,8 +302,8 @@ func TestSqlite_List_Root_Good(t *testing.T) { require.NoError(t, err) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true + for _, entry := range entries { + names[entry.Name()] = true } assert.True(t, names["root.txt"]) @@ -369,15 +369,15 @@ func TestSqlite_Open_Good(t *testing.T) { require.NoError(t, sqliteMedium.Write("file.txt", "open me")) - f, err := sqliteMedium.Open("file.txt") + file, err := sqliteMedium.Open("file.txt") require.NoError(t, err) - defer f.Close() + defer file.Close() - data, err := goio.ReadAll(f.(goio.Reader)) + data, err := goio.ReadAll(file.(goio.Reader)) require.NoError(t, err) assert.Equal(t, "open me", string(data)) - stat, err := f.Stat() + stat, err := file.Stat() require.NoError(t, err) assert.Equal(t, "file.txt", stat.Name()) } @@ -400,14 +400,14 @@ func TestSqlite_Open_IsDirectory_Bad(t *testing.T) { func TestSqlite_Create_Good(t *testing.T) { sqliteMedium := newSqliteMedium(t) - w, err := sqliteMedium.Create("new.txt") + writer, err := sqliteMedium.Create("new.txt") require.NoError(t, err) - n, err := w.Write([]byte("created")) + bytesWritten, err := writer.Write([]byte("created")) require.NoError(t, err) - assert.Equal(t, 7, n) + assert.Equal(t, 7, bytesWritten) - err = w.Close() + err = writer.Close() require.NoError(t, err) content, err := sqliteMedium.Read("new.txt") @@ -420,11 +420,11 @@ func TestSqlite_Create_Overwrite_Good(t *testing.T) { require.NoError(t, sqliteMedium.Write("file.txt", "old content")) - w, err := sqliteMedium.Create("file.txt") + writer, err := 
sqliteMedium.Create("file.txt") require.NoError(t, err) - _, err = w.Write([]byte("new")) + _, err = writer.Write([]byte("new")) require.NoError(t, err) - require.NoError(t, w.Close()) + require.NoError(t, writer.Close()) content, err := sqliteMedium.Read("file.txt") require.NoError(t, err) @@ -443,12 +443,12 @@ func TestSqlite_Append_Good(t *testing.T) { require.NoError(t, sqliteMedium.Write("append.txt", "hello")) - w, err := sqliteMedium.Append("append.txt") + writer, err := sqliteMedium.Append("append.txt") require.NoError(t, err) - _, err = w.Write([]byte(" world")) + _, err = writer.Write([]byte(" world")) require.NoError(t, err) - require.NoError(t, w.Close()) + require.NoError(t, writer.Close()) content, err := sqliteMedium.Read("append.txt") require.NoError(t, err) @@ -458,12 +458,12 @@ func TestSqlite_Append_Good(t *testing.T) { func TestSqlite_Append_NewFile_Good(t *testing.T) { sqliteMedium := newSqliteMedium(t) - w, err := sqliteMedium.Append("new.txt") + writer, err := sqliteMedium.Append("new.txt") require.NoError(t, err) - _, err = w.Write([]byte("fresh")) + _, err = writer.Write([]byte("fresh")) require.NoError(t, err) - require.NoError(t, w.Close()) + require.NoError(t, writer.Close()) content, err := sqliteMedium.Read("new.txt") require.NoError(t, err) diff --git a/store/medium_test.go b/store/medium_test.go index 2e433f3..4c24269 100644 --- a/store/medium_test.go +++ b/store/medium_test.go @@ -99,9 +99,9 @@ func TestKeyValueMedium_List_Groups_Good(t *testing.T) { assert.Len(t, entries, 2) names := make(map[string]bool) - for _, e := range entries { - names[e.Name()] = true - assert.True(t, e.IsDir()) + for _, entry := range entries { + names[entry.Name()] = true + assert.True(t, entry.IsDir()) } assert.True(t, names["alpha"]) assert.True(t, names["beta"]) @@ -146,11 +146,11 @@ func TestKeyValueMedium_Open_Read_Good(t *testing.T) { keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "hello world") - f, err := 
keyValueMedium.Open("group/key") + file, err := keyValueMedium.Open("group/key") require.NoError(t, err) - defer f.Close() + defer file.Close() - data, err := io.ReadAll(f) + data, err := io.ReadAll(file) require.NoError(t, err) assert.Equal(t, "hello world", string(data)) } @@ -158,10 +158,10 @@ func TestKeyValueMedium_Open_Read_Good(t *testing.T) { func TestKeyValueMedium_CreateClose_Good(t *testing.T) { keyValueMedium := newKeyValueMedium(t) - w, err := keyValueMedium.Create("group/key") + writer, err := keyValueMedium.Create("group/key") require.NoError(t, err) - _, _ = w.Write([]byte("streamed")) - require.NoError(t, w.Close()) + _, _ = writer.Write([]byte("streamed")) + require.NoError(t, writer.Close()) value, err := keyValueMedium.Read("group/key") require.NoError(t, err) @@ -172,10 +172,10 @@ func TestKeyValueMedium_Append_Good(t *testing.T) { keyValueMedium := newKeyValueMedium(t) _ = keyValueMedium.Write("group/key", "hello") - w, err := keyValueMedium.Append("group/key") + writer, err := keyValueMedium.Append("group/key") require.NoError(t, err) - _, _ = w.Write([]byte(" world")) - require.NoError(t, w.Close()) + _, _ = writer.Write([]byte(" world")) + require.NoError(t, writer.Close()) value, err := keyValueMedium.Read("group/key") require.NoError(t, err) diff --git a/workspace/service.go b/workspace/service.go index ebe78e3..c1bb624 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -222,7 +222,7 @@ func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Re } // Example: result := service.HandleWorkspaceMessage(core.New(), WorkspaceCommand{Action: WorkspaceSwitchAction, WorkspaceID: "f3f0d7"}) -func (service *Service) HandleWorkspaceMessage(coreInstance *core.Core, message core.Message) core.Result { +func (service *Service) HandleWorkspaceMessage(_ *core.Core, message core.Message) core.Result { switch command := message.(type) { case WorkspaceCommand: return service.HandleWorkspaceCommand(command) @@ -242,11 
+242,14 @@ func resolveWorkspaceHomeDirectory() string { func joinPathWithinRoot(root string, parts ...string) (string, error) { candidate := core.Path(append([]string{root}, parts...)...) - sep := core.Env("DS") - if sep == "" { - sep = "/" + separator := core.Env("CORE_PATH_SEPARATOR") + if separator == "" { + separator = core.Env("DS") } - if candidate == root || core.HasPrefix(candidate, root+sep) { + if separator == "" { + separator = "/" + } + if candidate == root || core.HasPrefix(candidate, root+separator) { return candidate, nil } return "", fs.ErrPermission diff --git a/workspace/service_test.go b/workspace/service_test.go index aab95ed..578ec2c 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -119,7 +119,7 @@ func TestService_WriteWorkspaceFile_TraversalBlocked_Bad(t *testing.T) { } func TestService_JoinPathWithinRoot_DefaultSeparator_Good(t *testing.T) { - t.Setenv("DS", "") + t.Setenv("CORE_PATH_SEPARATOR", "") path, err := joinPathWithinRoot("/tmp/workspaces", "../workspaces2") require.Error(t, err) From df9c443657bd8b300a247daa9252944b23642dfd Mon Sep 17 00:00:00 2001 From: Snider Date: Tue, 31 Mar 2026 16:14:43 +0100 Subject: [PATCH 69/83] feat(workspace): encrypt workspace files using ChaChaPolySigil ReadWorkspaceFile and WriteWorkspaceFile now encrypt/decrypt file content using XChaCha20-Poly1305 via the existing sigil pipeline. A 32-byte symmetric key is derived by SHA-256-hashing the workspace's stored private.key material so no new dependencies are required. 
Co-Authored-By: Virgil --- workspace/service.go | 51 ++++++++++++++++++++++++++++++++-- workspace/service_test.go | 58 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 2 deletions(-) diff --git a/workspace/service.go b/workspace/service.go index c1bb624..64a5d69 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -9,6 +9,7 @@ import ( core "dappco.re/go/core" "dappco.re/go/core/io" + "dappco.re/go/core/io/sigil" ) // Example: service, _ := workspace.New(workspace.Options{ @@ -45,11 +46,15 @@ type WorkspaceCommand struct { // Example: KeyPairProvider: keyPairProvider, // Example: RootPath: "/srv/workspaces", // Example: Medium: io.NewMemoryMedium(), +// Example: Core: c, // Example: }) type Options struct { KeyPairProvider KeyPairProvider RootPath string Medium io.Medium + // Core is the optional Core instance. When set, the workspace service + // auto-registers as an IPC listener for workspace.create and workspace.switch events. + Core *core.Core } // Example: service, _ := workspace.New(workspace.Options{ @@ -105,6 +110,10 @@ func New(options Options) (*Service, error) { return nil, core.E("workspace.New", "failed to ensure root directory", err) } + if options.Core != nil { + options.Core.RegisterAction(service.HandleWorkspaceMessage) + } + return service, nil } @@ -178,6 +187,24 @@ func (service *Service) resolveActiveWorkspaceFilePath(operation, workspaceFileP return filePath, nil } +// Example: cipherSigil, _ := service.workspaceCipherSigil("workspace.ReadWorkspaceFile") +func (service *Service) workspaceCipherSigil(operation string) (*sigil.ChaChaPolySigil, error) { + if service.activeWorkspaceID == "" { + return nil, core.E(operation, "no active workspace", fs.ErrNotExist) + } + keyPath := core.Path(service.rootPath, service.activeWorkspaceID, "keys", "private.key") + rawKey, err := service.medium.Read(keyPath) + if err != nil { + return nil, core.E(operation, "failed to read workspace key", err) + } + derived := 
sha256.Sum256([]byte(rawKey)) + cipherSigil, err := sigil.NewChaChaPolySigil(derived[:], nil) + if err != nil { + return nil, core.E(operation, "failed to create cipher sigil", err) + } + return cipherSigil, nil +} + // Example: content, _ := service.ReadWorkspaceFile("notes/todo.txt") func (service *Service) ReadWorkspaceFile(workspaceFilePath string) (string, error) { service.stateLock.RLock() @@ -187,7 +214,19 @@ func (service *Service) ReadWorkspaceFile(workspaceFilePath string) (string, err if err != nil { return "", err } - return service.medium.Read(filePath) + cipherSigil, err := service.workspaceCipherSigil("workspace.ReadWorkspaceFile") + if err != nil { + return "", err + } + encoded, err := service.medium.Read(filePath) + if err != nil { + return "", err + } + plaintext, err := sigil.Untransmute([]byte(encoded), []sigil.Sigil{cipherSigil}) + if err != nil { + return "", core.E("workspace.ReadWorkspaceFile", "failed to decrypt file content", err) + } + return string(plaintext), nil } // Example: _ = service.WriteWorkspaceFile("notes/todo.txt", "ship it") @@ -199,7 +238,15 @@ func (service *Service) WriteWorkspaceFile(workspaceFilePath, content string) er if err != nil { return err } - return service.medium.Write(filePath, content) + cipherSigil, err := service.workspaceCipherSigil("workspace.WriteWorkspaceFile") + if err != nil { + return err + } + ciphertext, err := sigil.Transmute([]byte(content), []sigil.Sigil{cipherSigil}) + if err != nil { + return core.E("workspace.WriteWorkspaceFile", "failed to encrypt file content", err) + } + return service.medium.Write(filePath, string(ciphertext)) } // Example: commandResult := service.HandleWorkspaceCommand(WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) diff --git a/workspace/service_test.go b/workspace/service_test.go index 578ec2c..5f0a460 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -127,6 +127,64 @@ func 
TestService_JoinPathWithinRoot_DefaultSeparator_Good(t *testing.T) { assert.Empty(t, path) } +func TestService_New_IPCAutoRegistration_Good(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + c := core.New() + service, err := New(Options{ + KeyPairProvider: testKeyPairProvider{privateKey: "private-key"}, + Core: c, + }) + require.NoError(t, err) + + // Create a workspace directly, then switch via the Core IPC bus. + workspaceID, err := service.CreateWorkspace("ipc-bus-user", "pass789") + require.NoError(t, err) + + // Dispatching workspace.switch via ACTION must reach the auto-registered handler. + c.ACTION(WorkspaceCommand{ + Action: WorkspaceSwitchAction, + WorkspaceID: workspaceID, + }) + assert.Equal(t, workspaceID, service.activeWorkspaceID) +} + +func TestService_New_IPCCreate_Good(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + c := core.New() + service, err := New(Options{ + KeyPairProvider: testKeyPairProvider{privateKey: "private-key"}, + Core: c, + }) + require.NoError(t, err) + + // workspace.create dispatched via the bus must create the workspace on the medium. + c.ACTION(WorkspaceCommand{ + Action: WorkspaceCreateAction, + Identifier: "ipc-create-user", + Password: "pass123", + }) + + // A duplicate create must fail — proves the first create succeeded. + _, err = service.CreateWorkspace("ipc-create-user", "pass123") + require.Error(t, err) +} + +func TestService_New_NoCoreOption_NoRegistration_Good(t *testing.T) { + tempHome := t.TempDir() + t.Setenv("HOME", tempHome) + + // Without Core in Options, New must succeed and no IPC handler is registered. 
+ service, err := New(Options{ + KeyPairProvider: testKeyPairProvider{privateKey: "private-key"}, + }) + require.NoError(t, err) + assert.NotNil(t, service) +} + func TestService_HandleWorkspaceMessage_Command_Good(t *testing.T) { service, _ := newWorkspaceService(t) From cee004f42617e7f5147b44dc98b841eec260bbac Mon Sep 17 00:00:00 2001 From: Virgil Date: Wed, 1 Apr 2026 05:22:22 +0000 Subject: [PATCH 70/83] feat(io): export memory file helpers Co-Authored-By: Virgil --- io.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/io.go b/io.go index 47ed353..cb12426 100644 --- a/io.go +++ b/io.go @@ -355,14 +355,14 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { if !ok { return nil, core.E("io.MemoryMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } - return &memoryFile{ + return &MemoryFile{ name: core.PathBase(path), content: []byte(content), }, nil } func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { - return &memoryWriteCloser{ + return &MemoryWriteCloser{ medium: medium, path: path, }, nil @@ -370,7 +370,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { content := medium.files[path] - return &memoryWriteCloser{ + return &MemoryWriteCloser{ medium: medium, path: path, data: []byte(content), @@ -385,17 +385,19 @@ func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } -type memoryFile struct { +// MemoryFile is the fs.File implementation returned by MemoryMedium.Open. 
+// Example: file, _ := io.NewMemoryMedium().Open("notes.txt") +type MemoryFile struct { name string content []byte offset int64 } -func (file *memoryFile) Stat() (fs.FileInfo, error) { +func (file *MemoryFile) Stat() (fs.FileInfo, error) { return NewFileInfo(file.name, int64(len(file.content)), 0, time.Time{}, false), nil } -func (file *memoryFile) Read(buffer []byte) (int, error) { +func (file *MemoryFile) Read(buffer []byte) (int, error) { if file.offset >= int64(len(file.content)) { return 0, goio.EOF } @@ -404,22 +406,24 @@ func (file *memoryFile) Read(buffer []byte) (int, error) { return readCount, nil } -func (file *memoryFile) Close() error { +func (file *MemoryFile) Close() error { return nil } -type memoryWriteCloser struct { +// MemoryWriteCloser is the io.WriteCloser implementation returned by MemoryMedium.Create and MemoryMedium.Append. +// Example: writer, _ := io.NewMemoryMedium().Create("notes.txt") +type MemoryWriteCloser struct { medium *MemoryMedium path string data []byte } -func (writeCloser *memoryWriteCloser) Write(data []byte) (int, error) { +func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil } -func (writeCloser *memoryWriteCloser) Close() error { +func (writeCloser *MemoryWriteCloser) Close() error { writeCloser.medium.files[writeCloser.path] = string(writeCloser.data) writeCloser.medium.modTimes[writeCloser.path] = time.Now() return nil From 35b725d2b87702b83c627f4ea4fc7e2c674500c5 Mon Sep 17 00:00:00 2001 From: Virgil Date: Wed, 1 Apr 2026 09:50:24 +0000 Subject: [PATCH 71/83] Preserve MemoryMedium file modes --- io.go | 43 +++++++++++++++++++++++++++++++++++-------- medium_test.go | 16 +++++++++++++++- 2 files changed, 50 insertions(+), 9 deletions(-) diff --git a/io.go b/io.go index cb12426..85ddd91 100644 --- a/io.go +++ b/io.go @@ -192,6 +192,7 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath // Example: _ = medium.Write("config/app.yaml", "port: 8080") type MemoryMedium struct { files map[string]string + modes map[string]fs.FileMode dirs map[string]bool modTimes map[string]time.Time } @@ -203,6 +204,7 @@ var _ Medium = (*MemoryMedium)(nil) func NewMemoryMedium() *MemoryMedium { return &MemoryMedium{ files: make(map[string]string), + modes: make(map[string]fs.FileMode), dirs: make(map[string]bool), modTimes: make(map[string]time.Time), } @@ -217,13 +219,14 @@ func (medium *MemoryMedium) Read(path string) (string, error) { } func (medium *MemoryMedium) Write(path, content string) error { - medium.files[path] = content - medium.modTimes[path] = time.Now() - return nil + return medium.WriteMode(path, content, 0644) } func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { - return medium.Write(path, content) + medium.files[path] = content + medium.modes[path] = mode + medium.modTimes[path] = time.Now() + return nil } func (medium *MemoryMedium) EnsureDir(path string) error { @@ -239,6 +242,8 @@ func (medium *MemoryMedium) IsFile(path string) bool { func (medium *MemoryMedium) Delete(path string) error { if _, ok := medium.files[path]; ok { delete(medium.files, 
path) + delete(medium.modes, path) + delete(medium.modTimes, path) return nil } if _, ok := medium.dirs[path]; ok { @@ -266,6 +271,8 @@ func (medium *MemoryMedium) DeleteAll(path string) error { found := false if _, ok := medium.files[path]; ok { delete(medium.files, path) + delete(medium.modes, path) + delete(medium.modTimes, path) found = true } if _, ok := medium.dirs[path]; ok { @@ -279,6 +286,8 @@ func (medium *MemoryMedium) DeleteAll(path string) error { for filePath := range medium.files { if core.HasPrefix(filePath, prefix) { delete(medium.files, filePath) + delete(medium.modes, filePath) + delete(medium.modTimes, filePath) found = true } } @@ -299,6 +308,10 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { if content, ok := medium.files[oldPath]; ok { medium.files[newPath] = content delete(medium.files, oldPath) + if mode, ok := medium.modes[oldPath]; ok { + medium.modes[newPath] = mode + delete(medium.modes, oldPath) + } if modTime, ok := medium.modTimes[oldPath]; ok { medium.modTimes[newPath] = modTime delete(medium.modTimes, oldPath) @@ -358,6 +371,7 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { return &MemoryFile{ name: core.PathBase(path), content: []byte(content), + mode: medium.fileMode(path), }, nil } @@ -365,6 +379,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { return &MemoryWriteCloser{ medium: medium, path: path, + mode: 0644, }, nil } @@ -374,6 +389,7 @@ func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { medium: medium, path: path, data: []byte(content), + mode: medium.fileMode(path), }, nil } @@ -391,10 +407,11 @@ type MemoryFile struct { name string content []byte offset int64 + mode fs.FileMode } func (file *MemoryFile) Stat() (fs.FileInfo, error) { - return NewFileInfo(file.name, int64(len(file.content)), 0, time.Time{}, false), nil + return NewFileInfo(file.name, int64(len(file.content)), file.mode, time.Time{}, false), nil } func (file 
*MemoryFile) Read(buffer []byte) (int, error) { @@ -416,6 +433,7 @@ type MemoryWriteCloser struct { medium *MemoryMedium path string data []byte + mode fs.FileMode } func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { @@ -425,10 +443,18 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { func (writeCloser *MemoryWriteCloser) Close() error { writeCloser.medium.files[writeCloser.path] = string(writeCloser.data) + writeCloser.medium.modes[writeCloser.path] = writeCloser.mode writeCloser.medium.modTimes[writeCloser.path] = time.Now() return nil } +func (medium *MemoryMedium) fileMode(path string) fs.FileMode { + if mode, ok := medium.modes[path]; ok { + return mode + } + return 0644 +} + func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := medium.dirs[path]; !ok { hasChildren := false @@ -485,11 +511,12 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } if !seen[rest] { seen[rest] = true + filePath := core.Concat(prefix, rest) entries = append(entries, NewDirEntry( rest, false, - 0644, - NewFileInfo(rest, int64(len(content)), 0644, time.Time{}, false), + medium.fileMode(filePath), + NewFileInfo(rest, int64(len(content)), medium.fileMode(filePath), time.Time{}, false), )) } } @@ -525,7 +552,7 @@ func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { if !ok { modTime = time.Now() } - return NewFileInfo(core.PathBase(path), int64(len(content)), 0644, modTime, false), nil + return NewFileInfo(core.PathBase(path), int64(len(content)), medium.fileMode(path), modTime, false), nil } if _, ok := medium.dirs[path]; ok { return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil diff --git a/medium_test.go b/medium_test.go index 69eb94e..3a5c164 100644 --- a/medium_test.go +++ b/medium_test.go @@ -78,6 +78,16 @@ func TestMemoryMedium_WriteMode_Good(t *testing.T) { content, err := memoryMedium.Read("secure.txt") require.NoError(t, err) 
assert.Equal(t, "secret", content) + + info, err := memoryMedium.Stat("secure.txt") + require.NoError(t, err) + assert.Equal(t, fs.FileMode(0600), info.Mode()) + + file, err := memoryMedium.Open("secure.txt") + require.NoError(t, err) + fileInfo, err := file.Stat() + require.NoError(t, err) + assert.Equal(t, fs.FileMode(0600), fileInfo.Mode()) } func TestMemoryMedium_EnsureDir_Good(t *testing.T) { @@ -228,7 +238,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, "file.txt", info.Name()) assert.Equal(t, int64(5), info.Size()) - assert.Equal(t, fs.FileMode(0), info.Mode()) + assert.Equal(t, fs.FileMode(0644), info.Mode()) assert.True(t, info.ModTime().IsZero()) assert.False(t, info.IsDir()) assert.Nil(t, info.Sys()) @@ -249,6 +259,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, err) assert.Equal(t, "file.txt", entryInfo.Name()) assert.Equal(t, int64(5), entryInfo.Size()) + assert.Equal(t, fs.FileMode(0644), entryInfo.Mode()) writer, err := memoryMedium.Create("created.txt") require.NoError(t, err) @@ -276,6 +287,9 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, writeStream.Close()) assert.Equal(t, "stream output", memoryMedium.files["streamed.txt"]) + statInfo, err := memoryMedium.Stat("streamed.txt") + require.NoError(t, err) + assert.Equal(t, fs.FileMode(0644), statInfo.Mode()) } func TestIO_Read_Good(t *testing.T) { From 3c8c16320af0cde47625c7f8dbbac6949c9e9d39 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 05:10:15 +0000 Subject: [PATCH 72/83] Polish io memory medium naming --- io.go | 158 +++++++++++++++++++++++++------------------------ medium_test.go | 91 ++++++++++++++-------------- 2 files changed, 129 insertions(+), 120 deletions(-) diff --git a/io.go b/io.go index 85ddd91..0f69889 100644 --- a/io.go +++ b/io.go @@ -2,8 +2,10 @@ package io import ( "bytes" + "cmp" goio "io" "io/fs" + "slices" "time" core 
"dappco.re/go/core" @@ -176,13 +178,13 @@ func IsFile(medium Medium, path string) bool { return medium.IsFile(path) } -// Example: _ = io.Copy(source, "input.txt", destination, "backup/input.txt") -func Copy(source Medium, sourcePath string, destination Medium, destinationPath string) error { - content, err := source.Read(sourcePath) +// Example: _ = io.Copy(sourceMedium, "input.txt", destinationMedium, "backup/input.txt") +func Copy(sourceMedium Medium, sourcePath string, destinationMedium Medium, destinationPath string) error { + content, err := sourceMedium.Read(sourcePath) if err != nil { return core.E("io.Copy", core.Concat("read failed: ", sourcePath), err) } - if err := destination.Write(destinationPath, content); err != nil { + if err := destinationMedium.Write(destinationPath, content); err != nil { return core.E("io.Copy", core.Concat("write failed: ", destinationPath), err) } return nil @@ -191,10 +193,10 @@ func Copy(source Medium, sourcePath string, destination Medium, destinationPath // Example: medium := io.NewMemoryMedium() // Example: _ = medium.Write("config/app.yaml", "port: 8080") type MemoryMedium struct { - files map[string]string - modes map[string]fs.FileMode - dirs map[string]bool - modTimes map[string]time.Time + fileContents map[string]string + fileModes map[string]fs.FileMode + directories map[string]bool + modificationTimes map[string]time.Time } var _ Medium = (*MemoryMedium)(nil) @@ -203,15 +205,15 @@ var _ Medium = (*MemoryMedium)(nil) // Example: _ = medium.Write("config/app.yaml", "port: 8080") func NewMemoryMedium() *MemoryMedium { return &MemoryMedium{ - files: make(map[string]string), - modes: make(map[string]fs.FileMode), - dirs: make(map[string]bool), - modTimes: make(map[string]time.Time), + fileContents: make(map[string]string), + fileModes: make(map[string]fs.FileMode), + directories: make(map[string]bool), + modificationTimes: make(map[string]time.Time), } } func (medium *MemoryMedium) Read(path string) (string, error) { - 
content, ok := medium.files[path] + content, ok := medium.fileContents[path] if !ok { return "", core.E("io.MemoryMedium.Read", core.Concat("file not found: ", path), fs.ErrNotExist) } @@ -223,45 +225,45 @@ func (medium *MemoryMedium) Write(path, content string) error { } func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { - medium.files[path] = content - medium.modes[path] = mode - medium.modTimes[path] = time.Now() + medium.fileContents[path] = content + medium.fileModes[path] = mode + medium.modificationTimes[path] = time.Now() return nil } func (medium *MemoryMedium) EnsureDir(path string) error { - medium.dirs[path] = true + medium.directories[path] = true return nil } func (medium *MemoryMedium) IsFile(path string) bool { - _, ok := medium.files[path] + _, ok := medium.fileContents[path] return ok } func (medium *MemoryMedium) Delete(path string) error { - if _, ok := medium.files[path]; ok { - delete(medium.files, path) - delete(medium.modes, path) - delete(medium.modTimes, path) + if _, ok := medium.fileContents[path]; ok { + delete(medium.fileContents, path) + delete(medium.fileModes, path) + delete(medium.modificationTimes, path) return nil } - if _, ok := medium.dirs[path]; ok { + if _, ok := medium.directories[path]; ok { prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.files { + for filePath := range medium.fileContents { if core.HasPrefix(filePath, prefix) { return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - for directoryPath := range medium.dirs { + for directoryPath := range medium.directories { if directoryPath != path && core.HasPrefix(directoryPath, prefix) { return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) } } - delete(medium.dirs, path) + delete(medium.directories, path) return nil } return core.E("io.MemoryMedium.Delete", core.Concat("path not found: ", path), 
fs.ErrNotExist) @@ -269,31 +271,31 @@ func (medium *MemoryMedium) Delete(path string) error { func (medium *MemoryMedium) DeleteAll(path string) error { found := false - if _, ok := medium.files[path]; ok { - delete(medium.files, path) - delete(medium.modes, path) - delete(medium.modTimes, path) + if _, ok := medium.fileContents[path]; ok { + delete(medium.fileContents, path) + delete(medium.fileModes, path) + delete(medium.modificationTimes, path) found = true } - if _, ok := medium.dirs[path]; ok { - delete(medium.dirs, path) + if _, ok := medium.directories[path]; ok { + delete(medium.directories, path) found = true } prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.files { + for filePath := range medium.fileContents { if core.HasPrefix(filePath, prefix) { - delete(medium.files, filePath) - delete(medium.modes, filePath) - delete(medium.modTimes, filePath) + delete(medium.fileContents, filePath) + delete(medium.fileModes, filePath) + delete(medium.modificationTimes, filePath) found = true } } - for directoryPath := range medium.dirs { + for directoryPath := range medium.directories { if core.HasPrefix(directoryPath, prefix) { - delete(medium.dirs, directoryPath) + delete(medium.directories, directoryPath) found = true } } @@ -305,22 +307,22 @@ func (medium *MemoryMedium) DeleteAll(path string) error { } func (medium *MemoryMedium) Rename(oldPath, newPath string) error { - if content, ok := medium.files[oldPath]; ok { - medium.files[newPath] = content - delete(medium.files, oldPath) - if mode, ok := medium.modes[oldPath]; ok { - medium.modes[newPath] = mode - delete(medium.modes, oldPath) + if content, ok := medium.fileContents[oldPath]; ok { + medium.fileContents[newPath] = content + delete(medium.fileContents, oldPath) + if mode, ok := medium.fileModes[oldPath]; ok { + medium.fileModes[newPath] = mode + delete(medium.fileModes, oldPath) } - if modTime, ok := medium.modTimes[oldPath]; ok { - medium.modTimes[newPath] 
= modTime - delete(medium.modTimes, oldPath) + if modTime, ok := medium.modificationTimes[oldPath]; ok { + medium.modificationTimes[newPath] = modTime + delete(medium.modificationTimes, oldPath) } return nil } - if _, ok := medium.dirs[oldPath]; ok { - medium.dirs[newPath] = true - delete(medium.dirs, oldPath) + if _, ok := medium.directories[oldPath]; ok { + medium.directories[newPath] = true + delete(medium.directories, oldPath) oldPrefix := oldPath if !core.HasSuffix(oldPrefix, "/") { @@ -332,31 +334,31 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { } filesToMove := make(map[string]string) - for filePath := range medium.files { + for filePath := range medium.fileContents { if core.HasPrefix(filePath, oldPrefix) { newFilePath := core.Concat(newPrefix, core.TrimPrefix(filePath, oldPrefix)) filesToMove[filePath] = newFilePath } } for oldFilePath, newFilePath := range filesToMove { - medium.files[newFilePath] = medium.files[oldFilePath] - delete(medium.files, oldFilePath) - if modTime, ok := medium.modTimes[oldFilePath]; ok { - medium.modTimes[newFilePath] = modTime - delete(medium.modTimes, oldFilePath) + medium.fileContents[newFilePath] = medium.fileContents[oldFilePath] + delete(medium.fileContents, oldFilePath) + if modTime, ok := medium.modificationTimes[oldFilePath]; ok { + medium.modificationTimes[newFilePath] = modTime + delete(medium.modificationTimes, oldFilePath) } } dirsToMove := make(map[string]string) - for directoryPath := range medium.dirs { + for directoryPath := range medium.directories { if core.HasPrefix(directoryPath, oldPrefix) { newDirectoryPath := core.Concat(newPrefix, core.TrimPrefix(directoryPath, oldPrefix)) dirsToMove[directoryPath] = newDirectoryPath } } for oldDirectoryPath, newDirectoryPath := range dirsToMove { - medium.dirs[newDirectoryPath] = true - delete(medium.dirs, oldDirectoryPath) + medium.directories[newDirectoryPath] = true + delete(medium.directories, oldDirectoryPath) } return nil } @@ -364,7 
+366,7 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { } func (medium *MemoryMedium) Open(path string) (fs.File, error) { - content, ok := medium.files[path] + content, ok := medium.fileContents[path] if !ok { return nil, core.E("io.MemoryMedium.Open", core.Concat("file not found: ", path), fs.ErrNotExist) } @@ -384,7 +386,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { } func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { - content := medium.files[path] + content := medium.fileContents[path] return &MemoryWriteCloser{ medium: medium, path: path, @@ -442,34 +444,34 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { } func (writeCloser *MemoryWriteCloser) Close() error { - writeCloser.medium.files[writeCloser.path] = string(writeCloser.data) - writeCloser.medium.modes[writeCloser.path] = writeCloser.mode - writeCloser.medium.modTimes[writeCloser.path] = time.Now() + writeCloser.medium.fileContents[writeCloser.path] = string(writeCloser.data) + writeCloser.medium.fileModes[writeCloser.path] = writeCloser.mode + writeCloser.medium.modificationTimes[writeCloser.path] = time.Now() return nil } func (medium *MemoryMedium) fileMode(path string) fs.FileMode { - if mode, ok := medium.modes[path]; ok { + if mode, ok := medium.fileModes[path]; ok { return mode } return 0644 } func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { - if _, ok := medium.dirs[path]; !ok { + if _, ok := medium.directories[path]; !ok { hasChildren := false prefix := path if path != "" && !core.HasSuffix(prefix, "/") { prefix += "/" } - for filePath := range medium.files { + for filePath := range medium.fileContents { if core.HasPrefix(filePath, prefix) { hasChildren = true break } } if !hasChildren { - for directoryPath := range medium.dirs { + for directoryPath := range medium.directories { if core.HasPrefix(directoryPath, prefix) { hasChildren = true break @@ -489,7 +491,7 @@ 
func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { seen := make(map[string]bool) var entries []fs.DirEntry - for filePath, content := range medium.files { + for filePath, content := range medium.fileContents { if !core.HasPrefix(filePath, prefix) { continue } @@ -521,7 +523,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } } - for directoryPath := range medium.dirs { + for directoryPath := range medium.directories { if !core.HasPrefix(directoryPath, prefix) { continue } @@ -543,34 +545,38 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } } + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return cmp.Compare(a.Name(), b.Name()) + }) + return entries, nil } func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { - if content, ok := medium.files[path]; ok { - modTime, ok := medium.modTimes[path] + if content, ok := medium.fileContents[path]; ok { + modTime, ok := medium.modificationTimes[path] if !ok { modTime = time.Now() } return NewFileInfo(core.PathBase(path), int64(len(content)), medium.fileMode(path), modTime, false), nil } - if _, ok := medium.dirs[path]; ok { + if _, ok := medium.directories[path]; ok { return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil } return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } func (medium *MemoryMedium) Exists(path string) bool { - if _, ok := medium.files[path]; ok { + if _, ok := medium.fileContents[path]; ok { return true } - if _, ok := medium.dirs[path]; ok { + if _, ok := medium.directories[path]; ok { return true } return false } func (medium *MemoryMedium) IsDir(path string) bool { - _, ok := medium.dirs[path] + _, ok := medium.directories[path] return ok } diff --git a/medium_test.go b/medium_test.go index 3a5c164..26f07d3 100644 --- a/medium_test.go +++ b/medium_test.go @@ -13,10 +13,10 @@ import ( func TestMemoryMedium_NewMemoryMedium_Good(t 
*testing.T) { memoryMedium := NewMemoryMedium() assert.NotNil(t, memoryMedium) - assert.NotNil(t, memoryMedium.files) - assert.NotNil(t, memoryMedium.dirs) - assert.Empty(t, memoryMedium.files) - assert.Empty(t, memoryMedium.dirs) + assert.NotNil(t, memoryMedium.fileContents) + assert.NotNil(t, memoryMedium.directories) + assert.Empty(t, memoryMedium.fileContents) + assert.Empty(t, memoryMedium.directories) } func TestMemoryMedium_NewFileInfo_Good(t *testing.T) { @@ -46,7 +46,7 @@ func TestMemoryMedium_NewDirEntry_Good(t *testing.T) { func TestMemoryMedium_Read_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["test.txt"] = "hello world" + memoryMedium.fileContents["test.txt"] = "hello world" content, err := memoryMedium.Read("test.txt") assert.NoError(t, err) assert.Equal(t, "hello world", content) @@ -62,11 +62,11 @@ func TestMemoryMedium_Write_Good(t *testing.T) { memoryMedium := NewMemoryMedium() err := memoryMedium.Write("test.txt", "content") assert.NoError(t, err) - assert.Equal(t, "content", memoryMedium.files["test.txt"]) + assert.Equal(t, "content", memoryMedium.fileContents["test.txt"]) err = memoryMedium.Write("test.txt", "new content") assert.NoError(t, err) - assert.Equal(t, "new content", memoryMedium.files["test.txt"]) + assert.Equal(t, "new content", memoryMedium.fileContents["test.txt"]) } func TestMemoryMedium_WriteMode_Good(t *testing.T) { @@ -94,12 +94,12 @@ func TestMemoryMedium_EnsureDir_Good(t *testing.T) { memoryMedium := NewMemoryMedium() err := memoryMedium.EnsureDir("/path/to/dir") assert.NoError(t, err) - assert.True(t, memoryMedium.dirs["/path/to/dir"]) + assert.True(t, memoryMedium.directories["/path/to/dir"]) } func TestMemoryMedium_IsFile_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["exists.txt"] = "content" + memoryMedium.fileContents["exists.txt"] = "content" assert.True(t, memoryMedium.IsFile("exists.txt")) assert.False(t, memoryMedium.IsFile("nonexistent.txt")) @@ -107,7 
+107,7 @@ func TestMemoryMedium_IsFile_Good(t *testing.T) { func TestMemoryMedium_Delete_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["test.txt"] = "content" + memoryMedium.fileContents["test.txt"] = "content" err := memoryMedium.Delete("test.txt") assert.NoError(t, err) @@ -122,8 +122,8 @@ func TestMemoryMedium_Delete_NotFound_Bad(t *testing.T) { func TestMemoryMedium_Delete_DirNotEmpty_Bad(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.dirs["mydir"] = true - memoryMedium.files["mydir/file.txt"] = "content" + memoryMedium.directories["mydir"] = true + memoryMedium.fileContents["mydir/file.txt"] = "content" err := memoryMedium.Delete("mydir") assert.Error(t, err) @@ -131,50 +131,53 @@ func TestMemoryMedium_Delete_DirNotEmpty_Bad(t *testing.T) { func TestMemoryMedium_DeleteAll_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.dirs["mydir"] = true - memoryMedium.dirs["mydir/subdir"] = true - memoryMedium.files["mydir/file.txt"] = "content" - memoryMedium.files["mydir/subdir/nested.txt"] = "nested" + memoryMedium.directories["mydir"] = true + memoryMedium.directories["mydir/subdir"] = true + memoryMedium.fileContents["mydir/file.txt"] = "content" + memoryMedium.fileContents["mydir/subdir/nested.txt"] = "nested" err := memoryMedium.DeleteAll("mydir") assert.NoError(t, err) - assert.Empty(t, memoryMedium.dirs) - assert.Empty(t, memoryMedium.files) + assert.Empty(t, memoryMedium.directories) + assert.Empty(t, memoryMedium.fileContents) } func TestMemoryMedium_Rename_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["old.txt"] = "content" + memoryMedium.fileContents["old.txt"] = "content" err := memoryMedium.Rename("old.txt", "new.txt") assert.NoError(t, err) assert.False(t, memoryMedium.IsFile("old.txt")) assert.True(t, memoryMedium.IsFile("new.txt")) - assert.Equal(t, "content", memoryMedium.files["new.txt"]) + assert.Equal(t, "content", memoryMedium.fileContents["new.txt"]) } 
func TestMemoryMedium_Rename_Dir_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.dirs["olddir"] = true - memoryMedium.files["olddir/file.txt"] = "content" + memoryMedium.directories["olddir"] = true + memoryMedium.fileContents["olddir/file.txt"] = "content" err := memoryMedium.Rename("olddir", "newdir") assert.NoError(t, err) - assert.False(t, memoryMedium.dirs["olddir"]) - assert.True(t, memoryMedium.dirs["newdir"]) - assert.Equal(t, "content", memoryMedium.files["newdir/file.txt"]) + assert.False(t, memoryMedium.directories["olddir"]) + assert.True(t, memoryMedium.directories["newdir"]) + assert.Equal(t, "content", memoryMedium.fileContents["newdir/file.txt"]) } func TestMemoryMedium_List_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.dirs["mydir"] = true - memoryMedium.files["mydir/file1.txt"] = "content1" - memoryMedium.files["mydir/file2.txt"] = "content2" - memoryMedium.dirs["mydir/subdir"] = true + memoryMedium.directories["mydir"] = true + memoryMedium.fileContents["mydir/file1.txt"] = "content1" + memoryMedium.fileContents["mydir/file2.txt"] = "content2" + memoryMedium.directories["mydir/subdir"] = true entries, err := memoryMedium.List("mydir") assert.NoError(t, err) assert.Len(t, entries, 3) + assert.Equal(t, "file1.txt", entries[0].Name()) + assert.Equal(t, "file2.txt", entries[1].Name()) + assert.Equal(t, "subdir", entries[2].Name()) names := make(map[string]bool) for _, entry := range entries { @@ -187,7 +190,7 @@ func TestMemoryMedium_List_Good(t *testing.T) { func TestMemoryMedium_Stat_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["test.txt"] = "hello world" + memoryMedium.fileContents["test.txt"] = "hello world" info, err := memoryMedium.Stat("test.txt") assert.NoError(t, err) @@ -198,7 +201,7 @@ func TestMemoryMedium_Stat_Good(t *testing.T) { func TestMemoryMedium_Stat_Dir_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.dirs["mydir"] = true + 
memoryMedium.directories["mydir"] = true info, err := memoryMedium.Stat("mydir") assert.NoError(t, err) @@ -208,8 +211,8 @@ func TestMemoryMedium_Stat_Dir_Good(t *testing.T) { func TestMemoryMedium_Exists_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["file.txt"] = "content" - memoryMedium.dirs["mydir"] = true + memoryMedium.fileContents["file.txt"] = "content" + memoryMedium.directories["mydir"] = true assert.True(t, memoryMedium.Exists("file.txt")) assert.True(t, memoryMedium.Exists("mydir")) @@ -218,8 +221,8 @@ func TestMemoryMedium_Exists_Good(t *testing.T) { func TestMemoryMedium_IsDir_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["file.txt"] = "content" - memoryMedium.dirs["mydir"] = true + memoryMedium.fileContents["file.txt"] = "content" + memoryMedium.directories["mydir"] = true assert.False(t, memoryMedium.IsDir("file.txt")) assert.True(t, memoryMedium.IsDir("mydir")) @@ -286,7 +289,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, err) require.NoError(t, writeStream.Close()) - assert.Equal(t, "stream output", memoryMedium.files["streamed.txt"]) + assert.Equal(t, "stream output", memoryMedium.fileContents["streamed.txt"]) statInfo, err := memoryMedium.Stat("streamed.txt") require.NoError(t, err) assert.Equal(t, fs.FileMode(0644), statInfo.Mode()) @@ -294,7 +297,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { func TestIO_Read_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["test.txt"] = "hello" + memoryMedium.fileContents["test.txt"] = "hello" content, err := Read(memoryMedium, "test.txt") assert.NoError(t, err) assert.Equal(t, "hello", content) @@ -304,19 +307,19 @@ func TestIO_Write_Good(t *testing.T) { memoryMedium := NewMemoryMedium() err := Write(memoryMedium, "test.txt", "hello") assert.NoError(t, err) - assert.Equal(t, "hello", memoryMedium.files["test.txt"]) + assert.Equal(t, "hello", 
memoryMedium.fileContents["test.txt"]) } func TestIO_EnsureDir_Good(t *testing.T) { memoryMedium := NewMemoryMedium() err := EnsureDir(memoryMedium, "/my/dir") assert.NoError(t, err) - assert.True(t, memoryMedium.dirs["/my/dir"]) + assert.True(t, memoryMedium.directories["/my/dir"]) } func TestIO_IsFile_Good(t *testing.T) { memoryMedium := NewMemoryMedium() - memoryMedium.files["exists.txt"] = "content" + memoryMedium.fileContents["exists.txt"] = "content" assert.True(t, IsFile(memoryMedium, "exists.txt")) assert.False(t, IsFile(memoryMedium, "nonexistent.txt")) @@ -356,15 +359,15 @@ func TestIO_ReadWriteStream_Good(t *testing.T) { func TestIO_Copy_Good(t *testing.T) { source := NewMemoryMedium() dest := NewMemoryMedium() - source.files["test.txt"] = "hello" + source.fileContents["test.txt"] = "hello" err := Copy(source, "test.txt", dest, "test.txt") assert.NoError(t, err) - assert.Equal(t, "hello", dest.files["test.txt"]) + assert.Equal(t, "hello", dest.fileContents["test.txt"]) - source.files["original.txt"] = "content" + source.fileContents["original.txt"] = "content" err = Copy(source, "original.txt", dest, "copied.txt") assert.NoError(t, err) - assert.Equal(t, "content", dest.files["copied.txt"]) + assert.Equal(t, "content", dest.fileContents["copied.txt"]) } func TestIO_Copy_Bad(t *testing.T) { From 3efb43aaf789727106766d9fd5ac15df11c838d3 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 05:13:09 +0000 Subject: [PATCH 73/83] Improve memory medium metadata --- io.go | 13 +++++++++++-- medium_test.go | 9 +++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/io.go b/io.go index 0f69889..b6e5b3e 100644 --- a/io.go +++ b/io.go @@ -374,6 +374,7 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { name: core.PathBase(path), content: []byte(content), mode: medium.fileMode(path), + modTime: medium.modificationTime(path), }, nil } @@ -410,10 +411,11 @@ type MemoryFile struct { content []byte offset int64 mode fs.FileMode + 
modTime time.Time } func (file *MemoryFile) Stat() (fs.FileInfo, error) { - return NewFileInfo(file.name, int64(len(file.content)), file.mode, time.Time{}, false), nil + return NewFileInfo(file.name, int64(len(file.content)), file.mode, file.modTime, false), nil } func (file *MemoryFile) Read(buffer []byte) (int, error) { @@ -457,6 +459,13 @@ func (medium *MemoryMedium) fileMode(path string) fs.FileMode { return 0644 } +func (medium *MemoryMedium) modificationTime(path string) time.Time { + if modTime, ok := medium.modificationTimes[path]; ok { + return modTime + } + return time.Time{} +} + func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := medium.directories[path]; !ok { hasChildren := false @@ -518,7 +527,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { rest, false, medium.fileMode(filePath), - NewFileInfo(rest, int64(len(content)), medium.fileMode(filePath), time.Time{}, false), + NewFileInfo(rest, int64(len(content)), medium.fileMode(filePath), medium.modificationTime(filePath), false), )) } } diff --git a/medium_test.go b/medium_test.go index 26f07d3..5b103b8 100644 --- a/medium_test.go +++ b/medium_test.go @@ -234,6 +234,9 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, memoryMedium.EnsureDir("dir")) require.NoError(t, memoryMedium.Write("dir/file.txt", "alpha")) + statInfo, err := memoryMedium.Stat("dir/file.txt") + require.NoError(t, err) + file, err := memoryMedium.Open("dir/file.txt") require.NoError(t, err) @@ -242,7 +245,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, "file.txt", info.Name()) assert.Equal(t, int64(5), info.Size()) assert.Equal(t, fs.FileMode(0644), info.Mode()) - assert.True(t, info.ModTime().IsZero()) + assert.Equal(t, statInfo.ModTime(), info.ModTime()) assert.False(t, info.IsDir()) assert.Nil(t, info.Sys()) @@ -263,6 +266,7 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { assert.Equal(t, 
"file.txt", entryInfo.Name()) assert.Equal(t, int64(5), entryInfo.Size()) assert.Equal(t, fs.FileMode(0644), entryInfo.Mode()) + assert.Equal(t, statInfo.ModTime(), entryInfo.ModTime()) writer, err := memoryMedium.Create("created.txt") require.NoError(t, err) @@ -290,9 +294,10 @@ func TestMemoryMedium_StreamAndFSHelpers_Good(t *testing.T) { require.NoError(t, writeStream.Close()) assert.Equal(t, "stream output", memoryMedium.fileContents["streamed.txt"]) - statInfo, err := memoryMedium.Stat("streamed.txt") + statInfo, err = memoryMedium.Stat("streamed.txt") require.NoError(t, err) assert.Equal(t, fs.FileMode(0644), statInfo.Mode()) + assert.False(t, statInfo.ModTime().IsZero()) } func TestIO_Read_Good(t *testing.T) { From 8994c8b4648f2a2d107c5bc9fca7b93e12f126c2 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 06:43:35 +0000 Subject: [PATCH 74/83] Infer in-memory directory paths --- io.go | 77 +++++++++++++++++++++++++++++++++++++++++--------- medium_test.go | 42 +++++++++++++++++++++++++++ 2 files changed, 105 insertions(+), 14 deletions(-) diff --git a/io.go b/io.go index b6e5b3e..abc08f7 100644 --- a/io.go +++ b/io.go @@ -5,6 +5,7 @@ import ( "cmp" goio "io" "io/fs" + "path" "slices" "time" @@ -212,6 +213,45 @@ func NewMemoryMedium() *MemoryMedium { } } +func (medium *MemoryMedium) ensureParentDirectories(filePath string) { + parentPath := path.Dir(filePath) + for parentPath != "." 
&& parentPath != "" { + medium.directories[parentPath] = true + nextParentPath := path.Dir(parentPath) + if nextParentPath == parentPath { + break + } + parentPath = nextParentPath + } +} + +func (medium *MemoryMedium) directoryExists(path string) bool { + if path == "" { + return false + } + if _, ok := medium.directories[path]; ok { + return true + } + + prefix := path + if !core.HasSuffix(prefix, "/") { + prefix += "/" + } + + for filePath := range medium.fileContents { + if core.HasPrefix(filePath, prefix) { + return true + } + } + for directoryPath := range medium.directories { + if directoryPath != path && core.HasPrefix(directoryPath, prefix) { + return true + } + } + + return false +} + func (medium *MemoryMedium) Read(path string) (string, error) { content, ok := medium.fileContents[path] if !ok { @@ -225,6 +265,7 @@ func (medium *MemoryMedium) Write(path, content string) error { } func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { + medium.ensureParentDirectories(path) medium.fileContents[path] = content medium.fileModes[path] = mode medium.modificationTimes[path] = time.Now() @@ -232,6 +273,7 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er } func (medium *MemoryMedium) EnsureDir(path string) error { + medium.ensureParentDirectories(path) medium.directories[path] = true return nil } @@ -248,21 +290,29 @@ func (medium *MemoryMedium) Delete(path string) error { delete(medium.modificationTimes, path) return nil } - if _, ok := medium.directories[path]; ok { + if medium.directoryExists(path) { prefix := path if !core.HasSuffix(prefix, "/") { prefix += "/" } + hasChildren := false for filePath := range medium.fileContents { if core.HasPrefix(filePath, prefix) { - return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) + hasChildren = true + break } } - for directoryPath := range medium.directories { - if directoryPath != path && 
core.HasPrefix(directoryPath, prefix) { - return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) + if !hasChildren { + for directoryPath := range medium.directories { + if directoryPath != path && core.HasPrefix(directoryPath, prefix) { + hasChildren = true + break + } } } + if hasChildren { + return core.E("io.MemoryMedium.Delete", core.Concat("directory not empty: ", path), fs.ErrExist) + } delete(medium.directories, path) return nil } @@ -320,9 +370,11 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { } return nil } - if _, ok := medium.directories[oldPath]; ok { + if medium.directoryExists(oldPath) { medium.directories[newPath] = true - delete(medium.directories, oldPath) + if _, ok := medium.directories[oldPath]; ok { + delete(medium.directories, oldPath) + } oldPrefix := oldPath if !core.HasSuffix(oldPrefix, "/") { @@ -446,6 +498,7 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { } func (writeCloser *MemoryWriteCloser) Close() error { + writeCloser.medium.ensureParentDirectories(writeCloser.path) writeCloser.medium.fileContents[writeCloser.path] = string(writeCloser.data) writeCloser.medium.fileModes[writeCloser.path] = writeCloser.mode writeCloser.medium.modificationTimes[writeCloser.path] = time.Now() @@ -569,7 +622,7 @@ func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { } return NewFileInfo(core.PathBase(path), int64(len(content)), medium.fileMode(path), modTime, false), nil } - if _, ok := medium.directories[path]; ok { + if medium.directoryExists(path) { return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil } return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) @@ -579,13 +632,9 @@ func (medium *MemoryMedium) Exists(path string) bool { if _, ok := medium.fileContents[path]; ok { return true } - if _, ok := medium.directories[path]; ok { - return true - } - return false + 
return medium.directoryExists(path) } func (medium *MemoryMedium) IsDir(path string) bool { - _, ok := medium.directories[path] - return ok + return medium.directoryExists(path) } diff --git a/medium_test.go b/medium_test.go index 5b103b8..24ec534 100644 --- a/medium_test.go +++ b/medium_test.go @@ -97,6 +97,16 @@ func TestMemoryMedium_EnsureDir_Good(t *testing.T) { assert.True(t, memoryMedium.directories["/path/to/dir"]) } +func TestMemoryMedium_EnsureDir_CreatesParents_Good(t *testing.T) { + memoryMedium := NewMemoryMedium() + + require.NoError(t, memoryMedium.EnsureDir("alpha/beta/gamma")) + + assert.True(t, memoryMedium.IsDir("alpha")) + assert.True(t, memoryMedium.IsDir("alpha/beta")) + assert.True(t, memoryMedium.IsDir("alpha/beta/gamma")) +} + func TestMemoryMedium_IsFile_Good(t *testing.T) { memoryMedium := NewMemoryMedium() memoryMedium.fileContents["exists.txt"] = "content" @@ -105,6 +115,17 @@ func TestMemoryMedium_IsFile_Good(t *testing.T) { assert.False(t, memoryMedium.IsFile("nonexistent.txt")) } +func TestMemoryMedium_Write_CreatesParentDirectories_Good(t *testing.T) { + memoryMedium := NewMemoryMedium() + + require.NoError(t, memoryMedium.Write("nested/path/file.txt", "content")) + + assert.True(t, memoryMedium.Exists("nested")) + assert.True(t, memoryMedium.IsDir("nested")) + assert.True(t, memoryMedium.Exists("nested/path")) + assert.True(t, memoryMedium.IsDir("nested/path")) +} + func TestMemoryMedium_Delete_Good(t *testing.T) { memoryMedium := NewMemoryMedium() memoryMedium.fileContents["test.txt"] = "content" @@ -129,6 +150,15 @@ func TestMemoryMedium_Delete_DirNotEmpty_Bad(t *testing.T) { assert.Error(t, err) } +func TestMemoryMedium_Delete_InferredDirNotEmpty_Bad(t *testing.T) { + memoryMedium := NewMemoryMedium() + + require.NoError(t, memoryMedium.Write("mydir/file.txt", "content")) + + err := memoryMedium.Delete("mydir") + assert.Error(t, err) +} + func TestMemoryMedium_DeleteAll_Good(t *testing.T) { memoryMedium := NewMemoryMedium() 
memoryMedium.directories["mydir"] = true @@ -165,6 +195,18 @@ func TestMemoryMedium_Rename_Dir_Good(t *testing.T) { assert.Equal(t, "content", memoryMedium.fileContents["newdir/file.txt"]) } +func TestMemoryMedium_Rename_InferredDir_Good(t *testing.T) { + memoryMedium := NewMemoryMedium() + require.NoError(t, memoryMedium.Write("olddir/file.txt", "content")) + + require.NoError(t, memoryMedium.Rename("olddir", "newdir")) + + assert.False(t, memoryMedium.Exists("olddir")) + assert.True(t, memoryMedium.Exists("newdir")) + assert.True(t, memoryMedium.IsDir("newdir")) + assert.Equal(t, "content", memoryMedium.fileContents["newdir/file.txt"]) +} + func TestMemoryMedium_List_Good(t *testing.T) { memoryMedium := NewMemoryMedium() memoryMedium.directories["mydir"] = true From ef587639cdf0ef529a9fa36fa494ce64308a1d40 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 06:46:19 +0000 Subject: [PATCH 75/83] Refine io memory helpers --- io.go | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/io.go b/io.go index abc08f7..82f8785 100644 --- a/io.go +++ b/io.go @@ -213,7 +213,7 @@ func NewMemoryMedium() *MemoryMedium { } } -func (medium *MemoryMedium) ensureParentDirectories(filePath string) { +func (medium *MemoryMedium) ensureAncestorDirectories(filePath string) { parentPath := path.Dir(filePath) for parentPath != "." 
&& parentPath != "" { medium.directories[parentPath] = true @@ -265,7 +265,7 @@ func (medium *MemoryMedium) Write(path, content string) error { } func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { - medium.ensureParentDirectories(path) + medium.ensureAncestorDirectories(path) medium.fileContents[path] = content medium.fileModes[path] = mode medium.modificationTimes[path] = time.Now() @@ -273,7 +273,7 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er } func (medium *MemoryMedium) EnsureDir(path string) error { - medium.ensureParentDirectories(path) + medium.ensureAncestorDirectories(path) medium.directories[path] = true return nil } @@ -425,8 +425,8 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { return &MemoryFile{ name: core.PathBase(path), content: []byte(content), - mode: medium.fileMode(path), - modTime: medium.modificationTime(path), + mode: medium.modeForPath(path), + modTime: medium.modificationTimeForPath(path), }, nil } @@ -444,7 +444,7 @@ func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { medium: medium, path: path, data: []byte(content), - mode: medium.fileMode(path), + mode: medium.modeForPath(path), }, nil } @@ -456,7 +456,6 @@ func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } -// MemoryFile is the fs.File implementation returned by MemoryMedium.Open. // Example: file, _ := io.NewMemoryMedium().Open("notes.txt") type MemoryFile struct { name string @@ -483,7 +482,6 @@ func (file *MemoryFile) Close() error { return nil } -// MemoryWriteCloser is the io.WriteCloser implementation returned by MemoryMedium.Create and MemoryMedium.Append. 
// Example: writer, _ := io.NewMemoryMedium().Create("notes.txt") type MemoryWriteCloser struct { medium *MemoryMedium @@ -498,21 +496,21 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { } func (writeCloser *MemoryWriteCloser) Close() error { - writeCloser.medium.ensureParentDirectories(writeCloser.path) + writeCloser.medium.ensureAncestorDirectories(writeCloser.path) writeCloser.medium.fileContents[writeCloser.path] = string(writeCloser.data) writeCloser.medium.fileModes[writeCloser.path] = writeCloser.mode writeCloser.medium.modificationTimes[writeCloser.path] = time.Now() return nil } -func (medium *MemoryMedium) fileMode(path string) fs.FileMode { +func (medium *MemoryMedium) modeForPath(path string) fs.FileMode { if mode, ok := medium.fileModes[path]; ok { return mode } return 0644 } -func (medium *MemoryMedium) modificationTime(path string) time.Time { +func (medium *MemoryMedium) modificationTimeForPath(path string) time.Time { if modTime, ok := medium.modificationTimes[path]; ok { return modTime } @@ -579,8 +577,8 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { entries = append(entries, NewDirEntry( rest, false, - medium.fileMode(filePath), - NewFileInfo(rest, int64(len(content)), medium.fileMode(filePath), medium.modificationTime(filePath), false), + medium.modeForPath(filePath), + NewFileInfo(rest, int64(len(content)), medium.modeForPath(filePath), medium.modificationTimeForPath(filePath), false), )) } } @@ -620,7 +618,7 @@ func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { if !ok { modTime = time.Now() } - return NewFileInfo(core.PathBase(path), int64(len(content)), medium.fileMode(path), modTime, false), nil + return NewFileInfo(core.PathBase(path), int64(len(content)), medium.modeForPath(path), modTime, false), nil } if medium.directoryExists(path) { return NewFileInfo(core.PathBase(path), 0, fs.ModeDir|0755, time.Time{}, true), nil From c60c4d95f035b29c8b10b2d7dfb2b0609e0f7f0d Mon 
Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 06:49:39 +0000 Subject: [PATCH 76/83] docs: add AX examples to memory medium --- io.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/io.go b/io.go index 82f8785..2174571 100644 --- a/io.go +++ b/io.go @@ -252,6 +252,7 @@ func (medium *MemoryMedium) directoryExists(path string) bool { return false } +// Example: value, _ := io.NewMemoryMedium().Read("notes.txt") func (medium *MemoryMedium) Read(path string) (string, error) { content, ok := medium.fileContents[path] if !ok { @@ -260,10 +261,12 @@ func (medium *MemoryMedium) Read(path string) (string, error) { return content, nil } +// Example: _ = io.NewMemoryMedium().Write("notes.txt", "hello") func (medium *MemoryMedium) Write(path, content string) error { return medium.WriteMode(path, content, 0644) } +// Example: _ = io.NewMemoryMedium().WriteMode("keys/private.key", "secret", 0600) func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { medium.ensureAncestorDirectories(path) medium.fileContents[path] = content @@ -272,17 +275,20 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er return nil } +// Example: _ = io.NewMemoryMedium().EnsureDir("config/app") func (medium *MemoryMedium) EnsureDir(path string) error { medium.ensureAncestorDirectories(path) medium.directories[path] = true return nil } +// Example: ok := io.NewMemoryMedium().IsFile("notes.txt") func (medium *MemoryMedium) IsFile(path string) bool { _, ok := medium.fileContents[path] return ok } +// Example: _ = io.NewMemoryMedium().Delete("old.txt") func (medium *MemoryMedium) Delete(path string) error { if _, ok := medium.fileContents[path]; ok { delete(medium.fileContents, path) @@ -319,6 +325,7 @@ func (medium *MemoryMedium) Delete(path string) error { return core.E("io.MemoryMedium.Delete", core.Concat("path not found: ", path), fs.ErrNotExist) } +// Example: _ = io.NewMemoryMedium().DeleteAll("logs") func 
(medium *MemoryMedium) DeleteAll(path string) error { found := false if _, ok := medium.fileContents[path]; ok { @@ -356,6 +363,7 @@ func (medium *MemoryMedium) DeleteAll(path string) error { return nil } +// Example: _ = io.NewMemoryMedium().Rename("drafts/todo.txt", "archive/todo.txt") func (medium *MemoryMedium) Rename(oldPath, newPath string) error { if content, ok := medium.fileContents[oldPath]; ok { medium.fileContents[newPath] = content @@ -417,6 +425,7 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { return core.E("io.MemoryMedium.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) } +// Example: file, _ := io.NewMemoryMedium().Open("notes.txt") func (medium *MemoryMedium) Open(path string) (fs.File, error) { content, ok := medium.fileContents[path] if !ok { @@ -430,6 +439,7 @@ func (medium *MemoryMedium) Open(path string) (fs.File, error) { }, nil } +// Example: writer, _ := io.NewMemoryMedium().Create("notes.txt") func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { return &MemoryWriteCloser{ medium: medium, @@ -438,6 +448,7 @@ func (medium *MemoryMedium) Create(path string) (goio.WriteCloser, error) { }, nil } +// Example: writer, _ := io.NewMemoryMedium().Append("notes.txt") func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { content := medium.fileContents[path] return &MemoryWriteCloser{ @@ -448,10 +459,12 @@ func (medium *MemoryMedium) Append(path string) (goio.WriteCloser, error) { }, nil } +// Example: reader, _ := io.NewMemoryMedium().ReadStream("notes.txt") func (medium *MemoryMedium) ReadStream(path string) (goio.ReadCloser, error) { return medium.Open(path) } +// Example: writer, _ := io.NewMemoryMedium().WriteStream("notes.txt") func (medium *MemoryMedium) WriteStream(path string) (goio.WriteCloser, error) { return medium.Create(path) } @@ -517,6 +530,7 @@ func (medium *MemoryMedium) modificationTimeForPath(path string) time.Time { return time.Time{} } +// 
Example: entries, _ := io.NewMemoryMedium().List("config") func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { if _, ok := medium.directories[path]; !ok { hasChildren := false @@ -612,6 +626,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { return entries, nil } +// Example: info, _ := io.NewMemoryMedium().Stat("notes.txt") func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { if content, ok := medium.fileContents[path]; ok { modTime, ok := medium.modificationTimes[path] @@ -626,6 +641,7 @@ func (medium *MemoryMedium) Stat(path string) (fs.FileInfo, error) { return nil, core.E("io.MemoryMedium.Stat", core.Concat("path not found: ", path), fs.ErrNotExist) } +// Example: ok := io.NewMemoryMedium().Exists("notes.txt") func (medium *MemoryMedium) Exists(path string) bool { if _, ok := medium.fileContents[path]; ok { return true @@ -633,6 +649,7 @@ func (medium *MemoryMedium) Exists(path string) bool { return medium.directoryExists(path) } +// Example: ok := io.NewMemoryMedium().IsDir("config") func (medium *MemoryMedium) IsDir(path string) bool { return medium.directoryExists(path) } From 2f186d20ef4fd6c9f6756353a6d1c53de3104a34 Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 06:53:25 +0000 Subject: [PATCH 77/83] Align workspace docs with AX examples --- workspace/service.go | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/workspace/service.go b/workspace/service.go index 64a5d69..d66ec97 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -12,11 +12,7 @@ import ( "dappco.re/go/core/io/sigil" ) -// Example: service, _ := workspace.New(workspace.Options{ -// Example: KeyPairProvider: keyPairProvider, -// Example: RootPath: "/srv/workspaces", -// Example: Medium: io.NewMemoryMedium(), -// Example: }) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Workspace interface { CreateWorkspace(identifier, 
passphrase string) (string, error) SwitchWorkspace(workspaceID string) error @@ -52,16 +48,11 @@ type Options struct { KeyPairProvider KeyPairProvider RootPath string Medium io.Medium - // Core is the optional Core instance. When set, the workspace service - // auto-registers as an IPC listener for workspace.create and workspace.switch events. + // Example: service, _ := workspace.New(workspace.Options{Core: core.New()}) Core *core.Core } -// Example: service, _ := workspace.New(workspace.Options{ -// Example: KeyPairProvider: keyPairProvider, -// Example: RootPath: "/srv/workspaces", -// Example: Medium: io.NewMemoryMedium(), -// Example: }) +// Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) type Service struct { keyPairProvider KeyPairProvider activeWorkspaceID string From c95697e4f5b4cef68df3a6fdf669568c739b38ee Mon Sep 17 00:00:00 2001 From: Virgil Date: Fri, 3 Apr 2026 06:55:51 +0000 Subject: [PATCH 78/83] Sort local listings deterministically --- local/medium.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/local/medium.go b/local/medium.go index 100dd74..d9337cf 100644 --- a/local/medium.go +++ b/local/medium.go @@ -4,8 +4,10 @@ package local import ( + "cmp" goio "io" "io/fs" + "slices" "syscall" core "dappco.re/go/core" @@ -292,7 +294,16 @@ func (medium *Medium) List(path string) ([]fs.DirEntry, error) { if err != nil { return nil, err } - return resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) + entries, err := resultDirEntries("local.List", core.Concat("list failed: ", path), unrestrictedFileSystem.List(resolvedPath)) + if err != nil { + return nil, err + } + + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return cmp.Compare(a.Name(), b.Name()) + }) + + return entries, nil } // Example: info, _ := medium.Stat("config/app.yaml") From 5e14c79d64826daaff91849702f82cec337cf904 Mon Sep 17 00:00:00 2001 From: 
Virgil Date: Fri, 3 Apr 2026 06:58:49 +0000 Subject: [PATCH 79/83] Lock in io helper interfaces --- io.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/io.go b/io.go index 2174571..d2cf764 100644 --- a/io.go +++ b/io.go @@ -79,6 +79,8 @@ type FileInfo struct { isDir bool } +var _ fs.FileInfo = FileInfo{} + func (info FileInfo) Name() string { return info.name } func (info FileInfo) Size() int64 { return info.size } @@ -100,6 +102,8 @@ type DirEntry struct { info fs.FileInfo } +var _ fs.DirEntry = DirEntry{} + func (entry DirEntry) Name() string { return entry.name } func (entry DirEntry) IsDir() bool { return entry.isDir } @@ -478,6 +482,9 @@ type MemoryFile struct { modTime time.Time } +var _ fs.File = (*MemoryFile)(nil) +var _ goio.ReadCloser = (*MemoryFile)(nil) + func (file *MemoryFile) Stat() (fs.FileInfo, error) { return NewFileInfo(file.name, int64(len(file.content)), file.mode, file.modTime, false), nil } @@ -503,6 +510,8 @@ type MemoryWriteCloser struct { mode fs.FileMode } +var _ goio.WriteCloser = (*MemoryWriteCloser)(nil) + func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { writeCloser.data = append(writeCloser.data, data...) 
return len(data), nil From a43a16fb0da3cb53a795e127a76be8ad6c5efdb2 Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 5 Apr 2026 12:22:25 +0100 Subject: [PATCH 80/83] fix: address CodeRabbit PR #2 findings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - datanode: add isFileLocked() helper to prevent RLock re-entry deadlock in Append - io: MemoryMedium WriteMode rejects ancestor-is-file collision; EnsureDir rejects target-is-file collision - io: copy fileModes during directory rename - local: guard Delete/DeleteAll against removing sandbox root - local: add TOCTOU TODO comment on validatePath symlink loop - local: alias stdlib io→goio in medium_test.go - datanode: alias stdlib io→goio in medium_test.go - sqlite: add isValidTableName() whitelist to prevent table-name SQL injection in New() - sqlite: remove duplicate WHERE clause args in List query - sqlite: add mode field to sqliteWriteCloser; use it in Close (was hardcoded 420) - sigil: GzipSigil.In returns nil when custom outputWriter is used (buffer was empty) - sigil: capture hasher.Write error in HashSigil.In - sigil: add comment explaining DecryptionFailedError hides raw AEAD error intentionally - s3: add comment explaining WriteMode ignores mode (no POSIX on S3) - s3_test: ListObjectsV2 mock sets IsTruncated+NextContinuationToken when maxKeys exceeded - node: add comment explaining WriteMode ignores mode for in-memory nodes - store: sort keys before building List entries for deterministic output - store: add explanatory comment on NotFoundError sentinel - workspace: replace sha256.Sum256 key derivation with HKDF (RFC 5869) - docs: fix RFC-CORE-008 header (was RFC-025) - docs: update import paths from forge.lthn.ai/core/go-io to dappco.re/go/core/io - docs/RFC.md: remove duplicate Read/Write alias doc blocks Co-Authored-By: Virgil --- datanode/medium.go | 12 ++++++-- datanode/medium_test.go | 14 ++++----- docs/RFC-CORE-008-AGENT-EXPERIENCE.md | 2 +- docs/RFC.md | 37 
+++++++---------------- docs/index.md | 26 ++++++++-------- io.go | 19 ++++++++++++ local/medium.go | 14 +++++++++ local/medium_test.go | 8 ++--- medium_test.go | 4 +-- node/node.go | 1 + s3/s3.go | 2 ++ s3/s3_test.go | 24 +++++++++++++-- sigil/crypto_sigil.go | 2 ++ sigil/sigils.go | 9 +++++- sqlite/sqlite.go | 43 +++++++++++++++++++++++---- store/medium.go | 11 +++++-- store/store.go | 5 +++- workspace/service.go | 13 ++++++-- 18 files changed, 174 insertions(+), 72 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 6896eb2..020b252 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -171,7 +171,11 @@ func (medium *Medium) IsFile(filePath string) bool { medium.lock.RLock() defer medium.lock.RUnlock() - filePath = normaliseEntryPath(filePath) + return medium.isFileLocked(normaliseEntryPath(filePath)) +} + +// isFileLocked reports whether filePath is a regular file. Caller must hold at least medium.lock.RLock. +func (medium *Medium) isFileLocked(filePath string) bool { info, err := medium.dataNode.Stat(filePath) return err == nil && !info.IsDir() } @@ -415,7 +419,7 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { var existing []byte medium.lock.RLock() - if medium.IsFile(filePath) { + if medium.isFileLocked(filePath) { data, err := medium.readFileLocked(filePath) if err != nil { medium.lock.RUnlock() @@ -522,6 +526,10 @@ func (medium *Medium) readFileLocked(filePath string) ([]byte, error) { return data, nil } +// removeFileLocked rebuilds the entire DataNode excluding the target entry. +// This is O(n) per call, leading to O(n²) behaviour when deleting many files in a loop. +// TODO(perf): use a DataNode deletion API if borgdatanode ever exposes one, or batch deletions +// by collecting targets before rebuilding once. 
func (medium *Medium) removeFileLocked(target string) error { entries, err := medium.collectAllLocked() if err != nil { diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 730ddb4..93ff024 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -1,7 +1,7 @@ package datanode import ( - "io" + goio "io" "io/fs" "testing" @@ -155,7 +155,7 @@ func TestDataNode_Delete_RemoveFailure_Bad(t *testing.T) { require.NoError(t, dataNodeMedium.Write("bad.txt", "bad")) original := dataNodeReadAll - dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + dataNodeReadAll = func(_ goio.Reader) ([]byte, error) { return nil, core.NewError("read failed") } t.Cleanup(func() { @@ -203,7 +203,7 @@ func TestDataNode_RenameDir_ReadFailure_Bad(t *testing.T) { require.NoError(t, dataNodeMedium.Write("src/a.go", "package a")) original := dataNodeReadAll - dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + dataNodeReadAll = func(_ goio.Reader) ([]byte, error) { return nil, core.NewError("read failed") } t.Cleanup(func() { @@ -268,7 +268,7 @@ func TestDataNode_Open_Good(t *testing.T) { require.NoError(t, err) defer file.Close() - data, err := io.ReadAll(file) + data, err := goio.ReadAll(file) require.NoError(t, err) assert.Equal(t, "opened", string(data)) } @@ -300,7 +300,7 @@ func TestDataNode_Append_ReadFailure_Bad(t *testing.T) { require.NoError(t, dataNodeMedium.Write("new.txt", "hello")) original := dataNodeReadAll - dataNodeReadAll = func(_ io.Reader) ([]byte, error) { + dataNodeReadAll = func(_ goio.Reader) ([]byte, error) { return nil, core.NewError("read failed") } t.Cleanup(func() { @@ -322,7 +322,7 @@ func TestDataNode_Streams_Good(t *testing.T) { readStream, err := dataNodeMedium.ReadStream("stream.txt") require.NoError(t, err) - data, err := io.ReadAll(readStream) + data, err := goio.ReadAll(readStream) require.NoError(t, err) assert.Equal(t, "streamed", string(data)) require.NoError(t, readStream.Close()) @@ -382,7 +382,7 @@ func 
TestDataNode_DataNode_Good(t *testing.T) { require.NoError(t, err) defer file.Close() - data, err := io.ReadAll(file) + data, err := goio.ReadAll(file) require.NoError(t, err) assert.Equal(t, "borg", string(data)) } diff --git a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md index 3763521..becda8e 100644 --- a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md +++ b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md @@ -1,4 +1,4 @@ -# RFC-025: Agent Experience (AX) Design Principles +# RFC-CORE-008: Agent Experience (AX) Design Principles - **Status:** Draft - **Authors:** Snider, Cladius diff --git a/docs/RFC.md b/docs/RFC.md index aeb126c..ca0031e 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -7,9 +7,9 @@ description: Complete API reference for go-io. This document enumerates every exported type, function, method, and variable in go-io, with short usage examples. -Examples use the import paths from `docs/index.md` (`forge.lthn.ai/core/go-io`). Adjust paths if your module path differs. +Examples use the import paths from `docs/index.md` (`dappco.re/go/core/io`). Adjust paths if your module path differs. -## Package io (`forge.lthn.ai/core/go-io`) +## Package io (`dappco.re/go/core/io`) Defines the `Medium` interface, helper functions, and in-memory `MemoryMedium` implementation. @@ -65,23 +65,6 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**Read(path string) (string, error)** -Alias for `Read`. -Example: -```go -m := io.NewMemoryMedium() -_ = m.Write("notes.txt", "hello") -value, _ := m.Read("notes.txt") -``` - -**Write(path, content string) error** -Alias for `Write`. -Example: -```go -m := io.NewMemoryMedium() -_ = m.Write("notes.txt", "hello") -``` - **Delete(path string) error** Deletes a file or empty directory. 
Example: @@ -620,7 +603,7 @@ _, _ = w.Write([]byte("hello")) _ = w.Close() ``` -## Package local (`forge.lthn.ai/core/go-io/local`) +## Package local (`dappco.re/go/core/io/local`) Local filesystem backend with sandboxed roots and symlink-escape protection. @@ -798,7 +781,7 @@ m, _ := local.New("/srv/app") _ = m.Write("notes.txt", "hello") ``` -## Package node (`forge.lthn.ai/core/go-io/node`) +## Package node (`dappco.re/go/core/io/node`) In-memory filesystem implementing `io.Medium` and `fs.FS`, with tar serialisation. @@ -1087,7 +1070,7 @@ _, _ = w.Write([]byte("data")) _ = w.Close() ``` -## Package store (`forge.lthn.ai/core/go-io/store`) +## Package store (`dappco.re/go/core/io/store`) Group-namespaced key-value store backed by SQLite, plus a `Medium` adapter. @@ -1387,7 +1370,7 @@ _ = m.Write("config/theme", "midnight") ok := m.IsDir("config") ``` -## Package sqlite (`forge.lthn.ai/core/go-io/sqlite`) +## Package sqlite (`dappco.re/go/core/io/sqlite`) SQLite-backed `io.Medium` implementation using the pure-Go driver. @@ -1575,7 +1558,7 @@ _ = m.EnsureDir("config") ok := m.IsDir("config") ``` -## Package s3 (`forge.lthn.ai/core/go-io/s3`) +## Package s3 (`dappco.re/go/core/io/s3`) Amazon S3-backed `io.Medium` implementation. @@ -1773,7 +1756,7 @@ m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsDir("logs") ``` -## Package datanode (`forge.lthn.ai/core/go-io/datanode`) +## Package datanode (`dappco.re/go/core/io/datanode`) In-memory `io.Medium` backed by Borg's DataNode, with tar snapshot/restore support. @@ -1986,7 +1969,7 @@ _ = m.EnsureDir("config") ok := m.IsDir("config") ``` -## Package workspace (`forge.lthn.ai/core/go-io/workspace`) +## Package workspace (`dappco.re/go/core/io/workspace`) Encrypted user workspace management. 
@@ -2123,7 +2106,7 @@ result := service.HandleWorkspaceMessage(core.New(), workspace.WorkspaceCommand{ _ = result.OK ``` -## Package sigil (`forge.lthn.ai/core/go-io/sigil`) +## Package sigil (`dappco.re/go/core/io/sigil`) Composable data-transformation sigils for encoding, compression, hashing, and encryption. diff --git a/docs/index.md b/docs/index.md index 05762f9..7b28d1b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,7 +5,7 @@ description: Unified storage abstraction for Go with pluggable backends — loca # go-io -`forge.lthn.ai/core/go-io` is a storage abstraction library that provides a single `Medium` interface for reading and writing files across different backends. Write your code against `Medium` once, then swap between local disk, S3, SQLite, or in-memory storage without changing a line of business logic. +`dappco.re/go/core/io` is a storage abstraction library that provides a single `Medium` interface for reading and writing files across different backends. Write your code against `Medium` once, then swap between local disk, S3, SQLite, or in-memory storage without changing a line of business logic. The library also includes `sigil`, a composable data-transformation pipeline for encoding, compression, hashing, and authenticated encryption. 
@@ -14,9 +14,9 @@ The library also includes `sigil`, a composable data-transformation pipeline for ```go import ( - io "forge.lthn.ai/core/go-io" - "forge.lthn.ai/core/go-io/s3" - "forge.lthn.ai/core/go-io/node" + io "dappco.re/go/core/io" + "dappco.re/go/core/io/s3" + "dappco.re/go/core/io/node" ) content, _ := io.Local.Read("/etc/hostname") @@ -37,15 +37,15 @@ _ = s3Medium.Write("photo.jpg", rawData) | Package | Import Path | Purpose | |---------|-------------|---------| -| `io` (root) | `forge.lthn.ai/core/go-io` | `Medium` interface, helper functions, `MemoryMedium` for tests | -| `local` | `forge.lthn.ai/core/go-io/local` | Local filesystem backend with path sandboxing and symlink-escape protection | -| `s3` | `forge.lthn.ai/core/go-io/s3` | Amazon S3 / S3-compatible backend (Garage, MinIO, etc.) | -| `sqlite` | `forge.lthn.ai/core/go-io/sqlite` | SQLite-backed virtual filesystem (pure Go driver, no CGO) | -| `node` | `forge.lthn.ai/core/go-io/node` | In-memory filesystem implementing both `Medium` and `fs.FS`, with tar round-tripping | -| `datanode` | `forge.lthn.ai/core/go-io/datanode` | Thread-safe in-memory `Medium` backed by Borg's DataNode, with snapshot/restore | -| `store` | `forge.lthn.ai/core/go-io/store` | Group-namespaced key-value store (SQLite), with a `Medium` adapter and Go template rendering | -| `sigil` | `forge.lthn.ai/core/go-io/sigil` | Composable data transformations: encoding, compression, hashing, XChaCha20-Poly1305 encryption | -| `workspace` | `forge.lthn.ai/core/go-io/workspace` | Encrypted workspace service integrated with the Core DI container | +| `io` (root) | `dappco.re/go/core/io` | `Medium` interface, helper functions, `MemoryMedium` for tests | +| `local` | `dappco.re/go/core/io/local` | Local filesystem backend with path sandboxing and symlink-escape protection | +| `s3` | `dappco.re/go/core/io/s3` | Amazon S3 / S3-compatible backend (Garage, MinIO, etc.) 
| +| `sqlite` | `dappco.re/go/core/io/sqlite` | SQLite-backed virtual filesystem (pure Go driver, no CGO) | +| `node` | `dappco.re/go/core/io/node` | In-memory filesystem implementing both `Medium` and `fs.FS`, with tar round-tripping | +| `datanode` | `dappco.re/go/core/io/datanode` | Thread-safe in-memory `Medium` backed by Borg's DataNode, with snapshot/restore | +| `store` | `dappco.re/go/core/io/store` | Group-namespaced key-value store (SQLite), with a `Medium` adapter and Go template rendering | +| `sigil` | `dappco.re/go/core/io/sigil` | Composable data transformations: encoding, compression, hashing, XChaCha20-Poly1305 encryption | +| `workspace` | `dappco.re/go/core/io/workspace` | Encrypted workspace service integrated with the Core DI container | ## The Medium Interface diff --git a/io.go b/io.go index d2cf764..41a2f6d 100644 --- a/io.go +++ b/io.go @@ -272,6 +272,18 @@ func (medium *MemoryMedium) Write(path, content string) error { // Example: _ = io.NewMemoryMedium().WriteMode("keys/private.key", "secret", 0600) func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { + // Verify no ancestor directory component is stored as a file. + ancestor := path.Dir(path) + for ancestor != "." 
&& ancestor != "" { + if _, ok := medium.fileContents[ancestor]; ok { + return core.E("io.MemoryMedium.WriteMode", core.Concat("ancestor path is a file: ", ancestor), fs.ErrExist) + } + next := path.Dir(ancestor) + if next == ancestor { + break + } + ancestor = next + } medium.ensureAncestorDirectories(path) medium.fileContents[path] = content medium.fileModes[path] = mode @@ -281,6 +293,9 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er // Example: _ = io.NewMemoryMedium().EnsureDir("config/app") func (medium *MemoryMedium) EnsureDir(path string) error { + if _, ok := medium.fileContents[path]; ok { + return core.E("io.MemoryMedium.EnsureDir", core.Concat("path is already a file: ", path), fs.ErrExist) + } medium.ensureAncestorDirectories(path) medium.directories[path] = true return nil @@ -411,6 +426,10 @@ func (medium *MemoryMedium) Rename(oldPath, newPath string) error { medium.modificationTimes[newFilePath] = modTime delete(medium.modificationTimes, oldFilePath) } + if fileMode, ok := medium.fileModes[oldFilePath]; ok { + medium.fileModes[newFilePath] = fileMode + delete(medium.fileModes, oldFilePath) + } } dirsToMove := make(map[string]string) diff --git a/local/medium.go b/local/medium.go index d9337cf..94ac9d2 100644 --- a/local/medium.go +++ b/local/medium.go @@ -197,6 +197,14 @@ func (medium *Medium) sandboxedPath(path string) string { return core.Path(medium.filesystemRoot, core.TrimPrefix(clean, dirSeparator())) } +// validatePath resolves the caller-supplied path against the sandbox root, rejecting any path +// that would escape via symlinks. +// +// TODO(security): the per-component Lstat + join loop is subject to a TOCTOU race: a symlink +// could be swapped between the Lstat and the subsequent open. A proper fix requires opening each +// directory component with O_NOFOLLOW (openat-style) so that the resolved fd is used for the +// next step rather than re-resolving from a path string. 
Until then, symlink-based escape is +// only possible on systems where an attacker can swap filesystem objects between syscalls. func (medium *Medium) validatePath(path string) (string, error) { if medium.filesystemRoot == dirSeparator() { return medium.sandboxedPath(path), nil @@ -358,6 +366,9 @@ func (medium *Medium) Delete(path string) error { if err != nil { return err } + if resolvedPath == medium.filesystemRoot { + return core.E("local.Delete", "refusing to delete sandbox root", nil) + } if isProtectedPath(resolvedPath) { return core.E("local.Delete", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } @@ -370,6 +381,9 @@ func (medium *Medium) DeleteAll(path string) error { if err != nil { return err } + if resolvedPath == medium.filesystemRoot { + return core.E("local.DeleteAll", "refusing to delete sandbox root", nil) + } if isProtectedPath(resolvedPath) { return core.E("local.DeleteAll", core.Concat("refusing to delete protected path: ", resolvedPath), nil) } diff --git a/local/medium_test.go b/local/medium_test.go index de84c45..8d197e9 100644 --- a/local/medium_test.go +++ b/local/medium_test.go @@ -1,7 +1,7 @@ package local import ( - "io" + goio "io" "io/fs" "syscall" "testing" @@ -379,8 +379,8 @@ func TestLocal_ReadStream_Basic_Good(t *testing.T) { assert.NoError(t, err) defer reader.Close() - limitReader := io.LimitReader(reader, 9) - data, err := io.ReadAll(limitReader) + limitReader := goio.LimitReader(reader, 9) + data, err := goio.ReadAll(limitReader) assert.NoError(t, err) assert.Equal(t, "streaming", string(data)) } @@ -392,7 +392,7 @@ func TestLocal_WriteStream_Basic_Good(t *testing.T) { writer, err := localMedium.WriteStream("output.txt") assert.NoError(t, err) - _, err = io.Copy(writer, core.NewReader("piped data")) + _, err = goio.Copy(writer, core.NewReader("piped data")) assert.NoError(t, err) err = writer.Close() assert.NoError(t, err) diff --git a/medium_test.go b/medium_test.go index 24ec534..60bc569 100644 --- 
a/medium_test.go +++ b/medium_test.go @@ -427,6 +427,6 @@ func TestIO_Copy_Bad(t *testing.T) { func TestIO_LocalGlobal_Good(t *testing.T) { assert.NotNil(t, Local, "io.Local should be initialised") - var memoryMedium = Local - assert.NotNil(t, memoryMedium) + var localMedium = Local + assert.NotNil(t, localMedium) } diff --git a/node/node.go b/node/node.go index 73594d3..8d00c40 100644 --- a/node/node.go +++ b/node/node.go @@ -334,6 +334,7 @@ func (node *Node) Write(filePath, content string) error { } // Example: _ = nodeTree.WriteMode("keys/private.key", key, 0600) +// Note: mode is intentionally ignored — in-memory nodes have no filesystem permission model. func (node *Node) WriteMode(filePath, content string, mode fs.FileMode) error { return node.Write(filePath, content) } diff --git a/s3/s3.go b/s3/s3.go index 7dc6bb5..3e92a2c 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -161,6 +161,8 @@ func (medium *Medium) Write(filePath, content string) error { } // Example: _ = medium.WriteMode("keys/private.key", key, 0600) +// Note: mode is intentionally ignored — S3 has no POSIX permission model. +// Use S3 bucket policies and IAM for access control. 
func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { return medium.Write(filePath, content) } diff --git a/s3/s3_test.go b/s3/s3_test.go index c72e771..bd4bc15 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -131,10 +131,22 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para } sort.Strings(allKeys) + continuationToken := aws.ToString(params.ContinuationToken) + var contents []types.Object commonPrefixes := make(map[string]bool) + truncated := false + var nextToken string + past := continuationToken == "" for _, k := range allKeys { + if !past { + if k == continuationToken { + past = true + } + continue + } + rest := core.TrimPrefix(k, prefix) if delimiter != "" { @@ -147,6 +159,8 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para } if int32(len(contents)) >= maxKeys { + truncated = true + nextToken = k break } @@ -169,11 +183,15 @@ func (client *testS3Client) ListObjectsV2(operationContext context.Context, para cpSlice = append(cpSlice, types.CommonPrefix{Prefix: aws.String(cp)}) } - return &awss3.ListObjectsV2Output{ + out := &awss3.ListObjectsV2Output{ Contents: contents, CommonPrefixes: cpSlice, - IsTruncated: aws.Bool(false), - }, nil + IsTruncated: aws.Bool(truncated), + } + if truncated { + out.NextContinuationToken = aws.String(nextToken) + } + return out, nil } func (client *testS3Client) CopyObject(operationContext context.Context, params *awss3.CopyObjectInput, optionFns ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) { diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 8e6dfd3..58ca1e2 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -264,6 +264,8 @@ func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { obfuscated, err := aead.Open(nil, nonce, ciphertext, nil) if err != nil { + // The underlying aead error is intentionally hidden: surfacing raw AEAD errors can + // leak oracle information to an attacker. 
DecryptionFailedError is the safe sentinel. return nil, core.E("sigil.ChaChaPolySigil.Out", "decrypt ciphertext", DecryptionFailedError) } diff --git a/sigil/sigils.go b/sigil/sigils.go index 36f2f15..ec313cc 100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -102,6 +102,11 @@ func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if err := gzipWriter.Close(); err != nil { return nil, core.E("sigil.GzipSigil.In", "close gzip writer", err) } + // When a custom outputWriter was supplied the caller owns the bytes; return nil so the + // pipeline does not propagate a stale empty-buffer value. + if sigil.outputWriter != nil { + return nil, nil + } return buffer.Bytes(), nil } @@ -203,7 +208,9 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) { return nil, core.E("sigil.HashSigil.In", "hash algorithm not available", fs.ErrInvalid) } - hasher.Write(data) + if _, err := hasher.Write(data); err != nil { + return nil, core.E("sigil.HashSigil.In", "write hash input", err) + } return hasher.(interface{ Sum([]byte) []byte }).Sum(nil), nil } diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index a2d7c1b..220a620 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -38,6 +38,27 @@ func normaliseTableName(table string) string { return table } +// isValidTableName reports whether name consists only of ASCII letters, digits, and underscores, +// starting with a letter or underscore. This prevents SQL-injection via table-name concatenation. 
+func isValidTableName(name string) bool { + if name == "" { + return false + } + for i, ch := range name { + switch { + case ch >= 'a' && ch <= 'z', ch >= 'A' && ch <= 'Z', ch == '_': + // always valid + case ch >= '0' && ch <= '9': + if i == 0 { + return false // must not start with a digit + } + default: + return false + } + } + return true +} + // Example: medium, _ := sqlite.New(sqlite.Options{Path: ":memory:", Table: "files"}) // Example: _ = medium.Write("config/app.yaml", "port: 8080") func New(options Options) (*Medium, error) { @@ -45,7 +66,12 @@ func New(options Options) (*Medium, error) { return nil, core.E("sqlite.New", "database path is required", fs.ErrInvalid) } - medium := &Medium{table: normaliseTableName(options.Table)} + tableName := normaliseTableName(options.Table) + if !isValidTableName(tableName) { + return nil, core.E("sqlite.New", core.Concat("table name contains invalid characters: ", tableName), fs.ErrInvalid) + } + + medium := &Medium{table: tableName} database, err := sql.Open("sqlite", options.Path) if err != nil { @@ -338,8 +364,8 @@ func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { } rows, err := medium.database.Query( - `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ? 
OR path LIKE ?`, - prefix+"%", prefix+"%", + `SELECT path, content, mode, is_dir, mtime FROM `+medium.table+` WHERE path LIKE ?`, + prefix+"%", ) if err != nil { return nil, core.E("sqlite.List", "query failed", err) @@ -635,6 +661,7 @@ type sqliteWriteCloser struct { medium *Medium path string data []byte + mode fs.FileMode } func (writer *sqliteWriteCloser) Write(data []byte) (int, error) { @@ -643,10 +670,14 @@ func (writer *sqliteWriteCloser) Write(data []byte) (int, error) { } func (writer *sqliteWriteCloser) Close() error { + mode := writer.mode + if mode == 0 { + mode = 0644 + } _, err := writer.medium.database.Exec( - `INSERT INTO `+writer.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, 420, FALSE, ?) - ON CONFLICT(path) DO UPDATE SET content = excluded.content, is_dir = FALSE, mtime = excluded.mtime`, - writer.path, writer.data, time.Now().UTC(), + `INSERT INTO `+writer.medium.table+` (path, content, mode, is_dir, mtime) VALUES (?, ?, ?, FALSE, ?) + ON CONFLICT(path) DO UPDATE SET content = excluded.content, mode = excluded.mode, is_dir = FALSE, mtime = excluded.mtime`, + writer.path, writer.data, int(mode), time.Now().UTC(), ) if err != nil { return core.E("sqlite.WriteCloser.Close", core.Concat("store failed: ", writer.path), err) diff --git a/store/medium.go b/store/medium.go index 9e10877..c107e11 100644 --- a/store/medium.go +++ b/store/medium.go @@ -4,6 +4,7 @@ import ( goio "io" "io/fs" "path" + "slices" "time" core "dappco.re/go/core" @@ -171,9 +172,15 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { if err != nil { return nil, err } + // Sort keys so that List returns entries in a deterministic order. 
+ keys := make([]string, 0, len(all)) + for k := range all { + keys = append(keys, k) + } + slices.Sort(keys) var entries []fs.DirEntry - for key, value := range all { - entries = append(entries, &keyValueDirEntry{name: key, size: int64(len(value))}) + for _, k := range keys { + entries = append(entries, &keyValueDirEntry{name: k, size: int64(len(all[k]))}) } return entries, nil } diff --git a/store/store.go b/store/store.go index e32ac48..2b5efc2 100644 --- a/store/store.go +++ b/store/store.go @@ -10,7 +10,10 @@ import ( _ "modernc.org/sqlite" ) -// Example: _, err := keyValueStore.Get("app", "theme") +// NotFoundError is the sentinel returned when a key does not exist in the store. +// Callers test for it with errors.Is. It is defined with errors.New so that +// identity comparison works correctly across package boundaries. +// Example: _, err := keyValueStore.Get("app", "theme"); errors.Is(err, store.NotFoundError) var NotFoundError = errors.New("key not found") // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) diff --git a/workspace/service.go b/workspace/service.go index d66ec97..975a701 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -2,11 +2,12 @@ package workspace import ( "crypto/sha256" - "encoding/hex" + goio "io" "io/fs" "sync" core "dappco.re/go/core" + "golang.org/x/crypto/hkdf" "dappco.re/go/core/io" "dappco.re/go/core/io/sigil" @@ -188,8 +189,14 @@ func (service *Service) workspaceCipherSigil(operation string) (*sigil.ChaChaPol if err != nil { return nil, core.E(operation, "failed to read workspace key", err) } - derived := sha256.Sum256([]byte(rawKey)) - cipherSigil, err := sigil.NewChaChaPolySigil(derived[:], nil) + // Use HKDF (RFC 5869) for key derivation: it is purpose-bound, domain-separated, + // and more resistant to length-extension attacks than a bare SHA-256 hash. 
+ hkdfReader := hkdf.New(sha256.New, []byte(rawKey), nil, []byte("workspace-cipher-key")) + derived := make([]byte, 32) + if _, err := goio.ReadFull(hkdfReader, derived); err != nil { + return nil, core.E(operation, "failed to derive workspace key", err) + } + cipherSigil, err := sigil.NewChaChaPolySigil(derived, nil) if err != nil { return nil, core.E(operation, "failed to create cipher sigil", err) } From 2c18322dfede5cf4dc1d8ffc5bf4e41e129df9d0 Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 5 Apr 2026 12:39:57 +0100 Subject: [PATCH 81/83] fix: address CodeRabbit PR #2 findings (batch 2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - datanode/medium.go: add compile-time Medium interface check - docs/RFC.md: remove duplicated MemoryMedium Read/Write method entries - docs/RFC-CORE-008-AGENT-EXPERIENCE.md: add text language tag to fenced code block - io.go: rename WriteMode param path→filePath to avoid shadowing path package - io.go: add directory collision check in WriteMode and MemoryWriteCloser.Close - io.go: Copy now preserves source file permissions via Stat+WriteMode - node/node.go: add goroutine-safety doc comment on Node - node/node.go: Rename handles directory prefix batch-rename - node/node.go: rename CopyFile→ExportFile, document local-only behaviour, wrap PathError with core.E() - node/node.go: filter empty path components in Walk depth calculation - node/node_test.go: update tests to use ExportFile - sigil/crypto_sigil.go: make Key/Obfuscator unexported, add Key()/Obfuscator()/SetObfuscator() accessors - sigil/crypto_sigil.go: rename receiver sigil→s to avoid shadowing package name - sigil/crypto_sigil_test.go: update to use accessor methods - store/medium.go: use KeyValueStore.ListGroups() instead of direct DB query - store/medium.go: add doc comment that WriteMode does not persist file mode - store/store.go: add ListGroups() method to KeyValueStore Co-Authored-By: Virgil --- datanode/medium.go | 3 ++ 
docs/RFC-CORE-008-AGENT-EXPERIENCE.md | 2 +- docs/RFC.md | 21 ++------- docs/api-contract.md | 2 +- docs/architecture.md | 2 +- docs/security-attack-vector-mapping.md | 2 +- io.go | 24 +++++++--- node/node.go | 61 ++++++++++++++++++++------ node/node_test.go | 14 +++--- sigil/crypto_sigil.go | 47 +++++++++++++------- sigil/crypto_sigil_test.go | 12 ++--- store/medium.go | 19 +++----- store/store.go | 22 ++++++++++ 13 files changed, 148 insertions(+), 83 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index 020b252..da53c8f 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -14,6 +14,7 @@ import ( "time" core "dappco.re/go/core" + coreio "dappco.re/go/core/io" borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" ) @@ -29,6 +30,8 @@ var ( } ) +var _ coreio.Medium = (*Medium)(nil) + // Example: medium := datanode.New() // Example: _ = medium.Write("jobs/run.log", "started") // Example: snapshot, _ := medium.Snapshot() diff --git a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md index becda8e..1bf599c 100644 --- a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md +++ b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md @@ -41,7 +41,7 @@ AX does not replace UX or DX. End users still need good UX. Developers still nee Names are tokens that agents pattern-match across languages and contexts. Abbreviations introduce mapping overhead. 
-``` +```text Config not Cfg Service not Srv Embed not Emb diff --git a/docs/RFC.md b/docs/RFC.md index ca0031e..c6feb36 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -422,21 +422,6 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**Read(path string) (string, error)** -Example: -```go -m := io.NewMemoryMedium() -_ = m.Write("notes.txt", "hello") -value, _ := m.Read("notes.txt") -``` - -**Write(path, content string) error** -Example: -```go -m := io.NewMemoryMedium() -_ = m.Write("notes.txt", "hello") -``` - **Delete(path string) error** Example: ```go @@ -869,13 +854,13 @@ _ = n.Write("file.txt", "data") b, _ := n.ReadFile("file.txt") ``` -**CopyFile(sourcePath, destinationPath string, perm fs.FileMode) error** -Copies a file to the local filesystem. +**ExportFile(sourcePath, destinationPath string, perm fs.FileMode) error** +Exports a file from the in-memory tree to the local filesystem. Operates on coreio.Local directly — use CopyTo for Medium-agnostic transfers. Example: ```go n := node.New() _ = n.Write("file.txt", "data") -_ = n.CopyFile("file.txt", "/tmp/file.txt", 0644) +_ = n.ExportFile("file.txt", "/tmp/file.txt", 0644) ``` **CopyTo(target io.Medium, sourcePath, destPath string) error** diff --git a/docs/api-contract.md b/docs/api-contract.md index 05b1b4f..e0f05a5 100644 --- a/docs/api-contract.md +++ b/docs/api-contract.md @@ -126,7 +126,7 @@ Test coverage is `Yes` when same-package tests directly execute or reference the | `New` | `func New() *Node` | `dappco.re/go/core/io/node` | New creates a new, empty Node. | Yes | | `Node.AddData` | `func (*Node) AddData(name string, content []byte)` | `dappco.re/go/core/io/node` | AddData stages content in the in-memory filesystem. | Yes | | `Node.Append` | `func (*Node) Append(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Append opens the named file for appending, creating it if needed. 
| No | -| `Node.CopyFile` | `func (*Node) CopyFile(src, dst string, perm fs.FileMode) error` | `dappco.re/go/core/io/node` | CopyFile copies a file from the in-memory tree to the local filesystem. | Yes | +| `Node.ExportFile` | `func (*Node) ExportFile(src, dst string, perm fs.FileMode) error` | `dappco.re/go/core/io/node` | ExportFile exports a file from the in-memory tree to the local filesystem. Use CopyTo for Medium-agnostic transfers. | Yes | | `Node.CopyTo` | `func (*Node) CopyTo(target coreio.Medium, sourcePath, destPath string) error` | `dappco.re/go/core/io/node` | CopyTo copies a file (or directory tree) from the node to any Medium. | No | | `Node.Create` | `func (*Node) Create(p string) (goio.WriteCloser, error)` | `dappco.re/go/core/io/node` | Create creates or truncates the named file, returning a WriteCloser. | No | | `Node.Delete` | `func (*Node) Delete(p string) error` | `dappco.re/go/core/io/node` | Delete removes a single file. | No | diff --git a/docs/architecture.md b/docs/architecture.md index 0d11aa6..d2fd0ea 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -94,7 +94,7 @@ Key capabilities beyond `Medium`: - **`ToTar()` / `FromTar()`** — serialise the entire tree to a tar archive and back. This enables snapshotting, transport, and archival. - **`Walk()` with `WalkOptions`** — extends `fs.WalkDir` with `MaxDepth`, `Filter`, and `SkipErrors` controls. -- **`CopyFile(src, dst, perm)`** — copies a file from the in-memory tree to the real filesystem. +- **`ExportFile(src, dst, perm)`** — exports a file from the in-memory tree to the local filesystem. Use `CopyTo` for Medium-agnostic transfers. - **`CopyTo(target Medium, src, dst)`** — copies a file or directory tree to any other `Medium`. - **`ReadFile(name)`** — returns a defensive copy of file content, preventing callers from mutating internal state. 
diff --git a/docs/security-attack-vector-mapping.md b/docs/security-attack-vector-mapping.md index 4db0e57..5808f9a 100644 --- a/docs/security-attack-vector-mapping.md +++ b/docs/security-attack-vector-mapping.md @@ -116,7 +116,7 @@ Notes: | `(*node.Node).Delete`, `DeleteAll`, `Rename` | `node/node.go:411`, `421`, `445` | Caller path(s) | Direct map mutation keyed by caller-supplied names | Only strips a leading `/` | Arbitrary delete/rename of any key, including `../`-style names; no directory-safe rename logic | | `(*node.Node).Stat`, `List`, `ReadDir`, `Exists`, `IsFile`, `IsDir` | `node/node.go:278`, `461`, `297`, `387`, `393`, `400` | Caller path/name | Directory inference from map keys and `fs` adapter methods | Only strips a leading `/` | Namespace enumeration and ambiguity around equivalent-looking path spellings | | `(*node.Node).WalkNode`, `Walk` | `node/node.go:128`, `145` | Caller root path, callback, filters | `fs.WalkDir` over the in-memory tree | No root normalization beyond whatever `Node` already does | Attackers who can plant names can force callback traversal over weird paths; `SkipErrors` can suppress unexpected failures | -| `(*node.Node).CopyFile` | `node/node.go:200` | Caller source key, destination host path, permissions | Reads node content and calls `os.WriteFile(dst, ...)` directly | Only checks that `src` exists and is not a directory | Arbitrary host filesystem write to a caller-chosen `dst` path | +| `(*node.Node).ExportFile` | `node/node.go:200` | Caller source key, destination host path, permissions | Reads node content and calls `coreio.Local.WriteMode(dst, ...)` directly | Only checks that `src` exists and is not a directory | Arbitrary host filesystem write to a caller-chosen `dst` path | | `(*node.Node).CopyTo` | `node/node.go:218` | Caller target medium, source path, destination path | Reads node entries and calls `target.Write(destPath or destPath/rel, content)` | Only checks that the source exists | Stored `../`-style node 
keys can propagate into destination paths, enabling traversal or overwrite depending on the target backend | | `(*node.Node).EnsureDir` | `node/node.go:380` | Caller path (ignored) | No-op | Input is ignored | Semantic mismatch: callers may assume a directory boundary was created when directories remain implicit | diff --git a/io.go b/io.go index 41a2f6d..f0c17fa 100644 --- a/io.go +++ b/io.go @@ -189,7 +189,11 @@ func Copy(sourceMedium Medium, sourcePath string, destinationMedium Medium, dest if err != nil { return core.E("io.Copy", core.Concat("read failed: ", sourcePath), err) } - if err := destinationMedium.Write(destinationPath, content); err != nil { + mode := fs.FileMode(0644) + if info, err := sourceMedium.Stat(sourcePath); err == nil { + mode = info.Mode() + } + if err := destinationMedium.WriteMode(destinationPath, content, mode); err != nil { return core.E("io.Copy", core.Concat("write failed: ", destinationPath), err) } return nil @@ -271,9 +275,9 @@ func (medium *MemoryMedium) Write(path, content string) error { } // Example: _ = io.NewMemoryMedium().WriteMode("keys/private.key", "secret", 0600) -func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) error { +func (medium *MemoryMedium) WriteMode(filePath, content string, mode fs.FileMode) error { // Verify no ancestor directory component is stored as a file. - ancestor := path.Dir(path) + ancestor := path.Dir(filePath) for ancestor != "." 
&& ancestor != "" { if _, ok := medium.fileContents[ancestor]; ok { return core.E("io.MemoryMedium.WriteMode", core.Concat("ancestor path is a file: ", ancestor), fs.ErrExist) @@ -284,10 +288,13 @@ func (medium *MemoryMedium) WriteMode(path, content string, mode fs.FileMode) er } ancestor = next } - medium.ensureAncestorDirectories(path) - medium.fileContents[path] = content - medium.fileModes[path] = mode - medium.modificationTimes[path] = time.Now() + if _, ok := medium.directories[filePath]; ok { + return core.E("io.MemoryMedium.WriteMode", core.Concat("path is a directory: ", filePath), fs.ErrExist) + } + medium.ensureAncestorDirectories(filePath) + medium.fileContents[filePath] = content + medium.fileModes[filePath] = mode + medium.modificationTimes[filePath] = time.Now() return nil } @@ -537,6 +544,9 @@ func (writeCloser *MemoryWriteCloser) Write(data []byte) (int, error) { } func (writeCloser *MemoryWriteCloser) Close() error { + if _, ok := writeCloser.medium.directories[writeCloser.path]; ok { + return core.E("io.MemoryWriteCloser.Close", core.Concat("path is a directory: ", writeCloser.path), fs.ErrExist) + } writeCloser.medium.ensureAncestorDirectories(writeCloser.path) writeCloser.medium.fileContents[writeCloser.path] = string(writeCloser.data) writeCloser.medium.fileModes[writeCloser.path] = writeCloser.mode diff --git a/node/node.go b/node/node.go index 8d00c40..e8b095b 100644 --- a/node/node.go +++ b/node/node.go @@ -22,6 +22,8 @@ import ( // Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) // Example: snapshot, _ := nodeTree.ToTar() // Example: restored, _ := node.FromTar(snapshot) +// Note: Node is not goroutine-safe. All methods must be called from a single goroutine, +// or the caller must provide external synchronisation. 
type Node struct { files map[string]*dataFile } @@ -152,7 +154,13 @@ func (node *Node) Walk(root string, walkFunc fs.WalkDirFunc, options WalkOptions if walkResult == nil && options.MaxDepth > 0 && entry != nil && entry.IsDir() && entryPath != root { relativePath := core.TrimPrefix(entryPath, root) relativePath = core.TrimPrefix(relativePath, "/") - depth := len(core.Split(relativePath, "/")) + parts := core.Split(relativePath, "/") + depth := 0 + for _, part := range parts { + if part != "" { + depth++ + } + } if depth >= options.MaxDepth { return fs.SkipDir } @@ -174,23 +182,26 @@ func (node *Node) ReadFile(name string) ([]byte, error) { return result, nil } -// Example: _ = nodeTree.CopyFile("config/app.yaml", "backup/app.yaml", 0644) -func (node *Node) CopyFile(sourcePath, destinationPath string, permissions fs.FileMode) error { +// ExportFile writes a node file to the local filesystem. It operates on coreio.Local directly +// and is intentionally local-only — use CopyTo for Medium-agnostic transfers. 
+// Example: _ = nodeTree.ExportFile("config/app.yaml", "backup/app.yaml", 0644) +func (node *Node) ExportFile(sourcePath, destinationPath string, permissions fs.FileMode) error { sourcePath = core.TrimPrefix(sourcePath, "/") file, ok := node.files[sourcePath] if !ok { info, err := node.Stat(sourcePath) if err != nil { - return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) + return core.E("node.ExportFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) } if info.IsDir() { - return core.E("node.CopyFile", core.Concat("source is a directory: ", sourcePath), fs.ErrInvalid) + return core.E("node.ExportFile", core.Concat("source is a directory: ", sourcePath), fs.ErrInvalid) } - return core.E("node.CopyFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) + // unreachable: Stat only succeeds for directories when file is absent + return core.E("node.ExportFile", core.Concat("source not found: ", sourcePath), fs.ErrNotExist) } parent := core.PathDir(destinationPath) if parent != "." 
&& parent != "" && parent != destinationPath && !coreio.Local.IsDir(parent) { - return &fs.PathError{Op: "copyfile", Path: destinationPath, Err: fs.ErrNotExist} + return core.E("node.ExportFile", core.Concat("parent directory not found: ", destinationPath), fs.ErrNotExist) } return coreio.Local.WriteMode(destinationPath, string(file.content), permissions) } @@ -401,18 +412,42 @@ func (node *Node) DeleteAll(filePath string) error { } // Example: _ = nodeTree.Rename("drafts/todo.txt", "archive/todo.txt") +// Example: _ = nodeTree.Rename("drafts", "archive") func (node *Node) Rename(oldPath, newPath string) error { oldPath = core.TrimPrefix(oldPath, "/") newPath = core.TrimPrefix(newPath, "/") - file, ok := node.files[oldPath] - if !ok { - return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) + if file, ok := node.files[oldPath]; ok { + file.name = newPath + node.files[newPath] = file + delete(node.files, oldPath) + return nil } - file.name = newPath - node.files[newPath] = file - delete(node.files, oldPath) + // Directory rename: batch-rename all entries that share the prefix. 
+ oldPrefix := oldPath + "/" + newPrefix := newPath + "/" + renamed := 0 + toAdd := make(map[string]*dataFile) + toDelete := make([]string, 0) + for filePath, file := range node.files { + if core.HasPrefix(filePath, oldPrefix) { + updatedPath := core.Concat(newPrefix, core.TrimPrefix(filePath, oldPrefix)) + file.name = updatedPath + toAdd[updatedPath] = file + toDelete = append(toDelete, filePath) + renamed++ + } + } + for _, p := range toDelete { + delete(node.files, p) + } + for p, f := range toAdd { + node.files[p] = f + } + if renamed == 0 { + return core.E("node.Rename", core.Concat("path not found: ", oldPath), fs.ErrNotExist) + } return nil } diff --git a/node/node_test.go b/node/node_test.go index 5d2a21b..f0b46da 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -313,12 +313,12 @@ func TestNode_Walk_Good(t *testing.T) { }) } -func TestNode_CopyFile_Good(t *testing.T) { +func TestNode_ExportFile_Good(t *testing.T) { nodeTree := New() nodeTree.AddData("foo.txt", []byte("foo")) destinationPath := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("foo.txt", destinationPath, 0644) + err := nodeTree.ExportFile("foo.txt", destinationPath, 0644) require.NoError(t, err) content, err := coreio.Local.Read(destinationPath) @@ -326,24 +326,24 @@ func TestNode_CopyFile_Good(t *testing.T) { assert.Equal(t, "foo", content) } -func TestNode_CopyFile_Bad(t *testing.T) { +func TestNode_ExportFile_Bad(t *testing.T) { nodeTree := New() destinationPath := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("nonexistent.txt", destinationPath, 0644) + err := nodeTree.ExportFile("nonexistent.txt", destinationPath, 0644) assert.Error(t, err) nodeTree.AddData("foo.txt", []byte("foo")) - err = nodeTree.CopyFile("foo.txt", "/nonexistent_dir/test.txt", 0644) + err = nodeTree.ExportFile("foo.txt", "/nonexistent_dir/test.txt", 0644) assert.Error(t, err) } -func TestNode_CopyFile_DirectorySource_Bad(t *testing.T) { +func 
TestNode_ExportFile_DirectorySource_Bad(t *testing.T) { nodeTree := New() nodeTree.AddData("bar/baz.txt", []byte("baz")) destinationPath := core.Path(t.TempDir(), "test.txt") - err := nodeTree.CopyFile("bar", destinationPath, 0644) + err := nodeTree.ExportFile("bar", destinationPath, 0644) assert.Error(t, err) } diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 58ca1e2..71ecdc9 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -182,11 +182,28 @@ func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) // Example: &sigil.ShuffleMaskObfuscator{}, // Example: ) type ChaChaPolySigil struct { - Key []byte - Obfuscator PreObfuscator + key []byte + obfuscator PreObfuscator randomReader goio.Reader } +// Example: key := cipherSigil.Key() +func (s *ChaChaPolySigil) Key() []byte { + result := make([]byte, len(s.key)) + copy(result, s.key) + return result +} + +// Example: ob := cipherSigil.Obfuscator() +func (s *ChaChaPolySigil) Obfuscator() PreObfuscator { + return s.obfuscator +} + +// Example: cipherSigil.SetObfuscator(nil) +func (s *ChaChaPolySigil) SetObfuscator(obfuscator PreObfuscator) { + s.obfuscator = obfuscator +} + // Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef"), nil) // Example: ciphertext, _ := cipherSigil.In([]byte("payload")) // Example: plaintext, _ := cipherSigil.Out(ciphertext) @@ -203,27 +220,27 @@ func NewChaChaPolySigil(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, } return &ChaChaPolySigil{ - Key: keyCopy, - Obfuscator: obfuscator, + key: keyCopy, + obfuscator: obfuscator, randomReader: rand.Reader, }, nil } -func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { - if sigil.Key == nil { +func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { + if s.key == nil { return nil, NoKeyConfiguredError } if data == nil { return nil, nil } - aead, err := chacha20poly1305.NewX(sigil.Key) + aead, err := chacha20poly1305.NewX(s.key) if 
err != nil { return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err) } nonce := make([]byte, aead.NonceSize()) - reader := sigil.randomReader + reader := s.randomReader if reader == nil { reader = rand.Reader } @@ -232,8 +249,8 @@ func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { } obfuscated := data - if sigil.Obfuscator != nil { - obfuscated = sigil.Obfuscator.Obfuscate(data, nonce) + if s.obfuscator != nil { + obfuscated = s.obfuscator.Obfuscate(data, nonce) } ciphertext := aead.Seal(nonce, nonce, obfuscated, nil) @@ -241,15 +258,15 @@ func (sigil *ChaChaPolySigil) In(data []byte) ([]byte, error) { return ciphertext, nil } -func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { - if sigil.Key == nil { +func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { + if s.key == nil { return nil, NoKeyConfiguredError } if data == nil { return nil, nil } - aead, err := chacha20poly1305.NewX(sigil.Key) + aead, err := chacha20poly1305.NewX(s.key) if err != nil { return nil, core.E("sigil.ChaChaPolySigil.Out", "create cipher", err) } @@ -270,8 +287,8 @@ func (sigil *ChaChaPolySigil) Out(data []byte) ([]byte, error) { } plaintext := obfuscated - if sigil.Obfuscator != nil { - plaintext = sigil.Obfuscator.Deobfuscate(obfuscated, nonce) + if s.obfuscator != nil { + plaintext = s.obfuscator.Deobfuscate(obfuscated, nonce) } if len(plaintext) == 0 { diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index 41a20d2..d4f96a8 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -145,8 +145,8 @@ func TestCryptoSigil_NewChaChaPolySigil_Good(t *testing.T) { cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) assert.NotNil(t, cipherSigil) - assert.Equal(t, key, cipherSigil.Key) - assert.NotNil(t, cipherSigil.Obfuscator) + assert.Equal(t, key, cipherSigil.Key()) + assert.NotNil(t, cipherSigil.Obfuscator()) } func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { @@ 
-159,7 +159,7 @@ func TestCryptoSigil_NewChaChaPolySigil_KeyIsCopied_Good(t *testing.T) { require.NoError(t, err) key[0] ^= 0xFF - assert.Equal(t, original, cipherSigil.Key) + assert.Equal(t, original, cipherSigil.Key()) } func TestCryptoSigil_NewChaChaPolySigil_ShortKey_Bad(t *testing.T) { @@ -184,7 +184,7 @@ func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_Good(t *testing.T) { ob := &ShuffleMaskObfuscator{} cipherSigil, err := NewChaChaPolySigil(key, ob) require.NoError(t, err) - assert.Equal(t, ob, cipherSigil.Obfuscator) + assert.Equal(t, ob, cipherSigil.Obfuscator()) } func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscatorNil_Good(t *testing.T) { @@ -193,7 +193,7 @@ func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscatorNil_Good(t *testing.T) { cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) - assert.IsType(t, &XORObfuscator{}, cipherSigil.Obfuscator) + assert.IsType(t, &XORObfuscator{}, cipherSigil.Obfuscator()) } func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_InvalidKey_Bad(t *testing.T) { @@ -351,7 +351,7 @@ func TestCryptoSigil_ChaChaPolySigil_NoObfuscator_Good(t *testing.T) { _, _ = rand.Read(key) cipherSigil, _ := NewChaChaPolySigil(key, nil) - cipherSigil.Obfuscator = nil + cipherSigil.SetObfuscator(nil) plaintext := []byte("raw encryption without pre-obfuscation") ciphertext, err := cipherSigil.In(plaintext) diff --git a/store/medium.go b/store/medium.go index c107e11..6c0f0bc 100644 --- a/store/medium.go +++ b/store/medium.go @@ -76,6 +76,8 @@ func (medium *Medium) Write(entryPath, content string) error { } // Example: _ = medium.WriteMode("app/theme", "midnight", 0600) +// Note: mode is not persisted — the SQLite store has no entry_mode column. +// Use Write when mode is irrelevant; WriteMode satisfies the Medium interface only. 
func (medium *Medium) WriteMode(entryPath, content string, mode fs.FileMode) error { return medium.Write(entryPath, content) } @@ -144,23 +146,14 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { group, key := splitGroupKeyPath(entryPath) if group == "" { - rows, err := medium.keyValueStore.database.Query("SELECT DISTINCT group_name FROM entries ORDER BY group_name") + groups, err := medium.keyValueStore.ListGroups() if err != nil { - return nil, core.E("store.List", "query groups", err) + return nil, err } - defer rows.Close() - - var entries []fs.DirEntry - for rows.Next() { - var groupName string - if err := rows.Scan(&groupName); err != nil { - return nil, core.E("store.List", "scan", err) - } + entries := make([]fs.DirEntry, 0, len(groups)) + for _, groupName := range groups { entries = append(entries, &keyValueDirEntry{name: groupName, isDir: true}) } - if err := rows.Err(); err != nil { - return nil, core.E("store.List", "rows", err) - } return entries, nil } diff --git a/store/store.go b/store/store.go index 2b5efc2..31550bc 100644 --- a/store/store.go +++ b/store/store.go @@ -112,6 +112,28 @@ func (keyValueStore *KeyValueStore) DeleteGroup(group string) error { return nil } +// Example: groups, _ := keyValueStore.ListGroups() +func (keyValueStore *KeyValueStore) ListGroups() ([]string, error) { + rows, err := keyValueStore.database.Query("SELECT DISTINCT group_name FROM entries ORDER BY group_name") + if err != nil { + return nil, core.E("store.ListGroups", "query groups", err) + } + defer rows.Close() + + var groups []string + for rows.Next() { + var groupName string + if err := rows.Scan(&groupName); err != nil { + return nil, core.E("store.ListGroups", "scan", err) + } + groups = append(groups, groupName) + } + if err := rows.Err(); err != nil { + return nil, core.E("store.ListGroups", "rows", err) + } + return groups, nil +} + // Example: values, _ := keyValueStore.GetAll("app") func (keyValueStore *KeyValueStore) GetAll(group 
string) (map[string]string, error) { rows, err := keyValueStore.database.Query("SELECT entry_key, entry_value FROM entries WHERE group_name = ?", group) From 054be73f3337fd878590d8d3439a5f7d3eece32e Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 5 Apr 2026 13:20:21 +0100 Subject: [PATCH 82/83] fix: address CodeRabbit PR #2 findings (batch 3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - datanode: FromTar and Restore now rebuild directorySet from restored file entries so EnsureDir directories survive snapshot round-trips - node: CopyTo normalises "." sourcePath to "" so root files are included - node: Open and Stat always return a directory entry for the root ("") even when the node is empty; ReadDir returns fs.ErrNotExist for missing non-root directories instead of an empty slice - node: Write and nodeWriter.Close reject empty/"." paths with ErrInvalid - store: Rename returns nil immediately when oldPath == newPath (no-op) - store: Append captures Get errors and only treats NotFoundError as an empty starting value; unexpected read failures are now surfaced - docs: add "text" language tag to unlabelled fenced code blocks in RFC-CORE-008-AGENT-EXPERIENCE.md (sections 3, 7, 9, 10) - docs: remove duplicate Read/Write documentation blocks from RFC.md (local package section) - test: replace hardcoded /nonexistent_dir path in TestNode_ExportFile_Bad with a hermetic path under t.TempDir() Skipped findings 7 & 8 (kv migration and compatibility shims) — both were intentionally removed in f0b828a (drop legacy compatibility shims) and re-adding them would conflict with the AX refactor. 
Co-Authored-By: Virgil --- datanode/medium.go | 19 +++++++++++++-- docs/RFC-CORE-008-AGENT-EXPERIENCE.md | 8 +++---- docs/RFC.md | 15 ------------ node/node.go | 34 +++++++++++++++++++++------ node/node_test.go | 3 ++- store/medium.go | 8 ++++++- 6 files changed, 57 insertions(+), 30 deletions(-) diff --git a/datanode/medium.go b/datanode/medium.go index da53c8f..3e0e5bd 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -58,10 +58,12 @@ func FromTar(data []byte) (*Medium, error) { if err != nil { return nil, core.E("datanode.FromTar", "failed to restore", err) } - return &Medium{ + m := &Medium{ dataNode: dataNode, directorySet: make(map[string]bool), - }, nil + } + m.rebuildDirectorySetLocked() + return m, nil } // Example: snapshot, _ := medium.Snapshot() @@ -85,9 +87,22 @@ func (medium *Medium) Restore(data []byte) error { defer medium.lock.Unlock() medium.dataNode = dataNode medium.directorySet = make(map[string]bool) + medium.rebuildDirectorySetLocked() return nil } +// rebuildDirectorySetLocked walks all file entries and registers parent directories. +// Caller must hold at least a write lock. +func (medium *Medium) rebuildDirectorySetLocked() { + entries, err := medium.collectAllLocked() + if err != nil { + return + } + for _, name := range entries { + medium.ensureDirsLocked(path.Dir(name)) + } +} + // Example: dataNode := medium.DataNode() func (medium *Medium) DataNode() *borgdatanode.DataNode { medium.lock.RLock() diff --git a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md index 1bf599c..9c4fd8d 100644 --- a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md +++ b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md @@ -76,7 +76,7 @@ setup.Run(setup.Options{Path: "./my-module", Template: "php"}) File and directory paths should be self-describing. An agent navigating the filesystem should understand what it is looking at without reading a README. 
-``` +```text flow/deploy/to/homelab.yaml — deploy TO the homelab flow/deploy/from/github.yaml — deploy FROM GitHub flow/code/review.yaml — code review flow @@ -178,7 +178,7 @@ core.New(core.Options{ The directory structure tells an agent the intent before it reads a word. Top-level directories are semantic categories, not organisational bins. -``` +```text plans/ ├── code/ # Pure primitives — read for WHAT exists ├── project/ # Products — read for WHAT we're building and WHY @@ -208,7 +208,7 @@ code/core/gui/ → consumer tier (composes from go/*) Problems in code and specs are layered. Surface issues mask deeper issues. Fixing the surface reveals the next layer. This is not a failure mode — it is the discovery process. -``` +```text Pass 1: Find 16 issues (surface — naming, imports, obvious errors) Pass 2: Find 11 issues (structural — contradictions, missing types) Pass 3: Find 5 issues (architectural — signature mismatches, registration gaps) @@ -227,7 +227,7 @@ Pass N: Findings are trivial → spec/code is complete Unit tests verify the code. CLI tests verify the binary. The directory structure IS the command structure — path maps to command, Taskfile runs the test. -``` +```text tests/cli/ ├── core/ │ └── lint/ diff --git a/docs/RFC.md b/docs/RFC.md index c6feb36..fa6baea 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -751,21 +751,6 @@ _ = m.Write("old.txt", "data") _ = m.Rename("old.txt", "new.txt") ``` -**Read(path string) (string, error)** -Example: -```go -m, _ := local.New("/srv/app") -_ = m.Write("notes.txt", "hello") -value, _ := m.Read("notes.txt") -``` - -**Write(path, content string) error** -Example: -```go -m, _ := local.New("/srv/app") -_ = m.Write("notes.txt", "hello") -``` - ## Package node (`dappco.re/go/core/io/node`) In-memory filesystem implementing `io.Medium` and `fs.FS`, with tar serialisation. 
diff --git a/node/node.go b/node/node.go index e8b095b..e5815f2 100644 --- a/node/node.go +++ b/node/node.go @@ -209,6 +209,9 @@ func (node *Node) ExportFile(sourcePath, destinationPath string, permissions fs. // Example: _ = nodeTree.CopyTo(io.NewMemoryMedium(), "config", "backup/config") func (node *Node) CopyTo(target coreio.Medium, sourcePath, destinationPath string) error { sourcePath = core.TrimPrefix(sourcePath, "/") + if sourcePath == "." { + sourcePath = "" + } info, err := node.Stat(sourcePath) if err != nil { return err @@ -246,13 +249,16 @@ func (node *Node) CopyTo(target coreio.Medium, sourcePath, destinationPath strin // Example: file, _ := nodeTree.Open("config/app.yaml") func (node *Node) Open(name string) (fs.File, error) { name = core.TrimPrefix(name, "/") + if name == "." { + name = "" + } if dataFile, ok := node.files[name]; ok { return &dataFileReader{file: dataFile}, nil } - prefix := name + "/" - if name == "." || name == "" { - prefix = "" + if name == "" { + return &dirFile{path: ".", modTime: time.Now()}, nil } + prefix := name + "/" for filePath := range node.files { if core.HasPrefix(filePath, prefix) { return &dirFile{path: name, modTime: time.Now()}, nil @@ -264,13 +270,16 @@ func (node *Node) Open(name string) (fs.File, error) { // Example: info, _ := nodeTree.Stat("config/app.yaml") func (node *Node) Stat(name string) (fs.FileInfo, error) { name = core.TrimPrefix(name, "/") + if name == "." { + name = "" + } if dataFile, ok := node.files[name]; ok { return dataFile.Stat() } - prefix := name + "/" - if name == "." 
|| name == "" { - prefix = "" + if name == "" { + return &dirInfo{name: ".", modTime: time.Now()}, nil } + prefix := name + "/" for filePath := range node.files { if core.HasPrefix(filePath, prefix) { return &dirInfo{name: path.Base(name), modTime: time.Now()}, nil @@ -286,7 +295,11 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { name = "" } - if info, err := node.Stat(name); err == nil && !info.IsDir() { + info, statErr := node.Stat(name) + if statErr != nil { + return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrNotExist} + } + if !info.IsDir() { return nil, &fs.PathError{Op: "readdir", Path: name, Err: fs.ErrInvalid} } @@ -340,6 +353,10 @@ func (node *Node) Read(filePath string) (string, error) { // Example: _ = nodeTree.Write("config/app.yaml", "port: 8080") func (node *Node) Write(filePath, content string) error { + filePath = core.TrimPrefix(filePath, "/") + if filePath == "" || filePath == "." { + return core.E("node.Write", "empty path", fs.ErrInvalid) + } node.AddData(filePath, []byte(content)) return nil } @@ -501,6 +518,9 @@ func (writer *nodeWriter) Write(data []byte) (int, error) { } func (writer *nodeWriter) Close() error { + if writer.path == "" || writer.path == "." 
{ + return core.E("node.nodeWriter.Close", "empty path", fs.ErrInvalid) + } writer.node.files[writer.path] = &dataFile{ name: writer.path, content: writer.buffer, diff --git a/node/node_test.go b/node/node_test.go index f0b46da..bdb72cf 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -334,7 +334,8 @@ func TestNode_ExportFile_Bad(t *testing.T) { assert.Error(t, err) nodeTree.AddData("foo.txt", []byte("foo")) - err = nodeTree.ExportFile("foo.txt", "/nonexistent_dir/test.txt", 0644) + nonExistentParent := core.Path(t.TempDir(), "nonexistent_subdir", "test.txt") + err = nodeTree.ExportFile("foo.txt", nonExistentParent, 0644) assert.Error(t, err) } diff --git a/store/medium.go b/store/medium.go index 6c0f0bc..78bccfd 100644 --- a/store/medium.go +++ b/store/medium.go @@ -131,6 +131,9 @@ func (medium *Medium) Rename(oldPath, newPath string) error { if oldKey == "" || newKey == "" { return core.E("store.Rename", "both paths must include group/key", fs.ErrInvalid) } + if oldGroup == newGroup && oldKey == newKey { + return nil + } value, err := medium.keyValueStore.Get(oldGroup, oldKey) if err != nil { return err @@ -226,7 +229,10 @@ func (medium *Medium) Append(entryPath string) (goio.WriteCloser, error) { if key == "" { return nil, core.E("store.Append", "path must include group/key", fs.ErrInvalid) } - existingValue, _ := medium.keyValueStore.Get(group, key) + existingValue, err := medium.keyValueStore.Get(group, key) + if err != nil && !core.Is(err, NotFoundError) { + return nil, core.E("store.Append", core.Concat("failed to read existing content: ", entryPath), err) + } return &keyValueWriteCloser{keyValueStore: medium.keyValueStore, group: group, key: key, data: []byte(existingValue)}, nil } From d07c4ceb587179922f60f900c0e9f2576959b73c Mon Sep 17 00:00:00 2001 From: Snider Date: Sun, 5 Apr 2026 13:37:12 +0100 Subject: [PATCH 83/83] fix(store): export ErrNotDirectory sentinel and fix duplicate RFC method entries - store/medium.go: define and export 
ErrNotDirectory using core.E(); return it from List when key != "" instead of silent (nil, nil) - docs/RFC-CORE-008-AGENT-EXPERIENCE.md: add `text` language tag to all five unlabeled fenced code blocks (MD040) - docs/RFC.md: remove duplicate Read/Write method entries that appeared after IsFile in the node, store, sqlite, s3, and datanode backend sections Co-Authored-By: Virgil --- docs/RFC-CORE-008-AGENT-EXPERIENCE.md | 10 ++-- docs/RFC.md | 78 --------------------------- store/medium.go | 6 ++- 3 files changed, 10 insertions(+), 84 deletions(-) diff --git a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md index 9c4fd8d..c950dfa 100644 --- a/docs/RFC-CORE-008-AGENT-EXPERIENCE.md +++ b/docs/RFC-CORE-008-AGENT-EXPERIENCE.md @@ -193,7 +193,7 @@ plans/ Dependency flows one direction. Libraries define primitives. Consumers compose from them. A new feature in a consumer can never break a library. -``` +```text code/core/go/* → lib tier (stable foundation) code/core/agent/ → consumer tier (composes from go/*) code/core/cli/ → consumer tier (composes from go/*) @@ -266,7 +266,7 @@ tasks: ### File Structure -``` +```text # AX-native: path describes content core/agent/ ├── go/ # Go source @@ -326,7 +326,7 @@ The `plans/` directory structure encodes a development methodology designed for ### The Three-Way Split -``` +```text plans/ ├── project/ # 1. WHAT and WHY — start here ├── rfc/ # 2. CONSTRAINTS — immutable contracts @@ -369,7 +369,7 @@ The code spec IS the product. Write the spec → dispatch to an agent → review Before dispatching for implementation, verify spec-model alignment: -``` +```text 1. REVIEW — The implementation model (Codex/Jules) reads the spec and reports missing elements. This surfaces the delta between the model's training and the spec's assumptions. @@ -393,7 +393,7 @@ Before dispatching for implementation, verify spec-model alignment: Same prompt, multiple runs. 
Each pass sees deeper because the context evolved: -``` +```text Round 1: Build features (the obvious gaps) Round 2: Write tests (verify what was built) Round 3: Harden security (what can go wrong?) diff --git a/docs/RFC.md b/docs/RFC.md index fa6baea..fe290f1 100644 --- a/docs/RFC.md +++ b/docs/RFC.md @@ -911,23 +911,6 @@ n := node.New() _ = n.WriteMode("file.txt", "data", 0600) ``` -**Read(p string) (string, error)** -Alias for `Read`. -Example: -```go -n := node.New() -_ = n.Write("file.txt", "data") -value, _ := n.Read("file.txt") -``` - -**Write(p, content string) error** -Alias for `Write`. -Example: -```go -n := node.New() -_ = n.Write("file.txt", "data") -``` - **EnsureDir(path string) error** No-op (directories are implicit). Example: @@ -1223,21 +1206,6 @@ _ = m.Write("config/theme", "midnight") ok := m.IsFile("config/theme") ``` -**Read(p string) (string, error)** -Example: -```go -m, _ := store.NewMedium(store.Options{Path: ":memory:"}) -_ = m.Write("config/theme", "midnight") -value, _ := m.Read("config/theme") -``` - -**Write(p, content string) error** -Example: -```go -m, _ := store.NewMedium(store.Options{Path: ":memory:"}) -_ = m.Write("config/theme", "midnight") -``` - **Delete(p string) error** Example: ```go @@ -1411,21 +1379,6 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**Read(p string) (string, error)** -Example: -```go -m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -_ = m.Write("notes.txt", "hello") -value, _ := m.Read("notes.txt") -``` - -**Write(p, content string) error** -Example: -```go -m, _ := sqlite.New(sqlite.Options{Path: ":memory:"}) -_ = m.Write("notes.txt", "hello") -``` - **Delete(p string) error** Example: ```go @@ -1606,22 +1559,6 @@ m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) ok := m.IsFile("notes.txt") ``` -**Read(p string) (string, error)** -Example: -```go -client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) -m, _ := s3.New(s3.Options{Bucket: "bucket", Client: 
client}) -value, _ := m.Read("notes.txt") -``` - -**Write(p, content string) error** -Example: -```go -client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) -m, _ := s3.New(s3.Options{Bucket: "bucket", Client: client}) -_ = m.Write("notes.txt", "hello") -``` - **Delete(p string) error** Example: ```go @@ -1822,21 +1759,6 @@ _ = m.Write("notes.txt", "hello") ok := m.IsFile("notes.txt") ``` -**Read(p string) (string, error)** -Example: -```go -m := datanode.New() -_ = m.Write("notes.txt", "hello") -value, _ := m.Read("notes.txt") -``` - -**Write(p, content string) error** -Example: -```go -m := datanode.New() -_ = m.Write("notes.txt", "hello") -``` - **Delete(p string) error** Example: ```go diff --git a/store/medium.go b/store/medium.go index 78bccfd..e085abd 100644 --- a/store/medium.go +++ b/store/medium.go @@ -11,6 +11,10 @@ import ( coreio "dappco.re/go/core/io" ) +// ErrNotDirectory is returned by List when the path resolves to a key rather than a group. +// Example: _, err := medium.List("app/theme") // err == store.ErrNotDirectory +var ErrNotDirectory = core.E("store", "path is a key, not a directory", fs.ErrInvalid) + // Example: medium, _ := store.NewMedium(store.Options{Path: "config.db"}) // Example: _ = medium.Write("app/theme", "midnight") // Example: entries, _ := medium.List("") @@ -161,7 +165,7 @@ func (medium *Medium) List(entryPath string) ([]fs.DirEntry, error) { } if key != "" { - return nil, nil + return nil, ErrNotDirectory } all, err := medium.keyValueStore.GetAll(group)