diff --git a/actions.go b/actions.go new file mode 100644 index 0000000..92460bc --- /dev/null +++ b/actions.go @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Example: io.RegisterActions(c) +// Example: result := c.Action("core.io.local.read").Run(ctx, core.NewOptions( +// Example: core.Option{Key: "root", Value: "/srv/app"}, +// Example: core.Option{Key: "path", Value: "config/app.yaml"}, +// Example: )) +package io + +import ( + "context" + "io/fs" + + core "dappco.re/go/core" +) + +// Named action identifiers used by Core consumers. Each maps to a Medium +// operation with a predictable path name. +// +// Example: result := c.Action(io.ActionLocalRead).Run(ctx, opts) +const ( + ActionLocalRead = "core.io.local.read" + ActionLocalWrite = "core.io.local.write" + ActionLocalList = "core.io.local.list" + ActionLocalDelete = "core.io.local.delete" + + ActionMemoryRead = "core.io.memory.read" + ActionMemoryWrite = "core.io.memory.write" + + ActionGitHubClone = "core.io.github.clone" + ActionGitHubRead = "core.io.github.read" + + ActionPWAScrape = "core.io.pwa.scrape" + + ActionSFTPRead = "core.io.sftp.read" + ActionSFTPWrite = "core.io.sftp.write" + + ActionS3Read = "core.io.s3.read" + ActionS3Write = "core.io.s3.write" + + ActionCubeRead = "core.io.cube.read" + ActionCubeWrite = "core.io.cube.write" + ActionCubePack = "core.io.cube.pack" + ActionCubeUnpack = "core.io.cube.unpack" + + ActionCopy = "core.io.copy" +) + +// memoryActionStore is the shared in-memory backing for +// core.io.memory.read/core.io.memory.write. Keeping it package-level lets the +// two actions agree on state without the caller supplying a backend. +var memoryActionStore = NewMemoryMedium() + +// Example: io.RegisterActions(c) +// +// RegisterActions installs the named actions listed in the go-io RFC §15 on +// the given Core. Consumers call this at service registration time so that any +// agent or CLI can dispatch Medium operations by name. 
+func RegisterActions(c *core.Core) { + if c == nil { + return + } + c.Action(ActionLocalRead, localReadAction) + c.Action(ActionLocalWrite, localWriteAction) + c.Action(ActionLocalList, localListAction) + c.Action(ActionLocalDelete, localDeleteAction) + c.Action(ActionMemoryRead, memoryReadAction) + c.Action(ActionMemoryWrite, memoryWriteAction) + c.Action(ActionGitHubClone, githubNotImplementedAction) + c.Action(ActionGitHubRead, githubNotImplementedAction) + c.Action(ActionPWAScrape, pwaNotImplementedAction) + c.Action(ActionSFTPRead, mediumReadAction("io.sftp.readAction")) + c.Action(ActionSFTPWrite, mediumWriteAction("io.sftp.writeAction")) + c.Action(ActionS3Read, mediumReadAction("io.s3.readAction")) + c.Action(ActionS3Write, mediumWriteAction("io.s3.writeAction")) + c.Action(ActionCopy, copyAction) +} + +// Example: opts := core.NewOptions(core.Option{Key: "root", Value: "/srv/app"}, core.Option{Key: "path", Value: "config/app.yaml"}) +func localReadAction(_ context.Context, opts core.Options) core.Result { + medium, err := localMediumFromOptions(opts) + if err != nil { + return core.Result{}.New(err) + } + content, err := medium.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +// Example: opts := core.NewOptions(core.Option{Key: "root", Value: "/srv/app"}, core.Option{Key: "path", Value: "log.txt"}, core.Option{Key: "content", Value: "event"}) +func localWriteAction(_ context.Context, opts core.Options) core.Result { + medium, err := localMediumFromOptions(opts) + if err != nil { + return core.Result{}.New(err) + } + if err := medium.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} + +// Example: opts := core.NewOptions(core.Option{Key: "root", Value: "/srv/app"}, core.Option{Key: "path", Value: "config"}) +func localListAction(_ context.Context, opts core.Options) core.Result { + medium, 
err := localMediumFromOptions(opts) + if err != nil { + return core.Result{}.New(err) + } + entries, err := medium.List(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: entries, OK: true} +} + +// Example: opts := core.NewOptions(core.Option{Key: "root", Value: "/srv/app"}, core.Option{Key: "path", Value: "tmp/old.log"}) +func localDeleteAction(_ context.Context, opts core.Options) core.Result { + medium, err := localMediumFromOptions(opts) + if err != nil { + return core.Result{}.New(err) + } + path := opts.String("path") + recursive := opts.Bool("recursive") + if recursive { + if err := medium.DeleteAll(path); err != nil { + return core.Result{}.New(err) + } + } else { + if err := medium.Delete(path); err != nil { + return core.Result{}.New(err) + } + } + return core.Result{OK: true} +} + +// Example: opts := core.NewOptions(core.Option{Key: "path", Value: "config/app.yaml"}) +func memoryReadAction(_ context.Context, opts core.Options) core.Result { + content, err := memoryActionStore.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +// Example: opts := core.NewOptions(core.Option{Key: "path", Value: "config/app.yaml"}, core.Option{Key: "content", Value: "port: 8080"}) +func memoryWriteAction(_ context.Context, opts core.Options) core.Result { + if err := memoryActionStore.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} + +func githubNotImplementedAction(context.Context, core.Options) core.Result { + return core.Result{ + OK: false, + Value: core.E("io.github", "not implemented — see #633 for backend tracking", nil), + } +} + +func pwaNotImplementedAction(context.Context, core.Options) core.Result { + return core.Result{ + OK: false, + Value: core.E("io.pwa", "not implemented — see #633 for backend tracking", nil), + } +} + +func 
mediumReadAction(operation string) core.ActionHandler { + return func(_ context.Context, opts core.Options) core.Result { + medium, err := mediumFromOptions(opts, operation) + if err != nil { + return core.Result{}.New(err) + } + content, err := medium.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} + } +} + +func mediumWriteAction(operation string) core.ActionHandler { + return func(_ context.Context, opts core.Options) core.Result { + medium, err := mediumFromOptions(opts, operation) + if err != nil { + return core.Result{}.New(err) + } + if err := medium.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} + } +} + +// Example: opts := core.NewOptions( +// Example: core.Option{Key: "source", Value: sourceMedium}, +// Example: core.Option{Key: "sourcePath", Value: "input.txt"}, +// Example: core.Option{Key: "destination", Value: destinationMedium}, +// Example: core.Option{Key: "destinationPath", Value: "backup/input.txt"}, +// Example: ) +func copyAction(_ context.Context, opts core.Options) core.Result { + source, ok := opts.Get("source").Value.(Medium) + if !ok { + return core.Result{}.New(core.E("io.copyAction", "source medium is required", fs.ErrInvalid)) + } + destination, ok := opts.Get("destination").Value.(Medium) + if !ok { + return core.Result{}.New(core.E("io.copyAction", "destination medium is required", fs.ErrInvalid)) + } + if err := Copy(source, opts.String("sourcePath"), destination, opts.String("destinationPath")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} + +// localMediumFromOptions constructs a sandboxed local Medium using the +// "root" option. 
+func localMediumFromOptions(opts core.Options) (Medium, error) { + root := opts.String("root") + if root == "" { + return nil, core.E("io.localMediumFromOptions", "root is required", fs.ErrInvalid) + } + return NewSandboxed(root) +} + +func mediumFromOptions(opts core.Options, operation string) (Medium, error) { + medium, ok := opts.Get("medium").Value.(Medium) + if !ok { + return nil, core.E(operation, "medium is required", fs.ErrInvalid) + } + return medium, nil +} + +// ResetMemoryActionStore clears the in-memory state used by memory action +// handlers. Tests call this to isolate runs from each other. +// +// Example: io.ResetMemoryActionStore() +func ResetMemoryActionStore() { + memoryActionStore = NewMemoryMedium() +} diff --git a/actions_test.go b/actions_test.go new file mode 100644 index 0000000..a85e12f --- /dev/null +++ b/actions_test.go @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package io_test + +import ( + "bytes" + "context" + goio "io" + "io/fs" + "net" + "strings" + "sync" + "testing" + "time" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + "dappco.re/go/io/cube" + iosftp "dappco.re/go/io/pkg/medium/sftp" + ios3 "dappco.re/go/io/s3" + "github.com/aws/aws-sdk-go-v2/aws" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" + pkgsftp "github.com/pkg/sftp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var actionTestCubeKey = []byte("0123456789abcdef0123456789abcdef") + +func TestActions_RegisterActions_Good(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + for _, name := range []string{ + coreio.ActionLocalRead, coreio.ActionLocalWrite, coreio.ActionLocalList, coreio.ActionLocalDelete, + coreio.ActionMemoryRead, coreio.ActionMemoryWrite, + coreio.ActionGitHubClone, coreio.ActionGitHubRead, coreio.ActionPWAScrape, + coreio.ActionSFTPRead, coreio.ActionSFTPWrite, + coreio.ActionS3Read, coreio.ActionS3Write, + coreio.ActionCopy, + } { + assert.True(t, c.Action(name).Exists(), 
name) + } +} + +func TestActions_RegisterActions_Bad(t *testing.T) { + // Nil Core must not panic and must be a no-op. + assert.NotPanics(t, func() { coreio.RegisterActions(nil) }) +} + +func TestActions_RegisterActions_Ugly(t *testing.T) { + // Calling RegisterActions twice on the same Core is safe (idempotent overwrite). + c := core.New() + coreio.RegisterActions(c) + assert.NotPanics(t, func() { coreio.RegisterActions(c) }) + assert.True(t, c.Action(coreio.ActionMemoryRead).Exists()) +} + +func TestActions_LocalRead_Good(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Prime a file via the write action, then read it back via the read action. + writeResult := c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "hello.txt"}, + core.Option{Key: "content", Value: "world"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(coreio.ActionLocalRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "hello.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "world", readResult.Value) +} + +func TestActions_LocalRead_Bad(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Reading a missing file returns !OK and an error in Value. + result := c.Action(coreio.ActionLocalRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, result.OK) +} + +func TestActions_LocalRead_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + // Empty path — read attempts to read the sandbox root which is not a file. 
+ result := c.Action(coreio.ActionLocalRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: t.TempDir()}, + core.Option{Key: "path", Value: ""}, + )) + assert.False(t, result.OK) +} + +func TestActions_LocalList_Good(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + require.True(t, c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "a.txt"}, + core.Option{Key: "content", Value: "alpha"}, + )).OK) + require.True(t, c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "b.txt"}, + core.Option{Key: "content", Value: "beta"}, + )).OK) + + listResult := c.Action(coreio.ActionLocalList).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: ""}, + )) + require.True(t, listResult.OK) + entries, ok := listResult.Value.([]fs.DirEntry) + require.True(t, ok) + assert.Len(t, entries, 2) +} + +func TestActions_LocalList_Bad(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Listing a path that does not exist returns !OK. + result := c.Action(coreio.ActionLocalList).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "missing"}, + )) + assert.False(t, result.OK) +} + +func TestActions_LocalList_Ugly(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Missing root must fail instead of falling back to host root. 
+ result := c.Action(coreio.ActionLocalList).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: tempDir}, + )) + assert.False(t, result.OK) +} + +func TestActions_LocalDelete_Good(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + require.True(t, c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "temp.txt"}, + core.Option{Key: "content", Value: "ephemeral"}, + )).OK) + + result := c.Action(coreio.ActionLocalDelete).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "temp.txt"}, + )) + assert.True(t, result.OK) +} + +func TestActions_LocalDelete_Bad(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Deleting a missing file returns !OK. + result := c.Action(coreio.ActionLocalDelete).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, result.OK) +} + +func TestActions_LocalDelete_Ugly(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + + // Recursive delete of a subtree. 
+ require.True(t, c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "branch/a.txt"}, + core.Option{Key: "content", Value: "a"}, + )).OK) + require.True(t, c.Action(coreio.ActionLocalWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "branch/b.txt"}, + core.Option{Key: "content", Value: "b"}, + )).OK) + + result := c.Action(coreio.ActionLocalDelete).Run(context.Background(), core.NewOptions( + core.Option{Key: "root", Value: tempDir}, + core.Option{Key: "path", Value: "branch"}, + core.Option{Key: "recursive", Value: true}, + )) + assert.True(t, result.OK) +} + +func TestActions_MemoryRoundTrip_Good(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + defer coreio.ResetMemoryActionStore() + coreio.ResetMemoryActionStore() + + writeResult := c.Action(coreio.ActionMemoryWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "config/app.yaml"}, + core.Option{Key: "content", Value: "port: 8080"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(coreio.ActionMemoryRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "config/app.yaml"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "port: 8080", readResult.Value) +} + +func TestActions_MemoryRoundTrip_Bad(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + coreio.ResetMemoryActionStore() + + // Reading a missing path returns !OK. + result := c.Action(coreio.ActionMemoryRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, result.OK) +} + +func TestActions_MemoryRoundTrip_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + coreio.ResetMemoryActionStore() + + // ResetMemoryActionStore clears previous state between actions. 
+ writeResult := c.Action(coreio.ActionMemoryWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "tmp.txt"}, + core.Option{Key: "content", Value: "payload"}, + )) + require.True(t, writeResult.OK) + + coreio.ResetMemoryActionStore() + + readResult := c.Action(coreio.ActionMemoryRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "tmp.txt"}, + )) + assert.False(t, readResult.OK) +} + +func TestActions_Copy_Good(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + source := coreio.NewMemoryMedium() + destination := coreio.NewMemoryMedium() + require.NoError(t, source.Write("input.txt", "payload")) + + result := c.Action(coreio.ActionCopy).Run(context.Background(), core.NewOptions( + core.Option{Key: "source", Value: coreio.Medium(source)}, + core.Option{Key: "sourcePath", Value: "input.txt"}, + core.Option{Key: "destination", Value: coreio.Medium(destination)}, + core.Option{Key: "destinationPath", Value: "backup/input.txt"}, + )) + require.True(t, result.OK) + + content, err := destination.Read("backup/input.txt") + require.NoError(t, err) + assert.Equal(t, "payload", content) +} + +func TestActions_Copy_Bad(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + // Missing source medium must fail. + result := c.Action(coreio.ActionCopy).Run(context.Background(), core.NewOptions( + core.Option{Key: "sourcePath", Value: "input.txt"}, + core.Option{Key: "destination", Value: coreio.Medium(coreio.NewMemoryMedium())}, + core.Option{Key: "destinationPath", Value: "backup/input.txt"}, + )) + assert.False(t, result.OK) +} + +func TestActions_Copy_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + source := coreio.NewMemoryMedium() + // Source file does not exist — copy must surface the read error. 
+ result := c.Action(coreio.ActionCopy).Run(context.Background(), core.NewOptions( + core.Option{Key: "source", Value: coreio.Medium(source)}, + core.Option{Key: "sourcePath", Value: "missing.txt"}, + core.Option{Key: "destination", Value: coreio.Medium(coreio.NewMemoryMedium())}, + core.Option{Key: "destinationPath", Value: "dest.txt"}, + )) + assert.False(t, result.OK) +} + +func TestActions_S3ReadWrite_Good(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + medium := newActionS3Medium(t) + + writeResult := c.Action(coreio.ActionS3Write).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "reports/daily.txt"}, + core.Option{Key: "content", Value: "done"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(coreio.ActionS3Read).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "reports/daily.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "done", readResult.Value) +} + +func TestActions_S3ReadWrite_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + medium := newActionS3Medium(t) + + readResult := c.Action(coreio.ActionS3Read).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, readResult.OK) + + writeResult := c.Action(coreio.ActionS3Write).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: ""}, + core.Option{Key: "content", Value: "payload"}, + )) + assert.False(t, writeResult.OK) +} + +func TestActions_SFTPReadWrite_Good(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + medium := newActionSFTPTestMedium(t) + + writeResult := c.Action(coreio.ActionSFTPWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", 
Value: "notes/todo.txt"}, + core.Option{Key: "content", Value: "ship sftp"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(coreio.ActionSFTPRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "notes/todo.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "ship sftp", readResult.Value) +} + +func TestActions_SFTPReadWrite_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + medium := newActionSFTPTestMedium(t) + + readResult := c.Action(coreio.ActionSFTPRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, readResult.OK) + + writeResult := c.Action(coreio.ActionSFTPWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: ""}, + core.Option{Key: "content", Value: "payload"}, + )) + assert.False(t, writeResult.OK) +} + +func TestActions_CubeReadWritePackUnpack_Good(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + coreio.RegisterActions(c) + cube.RegisterActions(c) + + inner := coreio.NewMemoryMedium() + cubeMedium, err := cube.New(cube.Options{Inner: inner, Key: actionTestCubeKey}) + require.NoError(t, err) + + writeResult := c.Action(coreio.ActionCubeWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: coreio.Medium(cubeMedium)}, + core.Option{Key: "path", Value: "secret.txt"}, + core.Option{Key: "content", Value: "classified"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(coreio.ActionCubeRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: coreio.Medium(cubeMedium)}, + core.Option{Key: "path", Value: "secret.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "classified", readResult.Value) + + innerContract := coreio.NewMemoryMedium() + contractWrite := 
c.Action(coreio.ActionCubeWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(innerContract)}, + core.Option{Key: "key", Value: actionTestCubeKey}, + core.Option{Key: "path", Value: "inner.txt"}, + core.Option{Key: "content", Value: "via inner"}, + )) + require.True(t, contractWrite.OK) + + contractRead := c.Action(coreio.ActionCubeRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(innerContract)}, + core.Option{Key: "key", Value: actionTestCubeKey}, + core.Option{Key: "path", Value: "inner.txt"}, + )) + require.True(t, contractRead.OK) + assert.Equal(t, "via inner", contractRead.Value) + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("config/app.yaml", "port: 8080")) + outputPath := tempDir + "/app.cube" + packResult := c.Action(coreio.ActionCubePack).Run(context.Background(), core.NewOptions( + core.Option{Key: "source", Value: coreio.Medium(source)}, + core.Option{Key: "output", Value: outputPath}, + core.Option{Key: "key", Value: actionTestCubeKey}, + )) + require.True(t, packResult.OK) + + destination := coreio.NewMemoryMedium() + unpackResult := c.Action(coreio.ActionCubeUnpack).Run(context.Background(), core.NewOptions( + core.Option{Key: "cube", Value: outputPath}, + core.Option{Key: "destination", Value: coreio.Medium(destination)}, + core.Option{Key: "key", Value: actionTestCubeKey}, + )) + require.True(t, unpackResult.OK) + + content, err := destination.Read("config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) +} + +func TestActions_CubeReadWritePackUnpack_Ugly(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + cube.RegisterActions(c) + + readResult := c.Action(coreio.ActionCubeRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(coreio.NewMemoryMedium())}, + core.Option{Key: "key", Value: actionTestCubeKey}, + core.Option{Key: "path", Value: 
"missing.txt"}, + )) + assert.False(t, readResult.OK) + + writeResult := c.Action(coreio.ActionCubeWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(coreio.NewMemoryMedium())}, + core.Option{Key: "key", Value: []byte("short")}, + core.Option{Key: "path", Value: "secret.txt"}, + core.Option{Key: "content", Value: "payload"}, + )) + assert.False(t, writeResult.OK) + + packResult := c.Action(coreio.ActionCubePack).Run(context.Background(), core.NewOptions( + core.Option{Key: "output", Value: t.TempDir() + "/app.cube"}, + core.Option{Key: "key", Value: actionTestCubeKey}, + )) + assert.False(t, packResult.OK) + + unpackResult := c.Action(coreio.ActionCubeUnpack).Run(context.Background(), core.NewOptions( + core.Option{Key: "cube", Value: t.TempDir() + "/missing.cube"}, + core.Option{Key: "destination", Value: coreio.Medium(coreio.NewMemoryMedium())}, + core.Option{Key: "key", Value: actionTestCubeKey}, + )) + assert.False(t, unpackResult.OK) +} + +func TestActions_GitHubPWAStubs_Bad(t *testing.T) { + c := core.New() + coreio.RegisterActions(c) + + for _, name := range []string{coreio.ActionGitHubClone, coreio.ActionGitHubRead, coreio.ActionPWAScrape} { + result := c.Action(name).Run(context.Background(), core.NewOptions()) + require.False(t, result.OK, name) + err, ok := result.Value.(error) + require.True(t, ok, name) + assert.Contains(t, err.Error(), "not implemented", name) + assert.Contains(t, err.Error(), "#633", name) + } +} + +type actionTestS3Client struct { + mu sync.RWMutex + objects map[string][]byte +} + +func newActionS3Medium(t *testing.T) *ios3.Medium { + t.Helper() + medium, err := ios3.New(ios3.Options{ + Bucket: "bucket", + Client: &actionTestS3Client{objects: make(map[string][]byte)}, + }) + require.NoError(t, err) + return medium +} + +func (client *actionTestS3Client) GetObject(_ context.Context, params *awss3.GetObjectInput, _ ...func(*awss3.Options)) (*awss3.GetObjectOutput, error) { + 
client.mu.RLock() + defer client.mu.RUnlock() + + key := aws.ToString(params.Key) + data, ok := client.objects[key] + if !ok { + return nil, core.E("actionsTest.s3.GetObject", "key not found", fs.ErrNotExist) + } + return &awss3.GetObjectOutput{ + Body: goio.NopCloser(bytes.NewReader(data)), + ContentLength: aws.Int64(int64(len(data))), + }, nil +} + +func (client *actionTestS3Client) PutObject(_ context.Context, params *awss3.PutObjectInput, _ ...func(*awss3.Options)) (*awss3.PutObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() + + data, err := goio.ReadAll(params.Body) + if err != nil { + return nil, err + } + client.objects[aws.ToString(params.Key)] = data + return &awss3.PutObjectOutput{}, nil +} + +func (client *actionTestS3Client) DeleteObject(_ context.Context, params *awss3.DeleteObjectInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() + delete(client.objects, aws.ToString(params.Key)) + return &awss3.DeleteObjectOutput{}, nil +} + +func (client *actionTestS3Client) DeleteObjects(_ context.Context, params *awss3.DeleteObjectsInput, _ ...func(*awss3.Options)) (*awss3.DeleteObjectsOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() + for _, object := range params.Delete.Objects { + delete(client.objects, aws.ToString(object.Key)) + } + return &awss3.DeleteObjectsOutput{}, nil +} + +func (client *actionTestS3Client) HeadObject(_ context.Context, params *awss3.HeadObjectInput, _ ...func(*awss3.Options)) (*awss3.HeadObjectOutput, error) { + client.mu.RLock() + defer client.mu.RUnlock() + data, ok := client.objects[aws.ToString(params.Key)] + if !ok { + return nil, core.E("actionsTest.s3.HeadObject", "key not found", fs.ErrNotExist) + } + return &awss3.HeadObjectOutput{ContentLength: aws.Int64(int64(len(data)))}, nil +} + +func (client *actionTestS3Client) ListObjectsV2(context.Context, *awss3.ListObjectsV2Input, ...func(*awss3.Options)) (*awss3.ListObjectsV2Output, 
error) { + return &awss3.ListObjectsV2Output{}, nil +} + +func (client *actionTestS3Client) CopyObject(_ context.Context, params *awss3.CopyObjectInput, _ ...func(*awss3.Options)) (*awss3.CopyObjectOutput, error) { + client.mu.Lock() + defer client.mu.Unlock() + + _, sourceKey, ok := strings.Cut(aws.ToString(params.CopySource), "/") + if !ok { + return nil, core.E("actionsTest.s3.CopyObject", "invalid copy source", fs.ErrInvalid) + } + data, ok := client.objects[sourceKey] + if !ok { + return nil, core.E("actionsTest.s3.CopyObject", "source not found", fs.ErrNotExist) + } + client.objects[aws.ToString(params.Key)] = append([]byte(nil), data...) + return &awss3.CopyObjectOutput{}, nil +} + +func newActionSFTPTestMedium(t *testing.T) *iosftp.Medium { + t.Helper() + + serverConn, clientConn := net.Pipe() + server := pkgsftp.NewRequestServer(serverConn, pkgsftp.InMemHandler()) + done := make(chan error, 1) + go func() { + done <- server.Serve() + }() + + client, err := pkgsftp.NewClientPipe(clientConn, clientConn) + require.NoError(t, err) + + medium, err := iosftp.New(iosftp.Options{Client: client}) + require.NoError(t, err) + + t.Cleanup(func() { + _ = client.Close() + _ = clientConn.Close() + _ = serverConn.Close() + select { + case <-done: + case <-time.After(time.Second): + } + }) + + return medium +} diff --git a/cube/actions.go b/cube/actions.go new file mode 100644 index 0000000..d3fc2c3 --- /dev/null +++ b/cube/actions.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Example: cube.RegisterActions(c) +// Example: result := c.Action(cube.ActionPack).Run(ctx, core.NewOptions( +// Example: core.Option{Key: "source", Value: sourceMedium}, +// Example: core.Option{Key: "output", Value: "app.cube"}, +// Example: core.Option{Key: "key", Value: key}, +// Example: )) +package cube + +import ( + "context" + "io/fs" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" +) + +// Named action identifiers for the Cube Medium. 
Matches the go-io RFC §15
// registry so any Core-aware agent or CLI can dispatch Cube operations by
// name.
//
// Example: result := c.Action(cube.ActionRead).Run(ctx, opts)
const (
	ActionRead   = "core.io.cube.read"
	ActionWrite  = "core.io.cube.write"
	ActionPack   = "core.io.cube.pack"
	ActionUnpack = "core.io.cube.unpack"
)

// Operation labels and shared error text used in core.E diagnostics.
const (
	opReadAction   = "cube.readAction"
	opWriteAction  = "cube.writeAction"
	opPackAction   = "cube.packAction"
	opUnpackAction = "cube.unpackAction"
	errKeyType     = "key must be []byte"
)

// Example: cube.RegisterActions(c)
//
// RegisterActions installs the cube actions listed in the go-io RFC §15 on the
// given Core. Call this during service registration. A nil Core is a no-op.
func RegisterActions(c *core.Core) {
	if c == nil {
		return
	}
	c.Action(ActionRead, readAction)
	c.Action(ActionWrite, writeAction)
	c.Action(ActionPack, packAction)
	c.Action(ActionUnpack, unpackAction)
}

// resolveActionMedium returns the Medium a read/write action should operate
// on: a ready-made "medium" option wins; otherwise a cube Medium is built
// from the "inner" + "key" options. operation labels any error. Shared by
// readAction and writeAction, which previously duplicated this logic.
func resolveActionMedium(opts core.Options, operation string) (coreio.Medium, error) {
	if medium, ok := opts.Get("medium").Value.(coreio.Medium); ok && medium != nil {
		return medium, nil
	}
	inner, ok := opts.Get("inner").Value.(coreio.Medium)
	if !ok {
		return nil, core.E(operation, "inner medium is required", fs.ErrInvalid)
	}
	key, err := keyFromOptions(opts, operation)
	if err != nil {
		return nil, err
	}
	return New(Options{Inner: inner, Key: key})
}

// Example: opts := core.NewOptions(
// Example: core.Option{Key: "inner", Value: innerMedium},
// Example: core.Option{Key: "key", Value: key},
// Example: core.Option{Key: "path", Value: "secret.txt"},
// Example: )
//
// readAction decrypts and returns the file at the "path" option.
func readAction(_ context.Context, opts core.Options) core.Result {
	medium, err := resolveActionMedium(opts, opReadAction)
	if err != nil {
		return core.Result{}.New(err)
	}
	content, err := medium.Read(opts.String("path"))
	if err != nil {
		return core.Result{}.New(err)
	}
	return core.Result{Value: content, OK: true}
}

// Example: opts := core.NewOptions(
// Example: core.Option{Key: "inner", Value: innerMedium},
// Example: core.Option{Key: "key", Value: key},
// Example: core.Option{Key: "path", Value: "secret.txt"},
// Example: core.Option{Key: "content", Value: "classified"},
// Example: )
//
// writeAction encrypts the "content" option and stores it at "path".
func writeAction(_ context.Context, opts core.Options) core.Result {
	medium, err := resolveActionMedium(opts, opWriteAction)
	if err != nil {
		return core.Result{}.New(err)
	}
	if err := medium.Write(opts.String("path"), opts.String("content")); err != nil {
		return core.Result{}.New(err)
	}
	return core.Result{OK: true}
}

// Example: opts := core.NewOptions(
// Example: core.Option{Key: "source", Value: sourceMedium},
// Example: core.Option{Key: "output", Value: "app.cube"},
// Example: core.Option{Key: "key", Value: key},
// Example: )
//
// packAction archives and encrypts the source Medium into a .cube file.
func packAction(_ context.Context, opts core.Options) core.Result {
	source, ok := opts.Get("source").Value.(coreio.Medium)
	if !ok {
		return core.Result{}.New(core.E(opPackAction, "source medium is required", fs.ErrInvalid))
	}
	key, err := keyFromOptions(opts, opPackAction)
	if err != nil {
		return core.Result{}.New(err)
	}
	if err := Pack(opts.String("output"), source, key); err != nil {
		return core.Result{}.New(err)
	}
	return core.Result{OK: true}
}

// Example: opts := core.NewOptions(
// Example: core.Option{Key: "cube", Value: "app.cube"},
// Example: core.Option{Key: "destination", Value:
destinationMedium}, +// Example: core.Option{Key: "key", Value: key}, +// Example: ) +func unpackAction(_ context.Context, opts core.Options) core.Result { + destination, ok := opts.Get("destination").Value.(coreio.Medium) + if !ok { + return core.Result{}.New(core.E(opUnpackAction, "destination medium is required", fs.ErrInvalid)) + } + key, err := keyFromOptions(opts, opUnpackAction) + if err != nil { + return core.Result{}.New(err) + } + cubePath := opts.String("cube") + if err := Unpack(cubePath, destination, key); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} + +func keyFromOptions(opts core.Options, operation string) ([]byte, error) { + key, ok := opts.Get("key").Value.([]byte) + if !ok { + return nil, core.E(operation, errKeyType, fs.ErrInvalid) + } + return key, nil +} diff --git a/cube/actions_test.go b/cube/actions_test.go new file mode 100644 index 0000000..100f314 --- /dev/null +++ b/cube/actions_test.go @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package cube + +import ( + "context" + "testing" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestActions_RegisterActions_Good(t *testing.T) { + c := core.New() + RegisterActions(c) + + for _, name := range []string{ActionRead, ActionWrite, ActionPack, ActionUnpack} { + assert.True(t, c.Action(name).Exists(), name) + } +} + +func TestActions_RegisterActions_Bad(t *testing.T) { + // Nil Core must not panic. + assert.NotPanics(t, func() { RegisterActions(nil) }) +} + +func TestActions_RegisterActions_Ugly(t *testing.T) { + // Double registration is safe (idempotent overwrite). 
+ c := core.New() + RegisterActions(c) + assert.NotPanics(t, func() { RegisterActions(c) }) +} + +func TestActions_Write_Read_Good(t *testing.T) { + c := core.New() + RegisterActions(c) + inner := coreio.NewMemoryMedium() + + writeResult := c.Action(ActionWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(inner)}, + core.Option{Key: "key", Value: testKey}, + core.Option{Key: "path", Value: "secret.txt"}, + core.Option{Key: "content", Value: "classified"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(ActionRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(inner)}, + core.Option{Key: "key", Value: testKey}, + core.Option{Key: "path", Value: "secret.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "classified", readResult.Value) +} + +func TestActions_Write_Read_Bad(t *testing.T) { + c := core.New() + RegisterActions(c) + + // Missing inner medium must fail. + result := c.Action(ActionWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "key", Value: testKey}, + core.Option{Key: "path", Value: "secret.txt"}, + core.Option{Key: "content", Value: "classified"}, + )) + assert.False(t, result.OK) +} + +func TestActions_Write_Read_Ugly(t *testing.T) { + c := core.New() + RegisterActions(c) + inner := coreio.NewMemoryMedium() + + // Wrong-type key (string instead of []byte) must fail. 
+ result := c.Action(ActionWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "inner", Value: coreio.Medium(inner)}, + core.Option{Key: "key", Value: "not a byte slice"}, + core.Option{Key: "path", Value: "secret.txt"}, + core.Option{Key: "content", Value: "classified"}, + )) + assert.False(t, result.OK) +} + +func TestActions_PackUnpack_Good(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + RegisterActions(c) + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("config/app.yaml", "port: 8080")) + + outputPath := tempDir + "/app.cube" + packResult := c.Action(ActionPack).Run(context.Background(), core.NewOptions( + core.Option{Key: "source", Value: coreio.Medium(source)}, + core.Option{Key: "output", Value: outputPath}, + core.Option{Key: "key", Value: testKey}, + )) + require.True(t, packResult.OK) + + destination := coreio.NewMemoryMedium() + unpackResult := c.Action(ActionUnpack).Run(context.Background(), core.NewOptions( + core.Option{Key: "cube", Value: outputPath}, + core.Option{Key: "destination", Value: coreio.Medium(destination)}, + core.Option{Key: "key", Value: testKey}, + )) + require.True(t, unpackResult.OK) + + content, err := destination.Read("config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) +} + +func TestActions_PackUnpack_Bad(t *testing.T) { + c := core.New() + RegisterActions(c) + + // Pack without a source medium. + result := c.Action(ActionPack).Run(context.Background(), core.NewOptions( + core.Option{Key: "output", Value: "/tmp/anywhere.cube"}, + core.Option{Key: "key", Value: testKey}, + )) + assert.False(t, result.OK) + + // Unpack without a destination medium. 
+ result = c.Action(ActionUnpack).Run(context.Background(), core.NewOptions( + core.Option{Key: "cube", Value: "missing.cube"}, + core.Option{Key: "key", Value: testKey}, + )) + assert.False(t, result.OK) +} + +func TestActions_PackUnpack_Ugly(t *testing.T) { + tempDir := t.TempDir() + c := core.New() + RegisterActions(c) + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("a.txt", "alpha")) + + // Wrong-type key on pack must fail without writing anything. + outputPath := tempDir + "/bad.cube" + result := c.Action(ActionPack).Run(context.Background(), core.NewOptions( + core.Option{Key: "source", Value: coreio.Medium(source)}, + core.Option{Key: "output", Value: outputPath}, + core.Option{Key: "key", Value: "not a byte slice"}, + )) + assert.False(t, result.OK) +} diff --git a/cube/cube.go b/cube/cube.go new file mode 100644 index 0000000..6d256c4 --- /dev/null +++ b/cube/cube.go @@ -0,0 +1,532 @@ +// Example: inner := io.NewMemoryMedium() +// Example: medium, _ := cube.New(cube.Options{Inner: inner, Key: key}) +// Example: _ = medium.Write("secret.txt", "classified") +// Example: plain, _ := medium.Read("secret.txt") +package cube + +import ( + "archive/tar" // AX-6-exception: tar archive transport has no core equivalent. + goio "io" // AX-6-exception: io interface types have no core equivalent; io.EOF preserves stream semantics. + "io/fs" // AX-6-exception: fs interface types have no core equivalent. + "path" // AX-6-exception: tar entry names use slash-separated paths. + "time" // AX-6-exception: filesystem metadata timestamps have no core equivalent. 
+ + core "dappco.re/go/core" + coreio "dappco.re/go/io" + "dappco.re/go/io/node" + "dappco.re/go/io/sigil" +) + +const ( + opCubeNew = "cube.New" + opCubeOpen = "cube.Open" + opCubePack = "cube.Pack" + opCubeUnpack = "cube.Unpack" + opCubeArchive = "cube.archive" + opCubeExtract = "cube.extract" + errCreateCipher = "failed to create cipher" +) + +// Example: medium, _ := cube.New(cube.Options{Inner: inner, Key: key}) +// Example: _ = medium.Write("secret.txt", "classified") +// Example: plain, _ := medium.Read("secret.txt") +type Medium struct { + inner coreio.Medium + sigil *sigil.ChaChaPolySigil +} + +var _ coreio.Medium = (*Medium)(nil) + +// Example: medium, _ := cube.New(cube.Options{Inner: io.NewMemoryMedium(), Key: key}) +type Options struct { + Inner coreio.Medium + Key []byte +} + +// Example: medium, _ := cube.New(cube.Options{Inner: io.NewMemoryMedium(), Key: key}) +// Example: _ = medium.Write("secret.txt", "classified") +// Example: plaintext, _ := medium.Read("secret.txt") +func New(options Options) (*Medium, error) { + if options.Inner == nil { + return nil, core.E(opCubeNew, "inner medium is required", fs.ErrInvalid) + } + cipherSigil, err := sigil.NewChaChaPolySigil(options.Key, nil) + if err != nil { + return nil, core.E(opCubeNew, "failed to create cipher sigil", err) + } + return &Medium{ + inner: options.Inner, + sigil: cipherSigil, + }, nil +} + +// Example: inner := medium.Inner() +func (medium *Medium) Inner() coreio.Medium { + return medium.inner +} + +// Example: content, _ := medium.Read("secret.txt") +func (medium *Medium) Read(path string) (string, error) { + ciphertext, err := medium.inner.Read(path) + if err != nil { + return "", err + } + plaintext, err := sigil.Untransmute([]byte(ciphertext), []sigil.Sigil{medium.sigil}) + if err != nil { + return "", core.E("cube.Read", core.Concat("failed to decrypt: ", path), err) + } + return string(plaintext), nil +} + +// Example: _ = medium.Write("secret.txt", "classified") +func (medium 
*Medium) Write(path, content string) error { + return medium.WriteMode(path, content, 0644) +} + +// Example: _ = medium.WriteMode("keys/private.key", key, 0600) +func (medium *Medium) WriteMode(path, content string, mode fs.FileMode) error { + ciphertext, err := sigil.Transmute([]byte(content), []sigil.Sigil{medium.sigil}) + if err != nil { + return core.E("cube.WriteMode", core.Concat("failed to encrypt: ", path), err) + } + return medium.inner.WriteMode(path, string(ciphertext), mode) +} + +// Example: _ = medium.EnsureDir("data") +func (medium *Medium) EnsureDir(path string) error { + return medium.inner.EnsureDir(path) +} + +// Example: isFile := medium.IsFile("secret.txt") +func (medium *Medium) IsFile(path string) bool { + return medium.inner.IsFile(path) +} + +// Example: _ = medium.Delete("secret.txt") +func (medium *Medium) Delete(path string) error { + return medium.inner.Delete(path) +} + +// Example: _ = medium.DeleteAll("archive") +func (medium *Medium) DeleteAll(path string) error { + return medium.inner.DeleteAll(path) +} + +// Example: _ = medium.Rename("draft.txt", "final.txt") +func (medium *Medium) Rename(oldPath, newPath string) error { + return medium.inner.Rename(oldPath, newPath) +} + +// Example: entries, _ := medium.List("data") +func (medium *Medium) List(path string) ([]fs.DirEntry, error) { + return medium.inner.List(path) +} + +// Example: info, _ := medium.Stat("secret.txt") +func (medium *Medium) Stat(path string) (fs.FileInfo, error) { + return medium.inner.Stat(path) +} + +// Example: file, _ := medium.Open("secret.txt") +func (medium *Medium) Open(path string) (fs.File, error) { + // Read via cube semantics (decrypt) then wrap in an in-memory fs.File. 
+ ciphertext, err := medium.inner.Read(path) + if err != nil { + return nil, err + } + plaintext, err := sigil.Untransmute([]byte(ciphertext), []sigil.Sigil{medium.sigil}) + if err != nil { + return nil, core.E(opCubeOpen, core.Concat("failed to decrypt: ", path), err) + } + info, err := medium.inner.Stat(path) + if err != nil { + info = coreio.NewFileInfo(core.PathBase(path), int64(len(plaintext)), 0644, time.Now(), false) + } + return &cubeFile{ + name: core.PathBase(path), + content: plaintext, + mode: info.Mode(), + modTime: info.ModTime(), + }, nil +} + +// Example: writer, _ := medium.Create("secret.txt") +func (medium *Medium) Create(path string) (goio.WriteCloser, error) { + return &cubeWriteCloser{medium: medium, path: path, mode: 0644}, nil +} + +// Example: writer, _ := medium.Append("log.txt") +func (medium *Medium) Append(path string) (goio.WriteCloser, error) { + var existing []byte + if medium.inner.Exists(path) { + plain, err := medium.Read(path) + if err != nil { + return nil, err + } + existing = []byte(plain) + } + return &cubeWriteCloser{medium: medium, path: path, data: existing, mode: 0644}, nil +} + +// Example: reader, _ := medium.ReadStream("secret.txt") +func (medium *Medium) ReadStream(path string) (goio.ReadCloser, error) { + file, err := medium.Open(path) + if err != nil { + return nil, err + } + return file, nil +} + +// Example: writer, _ := medium.WriteStream("secret.txt") +func (medium *Medium) WriteStream(path string) (goio.WriteCloser, error) { + return medium.Create(path) +} + +// Example: exists := medium.Exists("secret.txt") +func (medium *Medium) Exists(path string) bool { + return medium.inner.Exists(path) +} + +// Example: isDirectory := medium.IsDir("data") +func (medium *Medium) IsDir(path string) bool { + return medium.inner.IsDir(path) +} + +// cubeFile implements fs.File over decrypted content. 
+type cubeFile struct { + name string + content []byte + offset int64 + mode fs.FileMode + modTime time.Time +} + +func (file *cubeFile) Stat() (fs.FileInfo, error) { + return coreio.NewFileInfo(file.name, int64(len(file.content)), file.mode, file.modTime, false), nil +} + +func (file *cubeFile) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { + return 0, goio.EOF + } + readCount := copy(buffer, file.content[file.offset:]) + file.offset += int64(readCount) + return readCount, nil +} + +func (file *cubeFile) Close() error { + return nil +} + +// cubeWriteCloser buffers writes and commits them (encrypted) on Close. +type cubeWriteCloser struct { + medium *Medium + path string + data []byte + mode fs.FileMode +} + +func (writer *cubeWriteCloser) Write(data []byte) (int, error) { + writer.data = append(writer.data, data...) + return len(data), nil +} + +func (writer *cubeWriteCloser) Close() error { + mode := writer.mode + if mode == 0 { + mode = 0644 + } + return writer.medium.WriteMode(writer.path, string(writer.data), mode) +} + +// AX-6-exception: core.NewBuffer is unavailable in the pinned core module; this is +// the minimal intrinsic writer needed by archive/tar. +type cubeArchiveBuffer struct { + data []byte +} + +func (buffer *cubeArchiveBuffer) Write(data []byte) (int, error) { + buffer.data = append(buffer.data, data...) + return len(data), nil +} + +// Example: _ = cube.Pack("app.cube", workspaceMedium, key) +// +// Pack walks the source Medium, packs every file into a tar archive, encrypts +// the archive, and writes the ciphertext to outputPath on the local filesystem. 
+func Pack(outputPath string, source coreio.Medium, key []byte) error { + if source == nil { + return core.E(opCubePack, "source medium is required", fs.ErrInvalid) + } + if outputPath == "" { + return core.E(opCubePack, "output path is required", fs.ErrInvalid) + } + + archiveBytes, err := archiveMediumToTar(source) + if err != nil { + return core.E(opCubePack, "failed to build archive", err) + } + + cipherSigil, err := sigil.NewChaChaPolySigil(key, nil) + if err != nil { + return core.E(opCubePack, errCreateCipher, err) + } + ciphertext, err := sigil.Transmute(archiveBytes, []sigil.Sigil{cipherSigil}) + if err != nil { + return core.E(opCubePack, "failed to encrypt archive", err) + } + + localMedium, relativePath, err := sandboxedLocalForPath(opCubePack, outputPath) + if err != nil { + return err + } + return localMedium.WriteMode(relativePath, string(ciphertext), 0600) +} + +// Example: _ = cube.Unpack("app.cube", destinationMedium, key) +// +// Unpack reads the encrypted archive from cubePath, decrypts it, unpacks the +// tar contents, and writes every entry to the destination Medium. 
+func Unpack(cubePath string, destination coreio.Medium, key []byte) error { + if destination == nil { + return core.E(opCubeUnpack, "destination medium is required", fs.ErrInvalid) + } + if cubePath == "" { + return core.E(opCubeUnpack, "cube path is required", fs.ErrInvalid) + } + + localMedium, relativePath, err := sandboxedLocalForPath(opCubeUnpack, cubePath) + if err != nil { + return err + } + ciphertext, err := localMedium.Read(relativePath) + if err != nil { + return core.E(opCubeUnpack, core.Concat("failed to read cube: ", cubePath), err) + } + + cipherSigil, err := sigil.NewChaChaPolySigil(key, nil) + if err != nil { + return core.E(opCubeUnpack, errCreateCipher, err) + } + archiveBytes, err := sigil.Untransmute([]byte(ciphertext), []sigil.Sigil{cipherSigil}) + if err != nil { + return core.E(opCubeUnpack, "failed to decrypt archive", err) + } + + return extractTarToMedium(archiveBytes, destination) +} + +// Example: medium, _ := cube.Open("app.cube", key) +// Example: content, _ := medium.Read("config/app.yaml") +// +// Open reads the encrypted archive at cubePath, decrypts it, and returns a +// Medium backed by an in-memory node.Node. Reads and writes do not flow back +// to the .cube file — use Pack again to persist updates. 
+func Open(cubePath string, key []byte) (coreio.Medium, error) { + if cubePath == "" { + return nil, core.E(opCubeOpen, "cube path is required", fs.ErrInvalid) + } + + localMedium, relativePath, err := sandboxedLocalForPath(opCubeOpen, cubePath) + if err != nil { + return nil, err + } + ciphertext, err := localMedium.Read(relativePath) + if err != nil { + return nil, core.E(opCubeOpen, core.Concat("failed to read cube: ", cubePath), err) + } + + cipherSigil, err := sigil.NewChaChaPolySigil(key, nil) + if err != nil { + return nil, core.E(opCubeOpen, errCreateCipher, err) + } + archiveBytes, err := sigil.Untransmute([]byte(ciphertext), []sigil.Sigil{cipherSigil}) + if err != nil { + return nil, core.E(opCubeOpen, "failed to decrypt archive", err) + } + + nodeTree, err := node.FromTar(archiveBytes) + if err != nil { + return nil, core.E(opCubeOpen, "failed to load archive", err) + } + return nodeTree, nil +} + +func sandboxedLocalForPath(operation, filePath string) (coreio.Medium, string, error) { + if filePath == "" { + return nil, "", core.E(operation, "path is required", fs.ErrInvalid) + } + if !core.PathIsAbs(filePath) { + medium, err := coreio.NewSandboxed(".") + if err != nil { + return nil, "", core.E(operation, "failed to access local filesystem", err) + } + return medium, filePath, nil + } + root := core.PathDir(filePath) + relativePath := core.PathBase(filePath) + if root == "/" || relativePath == "" || relativePath == "." || relativePath == "/" { + return nil, "", core.E(operation, core.Concat("invalid local path: ", filePath), fs.ErrInvalid) + } + medium, err := coreio.NewSandboxed(root) + if err != nil { + return nil, "", core.E(operation, "failed to access local filesystem", err) + } + return medium, relativePath, nil +} + +// archiveMediumToTar walks source and serialises all files into a tar archive. 
+func archiveMediumToTar(source coreio.Medium) ([]byte, error) { + buffer := &cubeArchiveBuffer{} + tarWriter := tar.NewWriter(buffer) + + if err := walkAndArchive(source, "", tarWriter); err != nil { + tarWriter.Close() + return nil, err + } + + if err := tarWriter.Close(); err != nil { + return nil, core.E(opCubeArchive, "failed to close tar writer", err) + } + return buffer.data, nil +} + +// walkAndArchive recursively walks the source and appends every file. +func walkAndArchive(source coreio.Medium, path string, tarWriter *tar.Writer) error { + entries, err := source.List(path) + if err != nil { + return core.E(opCubeArchive, core.Concat("failed to list: ", path), err) + } + for _, entry := range entries { + childPath := archiveChildPath(path, entry.Name()) + if entry.IsDir() { + if err := walkAndArchive(source, childPath, tarWriter); err != nil { + return err + } + continue + } + if err := writeTarFileEntry(source, childPath, tarWriter); err != nil { + return err + } + } + return nil +} + +func archiveChildPath(parent, name string) string { + if parent == "" { + return name + } + return core.Concat(parent, "/", name) +} + +func writeTarFileEntry(source coreio.Medium, filePath string, tarWriter *tar.Writer) error { + content, err := source.Read(filePath) + if err != nil { + return core.E(opCubeArchive, core.Concat("failed to read: ", filePath), err) + } + mode, modTime := archiveEntryMetadata(source, filePath) + header := &tar.Header{ + Name: filePath, + Mode: int64(mode.Perm()), + Size: int64(len(content)), + ModTime: modTime, + } + if err := tarWriter.WriteHeader(header); err != nil { + return core.E(opCubeArchive, core.Concat("failed to write header: ", filePath), err) + } + if _, err := tarWriter.Write([]byte(content)); err != nil { + return core.E(opCubeArchive, core.Concat("failed to write content: ", filePath), err) + } + return nil +} + +func archiveEntryMetadata(source coreio.Medium, filePath string) (fs.FileMode, time.Time) { + if info, err := 
source.Stat(filePath); err == nil { + return info.Mode(), info.ModTime() + } + return 0600, time.Now() +} + +// extractTarToMedium reads a tar archive and writes each entry to destination. +func extractTarToMedium(archiveBytes []byte, destination coreio.Medium) error { + tarReader := tar.NewReader(&cubeFile{content: archiveBytes}) + for { + header, err := tarReader.Next() + if err == goio.EOF { + return nil + } + if err != nil { + return core.E(opCubeExtract, "failed to read tar entry", err) + } + if header.Typeflag != tar.TypeReg { + continue + } + if err := extractTarFileEntry(tarReader, header, destination); err != nil { + return err + } + } +} + +func extractTarFileEntry(tarReader *tar.Reader, header *tar.Header, destination coreio.Medium) error { + content, err := readTarEntryContent(tarReader, header.Name) + if err != nil { + return err + } + name, ok, err := validatedTarEntryName(header.Name) + if err != nil || !ok { + return err + } + mode := fs.FileMode(header.Mode) + if mode == 0 { + mode = 0644 + } + if err := destination.WriteMode(name, content, mode); err != nil { + return core.E(opCubeExtract, core.Concat("failed to write entry: ", name), err) + } + return nil +} + +func readTarEntryContent(tarReader *tar.Reader, name string) (string, error) { + contentResult := core.ReadAll(tarReader) + if contentResult.OK { + content, ok := contentResult.Value.(string) + if !ok { + return "", core.E(opCubeExtract, core.Concat("failed to read entry: ", name), fs.ErrInvalid) + } + return content, nil + } + if err, ok := contentResult.Value.(error); ok { + return "", core.E(opCubeExtract, core.Concat("failed to read entry: ", name), err) + } + return "", core.E(opCubeExtract, core.Concat("failed to read entry: ", name), fs.ErrInvalid) +} + +func validatedTarEntryName(rawName string) (string, bool, error) { + if rawName == "" { + return "", false, nil + } + if path.IsAbs(rawName) || core.Contains(rawName, "\\") { + return "", false, core.E(opCubeExtract, 
core.Concat("invalid tar entry path: ", rawName), fs.ErrInvalid) + } + name := core.TrimPrefix(rawName, "/") + if name == "" || core.HasSuffix(name, "/") { + return "", false, nil + } + if hasParentPathSegment(name) { + return "", false, core.E(opCubeExtract, core.Concat("invalid tar entry path: ", name), fs.ErrInvalid) + } + clean := path.Clean(name) + if clean == "." || clean == "" || clean == ".." || core.HasPrefix(clean, "../") { + return "", false, core.E(opCubeExtract, core.Concat("invalid tar entry path: ", name), fs.ErrInvalid) + } + return clean, true, nil +} + +func hasParentPathSegment(name string) bool { + for _, part := range core.Split(name, "/") { + if part == ".." { + return true + } + } + return false +} diff --git a/cube/cube_test.go b/cube/cube_test.go new file mode 100644 index 0000000..e2e1501 --- /dev/null +++ b/cube/cube_test.go @@ -0,0 +1,424 @@ +package cube + +import ( + "bytes" + goio "io" + "testing" + + coreio "dappco.re/go/io" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testKey is a fixed 32-byte key used across cube tests. +var testKey = []byte("0123456789abcdef0123456789abcdef") + +func TestCube_New_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + require.NotNil(t, medium) + assert.Same(t, inner, medium.Inner()) +} + +func TestCube_New_Bad(t *testing.T) { + // Nil inner medium should return an error. + _, err := New(Options{Inner: nil, Key: testKey}) + assert.Error(t, err) +} + +func TestCube_New_Ugly(t *testing.T) { + // Wrong key size must be rejected. + _, err := New(Options{Inner: coreio.NewMemoryMedium(), Key: []byte("short")}) + assert.Error(t, err) + // Empty key is also invalid. 
+ _, err = New(Options{Inner: coreio.NewMemoryMedium(), Key: nil}) + assert.Error(t, err) +} + +func TestCube_WriteRead_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + require.NoError(t, medium.Write("notes/todo.txt", "ship the cube")) + + plaintext, err := medium.Read("notes/todo.txt") + require.NoError(t, err) + assert.Equal(t, "ship the cube", plaintext) +} + +func TestCube_WriteRead_Bad(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Read of missing file should return an error. + _, err = medium.Read("missing.txt") + assert.Error(t, err) +} + +func TestCube_WriteRead_Ugly(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Underlying storage must contain ciphertext, not plaintext. + require.NoError(t, medium.Write("secret.txt", "sensitive payload")) + raw, err := inner.Read("secret.txt") + require.NoError(t, err) + assert.NotEqual(t, "sensitive payload", raw, "cube must persist ciphertext, never plaintext") + + // Reading with the wrong key must fail. 
+ otherKey := []byte("fedcba9876543210fedcba9876543210") + otherMedium, err := New(Options{Inner: inner, Key: otherKey}) + require.NoError(t, err) + _, err = otherMedium.Read("secret.txt") + assert.Error(t, err) +} + +func TestCube_WriteMode_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + require.NoError(t, medium.WriteMode("keys/private.key", "secret-key", 0600)) + plaintext, err := medium.Read("keys/private.key") + require.NoError(t, err) + assert.Equal(t, "secret-key", plaintext) +} + +func TestCube_WriteMode_Bad(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Writing into a path that conflicts with a directory should fail via the inner Medium. + require.NoError(t, inner.EnsureDir("data")) + err = medium.WriteMode("data", "payload", 0644) + assert.Error(t, err) +} + +func TestCube_WriteMode_Ugly(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Empty payload must round-trip. 
+ require.NoError(t, medium.Write("empty.txt", "")) + plaintext, err := medium.Read("empty.txt") + require.NoError(t, err) + assert.Equal(t, "", plaintext) +} + +func TestCube_Streaming_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + writer, err := medium.Create("log.txt") + require.NoError(t, err) + _, err = writer.Write([]byte("line one\n")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + reader, err := medium.ReadStream("log.txt") + require.NoError(t, err) + defer reader.Close() + content, err := goio.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, "line one\n", string(content)) +} + +func TestCube_Streaming_Bad(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Reading a stream that does not exist returns an error. + _, err = medium.ReadStream("missing.txt") + assert.Error(t, err) +} + +func TestCube_Streaming_Ugly(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Append must decrypt the existing payload, then append. 
+ require.NoError(t, medium.Write("log.txt", "line one\n")) + writer, err := medium.Append("log.txt") + require.NoError(t, err) + _, err = writer.Write([]byte("line two\n")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + + plaintext, err := medium.Read("log.txt") + require.NoError(t, err) + assert.Equal(t, "line one\nline two\n", plaintext) +} + +func TestCube_Open_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + require.NoError(t, medium.Write("notes.txt", "ship it")) + + file, err := medium.Open("notes.txt") + require.NoError(t, err) + defer file.Close() + + buffer := bytes.NewBuffer(nil) + _, err = goio.Copy(buffer, file) + require.NoError(t, err) + assert.Equal(t, "ship it", buffer.String()) +} + +func TestCube_Open_Bad(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + _, err = medium.Open("missing.txt") + assert.Error(t, err) +} + +func TestCube_Open_Ugly(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Write directly to the inner Medium (plaintext) — cube.Open must fail to decrypt. + require.NoError(t, inner.Write("secret.txt", "not ciphertext")) + _, err = medium.Open("secret.txt") + assert.Error(t, err) +} + +func TestCube_PassthroughOperations_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Exists / IsFile / IsDir / List / Stat pass through to inner. 
+ require.NoError(t, medium.EnsureDir("data")) + require.NoError(t, medium.Write("data/one.txt", "alpha")) + + assert.True(t, medium.Exists("data/one.txt")) + assert.True(t, medium.IsFile("data/one.txt")) + assert.True(t, medium.IsDir("data")) + + entries, err := medium.List("data") + require.NoError(t, err) + assert.NotEmpty(t, entries) + + info, err := medium.Stat("data/one.txt") + require.NoError(t, err) + assert.False(t, info.IsDir()) +} + +func TestCube_PassthroughOperations_Bad(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Deleting a missing file surfaces the underlying Medium's error. + err = medium.Delete("missing.txt") + assert.Error(t, err) +} + +func TestCube_PassthroughOperations_Ugly(t *testing.T) { + inner := coreio.NewMemoryMedium() + medium, err := New(Options{Inner: inner, Key: testKey}) + require.NoError(t, err) + + // Rename preserves ciphertext semantics. + require.NoError(t, medium.Write("old.txt", "keep")) + require.NoError(t, medium.Rename("old.txt", "new.txt")) + plaintext, err := medium.Read("new.txt") + require.NoError(t, err) + assert.Equal(t, "keep", plaintext) + + // DeleteAll removes the entire subtree. + require.NoError(t, medium.Write("branch/a.txt", "a")) + require.NoError(t, medium.Write("branch/b.txt", "b")) + require.NoError(t, medium.DeleteAll("branch")) + assert.False(t, inner.Exists("branch/a.txt")) +} + +func TestCube_Pack_Good(t *testing.T) { + tempDir := t.TempDir() + sandbox, err := coreio.NewSandboxed(tempDir) + require.NoError(t, err) + outputPath := tempDir + "/app.cube" + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("config/app.yaml", "port: 8080")) + require.NoError(t, source.Write("data/user.json", `{"name":"alice"}`)) + + require.NoError(t, Pack(outputPath, source, testKey)) + assert.True(t, sandbox.Exists("app.cube")) +} + +func TestCube_Pack_Bad(t *testing.T) { + // Missing source must error. 
+ err := Pack("output.cube", nil, testKey) + assert.Error(t, err) + + // Missing output path must error. + err = Pack("", coreio.NewMemoryMedium(), testKey) + assert.Error(t, err) +} + +func TestCube_Pack_Ugly(t *testing.T) { + tempDir := t.TempDir() + outputPath := tempDir + "/bad.cube" + + // Invalid (short) key must error before any filesystem work. + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("a.txt", "payload")) + err := Pack(outputPath, source, []byte("short")) + assert.Error(t, err) +} + +func TestCube_Unpack_Good(t *testing.T) { + tempDir := t.TempDir() + outputPath := tempDir + "/app.cube" + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("config/app.yaml", "port: 8080")) + require.NoError(t, source.Write("data/user.json", `{"name":"alice"}`)) + + require.NoError(t, Pack(outputPath, source, testKey)) + + restored := coreio.NewMemoryMedium() + require.NoError(t, Unpack(outputPath, restored, testKey)) + + config, err := restored.Read("config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", config) + + user, err := restored.Read("data/user.json") + require.NoError(t, err) + assert.Equal(t, `{"name":"alice"}`, user) +} + +func TestCube_Unpack_Bad(t *testing.T) { + err := Unpack("missing.cube", coreio.NewMemoryMedium(), testKey) + assert.Error(t, err) + + err = Unpack("some.cube", nil, testKey) + assert.Error(t, err) + + err = Unpack("", coreio.NewMemoryMedium(), testKey) + assert.Error(t, err) +} + +func TestCube_Unpack_Ugly(t *testing.T) { + tempDir := t.TempDir() + outputPath := tempDir + "/app.cube" + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("secret.txt", "classified")) + require.NoError(t, Pack(outputPath, source, testKey)) + + // Attempting to unpack with a different key must fail. 
+ badKey := []byte("fedcba9876543210fedcba9876543210") + err := Unpack(outputPath, coreio.NewMemoryMedium(), badKey) + assert.Error(t, err) +} + +func TestCube_Open_Packed_Good(t *testing.T) { + tempDir := t.TempDir() + outputPath := tempDir + "/app.cube" + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("config/app.yaml", "port: 8080")) + require.NoError(t, Pack(outputPath, source, testKey)) + + cubeMedium, err := Open(outputPath, testKey) + require.NoError(t, err) + + content, err := cubeMedium.Read("config/app.yaml") + require.NoError(t, err) + assert.Equal(t, "port: 8080", content) +} + +func TestCube_Open_Packed_Bad(t *testing.T) { + _, err := Open("", testKey) + assert.Error(t, err) + + _, err = Open("missing.cube", testKey) + assert.Error(t, err) +} + +func TestCube_Open_Packed_Ugly(t *testing.T) { + tempDir := t.TempDir() + outputPath := tempDir + "/app.cube" + + source := coreio.NewMemoryMedium() + require.NoError(t, source.Write("a.txt", "alpha")) + require.NoError(t, Pack(outputPath, source, testKey)) + + // Wrong key fails. + badKey := []byte("fedcba9876543210fedcba9876543210") + _, err := Open(outputPath, badKey) + assert.Error(t, err) +} + +func TestCube_DoubleEncryption_Good(t *testing.T) { + inner := coreio.NewMemoryMedium() + userKey := []byte("0123456789abcdef0123456789abcdef") + transportKey := []byte("fedcba9876543210fedcba9876543210") + + userCube, err := New(Options{Inner: inner, Key: userKey}) + require.NoError(t, err) + outerCube, err := New(Options{Inner: userCube, Key: transportKey}) + require.NoError(t, err) + + require.NoError(t, outerCube.Write("secret.txt", "classified")) + plaintext, err := outerCube.Read("secret.txt") + require.NoError(t, err) + assert.Equal(t, "classified", plaintext) + + // The underlying inner Medium holds a double-encrypted payload. 
+	raw, err := inner.Read("secret.txt")
+	require.NoError(t, err)
+	assert.NotEqual(t, "classified", raw)
+}
+
+func TestCube_DoubleEncryption_Bad(t *testing.T) {
+	inner := coreio.NewMemoryMedium()
+	userKey := []byte("0123456789abcdef0123456789abcdef")
+	transportKey := []byte("fedcba9876543210fedcba9876543210")
+
+	userCube, err := New(Options{Inner: inner, Key: userKey})
+	require.NoError(t, err)
+	outerCube, err := New(Options{Inner: userCube, Key: transportKey})
+	require.NoError(t, err)
+
+	require.NoError(t, outerCube.Write("secret.txt", "classified"))
+
+	// Reading through the inner userCube alone returns ciphertext, not plaintext.
+	stillEncrypted, err := userCube.Read("secret.txt")
+	require.NoError(t, err)
+	assert.NotEqual(t, "classified", stillEncrypted)
+}
+
+func TestCube_DoubleEncryption_Ugly(t *testing.T) {
+	inner := coreio.NewMemoryMedium()
+	userKey := []byte("0123456789abcdef0123456789abcdef")
+	transportKey := []byte("fedcba9876543210fedcba9876543210")
+
+	userCube, err := New(Options{Inner: inner, Key: userKey})
+	require.NoError(t, err)
+	outerCube, err := New(Options{Inner: userCube, Key: transportKey})
+	require.NoError(t, err)
+
+	require.NoError(t, outerCube.Write("secret.txt", "classified"))
+
+	// Decrypting the double-encrypted store with only the transport key (the wrong layer order) must fail.
+ wrongOrder, err := New(Options{Inner: inner, Key: transportKey}) + require.NoError(t, err) + _, err = wrongOrder.Read("secret.txt") + assert.Error(t, err) +} diff --git a/datanode/medium.go b/datanode/medium.go index 3e0e5bd..e17d70a 100644 --- a/datanode/medium.go +++ b/datanode/medium.go @@ -10,11 +10,11 @@ import ( "io/fs" "path" "slices" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural per RFC §5.1 "time" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" borgdatanode "forge.lthn.ai/Snider/Borg/pkg/datanode" ) diff --git a/datanode/medium_test.go b/datanode/medium_test.go index 93ff024..429c8e0 100644 --- a/datanode/medium_test.go +++ b/datanode/medium_test.go @@ -6,7 +6,7 @@ import ( "testing" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/go.mod b/go.mod index b35de0d..54e53f7 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module dappco.re/go/core/io +module dappco.re/go/io go 1.26.0 @@ -7,8 +7,13 @@ require ( forge.lthn.ai/Snider/Borg v0.3.1 github.com/aws/aws-sdk-go-v2 v1.41.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 + github.com/gin-gonic/gin v1.12.0 + github.com/google/go-github/v75 v75.0.0 + github.com/pkg/sftp v1.13.10 github.com/stretchr/testify v1.11.1 - golang.org/x/crypto v0.49.0 + golang.org/x/crypto v0.50.0 + golang.org/x/net v0.53.0 + golang.org/x/oauth2 v0.36.0 modernc.org/sqlite v1.47.0 ) @@ -22,16 +27,41 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.20 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.20 // indirect github.com/aws/smithy-go v1.24.2 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.15.0 // indirect + github.com/bytedance/sonic/loader v0.5.0 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.12 // indirect + github.com/gin-contrib/sse v1.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.30.1 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.19.2 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/ncruces/go-strftime v1.0.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.59.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - golang.org/x/sys v0.42.0 // indirect - golang.org/x/tools v0.43.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.3.1 // indirect + go.mongodb.org/mongo-driver/v2 v2.5.0 // indirect + golang.org/x/arch v0.22.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/text v0.36.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect modernc.org/libc v1.70.0 // indirect modernc.org/mathutil v1.7.1 // indirect diff --git a/go.sum b/go.sum index 0164e68..cb59dff 100644 --- a/go.sum +++ b/go.sum @@ -24,47 +24,135 @@ 
github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1 h1:csi9NLpFZXb9fxY7rS1xVzgPRGMt7 github.com/aws/aws-sdk-go-v2/service/s3 v1.97.1/go.mod h1:qXVal5H0ChqXP63t6jze5LmFalc7+ZE7wOdLtZ0LCP0= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE= +github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k= +github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE= +github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= +github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gin-contrib/sse v1.1.0 
h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= +github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= +github.com/gin-gonic/gin v1.12.0 h1:b3YAbrZtnf8N//yjKeU2+MQsh2mY5htkZidOM7O0wG8= +github.com/gin-gonic/gin v1.12.0/go.mod h1:VxccKfsSllpKshkBWgVgRniFFAzFb9csfngsqANjnLc= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w= +github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= +github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-github/v75 v75.0.0 h1:k7q8Bvg+W5KxRl9Tjq16a9XEgVY1pwuiG5sIL7435Ic= +github.com/google/go-github/v75 v75.0.0/go.mod h1:H3LUJEA1TCrzuUqtdAQniBNwuKiQIqdGKgBo1/M/uqI= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= 
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/sftp v1.13.10 h1:+5FbKNTe5Z9aspU88DPIKJ9z2KZoaGCu6Sr6kKR/5mU= +github.com/pkg/sftp v1.13.10/go.mod h1:bJ1a7uDhrX/4OII+agvy28lzRvQrmIQuaHrcI1HbeGA= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= +github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= 
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY= +github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +go.mongodb.org/mongo-driver/v2 v2.5.0 h1:yXUhImUjjAInNcpTcAlPHiT7bIXhshCTL3jVBkF3xaE= +go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzybRWdyYUs8K/0= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +golang.org/x/arch v0.22.0 
h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI= +golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs= +golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf 
v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= diff --git a/internal/fsutil/direntry.go b/internal/fsutil/direntry.go new file mode 100644 index 0000000..98da5cc --- /dev/null +++ b/internal/fsutil/direntry.go @@ -0,0 +1,16 @@ +package fsutil + +import "io/fs" + +// SortDirEntriesByName sorts directory entries by their Name value. +func SortDirEntriesByName(entries []fs.DirEntry) { + for i := 1; i < len(entries); i++ { + entry := entries[i] + j := i - 1 + for j >= 0 && entries[j].Name() > entry.Name() { + entries[j+1] = entries[j] + j-- + } + entries[j+1] = entry + } +} diff --git a/io.go b/io.go index f0c17fa..31d87d9 100644 --- a/io.go +++ b/io.go @@ -1,16 +1,13 @@ package io import ( - "bytes" - "cmp" - goio "io" - "io/fs" - "path" - "slices" - "time" + goio "io" // AX-6-exception: io interface types have no core equivalent; io.EOF preserves stream semantics. + "io/fs" // AX-6-exception: fs interface types have no core equivalent. + "time" // AX-6-exception: filesystem metadata timestamps have no core equivalent. 
core "dappco.re/go/core" - "dappco.re/go/core/io/local" + "dappco.re/go/io/internal/fsutil" + "dappco.re/go/io/local" ) // Example: medium, _ := io.NewSandboxed("/srv/app") @@ -222,10 +219,10 @@ func NewMemoryMedium() *MemoryMedium { } func (medium *MemoryMedium) ensureAncestorDirectories(filePath string) { - parentPath := path.Dir(filePath) + parentPath := core.PathDir(filePath) for parentPath != "." && parentPath != "" { medium.directories[parentPath] = true - nextParentPath := path.Dir(parentPath) + nextParentPath := core.PathDir(parentPath) if nextParentPath == parentPath { break } @@ -277,12 +274,12 @@ func (medium *MemoryMedium) Write(path, content string) error { // Example: _ = io.NewMemoryMedium().WriteMode("keys/private.key", "secret", 0600) func (medium *MemoryMedium) WriteMode(filePath, content string, mode fs.FileMode) error { // Verify no ancestor directory component is stored as a file. - ancestor := path.Dir(filePath) + ancestor := core.PathDir(filePath) for ancestor != "." 
&& ancestor != "" { if _, ok := medium.fileContents[ancestor]; ok { return core.E("io.MemoryMedium.WriteMode", core.Concat("ancestor path is a file: ", ancestor), fs.ErrExist) } - next := path.Dir(ancestor) + next := core.PathDir(ancestor) if next == ancestor { break } @@ -609,8 +606,8 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } rest := core.TrimPrefix(filePath, prefix) if rest == "" || core.Contains(rest, "/") { - if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 { - dirName := rest[:idx] + if parts := core.SplitN(rest, "/", 2); len(parts) == 2 { + dirName := parts[0] if !seen[dirName] { seen[dirName] = true entries = append(entries, NewDirEntry( @@ -643,8 +640,8 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { if rest == "" { continue } - if idx := bytes.IndexByte([]byte(rest), '/'); idx != -1 { - rest = rest[:idx] + if parts := core.SplitN(rest, "/", 2); len(parts) == 2 { + rest = parts[0] } if !seen[rest] { seen[rest] = true @@ -657,9 +654,7 @@ func (medium *MemoryMedium) List(path string) ([]fs.DirEntry, error) { } } - slices.SortFunc(entries, func(a, b fs.DirEntry) int { - return cmp.Compare(a.Name(), b.Name()) - }) + fsutil.SortDirEntriesByName(entries) return entries, nil } diff --git a/local/medium.go b/local/medium.go index 94ac9d2..f3580f9 100644 --- a/local/medium.go +++ b/local/medium.go @@ -411,8 +411,8 @@ func lstat(path string) (*syscall.Stat_t, error) { return info, nil } -func isSymlink(mode uint32) bool { - return mode&syscall.S_IFMT == syscall.S_IFLNK +func isSymlink(mode uint16) bool { + return uint32(mode)&syscall.S_IFMT == syscall.S_IFLNK } func readlink(path string) (string, error) { diff --git a/mock.go b/mock.go new file mode 100644 index 0000000..ca8da8e --- /dev/null +++ b/mock.go @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package io + +import ( + "bytes" + "cmp" + goio "io" + "io/fs" + pathpkg "path" + "slices" + "sync" // Note: AX-6 — internal concurrency 
primitive; structural per RFC §5.1 + "time" +) + +// MockMedium is an in-memory Medium implementation for testing. +// Tracks files, directories, and modification times without touching disk. +// +// Example: +// +// mock := io.NewMockMedium() +// _ = mock.Write("config/app.yaml", "port: 8080") +// content, _ := mock.Read("config/app.yaml") +type MockMedium struct { + mu sync.RWMutex + // Files is the file content store. Exported for test assertions and direct writes. + // + // mock.Files["config.yaml"] = "port: 8080" + // content := mock.Files["config.yaml"] + Files map[string]string + meta map[string]mockMeta + dirs map[string]bool +} + +type mockMeta struct { + mode fs.FileMode + modTime time.Time +} + +// NewMockMedium creates an empty in-memory Medium. +// +// Example: +// +// mock := io.NewMockMedium() +func NewMockMedium() *MockMedium { + return &MockMedium{ + Files: make(map[string]string), + meta: make(map[string]mockMeta), + dirs: make(map[string]bool), + } +} + +var _ Medium = (*MockMedium)(nil) + +func (m *MockMedium) Read(path string) (string, error) { + m.mu.RLock() + defer m.mu.RUnlock() + content, ok := m.Files[path] + if !ok { + return "", fs.ErrNotExist + } + return content, nil +} + +func (m *MockMedium) Write(path, content string) error { + return m.WriteMode(path, content, 0644) +} + +func (m *MockMedium) WriteMode(path, content string, mode fs.FileMode) error { + m.mu.Lock() + defer m.mu.Unlock() + m.Files[path] = content + m.meta[path] = mockMeta{mode: mode, modTime: time.Now()} + return nil +} + +func (m *MockMedium) EnsureDir(path string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.dirs[path] = true + return nil +} + +func (m *MockMedium) IsFile(path string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + _, ok := m.Files[path] + return ok +} + +func (m *MockMedium) Delete(path string) error { + m.mu.Lock() + defer m.mu.Unlock() + if _, ok := m.Files[path]; ok { + delete(m.Files, path) + delete(m.meta, path) + return nil + } + if _, ok 
:= m.dirs[path]; ok { + delete(m.dirs, path) + return nil + } + return fs.ErrNotExist +} + +func (m *MockMedium) DeleteAll(path string) error { + m.mu.Lock() + defer m.mu.Unlock() + found := false + for k := range m.Files { + if pathMatchesPrefix(k, path) { + delete(m.Files, k) + delete(m.meta, k) + found = true + } + } + for d := range m.dirs { + if pathMatchesPrefix(d, path) { + delete(m.dirs, d) + found = true + } + } + if !found { + return fs.ErrNotExist + } + return nil +} + +func (m *MockMedium) Rename(oldPath, newPath string) error { + m.mu.Lock() + defer m.mu.Unlock() + f, ok := m.Files[oldPath] + if !ok { + return fs.ErrNotExist + } + m.Files[newPath] = f + delete(m.Files, oldPath) + if metadata, ok := m.meta[oldPath]; ok { + m.meta[newPath] = metadata + delete(m.meta, oldPath) + } + return nil +} + +func (m *MockMedium) List(path string) ([]fs.DirEntry, error) { + m.mu.RLock() + defer m.mu.RUnlock() + prefix := mockListPrefix(path) + seen := make(map[string]bool) + entries := make([]fs.DirEntry, 0) + entries = append(entries, m.fileEntries(prefix, seen)...) + entries = append(entries, m.dirEntries(prefix, seen)...) + slices.SortFunc(entries, func(a, b fs.DirEntry) int { return cmp.Compare(a.Name(), b.Name()) }) + return entries, nil +} + +func pathMatchesPrefix(candidate, prefix string) bool { + return candidate == prefix || len(candidate) > len(prefix) && candidate[:len(prefix)+1] == prefix+"/" +} + +func mockListPrefix(filePath string) string { + if filePath == "" || filePath == "." 
{ + return "" + } + return filePath + "/" +} + +func (m *MockMedium) fileEntries(prefix string, seen map[string]bool) []fs.DirEntry { + var entries []fs.DirEntry + for k, content := range m.Files { + if len(k) <= len(prefix) || k[:len(prefix)] != prefix { + continue + } + rest := k[len(prefix):] + if dirName, ok := firstPathComponent(rest); ok { + entries = appendMockDirectoryEntry(entries, seen, dirName) + continue + } + if !seen[rest] { + seen[rest] = true + mt := m.meta[k] + entries = append(entries, NewDirEntry(rest, false, mt.mode, NewFileInfo(rest, int64(len(content)), mt.mode, mt.modTime, false))) + } + } + return entries +} + +func (m *MockMedium) dirEntries(prefix string, seen map[string]bool) []fs.DirEntry { + var entries []fs.DirEntry + for d := range m.dirs { + if len(d) <= len(prefix) || d[:len(prefix)] != prefix { + continue + } + rest := d[len(prefix):] + if dirName, ok := firstPathComponent(rest); ok { + entries = appendMockDirectoryEntry(entries, seen, dirName) + continue + } + entries = appendMockDirectoryEntry(entries, seen, rest) + } + return entries +} + +func firstPathComponent(rest string) (string, bool) { + for i, c := range rest { + if c == '/' { + return rest[:i], true + } + } + return "", false +} + +func appendMockDirectoryEntry(entries []fs.DirEntry, seen map[string]bool, name string) []fs.DirEntry { + if name == "" || seen[name] { + return entries + } + seen[name] = true + return append(entries, NewDirEntry(name, true, 0755, NewFileInfo(name, 0, 0755, time.Now(), true))) +} + +func (m *MockMedium) Stat(path string) (fs.FileInfo, error) { + m.mu.RLock() + defer m.mu.RUnlock() + if content, ok := m.Files[path]; ok { + mt := m.meta[path] + return NewFileInfo(pathpkg.Base(path), int64(len(content)), mt.mode, mt.modTime, false), nil + } + if m.dirs[path] { + return NewFileInfo(pathpkg.Base(path), 0, 0755, time.Now(), true), nil + } + return nil, fs.ErrNotExist +} + +func (m *MockMedium) Open(path string) (fs.File, error) { + m.mu.RLock() + 
defer m.mu.RUnlock() + content, ok := m.Files[path] + if !ok { + return nil, fs.ErrNotExist + } + mt := m.meta[path] + return &MockFile{Reader: bytes.NewReader([]byte(content)), info: NewFileInfo(pathpkg.Base(path), int64(len(content)), mt.mode, mt.modTime, false)}, nil +} + +func (m *MockMedium) Create(path string) (goio.WriteCloser, error) { + return &MockWriteCloser{medium: m, path: path}, nil +} + +func (m *MockMedium) Append(path string) (goio.WriteCloser, error) { + m.mu.RLock() + existing := m.Files[path] + m.mu.RUnlock() + return &MockWriteCloser{medium: m, path: path, buf: *bytes.NewBufferString(existing)}, nil +} + +func (m *MockMedium) ReadStream(path string) (goio.ReadCloser, error) { + m.mu.RLock() + defer m.mu.RUnlock() + f, ok := m.Files[path] + if !ok { + return nil, fs.ErrNotExist + } + return goio.NopCloser(bytes.NewReader([]byte(f))), nil +} + +func (m *MockMedium) WriteStream(path string) (goio.WriteCloser, error) { + return m.Create(path) +} + +func (m *MockMedium) Exists(path string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + _, fileOK := m.Files[path] + return fileOK || m.dirs[path] +} + +func (m *MockMedium) IsDir(path string) bool { + m.mu.RLock() + defer m.mu.RUnlock() + return m.dirs[path] +} + +// MockFile implements fs.File for MockMedium. +// +// Example: +// +// file, _ := mock.Open("config/app.yaml") +// defer file.Close() +type MockFile struct { + *bytes.Reader + info fs.FileInfo +} + +func (f *MockFile) Stat() (fs.FileInfo, error) { return f.info, nil } +func (f *MockFile) Close() error { return nil } + +// MockWriteCloser implements io.WriteCloser for MockMedium. +// On Close, the buffered content is written to the mock filesystem. 
+// +// Example: +// +// w, _ := mock.Create("output.txt") +// w.Write([]byte("hello")) +// w.Close() +type MockWriteCloser struct { + medium *MockMedium + path string + buf bytes.Buffer +} + +func (w *MockWriteCloser) Write(p []byte) (int, error) { return w.buf.Write(p) } + +func (w *MockWriteCloser) Close() error { + return w.medium.Write(w.path, w.buf.String()) +} diff --git a/node/node.go b/node/node.go index e5815f2..1e28db4 100644 --- a/node/node.go +++ b/node/node.go @@ -5,17 +5,14 @@ package node import ( - "archive/tar" - "bytes" - "cmp" - goio "io" - "io/fs" - "path" - "slices" - "time" + "archive/tar" // AX-6-exception: tar archive transport has no core equivalent. + goio "io" // AX-6-exception: io interface types have no core equivalent; io.EOF preserves stream semantics. + "io/fs" // AX-6-exception: fs interface types have no core equivalent. + "time" // AX-6-exception: filesystem metadata timestamps have no core equivalent. core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" + "dappco.re/go/io/internal/fsutil" ) // Example: nodeTree := node.New() @@ -37,6 +34,17 @@ func New() *Node { return &Node{files: make(map[string]*dataFile)} } +// AX-6-exception: core.NewBuffer is unavailable in the pinned core module; this is +// the minimal intrinsic writer needed by archive/tar. +type nodeArchiveBuffer struct { + data []byte +} + +func (buffer *nodeArchiveBuffer) Write(data []byte) (int, error) { + buffer.data = append(buffer.data, data...) 
+ return len(data), nil +} + // Example: nodeTree.AddData("config/app.yaml", []byte("port: 8080")) func (node *Node) AddData(name string, content []byte) { name = core.TrimPrefix(name, "/") @@ -55,7 +63,7 @@ func (node *Node) AddData(name string, content []byte) { // Example: snapshot, _ := nodeTree.ToTar() func (node *Node) ToTar() ([]byte, error) { - buffer := new(bytes.Buffer) + buffer := &nodeArchiveBuffer{} tarWriter := tar.NewWriter(buffer) for _, file := range node.files { @@ -77,7 +85,7 @@ func (node *Node) ToTar() ([]byte, error) { return nil, err } - return buffer.Bytes(), nil + return buffer.data, nil } // Example: restored, _ := node.FromTar(snapshot) @@ -92,7 +100,7 @@ func FromTar(data []byte) (*Node, error) { // Example: _ = nodeTree.LoadTar(snapshot) func (node *Node) LoadTar(data []byte) error { newFiles := make(map[string]*dataFile) - tarReader := tar.NewReader(bytes.NewReader(data)) + tarReader := tar.NewReader(core.NewReader(string(data))) for { header, err := tarReader.Next() @@ -104,9 +112,12 @@ func (node *Node) LoadTar(data []byte) error { } if header.Typeflag == tar.TypeReg { - content, err := goio.ReadAll(tarReader) - if err != nil { - return core.E("node.LoadTar", "read tar entry", err) + contentResult := core.ReadAll(tarReader) + if !contentResult.OK { + if err, ok := contentResult.Value.(error); ok { + return core.E("node.LoadTar", "read tar entry", err) + } + return core.E("node.LoadTar", "read tar entry", fs.ErrInvalid) } name := core.TrimPrefix(header.Name, "/") if name == "" || core.HasSuffix(name, "/") { @@ -114,7 +125,7 @@ func (node *Node) LoadTar(data []byte) error { } newFiles[name] = &dataFile{ name: name, - content: content, + content: []byte(contentResult.Value.(string)), modTime: header.ModTime, } } @@ -282,7 +293,7 @@ func (node *Node) Stat(name string) (fs.FileInfo, error) { prefix := name + "/" for filePath := range node.files { if core.HasPrefix(filePath, prefix) { - return &dirInfo{name: path.Base(name), modTime: 
time.Now()}, nil + return &dirInfo{name: core.PathBase(name), modTime: time.Now()}, nil } } return nil, core.E("node.Stat", core.Concat("path not found: ", name), fs.ErrNotExist) @@ -334,9 +345,7 @@ func (node *Node) ReadDir(name string) ([]fs.DirEntry, error) { } } - slices.SortFunc(entries, func(a, b fs.DirEntry) int { - return cmp.Compare(a.Name(), b.Name()) - }) + fsutil.SortDirEntriesByName(entries) return entries, nil } @@ -499,7 +508,7 @@ func (node *Node) ReadStream(filePath string) (goio.ReadCloser, error) { if err != nil { return nil, err } - return goio.NopCloser(file), nil + return file, nil } func (node *Node) WriteStream(filePath string) (goio.WriteCloser, error) { @@ -543,7 +552,7 @@ func (file *dataFile) Close() error { return nil } type dataFileInfo struct{ file *dataFile } -func (info *dataFileInfo) Name() string { return path.Base(info.file.name) } +func (info *dataFileInfo) Name() string { return core.PathBase(info.file.name) } func (info *dataFileInfo) Size() int64 { return int64(len(info.file.content)) } @@ -557,16 +566,18 @@ func (info *dataFileInfo) Sys() any { return nil } type dataFileReader struct { file *dataFile - reader *bytes.Reader + offset int64 } func (reader *dataFileReader) Stat() (fs.FileInfo, error) { return reader.file.Stat() } func (reader *dataFileReader) Read(buffer []byte) (int, error) { - if reader.reader == nil { - reader.reader = bytes.NewReader(reader.file.content) + if reader.offset >= int64(len(reader.file.content)) { + return 0, goio.EOF } - return reader.reader.Read(buffer) + readCount := copy(buffer, reader.file.content[reader.offset:]) + reader.offset += int64(readCount) + return readCount, nil } func (reader *dataFileReader) Close() error { return nil } @@ -594,7 +605,7 @@ type dirFile struct { } func (directory *dirFile) Stat() (fs.FileInfo, error) { - return &dirInfo{name: path.Base(directory.path), modTime: directory.modTime}, nil + return &dirInfo{name: core.PathBase(directory.path), modTime: 
directory.modTime}, nil } func (directory *dirFile) Read([]byte) (int, error) { @@ -608,8 +619,6 @@ var _ fs.FS = (*Node)(nil) var _ fs.StatFS = (*Node)(nil) var _ fs.ReadDirFS = (*Node)(nil) -var _ goio.ReadCloser = goio.NopCloser(nil) - var _ goio.WriteCloser = (*nodeWriter)(nil) var _ fs.File = (*dirFile)(nil) diff --git a/node/node_test.go b/node/node_test.go index bdb72cf..9fa0b0a 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -9,7 +9,7 @@ import ( "testing" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go new file mode 100644 index 0000000..5d73e10 --- /dev/null +++ b/pkg/api/handlers.go @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + goio "io" + "io/fs" + "net/http" + "strconv" + "strings" + "sync" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + workspacesvc "dappco.re/go/io/workspace" + "github.com/gin-gonic/gin" +) + +type rfc15Action struct { + Name string + Medium string + Operation string +} + +var rfc15Actions = []rfc15Action{ + {Name: coreio.ActionLocalRead, Medium: "local", Operation: "read"}, + {Name: coreio.ActionLocalWrite, Medium: "local", Operation: "write"}, + {Name: coreio.ActionLocalList, Medium: "local", Operation: "list"}, + {Name: coreio.ActionLocalDelete, Medium: "local", Operation: "delete"}, + {Name: coreio.ActionMemoryRead, Medium: "memory", Operation: "read"}, + {Name: coreio.ActionMemoryWrite, Medium: "memory", Operation: "write"}, + {Name: coreio.ActionGitHubClone, Medium: "github", Operation: "clone"}, + {Name: coreio.ActionGitHubRead, Medium: "github", Operation: "read"}, + {Name: coreio.ActionPWAScrape, Medium: "pwa", Operation: "scrape"}, + {Name: coreio.ActionSFTPRead, Medium: "sftp", Operation: "read"}, + {Name: coreio.ActionSFTPWrite, Medium: "sftp", 
Operation: "write"}, + {Name: coreio.ActionS3Read, Medium: "s3", Operation: "read"}, + {Name: coreio.ActionS3Write, Medium: "s3", Operation: "write"}, + {Name: coreio.ActionCubeRead, Medium: "cube", Operation: "read"}, + {Name: coreio.ActionCubeWrite, Medium: "cube", Operation: "write"}, + {Name: coreio.ActionCubePack, Medium: "cube", Operation: "pack"}, + {Name: coreio.ActionCubeUnpack, Medium: "cube", Operation: "unpack"}, + {Name: coreio.ActionCopy, Medium: "any", Operation: "copy"}, +} + +var errUnsupportedMediumOperation = errors.New("unsupported medium operation") + +var apiWorkspaceServices sync.Map + +type mediumRequest struct { + Root string + Path string + OldPath string + NewPath string + Content string + Mode any + Recursive bool +} + +type mediumResponse struct { + OK bool `json:"ok,omitempty"` + Content string `json:"content,omitempty"` + Entries []dirEntryDTO `json:"entries,omitempty"` + Info *fileInfoDTO `json:"info,omitempty"` + Exists *bool `json:"exists,omitempty"` + IsFile *bool `json:"isFile,omitempty"` + IsDir *bool `json:"isDir,omitempty"` + Action string `json:"action,omitempty"` + Value any `json:"value,omitempty"` + Medium string `json:"medium,omitempty"` + Op string `json:"op,omitempty"` + Meta map[string]any `json:"meta,omitempty"` +} + +type apiError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +type apiResponse struct { + Success bool `json:"success"` + Data any `json:"data,omitempty"` + Error *apiError `json:"error,omitempty"` +} + +func apiOK(data any) apiResponse { + return apiResponse{Success: true, Data: data} +} + +func apiFail(code, message string) apiResponse { + return apiResponse{Success: false, Error: &apiError{Code: code, Message: message}} +} + +type dirEntryDTO struct { + Name string `json:"name"` + IsDir bool `json:"isDir"` + Type string `json:"type,omitempty"` + Size int64 `json:"size,omitempty"` + Mode string `json:"mode,omitempty"` +} + +type fileInfoDTO struct { + Name string 
`json:"name"` + Size int64 `json:"size"` + Mode string `json:"mode"` + ModTime string `json:"modTime"` + IsDir bool `json:"isDir"` +} + +func (p *IOProvider) createWorkspace(c *gin.Context) { + payload, ok := bindPayload(c) + if !ok { + return + } + workspaceName := workspaceNameFromPayload(payload) + if workspaceName == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "workspace is required")) + return + } + + service, ok := p.resolveWorkspaceService(c) + if !ok { + return + } + result := service.HandleWorkspaceCommand(workspacesvc.WorkspaceCommand{ + Action: workspacesvc.WorkspaceCreateAction, + Workspace: workspaceName, + }) + writeWorkspaceResult(c, workspacesvc.WorkspaceCreateAction, result) +} + +func (p *IOProvider) switchWorkspace(c *gin.Context) { + workspaceID := strings.TrimSpace(c.Param("id")) + if workspaceID == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "workspace id is required")) + return + } + + service, ok := p.resolveWorkspaceService(c) + if !ok { + return + } + result := service.HandleWorkspaceCommand(workspacesvc.WorkspaceCommand{ + Action: workspacesvc.WorkspaceSwitchAction, + Workspace: workspaceID, + }) + writeWorkspaceResult(c, workspacesvc.WorkspaceSwitchAction, result) +} + +func (p *IOProvider) handleWorkspaceCommand(c *gin.Context) { + workspaceID := strings.TrimSpace(c.Param("id")) + if workspaceID == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "workspace id is required")) + return + } + payload, ok := bindPayload(c) + if !ok { + return + } + if strings.TrimSpace(stringValue(payload, "action")) == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "action is required")) + return + } + + service, ok := p.resolveWorkspaceService(c) + if !ok { + return + } + command := workspaceCommandFromPayload(workspaceID, payload) + result := service.HandleWorkspaceCommand(command) + writeWorkspaceResult(c, command.Action, result) +} + +func (p *IOProvider) dispatchAction(c 
*gin.Context) { + actionName := strings.TrimSpace(c.Param("action")) + action, ok := findRFC15Action(actionName) + if !ok { + c.JSON(http.StatusNotFound, apiFail("unknown_action", "RFC §15 action is not registered")) + return + } + payload, ok := bindPayload(c) + if !ok { + return + } + if p == nil || p.core == nil { + c.JSON(http.StatusServiceUnavailable, apiFail("service_unavailable", "core action registry is not configured")) + return + } + + result := p.core.Action(action.Name).Run(c.Request.Context(), optionsFromPayload(payload)) + if !result.OK { + c.JSON(http.StatusInternalServerError, apiFail("action_failed", resultErrorMessage(result))) + return + } + c.JSON(http.StatusOK, apiOK(mediumResponse{OK: true, Action: action.Name, Value: result.Value})) +} + +func (p *IOProvider) dispatchMedium(c *gin.Context) { + mediumType := strings.TrimSpace(c.Param("type")) + op := strings.TrimSpace(c.Param("op")) + if mediumType == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "medium type is required")) + return + } + if op == "" { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", "medium operation is required")) + return + } + + payload, ok := bindPayload(c) + if !ok { + return + } + req := mediumRequestFromPayload(payload) + medium, ok := p.resolveMedium(c, mediumType, req) + if !ok { + return + } + + resp, err := dispatchMediumOperation(c.Request.Context(), medium, op, req) + if err != nil { + if errors.Is(err, errUnsupportedMediumOperation) { + notImplemented(c, err.Error()) + return + } + c.JSON(http.StatusInternalServerError, apiFail("medium_failed", err.Error())) + return + } + resp.Medium = mediumType + resp.Op = op + c.JSON(http.StatusOK, apiOK(resp)) +} + +func (p *IOProvider) resolveMedium(c *gin.Context, mediumType string, req mediumRequest) (coreio.Medium, bool) { + switch strings.ToLower(mediumType) { + case "memory": + if p == nil || p.memory == nil { + c.JSON(http.StatusServiceUnavailable, apiFail("service_unavailable", "memory 
medium is not configured")) + return nil, false + } + return p.memory, true + case "local": + if p == nil || p.local == nil { + c.JSON(http.StatusServiceUnavailable, apiFail("service_unavailable", "local medium is not configured")) + return nil, false + } + return p.local, true + case "github", "pwa", "sftp", "webdav": + unconfiguredMedium(c, mediumType) + return nil, false + default: + unconfiguredMedium(c, mediumType) + return nil, false + } +} + +func (p *IOProvider) resolveWorkspaceService(c *gin.Context) (*workspacesvc.Workspace, bool) { + if p == nil { + c.JSON(http.StatusServiceUnavailable, apiFail("service_unavailable", "workspace service is not configured")) + return nil, false + } + if service, ok := apiWorkspaceServices.Load(p); ok { + workspaceService, ok := service.(*workspacesvc.Workspace) + if ok { + return workspaceService, true + } + } + + medium := p.memory + if medium == nil { + medium = coreio.NewMemoryMedium() + } + workspaceService, err := workspacesvc.NewWorkspace(medium, "workspaces") + if err != nil { + c.JSON(http.StatusServiceUnavailable, apiFail("service_unavailable", err.Error())) + return nil, false + } + actual, _ := apiWorkspaceServices.LoadOrStore(p, workspaceService) + return actual.(*workspacesvc.Workspace), true +} + +func workspaceCommandFromPayload(pathWorkspace string, payload map[string]any) workspacesvc.WorkspaceCommand { + workspaceName := workspaceNameFromPayload(payload) + if workspaceName == "" { + workspaceName = pathWorkspace + } + return workspacesvc.WorkspaceCommand{ + Action: strings.TrimSpace(stringValue(payload, "action")), + Workspace: workspaceName, + Path: stringValue(payload, "path"), + Content: stringValue(payload, "content"), + } +} + +func workspaceNameFromPayload(payload map[string]any) string { + return strings.TrimSpace(stringValue(payload, "workspace", "name", "identifier", "workspaceID", "workspace_id")) +} + +func writeWorkspaceResult(c *gin.Context, action string, result core.Result) { + if 
!result.OK { + c.JSON(http.StatusInternalServerError, apiFail("workspace_failed", resultErrorMessage(result))) + return + } + + response := mediumResponse{ + OK: true, + Action: action, + Value: result.Value, + } + switch value := result.Value.(type) { + case coreio.Medium: + response.Value = nil + case string: + response.Content = value + case []fs.DirEntry: + response.Value = nil + response.Entries = dirEntryDTOs(value) + } + c.JSON(http.StatusOK, apiOK(response)) +} + +type mediumOperationHandler func(context.Context, coreio.Medium, mediumRequest) (mediumResponse, error) + +var mediumOperationHandlers = map[string]mediumOperationHandler{ + "read": readMediumOperation, + "write": writeMediumOperation, + "writemode": writeModeMediumOperation, + "ensuredir": ensureDirMediumOperation, + "mkdir": ensureDirMediumOperation, + "isfile": isFileMediumOperation, + "delete": deleteMediumOperation, + "deleteall": deleteAllMediumOperation, + "rename": renameMediumOperation, + "list": listMediumOperation, + "stat": statMediumOperation, + "open": openMediumOperation, + "create": createMediumOperation, + "append": appendMediumOperation, + "readstream": readStreamMediumOperation, + "writestream": writeStreamMediumOperation, + "exists": existsMediumOperation, + "isdir": isDirMediumOperation, +} + +func dispatchMediumOperation(ctx context.Context, medium coreio.Medium, op string, req mediumRequest) (mediumResponse, error) { + handler, ok := mediumOperationHandlers[strings.ToLower(op)] + if !ok { + return mediumResponse{}, fmt.Errorf("%w: %s", errUnsupportedMediumOperation, op) + } + return handler(ctx, medium, req) +} + +func readMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + content, err := medium.Read(req.Path) + if err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true, Content: content}, nil +} + +func writeMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) 
(mediumResponse, error) { + if err := medium.Write(req.Path, req.Content); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func writeModeMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + mode, err := fileModeValue(req.Mode, 0644) + if err != nil { + return mediumResponse{}, err + } + if err := medium.WriteMode(req.Path, req.Content, mode); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func ensureDirMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + if err := medium.EnsureDir(req.Path); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func isFileMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + ok := medium.IsFile(req.Path) + return mediumResponse{OK: true, IsFile: &ok}, nil +} + +func deleteMediumOperation(ctx context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + if req.Recursive { + return deleteAllMediumOperation(ctx, medium, req) + } + if err := medium.Delete(req.Path); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func deleteAllMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + if err := medium.DeleteAll(req.Path); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func renameMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + if err := medium.Rename(req.OldPath, req.NewPath); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func listMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + entries, err := medium.List(req.Path) + if err != nil { + return 
mediumResponse{}, err + } + return mediumResponse{OK: true, Entries: dirEntryDTOs(entries)}, nil +} + +func statMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + info, err := medium.Stat(req.Path) + if err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true, Info: fileInfoDTOFromInfo(info)}, nil +} + +func openMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + file, err := medium.Open(req.Path) + if err != nil { + return mediumResponse{}, err + } + defer file.Close() + return readAllContent(file) +} + +func createMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + writer, err := medium.Create(req.Path) + if err != nil { + return mediumResponse{}, err + } + if err := writeAndClose(writer, req.Content); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func appendMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + writer, err := medium.Append(req.Path) + if err != nil { + return mediumResponse{}, err + } + if err := writeAndClose(writer, req.Content); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func readStreamMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + reader, err := medium.ReadStream(req.Path) + if err != nil { + return mediumResponse{}, err + } + defer reader.Close() + return readAllContent(reader) +} + +func writeStreamMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + writer, err := medium.WriteStream(req.Path) + if err != nil { + return mediumResponse{}, err + } + if err := writeAndClose(writer, req.Content); err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true}, nil +} + +func existsMediumOperation(_ 
context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + ok := medium.Exists(req.Path) + return mediumResponse{OK: true, Exists: &ok}, nil +} + +func isDirMediumOperation(_ context.Context, medium coreio.Medium, req mediumRequest) (mediumResponse, error) { + ok := medium.IsDir(req.Path) + return mediumResponse{OK: true, IsDir: &ok}, nil +} + +func readAllContent(reader goio.Reader) (mediumResponse, error) { + content, err := goio.ReadAll(reader) + if err != nil { + return mediumResponse{}, err + } + return mediumResponse{OK: true, Content: string(content)}, nil +} + +func bindPayload(c *gin.Context) (map[string]any, bool) { + payload := map[string]any{} + if c.Request == nil || c.Request.Body == nil || c.Request.ContentLength == 0 { + return payload, true + } + decoder := json.NewDecoder(c.Request.Body) + decoder.UseNumber() + if err := decoder.Decode(&payload); err != nil { + c.JSON(http.StatusBadRequest, apiFail("invalid_request", err.Error())) + return nil, false + } + return payload, true +} + +func mediumRequestFromPayload(payload map[string]any) mediumRequest { + return mediumRequest{ + Root: stringValue(payload, "root"), + Path: stringValue(payload, "path"), + OldPath: stringValue(payload, "oldPath", "old_path"), + NewPath: stringValue(payload, "newPath", "new_path"), + Content: stringValue(payload, "content"), + Mode: firstValue(payload, "mode"), + Recursive: boolValue(payload, "recursive"), + } +} + +func optionsFromPayload(payload map[string]any) core.Options { + options := make([]core.Option, 0, len(payload)) + for key, value := range payload { + options = append(options, core.Option{Key: key, Value: normalizedValue(value)}) + } + return core.NewOptions(options...) 
+} + +func findRFC15Action(name string) (rfc15Action, bool) { + for _, action := range rfc15Actions { + if action.Name == name { + return action, true + } + } + return rfc15Action{}, false +} + +func notImplemented(c *gin.Context, message string) { + c.JSON(http.StatusNotImplemented, apiFail("not_implemented", message)) +} + +func unconfiguredMedium(c *gin.Context, mediumType string) { + notImplemented(c, fmt.Sprintf("%s medium is not configured", mediumType)) +} + +func resultErrorMessage(result core.Result) string { + if err, ok := result.Value.(error); ok && err != nil { + return err.Error() + } + if result.Value != nil { + return fmt.Sprint(result.Value) + } + return "action failed" +} + +func firstValue(payload map[string]any, keys ...string) any { + for _, key := range keys { + if value, ok := payload[key]; ok { + return value + } + } + return nil +} + +func stringValue(payload map[string]any, keys ...string) string { + value := firstValue(payload, keys...) + switch typed := value.(type) { + case string: + return typed + case json.Number: + return typed.String() + default: + return "" + } +} + +func boolValue(payload map[string]any, keys ...string) bool { + value := firstValue(payload, keys...) 
+ if typed, ok := value.(bool); ok { + return typed + } + return false +} + +func normalizedValue(value any) any { + switch typed := value.(type) { + case json.Number: + if i, err := typed.Int64(); err == nil { + return int(i) + } + if f, err := typed.Float64(); err == nil { + return f + } + return typed.String() + case []any: + out := make([]any, len(typed)) + for i, item := range typed { + out[i] = normalizedValue(item) + } + return out + case map[string]any: + out := make(map[string]any, len(typed)) + for key, item := range typed { + out[key] = normalizedValue(item) + } + return out + default: + return value + } +} + +func fileModeValue(value any, fallback fs.FileMode) (fs.FileMode, error) { + if value == nil { + return fallback, nil + } + switch typed := value.(type) { + case fs.FileMode: + return typed, nil + case int: + return fs.FileMode(typed), nil + case int64: + return fs.FileMode(typed), nil + case float64: + return fs.FileMode(typed), nil + case json.Number: + parsed, err := strconv.ParseInt(typed.String(), 0, 64) + if err != nil { + return 0, err + } + return fs.FileMode(parsed), nil + case string: + parsed, err := strconv.ParseInt(typed, 0, 64) + if err != nil { + return 0, err + } + return fs.FileMode(parsed), nil + default: + return 0, fmt.Errorf("unsupported file mode type %T", value) + } +} + +func dirEntryDTOs(entries []fs.DirEntry) []dirEntryDTO { + out := make([]dirEntryDTO, 0, len(entries)) + for _, entry := range entries { + dto := dirEntryDTO{ + Name: entry.Name(), + IsDir: entry.IsDir(), + Type: entry.Type().String(), + } + if info, err := entry.Info(); err == nil && info != nil { + dto.Size = info.Size() + dto.Mode = info.Mode().String() + } + out = append(out, dto) + } + return out +} + +func fileInfoDTOFromInfo(info fs.FileInfo) *fileInfoDTO { + if info == nil { + return nil + } + return &fileInfoDTO{ + Name: info.Name(), + Size: info.Size(), + Mode: info.Mode().String(), + ModTime: info.ModTime().Format("2006-01-02T15:04:05Z07:00"), + 
IsDir: info.IsDir(), + } +} + +func writeAndClose(writer goio.WriteCloser, content string) error { + if _, err := goio.WriteString(writer, content); err != nil { + _ = writer.Close() + return err + } + return writer.Close() +} diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go new file mode 100644 index 0000000..1b6a45a --- /dev/null +++ b/pkg/api/handlers_test.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package api + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + "github.com/gin-gonic/gin" +) + +func init() { + gin.SetMode(gin.TestMode) +} + +func TestCreateWorkspace_Good_Delegates(t *testing.T) { + router := testRouter(NewProvider(nil)) + + rec := postJSON(t, router, "/v1/workspace", `{"workspace":"alice"}`) + if rec.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rec.Code, rec.Body.String()) + } + if !strings.Contains(rec.Body.String(), `"success":true`) { + t.Fatalf("expected success response, got %s", rec.Body.String()) + } +} + +func TestCreateWorkspace_Bad_InvalidJSON(t *testing.T) { + router := testRouter(NewProvider(nil)) + + rec := postJSON(t, router, "/v1/workspace", `{`) + if rec.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rec.Code, rec.Body.String()) + } + assertAPIErrorCode(t, rec, "invalid_request") +} + +func TestSwitchWorkspace_Good_Delegates(t *testing.T) { + router := testRouter(NewProvider(nil)) + + create := postJSON(t, router, "/v1/workspace", `{"workspace":"ws-1"}`) + if create.Code != http.StatusOK { + t.Fatalf("expected create 200, got %d: %s", create.Code, create.Body.String()) + } + rec := postJSON(t, router, "/v1/workspace/ws-1/switch", `{}`) + if rec.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rec.Code, rec.Body.String()) + } +} + +func TestSwitchWorkspace_Bad_EmptyID(t *testing.T) { + router := testRouter(NewProvider(nil)) + + 
rec := postJSON(t, router, "/v1/workspace/%20/switch", `{}`) + if rec.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rec.Code, rec.Body.String()) + } + assertAPIErrorCode(t, rec, "invalid_request") +} + +func TestHandleWorkspaceCommand_Good_Delegates(t *testing.T) { + router := testRouter(NewProvider(nil)) + + create := postJSON(t, router, "/v1/workspace", `{"workspace":"ws-1"}`) + if create.Code != http.StatusOK { + t.Fatalf("expected create 200, got %d: %s", create.Code, create.Body.String()) + } + write := postJSON(t, router, "/v1/workspace/ws-1/command", `{"action":"write","path":"note.txt","content":"hello"}`) + if write.Code != http.StatusOK { + t.Fatalf("expected write 200, got %d: %s", write.Code, write.Body.String()) + } + read := postJSON(t, router, "/v1/workspace/ws-1/command", `{"action":"read","path":"note.txt"}`) + if read.Code != http.StatusOK { + t.Fatalf("expected read 200, got %d: %s", read.Code, read.Body.String()) + } + if !strings.Contains(read.Body.String(), "hello") { + t.Fatalf("expected response to contain read content, got %s", read.Body.String()) + } +} + +func TestHandleWorkspaceCommand_Bad_MissingAction(t *testing.T) { + router := testRouter(NewProvider(nil)) + + rec := postJSON(t, router, "/v1/workspace/ws-1/command", `{}`) + if rec.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rec.Code, rec.Body.String()) + } + assertAPIErrorCode(t, rec, "invalid_request") +} + +func TestMediumDispatcher_Good_MemoryRoundTrip(t *testing.T) { + router := testRouter(NewProvider(nil)) + + write := postJSON(t, router, "/v1/medium/memory/write", `{"path":"note.txt","content":"hello"}`) + if write.Code != http.StatusOK { + t.Fatalf("expected write 200, got %d: %s", write.Code, write.Body.String()) + } + + read := postJSON(t, router, "/v1/medium/memory/read", `{"path":"note.txt"}`) + if read.Code != http.StatusOK { + t.Fatalf("expected read 200, got %d: %s", read.Code, read.Body.String()) + } + if 
!strings.Contains(read.Body.String(), "hello") { + t.Fatalf("expected response to contain read content, got %s", read.Body.String()) + } +} + +func TestMediumDispatcher_Bad_UnsupportedMedium(t *testing.T) { + router := testRouter(NewProvider(nil)) + + rec := postJSON(t, router, "/v1/medium/github/read", `{"path":"README.md"}`) + if rec.Code != http.StatusNotImplemented { + t.Fatalf("expected 501, got %d: %s", rec.Code, rec.Body.String()) + } + assertAPIErrorCode(t, rec, "not_implemented") +} + +func TestActionDispatcher_Good_WiredActionDelegates(t *testing.T) { + coreio.ResetMemoryActionStore() + defer coreio.ResetMemoryActionStore() + + router := testRouter(NewProvider(nil)) + + write := postJSON(t, router, "/v1/io/core.io.memory.write", `{"path":"config/app.yaml","content":"port: 8080"}`) + if write.Code != http.StatusOK { + t.Fatalf("expected write action 200, got %d: %s", write.Code, write.Body.String()) + } + + read := postJSON(t, router, "/v1/io/core.io.memory.read", `{"path":"config/app.yaml"}`) + if read.Code != http.StatusOK { + t.Fatalf("expected read action 200, got %d: %s", read.Code, read.Body.String()) + } + if !strings.Contains(read.Body.String(), "port: 8080") { + t.Fatalf("expected delegated action content, got %s", read.Body.String()) + } +} + +func TestActionDispatcher_Bad_UnknownAction(t *testing.T) { + router := testRouter(NewProvider(nil)) + + rec := postJSON(t, router, "/v1/io/core.io.unknown", `{}`) + if rec.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rec.Code, rec.Body.String()) + } + assertAPIErrorCode(t, rec, "unknown_action") +} + +func TestActionDispatcher_Good_FormerMissingActionDelegates(t *testing.T) { + c := core.New() + provider := NewProvider(c) + c.Action(coreio.ActionS3Read, func(_ context.Context, opts core.Options) core.Result { + if opts.String("path") != "reports/daily.txt" { + return core.Result{}.New(core.E("test", "unexpected path", nil)) + } + return core.Result{OK: true, Value: "delegated s3 
read"} + }) + router := testRouter(provider) + + rec := postJSON(t, router, "/v1/io/core.io.s3.read", `{"path":"reports/daily.txt"}`) + if rec.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rec.Code, rec.Body.String()) + } + if !strings.Contains(rec.Body.String(), "delegated s3 read") { + t.Fatalf("expected delegated response, got %s", rec.Body.String()) + } +} + +func testRouter(provider *IOProvider) *gin.Engine { + router := gin.New() + provider.RegisterRoutes(router.Group(provider.BasePath())) + return router +} + +func postJSON(t *testing.T, router http.Handler, path string, body string) *httptest.ResponseRecorder { + t.Helper() + req := httptest.NewRequest(http.MethodPost, path, bytes.NewBufferString(body)) + req.Header.Set("Content-Type", "application/json") + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + return rec +} + +func assertAPIErrorCode(t *testing.T, rec *httptest.ResponseRecorder, code string) { + t.Helper() + var resp apiResponse + if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil { + t.Fatalf("decode response: %v; body=%s", err, rec.Body.String()) + } + if resp.Error == nil { + t.Fatalf("expected error response, got %s", rec.Body.String()) + } + if resp.Error.Code != code { + t.Fatalf("expected error code %q, got %q", code, resp.Error.Code) + } +} diff --git a/pkg/api/provider.go b/pkg/api/provider.go new file mode 100644 index 0000000..7fd3b8c --- /dev/null +++ b/pkg/api/provider.go @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Package api exposes go-io primitives as a Core API service provider. +package api + +import ( + "net/http" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + "dappco.re/go/io/cube" + "github.com/gin-gonic/gin" +) + +// ParameterDescription describes a single HTTP route parameter. +type ParameterDescription struct { + Name string + In string + Required bool + Schema map[string]any +} + +// RouteDescription describes an HTTP route exposed by IOProvider. 
+type RouteDescription struct { + Method string + Path string + Summary string + Description string + Tags []string + StatusCode int + Parameters []ParameterDescription + RequestBody map[string]any + Response map[string]any +} + +// IOProvider wraps go-io's library-only surface as HTTP routes. +type IOProvider struct { + core *core.Core + local coreio.Medium + memory coreio.Medium +} + +// NewProvider creates an IO provider backed by a Core action registry. +// +// Pass nil or no Core to create a private registry with go-io and cube actions +// registered. The variadic form keeps the provider easy to mount from core/api +// while still allowing tests and callers to inject a Core. +func NewProvider(cores ...*core.Core) *IOProvider { + var c *core.Core + if len(cores) > 0 { + c = cores[0] + } + if c == nil { + c = core.New() + } + coreio.RegisterActions(c) + cube.RegisterActions(c) + return &IOProvider{ + core: c, + local: configuredLocalMedium(), + memory: coreio.NewMemoryMedium(), + } +} + +// Name implements api.RouteGroup. +func (p *IOProvider) Name() string { return "io" } + +// BasePath implements api.RouteGroup. +func (p *IOProvider) BasePath() string { return "/v1" } + +// Register mounts the provider on a Gin router using the provider base path. +func (p *IOProvider) Register(r gin.IRouter) { + if p == nil || r == nil { + return + } + p.RegisterRoutes(r.Group(p.BasePath())) +} + +// RegisterRoutes implements api.RouteGroup. +func (p *IOProvider) RegisterRoutes(rg *gin.RouterGroup) { + if p == nil || rg == nil { + return + } + rg.POST("/workspace", p.createWorkspace) + rg.POST("/workspace/:id/switch", p.switchWorkspace) + rg.POST("/workspace/:id/command", p.handleWorkspaceCommand) + rg.POST("/medium/:type/:op", p.dispatchMedium) + rg.POST("/io/:action", p.dispatchAction) +} + +// Describe implements api.DescribableGroup. 
+func (p *IOProvider) Describe() []RouteDescription { + actionNames := make([]any, 0, len(rfc15Actions)) + for _, action := range rfc15Actions { + actionNames = append(actionNames, action.Name) + } + + return []RouteDescription{ + { + Method: http.MethodPost, + Path: "/workspace", + Summary: "Create workspace", + Description: "RFC §5 workspace creation route.", + Tags: []string{"io", "workspace"}, + StatusCode: http.StatusOK, + RequestBody: map[string]any{ + "type": "object", + "required": []string{"workspace"}, + "properties": map[string]any{ + "workspace": map[string]any{"type": "string"}, + "name": map[string]any{"type": "string"}, + "identifier": map[string]any{"type": "string"}, + "workspaceID": map[string]any{"type": "string"}, + }, + }, + Response: map[string]any{"type": "object"}, + }, + { + Method: http.MethodPost, + Path: "/workspace/:id/switch", + Summary: "Switch workspace", + Description: "RFC §5 workspace switch route.", + Tags: []string{"io", "workspace"}, + StatusCode: http.StatusOK, + Parameters: []ParameterDescription{ + {Name: "id", In: "path", Required: true, Schema: map[string]any{"type": "string"}}, + }, + Response: map[string]any{"type": "object"}, + }, + { + Method: http.MethodPost, + Path: "/workspace/:id/command", + Summary: "Handle workspace command", + Description: "RFC §5 workspace command route.", + Tags: []string{"io", "workspace"}, + StatusCode: http.StatusOK, + Parameters: []ParameterDescription{ + {Name: "id", In: "path", Required: true, Schema: map[string]any{"type": "string"}}, + }, + RequestBody: map[string]any{ + "type": "object", + "required": []string{"action"}, + "properties": map[string]any{ + "action": map[string]any{"type": "string"}, + "workspace": map[string]any{"type": "string"}, + "name": map[string]any{"type": "string"}, + "identifier": map[string]any{"type": "string"}, + "workspaceID": map[string]any{"type": "string"}, + "path": map[string]any{"type": "string"}, + "content": map[string]any{"type": "string"}, + }, + 
}, + Response: map[string]any{"type": "object"}, + }, + { + Method: http.MethodPost, + Path: "/medium/:type/:op", + Summary: "Dispatch Medium operation", + Description: "Dispatches HTTP requests to configured go-io Medium primitives.", + Tags: []string{"io", "medium"}, + StatusCode: http.StatusOK, + Parameters: []ParameterDescription{ + {Name: "type", In: "path", Required: true, Schema: map[string]any{"type": "string"}}, + {Name: "op", In: "path", Required: true, Schema: map[string]any{"type": "string"}}, + }, + RequestBody: map[string]any{ + "type": "object", + "properties": map[string]any{ + "path": map[string]any{"type": "string"}, + "oldPath": map[string]any{"type": "string"}, + "newPath": map[string]any{"type": "string"}, + "content": map[string]any{"type": "string"}, + "mode": map[string]any{"type": "integer"}, + "recursive": map[string]any{"type": "boolean"}, + }, + }, + Response: map[string]any{"type": "object"}, + }, + { + Method: http.MethodPost, + Path: "/io/:action", + Summary: "Dispatch RFC §15 IO action", + Description: "Dispatches registered go-io RFC §15 actions.", + Tags: []string{"io", "actions"}, + StatusCode: http.StatusOK, + Parameters: []ParameterDescription{ + { + Name: "action", + In: "path", + Required: true, + Schema: map[string]any{ + "type": "string", + "enum": actionNames, + }, + }, + }, + RequestBody: map[string]any{"type": "object"}, + Response: map[string]any{"type": "object"}, + }, + } +} + +func configuredLocalMedium() coreio.Medium { + root := core.Env("CORE_IO_LOCAL_ROOT") + if root == "" { + return nil + } + medium, err := coreio.NewSandboxed(root) + if err != nil { + return nil + } + return medium +} + +func errorResponseSchema() map[string]any { + return map[string]any{ + "type": "object", + "properties": map[string]any{ + "success": map[string]any{"type": "boolean"}, + "error": map[string]any{ + "type": "object", + "properties": map[string]any{ + "code": map[string]any{"type": "string"}, + "message": map[string]any{"type": 
"string"},
+				},
+			},
+		},
+	}
+}
diff --git a/pkg/api/provider_test.go b/pkg/api/provider_test.go
new file mode 100644
index 0000000..c57507e
--- /dev/null
+++ b/pkg/api/provider_test.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: EUPL-1.2
+
+package api
+
+import (
+	"testing"
+
+	core "dappco.re/go/core"
+	coreio "dappco.re/go/io"
+)
+
+// TestNewProvider_Good checks the provider identity, base path, action
+// registration on the injected Core, and the RFC §15 action count.
+func TestNewProvider_Good(t *testing.T) {
+	c := core.New()
+	provider := NewProvider(c)
+
+	if got := provider.Name(); got != "io" {
+		t.Fatalf("expected provider name io, got %q", got)
+	}
+	if got := provider.BasePath(); got != "/v1" {
+		t.Fatalf("expected base path /v1, got %q", got)
+	}
+	if !c.Action(coreio.ActionMemoryRead).Exists() {
+		t.Fatalf("expected %s to be registered", coreio.ActionMemoryRead)
+	}
+	if got := len(rfc15Actions); got != 18 {
+		t.Fatalf("expected 18 RFC actions, got %d", got)
+	}
+}
+
+// TestNewProvider_Bad checks that a nil Core argument still yields a working
+// provider with its own private action registry.
+func TestNewProvider_Bad(t *testing.T) {
+	provider := NewProvider(nil)
+	if provider == nil {
+		t.Fatal("expected provider")
+	}
+	if provider.core == nil {
+		t.Fatal("expected provider core registry")
+	}
+	if !provider.core.Action(coreio.ActionLocalRead).Exists() {
+		t.Fatalf("expected %s to be registered on default core", coreio.ActionLocalRead)
+	}
+}
+
+// TestNewProvider_Ugly checks that registering the go-io actions twice is
+// harmless and that Describe stays in sync with the mounted route count.
+func TestNewProvider_Ugly(t *testing.T) {
+	c := core.New()
+	coreio.RegisterActions(c)
+
+	provider := NewProvider(c)
+	if provider == nil {
+		t.Fatal("expected provider")
+	}
+	if !provider.core.Action(coreio.ActionCopy).Exists() {
+		t.Fatalf("expected %s to remain registered after duplicate registration", coreio.ActionCopy)
+	}
+	if descriptions := provider.Describe(); len(descriptions) != 5 {
+		t.Fatalf("expected 5 route descriptions, got %d", len(descriptions))
+	}
+}
diff --git a/pkg/medium/github/github.go b/pkg/medium/github/github.go
new file mode 100644
index 0000000..21435fd
--- /dev/null
+++ b/pkg/medium/github/github.go
@@ -0,0 +1,512 @@
+package github
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	goio "io"
+	"io/fs"
+	"net/http"
+	"net/url"
+	"path"
+	
"slices" + "strings" + "time" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + gh "github.com/google/go-github/v75/github" + "golang.org/x/oauth2" +) + +// ErrReadOnly is returned by all mutating operations on a GitHub Medium. +var ErrReadOnly = errors.New("github medium is read-only") + +const ( + opNew = "github.New" + opRead = "github.Read" + opList = "github.List" + opStat = "github.Stat" + + errNotFound = "not found: " +) + +// Medium is a GitHub REST API-backed implementation of coreio.Medium. +type Medium struct { + client *gh.Client + owner string + repo string + ref string +} + +var _ coreio.Medium = (*Medium)(nil) + +// Options configures a GitHub Medium. +type Options struct { + Client *gh.Client + HTTPClient *http.Client + Owner string + Repo string + Ref string + Branch string + Token string + TokenFile string + BaseURL string +} + +// New creates a GitHub Medium. +func New(options Options) (*Medium, error) { + owner := strings.TrimSpace(options.Owner) + if owner == "" { + return nil, core.E(opNew, "owner is required", fs.ErrInvalid) + } + repo := strings.TrimSpace(options.Repo) + if repo == "" { + return nil, core.E(opNew, "repo is required", fs.ErrInvalid) + } + + client := options.Client + if client == nil { + token := options.Token + if token == "" { + token = tokenFromEnvironment(options.TokenFile) + } + httpClient := options.HTTPClient + if token != "" { + httpClient = oauthClient(httpClient, token) + } + client = gh.NewClient(httpClient) + } + if options.BaseURL != "" { + if err := setClientBaseURL(client, options.BaseURL); err != nil { + return nil, core.E(opNew, "base URL is invalid", err) + } + } + + ref := strings.TrimSpace(options.Ref) + if ref == "" { + ref = strings.TrimSpace(options.Branch) + } + + return &Medium{ + client: client, + owner: owner, + repo: repo, + ref: ref, + }, nil +} + +func tokenFromEnvironment(tokenFile string) string { + if token := strings.TrimSpace(core.Env("GITHUB_TOKEN")); token != "" { + return token + 
} + if tokenFile == "" { + home := strings.TrimSpace(core.Env("HOME")) + if home == "" { + home = strings.TrimSpace(core.Env("DIR_HOME")) + } + if home == "" { + return "" + } + tokenFile = core.Path(home, ".config", "lthn", "github-token") + } + + medium, relativePath, err := tokenFileMedium(tokenFile) + if err != nil { + return "" + } + data, err := medium.Read(relativePath) + if err != nil { + return "" + } + return strings.TrimSpace(data) +} + +func tokenFileMedium(tokenFile string) (coreio.Medium, string, error) { + if core.PathIsAbs(tokenFile) { + root := core.PathDir(tokenFile) + relativePath := core.PathBase(tokenFile) + if root == "" || root == "." || relativePath == "" || relativePath == "." || relativePath == "/" { + return nil, "", fs.ErrInvalid + } + medium, err := coreio.NewSandboxed(root) + return medium, relativePath, err + } + medium, err := coreio.NewSandboxed(".") + return medium, tokenFile, err +} + +func oauthClient(client *http.Client, token string) *http.Client { + var clone http.Client + if client != nil { + clone = *client + } + transport := clone.Transport + if transport == nil { + transport = http.DefaultTransport + } + clone.Transport = &oauth2.Transport{ + Source: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}), + Base: transport, + } + return &clone +} + +func setClientBaseURL(client *gh.Client, baseURL string) error { + parsed, err := url.Parse(baseURL) + if err != nil { + return err + } + if parsed.Scheme == "" || parsed.Host == "" { + return fs.ErrInvalid + } + if !strings.HasSuffix(parsed.Path, "/") { + parsed.Path += "/" + } + client.BaseURL = parsed + return nil +} + +func cleanRelative(filePath string) string { + clean := path.Clean("/" + strings.ReplaceAll(filePath, "\\", "/")) + if clean == "/" { + return "" + } + return strings.TrimPrefix(clean, "/") +} + +func requiredPath(operation, filePath string) (string, error) { + clean := cleanRelative(filePath) + if clean == "" { + return "", core.E(operation, "path is 
required", fs.ErrInvalid) + } + return clean, nil +} + +func (medium *Medium) contentOptions() *gh.RepositoryContentGetOptions { + if medium.ref == "" { + return nil + } + return &gh.RepositoryContentGetOptions{Ref: medium.ref} +} + +func (medium *Medium) getContents(operation, filePath string) (*gh.RepositoryContent, []*gh.RepositoryContent, error) { + fileContent, directoryContent, _, err := medium.client.Repositories.GetContents( + context.Background(), + medium.owner, + medium.repo, + filePath, + medium.contentOptions(), + ) + if err != nil { + return nil, nil, wrapGitHubError(operation, filePath, err) + } + return fileContent, directoryContent, nil +} + +func wrapGitHubError(operation, filePath string, err error) error { + if err == nil { + return nil + } + if errors.Is(err, gh.ErrPathForbidden) { + return core.E(operation, core.Concat("path is invalid: ", filePath), fs.ErrInvalid) + } + + var responseError *gh.ErrorResponse + if errors.As(err, &responseError) && responseError.Response != nil { + switch responseError.Response.StatusCode { + case http.StatusNotFound: + return core.E(operation, core.Concat(errNotFound, filePath), fs.ErrNotExist) + case http.StatusUnauthorized, http.StatusForbidden: + return core.E(operation, core.Concat("permission denied: ", filePath), fs.ErrPermission) + case http.StatusUnprocessableEntity: + return core.E(operation, core.Concat("invalid path: ", filePath), fs.ErrInvalid) + } + } + return core.E(operation, core.Concat("GitHub contents request failed: ", filePath), err) +} + +func readOnly(operation string) error { + return core.E(operation, "GitHub medium is read-only", ErrReadOnly) +} + +func fileInfoForContent(content *gh.RepositoryContent, name string) coreio.FileInfo { + mode := fs.FileMode(0644) + isDir := content.GetType() == "dir" + if isDir { + mode = fs.ModeDir | 0755 + } + return coreio.NewFileInfo(name, int64(content.GetSize()), mode, time.Time{}, isDir) +} + +func dirInfoForPath(filePath string) coreio.FileInfo { + 
name := path.Base(filePath) + if name == "." || name == "/" || name == "" { + name = "." + } + return coreio.NewFileInfo(name, 0, fs.ModeDir|0755, time.Time{}, true) +} + +// Read reads a repository file into a string. +func (medium *Medium) Read(filePath string) (string, error) { + clean, err := requiredPath(opRead, filePath) + if err != nil { + return "", err + } + fileContent, directoryContent, err := medium.getContents(opRead, clean) + if err != nil { + return "", err + } + if directoryContent != nil || fileContent.GetType() == "dir" { + return "", core.E(opRead, core.Concat("path is a directory: ", clean), fs.ErrInvalid) + } + if fileContent == nil { + return "", core.E(opRead, core.Concat(errNotFound, clean), fs.ErrNotExist) + } + content, err := fileContent.GetContent() + if err != nil { + return "", core.E(opRead, core.Concat("decode content failed: ", clean), err) + } + return content, nil +} + +// Write returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) Write(filePath, content string) error { + return readOnly("github.Write") +} + +// WriteMode returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + return readOnly("github.WriteMode") +} + +// EnsureDir returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) EnsureDir(filePath string) error { + return readOnly("github.EnsureDir") +} + +// IsFile reports whether filePath exists and is not a directory. +func (medium *Medium) IsFile(filePath string) bool { + clean := cleanRelative(filePath) + if clean == "" { + return false + } + info, err := medium.Stat(clean) + return err == nil && !info.IsDir() +} + +// Delete returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) Delete(filePath string) error { + return readOnly("github.Delete") +} + +// DeleteAll returns ErrReadOnly because GitHub Medium is read-only. 
+func (medium *Medium) DeleteAll(filePath string) error { + return readOnly("github.DeleteAll") +} + +// Rename returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) Rename(oldPath, newPath string) error { + return readOnly("github.Rename") +} + +// List returns a recursive listing under a repository directory. +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + clean := cleanRelative(filePath) + entries, err := medium.listRecursive(clean) + if err != nil { + return nil, err + } + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + return entries, nil +} + +func (medium *Medium) listRecursive(filePath string) ([]fs.DirEntry, error) { + fileContent, directoryContent, err := medium.getContents(opList, filePath) + if err != nil { + return nil, err + } + if fileContent == nil && directoryContent == nil { + return nil, core.E(opList, core.Concat(errNotFound, filePath), fs.ErrNotExist) + } + if fileContent != nil && fileContent.GetType() != "dir" { + return nil, core.E(opList, core.Concat("path is not a directory: ", filePath), fs.ErrInvalid) + } + if directoryContent == nil { + return nil, core.E(opList, core.Concat("path is not a directory: ", filePath), fs.ErrInvalid) + } + + var entries []fs.DirEntry + for _, content := range directoryContent { + name := cleanRelative(content.GetPath()) + if name == "" { + name = content.GetName() + } + info := fileInfoForContent(content, name) + entries = append(entries, coreio.NewDirEntry(name, info.IsDir(), info.Mode(), info)) + if content.GetType() == "dir" { + childEntries, err := medium.listRecursive(content.GetPath()) + if err != nil { + return nil, err + } + entries = append(entries, childEntries...) + } + } + return entries, nil +} + +// Stat returns metadata for a repository path. 
+func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + clean, err := requiredPath(opStat, filePath) + if err != nil { + return nil, err + } + fileContent, directoryContent, err := medium.getContents(opStat, clean) + if err != nil { + return nil, err + } + if fileContent == nil && directoryContent == nil { + return nil, core.E(opStat, core.Concat(errNotFound, clean), fs.ErrNotExist) + } + if directoryContent != nil || fileContent.GetType() == "dir" { + return dirInfoForPath(clean), nil + } + return fileInfoForContent(fileContent, path.Base(clean)), nil +} + +// Open opens a repository file for reading. +func (medium *Medium) Open(filePath string) (fs.File, error) { + content, err := medium.Read(filePath) + if err != nil { + return nil, err + } + info, err := medium.Stat(filePath) + if err != nil { + return nil, err + } + return &githubFile{ + name: info.Name(), + content: []byte(content), + mode: info.Mode(), + modTime: info.ModTime(), + }, nil +} + +// Create returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { + return nil, readOnly("github.Create") +} + +// Append returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { + return nil, readOnly("github.Append") +} + +// ReadStream opens a repository file as an io.ReadCloser. +func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + content, err := medium.Read(filePath) + if err != nil { + return nil, err + } + return goio.NopCloser(strings.NewReader(content)), nil +} + +// WriteStream returns ErrReadOnly because GitHub Medium is read-only. +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return nil, readOnly("github.WriteStream") +} + +// Exists reports whether a repository path exists. 
+func (medium *Medium) Exists(filePath string) bool { + clean := cleanRelative(filePath) + if clean == "" { + return false + } + _, err := medium.Stat(clean) + return err == nil +} + +// IsDir reports whether a repository path exists and is a directory. +func (medium *Medium) IsDir(filePath string) bool { + clean := cleanRelative(filePath) + if clean == "" { + return false + } + info, err := medium.Stat(clean) + return err == nil && info.IsDir() +} + +// Clone returns all file contents under filePath, keyed by repository path. +func (medium *Medium) Clone(filePath string) (map[string]string, error) { + clean := cleanRelative(filePath) + if clean != "" { + info, err := medium.Stat(clean) + if err != nil { + return nil, err + } + if !info.IsDir() { + content, err := medium.Read(clean) + if err != nil { + return nil, err + } + return map[string]string{clean: content}, nil + } + } + + entries, err := medium.List(clean) + if err != nil { + return nil, err + } + contents := make(map[string]string) + for _, entry := range entries { + if entry.IsDir() { + continue + } + content, err := medium.Read(entry.Name()) + if err != nil { + return nil, err + } + contents[entry.Name()] = content + } + return contents, nil +} + +type githubFile struct { + name string + content []byte + mode fs.FileMode + modTime time.Time + offset int64 + closed bool +} + +var _ fs.File = (*githubFile)(nil) + +func (file *githubFile) Stat() (fs.FileInfo, error) { + return coreio.NewFileInfo(file.name, int64(len(file.content)), file.mode, file.modTime, false), nil +} + +func (file *githubFile) Read(data []byte) (int, error) { + if file.closed { + return 0, fs.ErrClosed + } + reader := bytes.NewReader(file.content) + if _, err := reader.Seek(file.offset, goio.SeekStart); err != nil { + return 0, err + } + n, err := reader.Read(data) + file.offset += int64(n) + return n, err +} + +func (file *githubFile) Close() error { + file.closed = true + return nil +} + +func (file *githubFile) String() string { + 
return fmt.Sprintf("githubFile(%s)", file.name) +} diff --git a/pkg/medium/github/github_test.go b/pkg/medium/github/github_test.go new file mode 100644 index 0000000..82499e4 --- /dev/null +++ b/pkg/medium/github/github_test.go @@ -0,0 +1,181 @@ +package github + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io/fs" + "net/http" + "net/http/httptest" + pathpkg "path" + "testing" + + core "dappco.re/go/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newGitHubTestMedium(t *testing.T, handler http.Handler) *Medium { + t.Helper() + + server := httptest.NewServer(handler) + t.Cleanup(server.Close) + + medium, err := New(Options{ + HTTPClient: server.Client(), + Owner: "Snider", + Repo: "demo", + Ref: "main", + TokenFile: pathpkg.Join(t.TempDir(), "missing-token"), + BaseURL: server.URL + "/", + }) + require.NoError(t, err) + return medium +} + +func githubFileJSON(filePath, content string) string { + encoded := base64.StdEncoding.EncodeToString([]byte(content)) + return fmt.Sprintf( + `{"type":"file","name":%q,"path":%q,"encoding":"base64","content":%q,"size":%d}`, + pathpkg.Base(filePath), + filePath, + encoded, + len(content), + ) +} + +func githubDirJSON(filePath string) string { + return fmt.Sprintf( + `{"type":"dir","name":%q,"path":%q,"size":0}`, + pathpkg.Base(filePath), + filePath, + ) +} + +func TestGitHubMedium_Read_Good(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/repos/Snider/demo/contents/docs/read.txt", func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "main", r.URL.Query().Get("ref")) + _, _ = fmt.Fprint(w, githubFileJSON("docs/read.txt", "hello github")) + }) + medium := newGitHubTestMedium(t, mux) + + content, err := medium.Read("docs/read.txt") + + require.NoError(t, err) + assert.Equal(t, "hello github", content) +} + +func TestGitHubMedium_Read_Bad(t *testing.T) { + medium := newGitHubTestMedium(t, http.NewServeMux()) + + _, err := 
medium.Read("missing.txt") + + assert.Error(t, err) + assert.True(t, errors.Is(err, fs.ErrNotExist)) +} + +func TestGitHubMedium_Read_Ugly(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/repos/Snider/demo/contents/safe/file.txt", func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprint(w, githubFileJSON("safe/file.txt", "normalised")) + }) + medium := newGitHubTestMedium(t, mux) + + content, err := medium.Read("//safe/../safe/./file.txt") + + require.NoError(t, err) + assert.Equal(t, "normalised", content) +} + +func TestGitHubMedium_List_Good(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/repos/Snider/demo/contents/dir", func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintf(w, `[%s,%s,%s]`, + githubFileJSON("dir/b.txt", "b"), + githubFileJSON("dir/a.txt", "a"), + githubDirJSON("dir/sub"), + ) + }) + mux.HandleFunc("/repos/Snider/demo/contents/dir/sub", func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintf(w, `[%s]`, githubFileJSON("dir/sub/c.txt", "c")) + }) + medium := newGitHubTestMedium(t, mux) + + entries, err := medium.List("dir") + + require.NoError(t, err) + require.Len(t, entries, 4) + assert.Equal(t, "dir/a.txt", entries[0].Name()) + assert.Equal(t, "dir/b.txt", entries[1].Name()) + assert.Equal(t, "dir/sub", entries[2].Name()) + assert.True(t, entries[2].IsDir()) + assert.Equal(t, "dir/sub/c.txt", entries[3].Name()) +} + +func TestGitHubMedium_List_Bad(t *testing.T) { + medium := newGitHubTestMedium(t, http.NewServeMux()) + + _, err := medium.List("missing") + + assert.Error(t, err) + assert.True(t, errors.Is(err, fs.ErrNotExist)) +} + +func TestGitHubMedium_List_Ugly(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("/repos/Snider/demo/contents/dir", func(w http.ResponseWriter, r *http.Request) { + _, _ = fmt.Fprintf(w, `[%s]`, githubFileJSON("dir/file.txt", "content")) + }) + medium := newGitHubTestMedium(t, mux) + + entries, err := medium.List("//dir/../dir/.") + + 
require.NoError(t, err) + require.Len(t, entries, 1) + assert.Equal(t, "dir/file.txt", entries[0].Name()) +} + +func TestGitHubMedium_Write_Good(t *testing.T) { + medium := newGitHubTestMedium(t, http.NewServeMux()) + + err := medium.Write("notes/write.txt", "content") + + assert.ErrorIs(t, err, ErrReadOnly) +} + +func TestGitHubMedium_Write_Bad(t *testing.T) { + medium := newGitHubTestMedium(t, http.NewServeMux()) + + err := medium.Write("", "content") + + assert.ErrorIs(t, err, ErrReadOnly) +} + +func TestGitHubMedium_Write_Ugly(t *testing.T) { + medium := newGitHubTestMedium(t, http.NewServeMux()) + + err := medium.Write("../escaped.txt", "content") + + assert.ErrorIs(t, err, ErrReadOnly) +} + +func TestGitHubMedium_Actions_Register(t *testing.T) { + _, ok := FactoryFor(Scheme) + require.True(t, ok) + + c := core.New() + RegisterActions(c) + + assert.True(t, c.Action(ActionRead).Exists()) + assert.True(t, c.Action(ActionList).Exists()) + assert.True(t, c.Action(ActionClone).Exists()) + + result := c.Action(ActionRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "owner", Value: "Snider"}, + core.Option{Key: "repo", Value: "demo"}, + core.Option{Key: "path", Value: ""}, + )) + assert.False(t, result.OK) +} diff --git a/pkg/medium/github/register.go b/pkg/medium/github/register.go new file mode 100644 index 0000000..3714502 --- /dev/null +++ b/pkg/medium/github/register.go @@ -0,0 +1,106 @@ +package github + +import ( + "context" + + core "dappco.re/go/core" +) + +const ( + Scheme = "github" + ActionRead = "core.io.github.read" + ActionList = "core.io.github.list" + ActionClone = "core.io.github.clone" +) + +type Factory func(Options) (*Medium, error) + +var Registry = core.NewRegistry[Factory]() + +func init() { + RegisterFactory(Scheme, New) +} + +func RegisterFactory(name string, factory Factory) core.Result { + return Registry.Set(name, factory) +} + +func FactoryFor(name string) (Factory, bool) { + result := Registry.Get(name) + if !result.OK 
{
+		return nil, false
+	}
+
+	factory, ok := result.Value.(Factory)
+	return factory, ok
+}
+
+// RegisterActions installs the GitHub read/list/clone actions on c. A nil
+// Core is ignored so callers can wire actions unconditionally.
+func RegisterActions(c *core.Core) {
+	if c == nil {
+		return
+	}
+	c.Action(ActionRead, readAction)
+	c.Action(ActionList, listAction)
+	c.Action(ActionClone, cloneAction)
+}
+
+// mediumFromOptions resolves the Medium to operate on: either a pre-built
+// *Medium supplied under the "medium" key, or a fresh one constructed from
+// the owner/repo/ref/token options (camelCase and snake_case keys both
+// accepted for tokenFile and baseURL).
+func mediumFromOptions(opts core.Options) (*Medium, error) {
+	if medium, ok := opts.Get("medium").Value.(*Medium); ok {
+		return medium, nil
+	}
+	first := func(keys ...string) string {
+		for _, key := range keys {
+			if value := opts.String(key); value != "" {
+				return value
+			}
+		}
+		return ""
+	}
+	return New(Options{
+		Owner:     opts.String("owner"),
+		Repo:      opts.String("repo"),
+		Ref:       first("ref", "branch"),
+		Token:     opts.String("token"),
+		TokenFile: first("tokenFile", "token_file"),
+		BaseURL:   first("baseURL", "base_url"),
+	})
+}
+
+// readAction fetches a single repository file and returns its content.
+func readAction(_ context.Context, opts core.Options) core.Result {
+	medium, err := mediumFromOptions(opts)
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	content, err := medium.Read(opts.String("path"))
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	return core.Result{Value: content, OK: true}
+}
+
+// listAction returns a recursive directory listing for the given path.
+func listAction(_ context.Context, opts core.Options) core.Result {
+	medium, err := mediumFromOptions(opts)
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	entries, err := medium.List(opts.String("path"))
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	return core.Result{Value: entries, OK: true}
+}
+
+// cloneAction fetches every file under the given path, keyed by repo path.
+func cloneAction(_ context.Context, opts core.Options) core.Result {
+	medium, err := mediumFromOptions(opts)
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	contents, err := medium.Clone(opts.String("path"))
+	if err != nil {
+		return core.Result{}.New(err)
+	}
+	return core.Result{Value: contents, OK: true}
+}
diff --git a/pkg/medium/pwa/pwa.go b/pkg/medium/pwa/pwa.go
new file mode 100644
index 0000000..ecff44c
--- /dev/null
+++ b/pkg/medium/pwa/pwa.go
@@ -0,0 
+1,130 @@ +package pwa + +import ( + "errors" + goio "io" + "io/fs" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" +) + +// PWA Medium is intentionally stubbed pending two Snider crypto-trio deps: +// - forge.lthn.ai/Snider/Borg — Borg IS the PWA collector (headless-browser +// scraping is one of Borg's many roles). It also wraps the scraped +// artefact in a DataNode. io.Medium was designed FOR DataNodes from day 1. +// - forge.lthn.ai/Snider/Enchantrix — encrypts the fetched payload at rest. +// Trixxie is THE encryption layer for ALL encryption across the stack. +// Borg's PWA collector is the active surface here — pwa.go is just the +// Medium-interface wrapper. Wire BOTH at canonical forge.lthn.ai/Snider/* +// paths when scaffolded — never migrate to dappco.re/*. + +// ErrNotImplemented is returned by all error-returning operations while PWA +// Medium is stubbed. +var ErrNotImplemented = errors.New("pwa medium is not implemented") + +// Medium is a stub PWA-backed implementation of coreio.Medium. +type Medium struct { + url string +} + +var _ coreio.Medium = (*Medium)(nil) + +// Options configures a PWA Medium. +type Options struct { + URL string +} + +// New creates a stub PWA Medium. +func New(options Options) (*Medium, error) { + return &Medium{url: options.URL}, nil +} + +func notImplemented(operation string) error { + return core.E(operation, "PWA medium is not implemented", ErrNotImplemented) +} + +// Read returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Read(filePath string) (string, error) { + return "", notImplemented("pwa.Read") +} + +// Write returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Write(filePath, content string) error { + return notImplemented("pwa.Write") +} + +// WriteMode returns ErrNotImplemented while the PWA collector wiring is stubbed. 
+func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + return notImplemented("pwa.WriteMode") +} + +// EnsureDir returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) EnsureDir(filePath string) error { + return notImplemented("pwa.EnsureDir") +} + +// IsFile reports false while the PWA collector wiring is stubbed. +func (medium *Medium) IsFile(filePath string) bool { + return false +} + +// Delete returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Delete(filePath string) error { + return notImplemented("pwa.Delete") +} + +// DeleteAll returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) DeleteAll(filePath string) error { + return notImplemented("pwa.DeleteAll") +} + +// Rename returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Rename(oldPath, newPath string) error { + return notImplemented("pwa.Rename") +} + +// List returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + return nil, notImplemented("pwa.List") +} + +// Stat returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + return nil, notImplemented("pwa.Stat") +} + +// Open returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Open(filePath string) (fs.File, error) { + return nil, notImplemented("pwa.Open") +} + +// Create returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { + return nil, notImplemented("pwa.Create") +} + +// Append returns ErrNotImplemented while the PWA collector wiring is stubbed. 
+func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { + return nil, notImplemented("pwa.Append") +} + +// ReadStream returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + return nil, notImplemented("pwa.ReadStream") +} + +// WriteStream returns ErrNotImplemented while the PWA collector wiring is stubbed. +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return nil, notImplemented("pwa.WriteStream") +} + +// Exists reports false while the PWA collector wiring is stubbed. +func (medium *Medium) Exists(filePath string) bool { + return false +} + +// IsDir reports false while the PWA collector wiring is stubbed. +func (medium *Medium) IsDir(filePath string) bool { + return false +} diff --git a/pkg/medium/pwa/pwa_test.go b/pkg/medium/pwa/pwa_test.go new file mode 100644 index 0000000..f6b5af1 --- /dev/null +++ b/pkg/medium/pwa/pwa_test.go @@ -0,0 +1,64 @@ +package pwa + +import ( + "context" + "errors" + "testing" + + core "dappco.re/go/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPWAMedium_StubOperations_ReturnErrNotImplemented(t *testing.T) { + medium, err := New(Options{}) + require.NoError(t, err) + + checks := []struct { + name string + run func() error + }{ + {name: "Read", run: func() error { _, err := medium.Read("page"); return err }}, + {name: "Write", run: func() error { return medium.Write("page", "content") }}, + {name: "WriteMode", run: func() error { return medium.WriteMode("page", "content", 0644) }}, + {name: "EnsureDir", run: func() error { return medium.EnsureDir("page") }}, + {name: "Delete", run: func() error { return medium.Delete("page") }}, + {name: "DeleteAll", run: func() error { return medium.DeleteAll("page") }}, + {name: "Rename", run: func() error { return medium.Rename("old", "new") }}, + {name: "List", run: func() error { _, err := 
medium.List("page"); return err }}, + {name: "Stat", run: func() error { _, err := medium.Stat("page"); return err }}, + {name: "Open", run: func() error { _, err := medium.Open("page"); return err }}, + {name: "Create", run: func() error { _, err := medium.Create("page"); return err }}, + {name: "Append", run: func() error { _, err := medium.Append("page"); return err }}, + {name: "ReadStream", run: func() error { _, err := medium.ReadStream("page"); return err }}, + {name: "WriteStream", run: func() error { _, err := medium.WriteStream("page"); return err }}, + } + + for _, check := range checks { + t.Run(check.name, func(t *testing.T) { + assert.True(t, errors.Is(check.run(), ErrNotImplemented)) + }) + } + + assert.False(t, medium.IsFile("page")) + assert.False(t, medium.Exists("page")) + assert.False(t, medium.IsDir("page")) +} + +func TestPWAMedium_Actions_ReturnErrNotImplemented(t *testing.T) { + _, ok := FactoryFor(Scheme) + require.True(t, ok) + + c := core.New() + RegisterActions(c) + + for _, action := range []string{ActionScrape, ActionRead, ActionList, ActionWrite} { + require.True(t, c.Action(action).Exists()) + result := c.Action(action).Run(context.Background(), core.NewOptions( + core.Option{Key: "url", Value: "https://example.test"}, + core.Option{Key: "path", Value: "page"}, + )) + require.False(t, result.OK) + assert.True(t, errors.Is(result.Value.(error), ErrNotImplemented)) + } +} diff --git a/pkg/medium/pwa/register.go b/pkg/medium/pwa/register.go new file mode 100644 index 0000000..96913de --- /dev/null +++ b/pkg/medium/pwa/register.go @@ -0,0 +1,88 @@ +package pwa + +import ( + "context" + + core "dappco.re/go/core" +) + +const ( + Scheme = "pwa" + ActionScrape = "core.io.pwa.scrape" + ActionRead = "core.io.pwa.read" + ActionList = "core.io.pwa.list" + ActionWrite = "core.io.pwa.write" +) + +type Factory func(Options) (*Medium, error) + +var Registry = core.NewRegistry[Factory]() + +func init() { + RegisterFactory(Scheme, New) +} + +func 
RegisterFactory(name string, factory Factory) core.Result { + return Registry.Set(name, factory) +} + +func FactoryFor(name string) (Factory, bool) { + result := Registry.Get(name) + if !result.OK { + return nil, false + } + factory, ok := result.Value.(Factory) + return factory, ok +} + +func RegisterActions(c *core.Core) { + if c == nil { + return + } + c.Action(ActionScrape, scrapeAction) + c.Action(ActionRead, readAction) + c.Action(ActionList, listAction) + c.Action(ActionWrite, writeAction) +} + +func mediumFromOptions(opts core.Options) *Medium { + if medium, ok := opts.Get("medium").Value.(*Medium); ok { + return medium + } + return &Medium{url: opts.String("url")} +} + +func readAction(_ context.Context, opts core.Options) core.Result { + content, err := mediumFromOptions(opts).Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +func scrapeAction(_ context.Context, opts core.Options) core.Result { + target := opts.String("url") + if target == "" { + target = opts.String("path") + } + content, err := mediumFromOptions(opts).Read(target) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +func listAction(_ context.Context, opts core.Options) core.Result { + entries, err := mediumFromOptions(opts).List(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: entries, OK: true} +} + +func writeAction(_ context.Context, opts core.Options) core.Result { + if err := mediumFromOptions(opts).Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} diff --git a/pkg/medium/sftp/register.go b/pkg/medium/sftp/register.go new file mode 100644 index 0000000..5bcf24c --- /dev/null +++ b/pkg/medium/sftp/register.go @@ -0,0 +1,66 @@ +package sftp + +import ( + "context" + "io/fs" + + core "dappco.re/go/core" +) + 
+const ( + Scheme = "sftp" + ActionRead = "core.io.sftp.read" + ActionWrite = "core.io.sftp.write" +) + +type Factory func(Options) (*Medium, error) + +var Registry = core.NewRegistry[Factory]() + +func init() { + RegisterFactory(Scheme, New) +} + +func RegisterFactory(name string, factory Factory) core.Result { + return Registry.Set(name, factory) +} + +func FactoryFor(name string) (Factory, bool) { + result := Registry.Get(name) + if !result.OK { + return nil, false + } + factory, ok := result.Value.(Factory) + return factory, ok +} + +func RegisterActions(c *core.Core) { + if c == nil { + return + } + c.Action(ActionRead, readAction) + c.Action(ActionWrite, writeAction) +} + +func readAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok { + return core.Result{}.New(core.E("sftp.readAction", "medium is required", fs.ErrInvalid)) + } + content, err := medium.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +func writeAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok { + return core.Result{}.New(core.E("sftp.writeAction", "medium is required", fs.ErrInvalid)) + } + if err := medium.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} diff --git a/pkg/medium/sftp/sftp.go b/pkg/medium/sftp/sftp.go new file mode 100644 index 0000000..4af59b1 --- /dev/null +++ b/pkg/medium/sftp/sftp.go @@ -0,0 +1,414 @@ +package sftp + +import ( + "cmp" + goio "io" + "io/fs" + "os" + "path" + "slices" + "strings" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" + pkgsftp "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +const ( + opNew = "sftp.New" + opRead = "sftp.Read" + opWriteMode = "sftp.WriteMode" + opRename = "sftp.Rename" + opCreate = "sftp.Create" + opAppend = 
"sftp.Append" + opOpen = "sftp.Open" + + errOpenFailed = "open failed: " + errCreateParentFailed = "create parent failed: " +) + +// Medium is an SFTP-backed implementation of coreio.Medium. +type Medium struct { + client *pkgsftp.Client + sshClient *ssh.Client + root string + ownsClient bool + ownsSSHConn bool +} + +var _ coreio.Medium = (*Medium)(nil) + +// Options configures an SFTP Medium. +type Options struct { + Client *pkgsftp.Client + + SSHClient *ssh.Client + Address string + User string + Password string + PrivateKey []byte + + Config *ssh.ClientConfig + HostKeyCallback ssh.HostKeyCallback + Root string +} + +// New creates an SFTP Medium. Tests and callers that already manage transport +// state can inject Client directly; otherwise New dials Address using SSH. +func New(options Options) (*Medium, error) { + root := normaliseRoot(options.Root) + if options.Client != nil { + return &Medium{client: options.Client, root: root}, nil + } + + if options.SSHClient != nil { + client, err := pkgsftp.NewClient(options.SSHClient) + if err != nil { + return nil, core.E(opNew, "failed to create SFTP client", err) + } + return &Medium{client: client, sshClient: options.SSHClient, root: root, ownsClient: true}, nil + } + + config, err := sshConfig(options) + if err != nil { + return nil, err + } + if options.Address == "" { + return nil, core.E(opNew, "address is required", fs.ErrInvalid) + } + + sshClient, err := ssh.Dial("tcp", options.Address, config) + if err != nil { + return nil, core.E(opNew, "failed to dial SSH server", err) + } + + client, err := pkgsftp.NewClient(sshClient) + if err != nil { + sshClient.Close() + return nil, core.E(opNew, "failed to create SFTP client", err) + } + + return &Medium{ + client: client, + sshClient: sshClient, + root: root, + ownsClient: true, + ownsSSHConn: true, + }, nil +} + +func sshConfig(options Options) (*ssh.ClientConfig, error) { + if options.Config != nil { + return options.Config, nil + } + if options.User == "" { + 
return nil, core.E(opNew, "user is required", fs.ErrInvalid) + } + if options.HostKeyCallback == nil { + return nil, core.E(opNew, "host key callback is required", fs.ErrInvalid) + } + + var auth []ssh.AuthMethod + if options.Password != "" { + auth = append(auth, ssh.Password(options.Password)) + } + if len(options.PrivateKey) > 0 { + signer, err := ssh.ParsePrivateKey(options.PrivateKey) + if err != nil { + return nil, core.E(opNew, "failed to parse private key", err) + } + auth = append(auth, ssh.PublicKeys(signer)) + } + if len(auth) == 0 { + return nil, core.E(opNew, "password or private key is required", fs.ErrInvalid) + } + + return &ssh.ClientConfig{ + User: options.User, + Auth: auth, + HostKeyCallback: options.HostKeyCallback, + }, nil +} + +func normaliseRoot(root string) string { + clean := path.Clean("/" + root) + if clean == "." || clean == "" { + return "/" + } + return clean +} + +func cleanRelative(filePath string) string { + clean := path.Clean("/" + strings.ReplaceAll(filePath, "\\", "/")) + if clean == "/" { + return "" + } + return strings.TrimPrefix(clean, "/") +} + +func (medium *Medium) remotePath(filePath string) string { + relative := cleanRelative(filePath) + if relative == "" { + return medium.root + } + if medium.root == "/" { + return "/" + relative + } + return path.Join(medium.root, relative) +} + +func (medium *Medium) requiredRemotePath(operation, filePath string) (string, error) { + if cleanRelative(filePath) == "" { + return "", core.E(operation, "path is required", fs.ErrInvalid) + } + return medium.remotePath(filePath), nil +} + +func (medium *Medium) ensureParent(remotePath string) error { + parent := path.Dir(remotePath) + if parent == "." || parent == "/" { + return nil + } + return medium.client.MkdirAll(parent) +} + +// Close closes clients created by New. Injected clients remain caller-owned. 
+func (medium *Medium) Close() error { + var err error + if medium.ownsClient && medium.client != nil { + err = medium.client.Close() + } + if medium.ownsSSHConn && medium.sshClient != nil { + if closeErr := medium.sshClient.Close(); err == nil { + err = closeErr + } + } + return err +} + +// Read reads a remote file into a string. +func (medium *Medium) Read(filePath string) (string, error) { + remotePath, err := medium.requiredRemotePath(opRead, filePath) + if err != nil { + return "", err + } + file, err := medium.client.Open(remotePath) + if err != nil { + return "", core.E(opRead, core.Concat(errOpenFailed, remotePath), err) + } + defer file.Close() + + data, err := goio.ReadAll(file) + if err != nil { + return "", core.E(opRead, core.Concat("read failed: ", remotePath), err) + } + return string(data), nil +} + +// Write writes a remote file using the default file mode. +func (medium *Medium) Write(filePath, content string) error { + return medium.WriteMode(filePath, content, 0644) +} + +// WriteMode writes a remote file and applies POSIX permissions when supported +// by the SFTP server. 
+func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + remotePath, err := medium.requiredRemotePath(opWriteMode, filePath) + if err != nil { + return err + } + if err := medium.ensureParent(remotePath); err != nil { + return core.E(opWriteMode, core.Concat(errCreateParentFailed, remotePath), err) + } + + file, err := medium.client.OpenFile(remotePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC) + if err != nil { + return core.E(opWriteMode, core.Concat(errOpenFailed, remotePath), err) + } + if _, err := file.Write([]byte(content)); err != nil { + file.Close() + return core.E(opWriteMode, core.Concat("write failed: ", remotePath), err) + } + if closeErr := file.Close(); closeErr != nil { + return core.E(opWriteMode, core.Concat("close failed: ", remotePath), closeErr) + } + if mode != 0 { + if err := medium.client.Chmod(remotePath, os.FileMode(mode)); err != nil { + return core.E(opWriteMode, core.Concat("chmod failed: ", remotePath), err) + } + } + return nil +} + +// EnsureDir creates a remote directory and any missing parents. +func (medium *Medium) EnsureDir(filePath string) error { + remotePath := medium.remotePath(filePath) + if remotePath == medium.root { + return nil + } + if err := medium.client.MkdirAll(remotePath); err != nil { + return core.E("sftp.EnsureDir", core.Concat("mkdir failed: ", remotePath), err) + } + return nil +} + +// IsFile reports whether filePath exists and is not a directory. +func (medium *Medium) IsFile(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + info, err := medium.client.Stat(medium.remotePath(filePath)) + return err == nil && !info.IsDir() +} + +// Delete removes a remote file or empty directory. 
+func (medium *Medium) Delete(filePath string) error { + remotePath, err := medium.requiredRemotePath("sftp.Delete", filePath) + if err != nil { + return err + } + if err := medium.client.Remove(remotePath); err != nil { + return core.E("sftp.Delete", core.Concat("remove failed: ", remotePath), err) + } + return nil +} + +// DeleteAll removes a remote file or directory tree. +func (medium *Medium) DeleteAll(filePath string) error { + remotePath, err := medium.requiredRemotePath("sftp.DeleteAll", filePath) + if err != nil { + return err + } + if err := medium.client.RemoveAll(remotePath); err != nil { + return core.E("sftp.DeleteAll", core.Concat("remove all failed: ", remotePath), err) + } + return nil +} + +// Rename renames a remote path. +func (medium *Medium) Rename(oldPath, newPath string) error { + oldRemotePath, err := medium.requiredRemotePath(opRename, oldPath) + if err != nil { + return err + } + newRemotePath, err := medium.requiredRemotePath(opRename, newPath) + if err != nil { + return err + } + if err := medium.ensureParent(newRemotePath); err != nil { + return core.E(opRename, core.Concat(errCreateParentFailed, newRemotePath), err) + } + if err := medium.client.Rename(oldRemotePath, newRemotePath); err != nil { + return core.E(opRename, core.Concat("rename failed: ", oldRemotePath), err) + } + return nil +} + +// List returns the immediate children under a remote directory. +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + remotePath := medium.remotePath(filePath) + infos, err := medium.client.ReadDir(remotePath) + if err != nil { + return nil, core.E("sftp.List", core.Concat("read dir failed: ", remotePath), err) + } + + entries := make([]fs.DirEntry, 0, len(infos)) + for _, info := range infos { + entries = append(entries, fs.FileInfoToDirEntry(info)) + } + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return cmp.Compare(a.Name(), b.Name()) + }) + return entries, nil +} + +// Stat returns metadata for a remote path. 
+func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + remotePath, err := medium.requiredRemotePath("sftp.Stat", filePath) + if err != nil { + return nil, err + } + info, err := medium.client.Stat(remotePath) + if err != nil { + return nil, core.E("sftp.Stat", core.Concat("stat failed: ", remotePath), err) + } + return info, nil +} + +// Open opens a remote file for reading. +func (medium *Medium) Open(filePath string) (fs.File, error) { + remotePath, err := medium.requiredRemotePath(opOpen, filePath) + if err != nil { + return nil, err + } + file, err := medium.client.Open(remotePath) + if err != nil { + return nil, core.E(opOpen, core.Concat(errOpenFailed, remotePath), err) + } + return file, nil +} + +// Create opens a remote file for replacement. +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { + remotePath, err := medium.requiredRemotePath(opCreate, filePath) + if err != nil { + return nil, err + } + if err := medium.ensureParent(remotePath); err != nil { + return nil, core.E(opCreate, core.Concat(errCreateParentFailed, remotePath), err) + } + file, err := medium.client.OpenFile(remotePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC) + if err != nil { + return nil, core.E(opCreate, core.Concat(errOpenFailed, remotePath), err) + } + return file, nil +} + +// Append opens a remote file for appending, creating it when missing. +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { + remotePath, err := medium.requiredRemotePath(opAppend, filePath) + if err != nil { + return nil, err + } + if err := medium.ensureParent(remotePath); err != nil { + return nil, core.E(opAppend, core.Concat(errCreateParentFailed, remotePath), err) + } + file, err := medium.client.OpenFile(remotePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND) + if err != nil { + return nil, core.E(opAppend, core.Concat(errOpenFailed, remotePath), err) + } + return file, nil +} + +// ReadStream opens a remote file as an io.ReadCloser. 
+func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + file, err := medium.Open(filePath) + if err != nil { + return nil, err + } + return file, nil +} + +// WriteStream opens a remote file as an io.WriteCloser. +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return medium.Create(filePath) +} + +// Exists reports whether a remote path exists. +func (medium *Medium) Exists(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + _, err := medium.client.Stat(medium.remotePath(filePath)) + return err == nil +} + +// IsDir reports whether a remote path exists and is a directory. +func (medium *Medium) IsDir(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + info, err := medium.client.Stat(medium.remotePath(filePath)) + return err == nil && info.IsDir() +} diff --git a/pkg/medium/sftp/sftp_test.go b/pkg/medium/sftp/sftp_test.go new file mode 100644 index 0000000..450f56c --- /dev/null +++ b/pkg/medium/sftp/sftp_test.go @@ -0,0 +1,135 @@ +package sftp + +import ( + "net" + "testing" + "time" + + pkgsftp "github.com/pkg/sftp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newSFTPTestMedium(t *testing.T) *Medium { + t.Helper() + + serverConn, clientConn := net.Pipe() + server := pkgsftp.NewRequestServer(serverConn, pkgsftp.InMemHandler()) + done := make(chan error, 1) + go func() { + done <- server.Serve() + }() + + client, err := pkgsftp.NewClientPipe(clientConn, clientConn) + require.NoError(t, err) + + medium, err := New(Options{Client: client}) + require.NoError(t, err) + + t.Cleanup(func() { + _ = client.Close() + _ = clientConn.Close() + _ = serverConn.Close() + select { + case <-done: + case <-time.After(time.Second): + } + }) + + return medium +} + +func TestSFTPMedium_Read_Good(t *testing.T) { + medium := newSFTPTestMedium(t) + + require.NoError(t, medium.Write("notes/read.txt", "hello sftp")) + + content, 
err := medium.Read("notes/read.txt") + require.NoError(t, err) + assert.Equal(t, "hello sftp", content) +} + +func TestSFTPMedium_Read_Bad(t *testing.T) { + medium := newSFTPTestMedium(t) + + _, err := medium.Read("missing.txt") + + assert.Error(t, err) +} + +func TestSFTPMedium_Read_Ugly(t *testing.T) { + medium := newSFTPTestMedium(t) + + require.NoError(t, medium.Write("safe/file.txt", "normalised")) + + content, err := medium.Read("/safe/../safe/./file.txt") + require.NoError(t, err) + assert.Equal(t, "normalised", content) +} + +func TestSFTPMedium_Write_Good(t *testing.T) { + medium := newSFTPTestMedium(t) + + err := medium.Write("nested/path/file.txt", "content") + require.NoError(t, err) + + assert.True(t, medium.IsFile("nested/path/file.txt")) + content, err := medium.Read("nested/path/file.txt") + require.NoError(t, err) + assert.Equal(t, "content", content) +} + +func TestSFTPMedium_Write_Bad(t *testing.T) { + medium := newSFTPTestMedium(t) + + err := medium.Write("", "content") + + assert.Error(t, err) +} + +func TestSFTPMedium_Write_Ugly(t *testing.T) { + medium := newSFTPTestMedium(t) + + require.NoError(t, medium.Write("../escaped.txt", "contained")) + + content, err := medium.Read("escaped.txt") + require.NoError(t, err) + assert.Equal(t, "contained", content) +} + +func TestSFTPMedium_List_Good(t *testing.T) { + medium := newSFTPTestMedium(t) + + require.NoError(t, medium.Write("dir/b.txt", "b")) + require.NoError(t, medium.Write("dir/a.txt", "a")) + require.NoError(t, medium.EnsureDir("dir/sub")) + + entries, err := medium.List("dir") + require.NoError(t, err) + + require.Len(t, entries, 3) + assert.Equal(t, "a.txt", entries[0].Name()) + assert.Equal(t, "b.txt", entries[1].Name()) + assert.Equal(t, "sub", entries[2].Name()) + assert.True(t, entries[2].IsDir()) +} + +func TestSFTPMedium_List_Bad(t *testing.T) { + medium := newSFTPTestMedium(t) + + _, err := medium.List("missing") + + assert.Error(t, err) +} + +func TestSFTPMedium_List_Ugly(t 
*testing.T) { + medium := newSFTPTestMedium(t) + + require.NoError(t, medium.Write("dir/file.txt", "content")) + + entries, err := medium.List("//dir/../dir/.") + require.NoError(t, err) + + require.Len(t, entries, 1) + assert.Equal(t, "file.txt", entries[0].Name()) +} diff --git a/pkg/medium/webdav/register.go b/pkg/medium/webdav/register.go new file mode 100644 index 0000000..dffaa33 --- /dev/null +++ b/pkg/medium/webdav/register.go @@ -0,0 +1,66 @@ +package webdav + +import ( + "context" + "io/fs" + + core "dappco.re/go/core" +) + +const ( + Scheme = "webdav" + ActionRead = "core.io.webdav.read" + ActionWrite = "core.io.webdav.write" +) + +type Factory func(Options) (*Medium, error) + +var Registry = core.NewRegistry[Factory]() + +func init() { + RegisterFactory(Scheme, New) +} + +func RegisterFactory(name string, factory Factory) core.Result { + return Registry.Set(name, factory) +} + +func FactoryFor(name string) (Factory, bool) { + result := Registry.Get(name) + if !result.OK { + return nil, false + } + factory, ok := result.Value.(Factory) + return factory, ok +} + +func RegisterActions(c *core.Core) { + if c == nil { + return + } + c.Action(ActionRead, readAction) + c.Action(ActionWrite, writeAction) +} + +func readAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok { + return core.Result{}.New(core.E("webdav.readAction", "medium is required", fs.ErrInvalid)) + } + content, err := medium.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +func writeAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok { + return core.Result{}.New(core.E("webdav.writeAction", "medium is required", fs.ErrInvalid)) + } + if err := medium.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} 
+} diff --git a/pkg/medium/webdav/webdav.go b/pkg/medium/webdav/webdav.go new file mode 100644 index 0000000..bb31c69 --- /dev/null +++ b/pkg/medium/webdav/webdav.go @@ -0,0 +1,640 @@ +package webdav + +import ( + "bytes" + "cmp" + "encoding/xml" + "errors" + "fmt" + goio "io" + "io/fs" + "net/http" + "net/url" + "path" + "slices" + "strconv" + "strings" + "time" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" +) + +const ( + defaultHTTPTimeout = 30 * time.Second + + opNew = "webdav.New" + opRead = "webdav.Read" + opWriteMode = "webdav.WriteMode" + opEnsureDir = "webdav.EnsureDir" + opDelete = "webdav.Delete" + opDeleteAll = "webdav.DeleteAll" + opRename = "webdav.Rename" + opPropfind = "webdav.propfind" + opReadStream = "webdav.ReadStream" +) + +const propfindBody = ` + + + + + + + +` + +// Medium is a WebDAV-backed implementation of coreio.Medium. +type Medium struct { + baseURL *url.URL + client *http.Client + username string + password string + headers http.Header +} + +var _ coreio.Medium = (*Medium)(nil) + +// Options configures a WebDAV Medium. +type Options struct { + BaseURL string + Client *http.Client + Username string + Password string + Header http.Header +} + +// New creates a WebDAV Medium. 
+func New(options Options) (*Medium, error) { + if options.BaseURL == "" { + return nil, core.E(opNew, "base URL is required", fs.ErrInvalid) + } + + baseURL, err := url.Parse(options.BaseURL) + if err != nil { + return nil, core.E(opNew, "base URL is invalid", err) + } + if baseURL.Scheme == "" || baseURL.Host == "" { + return nil, core.E(opNew, "base URL must include scheme and host", fs.ErrInvalid) + } + + client := options.Client + if client == nil { + client = &http.Client{Timeout: defaultHTTPTimeout} + } + + return &Medium{ + baseURL: baseURL, + client: client, + username: options.Username, + password: options.Password, + headers: options.Header.Clone(), + }, nil +} + +func cleanRelative(filePath string) string { + clean := path.Clean("/" + strings.ReplaceAll(filePath, "\\", "/")) + if clean == "/" { + return "" + } + return strings.TrimPrefix(clean, "/") +} + +func (medium *Medium) resourceURL(filePath string) string { + u := *medium.baseURL + basePath := strings.TrimSuffix(u.Path, "/") + relativePath := cleanRelative(filePath) + if relativePath == "" { + if basePath == "" { + u.Path = "/" + u.RawPath = "" + } + return u.String() + } + if basePath == "" { + u.Path = "/" + relativePath + u.RawPath = "" + return u.String() + } + u.Path = basePath + "/" + relativePath + u.RawPath = "" + return u.String() +} + +func (medium *Medium) requiredResourceURL(operation, filePath string) (string, error) { + if cleanRelative(filePath) == "" { + return "", core.E(operation, "path is required", fs.ErrInvalid) + } + return medium.resourceURL(filePath), nil +} + +func (medium *Medium) newRequest(method, filePath string, body goio.Reader) (*http.Request, error) { + request, err := http.NewRequest(method, medium.resourceURL(filePath), body) + if err != nil { + return nil, err + } + for key, values := range medium.headers { + for _, value := range values { + request.Header.Add(key, value) + } + } + if medium.username != "" || medium.password != "" { + 
request.SetBasicAuth(medium.username, medium.password) + } + return request, nil +} + +func (medium *Medium) do(method, filePath string, body goio.Reader) (*http.Response, error) { + request, err := medium.newRequest(method, filePath, body) + if err != nil { + return nil, err + } + return medium.client.Do(request) +} + +func statusError(operation, resource string, statusCode int) error { + switch statusCode { + case http.StatusNotFound: + return core.E(operation, core.Concat("not found: ", resource), fs.ErrNotExist) + case http.StatusConflict: + return core.E(operation, core.Concat("conflict: ", resource), fs.ErrInvalid) + case http.StatusMethodNotAllowed: + return core.E(operation, core.Concat("method not allowed: ", resource), fs.ErrExist) + default: + return core.E(operation, fmt.Sprintf("unexpected HTTP status %d for %s", statusCode, resource), nil) + } +} + +func statusOK(statusCode int, allowed ...int) bool { + for _, code := range allowed { + if statusCode == code { + return true + } + } + return false +} + +func (medium *Medium) putBytes(filePath string, data []byte) error { + resource, err := medium.requiredResourceURL(opWriteMode, filePath) + if err != nil { + return err + } + if err := medium.ensureParent(filePath); err != nil { + return err + } + + response, err := medium.do(http.MethodPut, filePath, bytes.NewReader(data)) + if err != nil { + return core.E(opWriteMode, core.Concat("PUT failed: ", resource), err) + } + defer response.Body.Close() + if !statusOK(response.StatusCode, http.StatusOK, http.StatusCreated, http.StatusNoContent) { + return statusError(opWriteMode, resource, response.StatusCode) + } + return nil +} + +func (medium *Medium) ensureParent(filePath string) error { + relative := cleanRelative(filePath) + parent := path.Dir(relative) + if parent == "." || parent == "" { + return nil + } + return medium.EnsureDir(parent) +} + +// Read reads a WebDAV resource into a string. 
+func (medium *Medium) Read(filePath string) (string, error) { + resource, err := medium.requiredResourceURL(opRead, filePath) + if err != nil { + return "", err + } + response, err := medium.do(http.MethodGet, filePath, nil) + if err != nil { + return "", core.E(opRead, core.Concat("GET failed: ", resource), err) + } + defer response.Body.Close() + if !statusOK(response.StatusCode, http.StatusOK) { + return "", statusError(opRead, resource, response.StatusCode) + } + data, err := goio.ReadAll(response.Body) + if err != nil { + return "", core.E(opRead, core.Concat("read body failed: ", resource), err) + } + return string(data), nil +} + +// Write writes a WebDAV resource using the default file mode. +func (medium *Medium) Write(filePath, content string) error { + return medium.WriteMode(filePath, content, 0644) +} + +// WriteMode writes a WebDAV resource. The mode is intentionally ignored +// because WebDAV has no portable POSIX permission model. +func (medium *Medium) WriteMode(filePath, content string, mode fs.FileMode) error { + return medium.putBytes(filePath, []byte(content)) +} + +// EnsureDir creates a WebDAV collection and any missing parent collections. 
+func (medium *Medium) EnsureDir(filePath string) error { + relative := cleanRelative(filePath) + if relative == "" { + return nil + } + + current := "" + for _, part := range strings.Split(relative, "/") { + if current == "" { + current = part + } else { + current = path.Join(current, part) + } + if err := medium.mkcol(current); err != nil { + return err + } + } + return nil +} + +func (medium *Medium) mkcol(filePath string) error { + resource := medium.resourceURL(filePath) + response, err := medium.do("MKCOL", filePath, nil) + if err != nil { + return core.E(opEnsureDir, core.Concat("MKCOL failed: ", resource), err) + } + defer response.Body.Close() + switch response.StatusCode { + case http.StatusCreated, http.StatusOK, http.StatusNoContent: + return nil + case http.StatusMethodNotAllowed: + if medium.IsDir(filePath) { + return nil + } + return statusError(opEnsureDir, resource, response.StatusCode) + default: + return statusError(opEnsureDir, resource, response.StatusCode) + } +} + +// IsFile reports whether filePath exists and is not a collection. +func (medium *Medium) IsFile(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + info, err := medium.Stat(filePath) + return err == nil && !info.IsDir() +} + +// Delete removes a file or empty collection. 
+func (medium *Medium) Delete(filePath string) error { + resource, err := medium.requiredResourceURL(opDelete, filePath) + if err != nil { + return err + } + info, err := medium.Stat(filePath) + if err != nil { + return err + } + if info.IsDir() { + entries, err := medium.List(filePath) + if err != nil { + return err + } + if len(entries) > 0 { + return core.E(opDelete, core.Concat("collection not empty: ", resource), fs.ErrExist) + } + } + + response, err := medium.do(http.MethodDelete, filePath, nil) + if err != nil { + return core.E(opDelete, core.Concat("DELETE failed: ", resource), err) + } + defer response.Body.Close() + if !statusOK(response.StatusCode, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return statusError(opDelete, resource, response.StatusCode) + } + return nil +} + +// DeleteAll removes a file or collection tree. +func (medium *Medium) DeleteAll(filePath string) error { + resource, err := medium.requiredResourceURL(opDeleteAll, filePath) + if err != nil { + return err + } + response, err := medium.do(http.MethodDelete, filePath, nil) + if err != nil { + return core.E(opDeleteAll, core.Concat("DELETE failed: ", resource), err) + } + defer response.Body.Close() + if !statusOK(response.StatusCode, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return statusError(opDeleteAll, resource, response.StatusCode) + } + return nil +} + +// Rename moves a WebDAV resource to a new path. 
+func (medium *Medium) Rename(oldPath, newPath string) error { + source, err := medium.requiredResourceURL(opRename, oldPath) + if err != nil { + return err + } + destination, err := medium.requiredResourceURL(opRename, newPath) + if err != nil { + return err + } + if err := medium.ensureParent(newPath); err != nil { + return err + } + + request, err := medium.newRequest("MOVE", oldPath, nil) + if err != nil { + return core.E(opRename, "failed to build MOVE request", err) + } + request.Header.Set("Destination", destination) + request.Header.Set("Overwrite", "T") + + response, err := medium.client.Do(request) + if err != nil { + return core.E(opRename, core.Concat("MOVE failed: ", source), err) + } + defer response.Body.Close() + if !statusOK(response.StatusCode, http.StatusCreated, http.StatusNoContent) { + return statusError(opRename, source, response.StatusCode) + } + return nil +} + +// List returns the immediate children under a WebDAV collection. +func (medium *Medium) List(filePath string) ([]fs.DirEntry, error) { + responses, requestPath, err := medium.propfind(filePath, "1") + if err != nil { + return nil, err + } + + var entries []fs.DirEntry + for _, response := range responses { + hrefPath := hrefToPath(response.Href) + if sameURLPath(hrefPath, requestPath) { + continue + } + info := response.fileInfo(hrefPath) + entries = append(entries, fs.FileInfoToDirEntry(info)) + } + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return cmp.Compare(a.Name(), b.Name()) + }) + return entries, nil +} + +// Stat returns metadata for a WebDAV resource. 
+func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { + resource, err := medium.requiredResourceURL("webdav.Stat", filePath) + if err != nil { + return nil, err + } + responses, requestPath, err := medium.propfind(filePath, "0") + if err != nil { + return nil, err + } + if len(responses) == 0 { + return nil, core.E("webdav.Stat", core.Concat("not found: ", resource), fs.ErrNotExist) + } + return responses[0].fileInfo(requestPath), nil +} + +func (medium *Medium) propfind(filePath, depth string) ([]davResponse, string, error) { + resource := medium.resourceURL(filePath) + request, err := medium.newRequest("PROPFIND", filePath, strings.NewReader(propfindBody)) + if err != nil { + return nil, "", core.E(opPropfind, "failed to build PROPFIND request", err) + } + request.Header.Set("Depth", depth) + request.Header.Set("Content-Type", "application/xml; charset=utf-8") + + response, err := medium.client.Do(request) + if err != nil { + return nil, "", core.E(opPropfind, core.Concat("PROPFIND failed: ", resource), err) + } + defer response.Body.Close() + if response.StatusCode != http.StatusMultiStatus { + return nil, "", statusError(opPropfind, resource, response.StatusCode) + } + + var multistatus davMultiStatus + if err := xml.NewDecoder(response.Body).Decode(&multistatus); err != nil { + return nil, "", core.E(opPropfind, core.Concat("decode failed: ", resource), err) + } + return multistatus.Responses, hrefToPath(resource), nil +} + +// Open opens a WebDAV resource as an fs.File. 
+func (medium *Medium) Open(filePath string) (fs.File, error) { + content, err := medium.Read(filePath) + if err != nil { + return nil, err + } + info, err := medium.Stat(filePath) + if err != nil { + return nil, err + } + if info.IsDir() { + return nil, core.E("webdav.Open", core.Concat("path is a collection: ", filePath), fs.ErrInvalid) + } + return &webdavFile{ + name: info.Name(), + content: []byte(content), + mode: info.Mode(), + modTime: info.ModTime(), + }, nil +} + +// Create opens a buffered WebDAV writer that replaces the resource on close. +func (medium *Medium) Create(filePath string) (goio.WriteCloser, error) { + if _, err := medium.requiredResourceURL("webdav.Create", filePath); err != nil { + return nil, err + } + return &webdavWriteCloser{medium: medium, path: filePath, mode: 0644}, nil +} + +// Append opens a buffered WebDAV writer that appends locally then replaces the +// resource on close. +func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { + if _, err := medium.requiredResourceURL("webdav.Append", filePath); err != nil { + return nil, err + } + + var existing []byte + content, err := medium.Read(filePath) + if err == nil { + existing = []byte(content) + } else if !errors.Is(err, fs.ErrNotExist) { + return nil, core.E("webdav.Append", core.Concat("read existing failed: ", filePath), err) + } + + return &webdavWriteCloser{medium: medium, path: filePath, data: existing, mode: 0644}, nil +} + +// ReadStream opens a WebDAV resource as an io.ReadCloser. 
+func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { + resource, err := medium.requiredResourceURL(opReadStream, filePath) + if err != nil { + return nil, err + } + response, err := medium.do(http.MethodGet, filePath, nil) + if err != nil { + return nil, core.E(opReadStream, core.Concat("GET failed: ", resource), err) + } + if !statusOK(response.StatusCode, http.StatusOK) { + response.Body.Close() + return nil, statusError(opReadStream, resource, response.StatusCode) + } + return response.Body, nil +} + +// WriteStream opens a buffered WebDAV writer that replaces the resource on close. +func (medium *Medium) WriteStream(filePath string) (goio.WriteCloser, error) { + return medium.Create(filePath) +} + +// Exists reports whether a WebDAV resource exists. +func (medium *Medium) Exists(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + _, err := medium.Stat(filePath) + return err == nil +} + +// IsDir reports whether a WebDAV resource exists and is a collection. 
+func (medium *Medium) IsDir(filePath string) bool { + if cleanRelative(filePath) == "" { + return false + } + info, err := medium.Stat(filePath) + return err == nil && info.IsDir() +} + +type davMultiStatus struct { + Responses []davResponse `xml:"response"` +} + +type davResponse struct { + Href string `xml:"href"` + PropStats []davPropStat `xml:"propstat"` +} + +type davPropStat struct { + Prop davProp `xml:"prop"` + Status string `xml:"status"` +} + +type davProp struct { + DisplayName string `xml:"displayname"` + ContentLength string `xml:"getcontentlength"` + LastModified string `xml:"getlastmodified"` + ResourceType davResourceType `xml:"resourcetype"` +} + +type davResourceType struct { + Collection *struct{} `xml:"collection"` +} + +func (response davResponse) prop() davProp { + for _, propstat := range response.PropStats { + if propstat.Status == "" || strings.Contains(propstat.Status, " 200 ") { + return propstat.Prop + } + } + if len(response.PropStats) > 0 { + return response.PropStats[0].Prop + } + return davProp{} +} + +func (response davResponse) fileInfo(fallbackPath string) fs.FileInfo { + prop := response.prop() + isDir := prop.ResourceType.Collection != nil + size, _ := strconv.ParseInt(strings.TrimSpace(prop.ContentLength), 10, 64) + modTime := time.Time{} + if prop.LastModified != "" { + if parsedTime, err := http.ParseTime(prop.LastModified); err == nil { + modTime = parsedTime + } + } + + name := prop.DisplayName + if name == "" { + name = path.Base(strings.TrimSuffix(fallbackPath, "/")) + } + if name == "." 
|| name == "/" { + name = "" + } + + mode := fs.FileMode(0644) + if isDir { + mode = fs.ModeDir | 0755 + size = 0 + } + + return coreio.NewFileInfo(name, size, mode, modTime, isDir) +} + +func hrefToPath(href string) string { + parsedURL, err := url.Parse(href) + if err == nil && parsedURL.Path != "" { + if unescaped, err := url.PathUnescape(parsedURL.Path); err == nil { + return unescaped + } + return parsedURL.Path + } + if unescaped, err := url.PathUnescape(href); err == nil { + return unescaped + } + return href +} + +func sameURLPath(left, right string) bool { + return path.Clean("/"+left) == path.Clean("/"+right) +} + +type webdavFile struct { + name string + content []byte + offset int64 + mode fs.FileMode + modTime time.Time +} + +func (file *webdavFile) Stat() (fs.FileInfo, error) { + return coreio.NewFileInfo(file.name, int64(len(file.content)), file.mode, file.modTime, false), nil +} + +func (file *webdavFile) Read(buffer []byte) (int, error) { + if file.offset >= int64(len(file.content)) { + return 0, goio.EOF + } + bytesRead := copy(buffer, file.content[file.offset:]) + file.offset += int64(bytesRead) + return bytesRead, nil +} + +func (file *webdavFile) Close() error { + return nil +} + +type webdavWriteCloser struct { + medium *Medium + path string + data []byte + mode fs.FileMode +} + +func (writer *webdavWriteCloser) Write(data []byte) (int, error) { + writer.data = append(writer.data, data...) 
+ return len(data), nil +} + +func (writer *webdavWriteCloser) Close() error { + return writer.medium.WriteMode(writer.path, string(writer.data), writer.mode) +} diff --git a/pkg/medium/webdav/webdav_test.go b/pkg/medium/webdav/webdav_test.go new file mode 100644 index 0000000..eea49f4 --- /dev/null +++ b/pkg/medium/webdav/webdav_test.go @@ -0,0 +1,120 @@ +package webdav + +import ( + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + xwebdav "golang.org/x/net/webdav" +) + +func newWebDAVTestMedium(t *testing.T) *Medium { + t.Helper() + + handler := &xwebdav.Handler{ + FileSystem: xwebdav.NewMemFS(), + LockSystem: xwebdav.NewMemLS(), + } + server := httptest.NewServer(handler) + t.Cleanup(server.Close) + + medium, err := New(Options{BaseURL: server.URL}) + require.NoError(t, err) + return medium +} + +func TestWebDAVMedium_Read_Good(t *testing.T) { + medium := newWebDAVTestMedium(t) + + require.NoError(t, medium.Write("notes/read.txt", "hello webdav")) + + content, err := medium.Read("notes/read.txt") + require.NoError(t, err) + assert.Equal(t, "hello webdav", content) +} + +func TestWebDAVMedium_Read_Bad(t *testing.T) { + medium := newWebDAVTestMedium(t) + + _, err := medium.Read("missing.txt") + + assert.Error(t, err) +} + +func TestWebDAVMedium_Read_Ugly(t *testing.T) { + medium := newWebDAVTestMedium(t) + + require.NoError(t, medium.Write("safe/file.txt", "normalised")) + + content, err := medium.Read("/safe/../safe/./file.txt") + require.NoError(t, err) + assert.Equal(t, "normalised", content) +} + +func TestWebDAVMedium_Write_Good(t *testing.T) { + medium := newWebDAVTestMedium(t) + + err := medium.Write("nested/path/file.txt", "content") + require.NoError(t, err) + + assert.True(t, medium.IsFile("nested/path/file.txt")) + content, err := medium.Read("nested/path/file.txt") + require.NoError(t, err) + assert.Equal(t, "content", content) +} + +func TestWebDAVMedium_Write_Bad(t *testing.T) { + 
medium := newWebDAVTestMedium(t) + + err := medium.Write("", "content") + + assert.Error(t, err) +} + +func TestWebDAVMedium_Write_Ugly(t *testing.T) { + medium := newWebDAVTestMedium(t) + + require.NoError(t, medium.Write("../escaped.txt", "contained")) + + content, err := medium.Read("escaped.txt") + require.NoError(t, err) + assert.Equal(t, "contained", content) +} + +func TestWebDAVMedium_List_Good(t *testing.T) { + medium := newWebDAVTestMedium(t) + + require.NoError(t, medium.Write("dir/b.txt", "b")) + require.NoError(t, medium.Write("dir/a.txt", "a")) + require.NoError(t, medium.EnsureDir("dir/sub")) + + entries, err := medium.List("dir") + require.NoError(t, err) + + require.Len(t, entries, 3) + assert.Equal(t, "a.txt", entries[0].Name()) + assert.Equal(t, "b.txt", entries[1].Name()) + assert.Equal(t, "sub", entries[2].Name()) + assert.True(t, entries[2].IsDir()) +} + +func TestWebDAVMedium_List_Bad(t *testing.T) { + medium := newWebDAVTestMedium(t) + + _, err := medium.List("missing") + + assert.Error(t, err) +} + +func TestWebDAVMedium_List_Ugly(t *testing.T) { + medium := newWebDAVTestMedium(t) + + require.NoError(t, medium.Write("dir/file.txt", "content")) + + entries, err := medium.List("//dir/../dir/.") + require.NoError(t, err) + + require.Len(t, entries, 1) + assert.Equal(t, "file.txt", entries[0].Name()) +} diff --git a/s3/actions.go b/s3/actions.go new file mode 100644 index 0000000..d46769a --- /dev/null +++ b/s3/actions.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Example: s3.RegisterActions(c) +// Example: result := c.Action(s3.ActionRead).Run(ctx, core.NewOptions( +// Example: core.Option{Key: "medium", Value: s3Medium}, +// Example: core.Option{Key: "path", Value: "reports/daily.txt"}, +// Example: )) +package s3 + +import ( + "context" + "io/fs" + + core "dappco.re/go/core" +) + +// Named action identifiers for the S3 Medium. 
Matches the go-io RFC §15 +// registry so any Core-aware agent or CLI can dispatch S3 operations by name. +// +// Example: result := c.Action(s3.ActionRead).Run(ctx, opts) +const ( + ActionRead = "core.io.s3.read" + ActionWrite = "core.io.s3.write" +) + +// Example: s3.RegisterActions(c) +// +// RegisterActions installs the S3 actions listed in the go-io RFC §15 on the +// given Core. Call this during service registration. +func RegisterActions(c *core.Core) { + if c == nil { + return + } + c.Action(ActionRead, readAction) + c.Action(ActionWrite, writeAction) +} + +// Example: opts := core.NewOptions( +// Example: core.Option{Key: "medium", Value: s3Medium}, +// Example: core.Option{Key: "path", Value: "reports/daily.txt"}, +// Example: ) +func readAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok || medium == nil { + return core.Result{}.New(core.E("s3.readAction", "medium is required", fs.ErrInvalid)) + } + content, err := medium.Read(opts.String("path")) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} +} + +// Example: opts := core.NewOptions( +// Example: core.Option{Key: "medium", Value: s3Medium}, +// Example: core.Option{Key: "path", Value: "reports/daily.txt"}, +// Example: core.Option{Key: "content", Value: "done"}, +// Example: ) +func writeAction(_ context.Context, opts core.Options) core.Result { + medium, ok := opts.Get("medium").Value.(*Medium) + if !ok || medium == nil { + return core.Result{}.New(core.E("s3.writeAction", "medium is required", fs.ErrInvalid)) + } + if err := medium.Write(opts.String("path"), opts.String("content")); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} +} diff --git a/s3/actions_test.go b/s3/actions_test.go new file mode 100644 index 0000000..3178543 --- /dev/null +++ b/s3/actions_test.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: EUPL-1.2 + +package s3 + +import ( + 
"context" + "testing" + + core "dappco.re/go/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestActions_RegisterActions_Good(t *testing.T) { + c := core.New() + RegisterActions(c) + assert.True(t, c.Action(ActionRead).Exists()) + assert.True(t, c.Action(ActionWrite).Exists()) +} + +func TestActions_RegisterActions_Bad(t *testing.T) { + // Nil Core must not panic. + assert.NotPanics(t, func() { RegisterActions(nil) }) +} + +func TestActions_RegisterActions_Ugly(t *testing.T) { + // Double registration is safe. + c := core.New() + RegisterActions(c) + assert.NotPanics(t, func() { RegisterActions(c) }) +} + +func TestActions_ReadWrite_Good(t *testing.T) { + c := core.New() + RegisterActions(c) + + client := newTestS3Client() + medium, err := New(Options{Bucket: "bucket", Client: client, Prefix: "prefix/"}) + require.NoError(t, err) + + writeResult := c.Action(ActionWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "reports/daily.txt"}, + core.Option{Key: "content", Value: "done"}, + )) + require.True(t, writeResult.OK) + + readResult := c.Action(ActionRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "reports/daily.txt"}, + )) + require.True(t, readResult.OK) + assert.Equal(t, "done", readResult.Value) +} + +func TestActions_ReadWrite_Bad(t *testing.T) { + c := core.New() + RegisterActions(c) + + // Missing medium must fail. 
+ result := c.Action(ActionRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "reports/daily.txt"}, + )) + assert.False(t, result.OK) + + result = c.Action(ActionWrite).Run(context.Background(), core.NewOptions( + core.Option{Key: "path", Value: "reports/daily.txt"}, + core.Option{Key: "content", Value: "done"}, + )) + assert.False(t, result.OK) +} + +func TestActions_ReadWrite_Ugly(t *testing.T) { + c := core.New() + RegisterActions(c) + + client := newTestS3Client() + medium, err := New(Options{Bucket: "bucket", Client: client}) + require.NoError(t, err) + + // Reading a key that was never written must fail. + result := c.Action(ActionRead).Run(context.Background(), core.NewOptions( + core.Option{Key: "medium", Value: medium}, + core.Option{Key: "path", Value: "missing.txt"}, + )) + assert.False(t, result.OK) +} diff --git a/s3/s3.go b/s3/s3.go index 3e92a2c..27328b4 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -4,19 +4,17 @@ package s3 import ( - "bytes" - "context" - goio "io" - "io/fs" - "path" - "time" + "context" // AX-6-exception: AWS SDK transport APIs require context.Context. + goio "io" // AX-6-exception: io interface types have no core equivalent; io.EOF preserves stream semantics. + "io/fs" // AX-6-exception: fs interface types have no core equivalent. + "time" // AX-6-exception: S3 object metadata timestamps have no core equivalent. 
"github.com/aws/aws-sdk-go-v2/aws" awss3 "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" ) // Example: client := awss3.NewFromConfig(aws.Config{Region: "us-east-1"}) @@ -71,11 +69,28 @@ func deleteObjectsError(prefix string, errs []types.Error) error { return core.E("s3.DeleteAll", core.Concat("partial delete failed under ", prefix, ": ", core.Join("; ", details...)), nil) } +func readAllString(reader goio.ReadCloser) (string, error) { + defer reader.Close() + + result := core.ReadAll(reader) + if !result.OK { + if err, ok := result.Value.(error); ok { + return "", err + } + return "", fs.ErrInvalid + } + content, ok := result.Value.(string) + if !ok { + return "", fs.ErrInvalid + } + return content, nil +} + func normalisePrefix(prefix string) string { if prefix == "" { return "" } - clean := path.Clean("/" + prefix) + clean := core.CleanPath("/"+prefix, "/") if clean == "/" { return "" } @@ -104,7 +119,7 @@ func New(options Options) (*Medium, error) { } func (medium *Medium) objectKey(filePath string) string { - clean := path.Clean("/" + filePath) + clean := core.CleanPath("/"+filePath, "/") if clean == "/" { clean = "" } @@ -133,13 +148,11 @@ func (medium *Medium) Read(filePath string) (string, error) { if err != nil { return "", core.E("s3.Read", core.Concat("failed to get object: ", key), err) } - defer out.Body.Close() - - data, err := goio.ReadAll(out.Body) + data, err := readAllString(out.Body) if err != nil { return "", core.E("s3.Read", core.Concat("failed to read body: ", key), err) } - return string(data), nil + return data, nil } // Example: _ = medium.Write("reports/daily.txt", "done") @@ -393,7 +406,7 @@ func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { modTime = *out.LastModified } - name := path.Base(key) + name := core.PathBase(key) return &fileInfo{ name: name, size: size, @@ -416,8 +429,7 @@ func (medium 
*Medium) Open(filePath string) (fs.File, error) { return nil, core.E("s3.Open", core.Concat("failed to get object: ", key), err) } - data, err := goio.ReadAll(out.Body) - out.Body.Close() + data, err := readAllString(out.Body) if err != nil { return nil, core.E("s3.Open", core.Concat("failed to read body: ", key), err) } @@ -432,8 +444,8 @@ func (medium *Medium) Open(filePath string) (fs.File, error) { } return &s3File{ - name: path.Base(key), - content: data, + name: core.PathBase(key), + content: []byte(data), size: size, modTime: modTime, }, nil @@ -464,8 +476,11 @@ func (medium *Medium) Append(filePath string) (goio.WriteCloser, error) { Key: aws.String(key), }) if err == nil { - existing, _ = goio.ReadAll(out.Body) - out.Body.Close() + content, readErr := readAllString(out.Body) + if readErr != nil { + return nil, core.E("s3.Append", core.Concat("failed to read existing object: ", key), readErr) + } + existing = []byte(content) } return &s3WriteCloser{ @@ -630,7 +645,7 @@ func (writer *s3WriteCloser) Close() error { _, err := writer.medium.client.PutObject(context.Background(), &awss3.PutObjectInput{ Bucket: aws.String(writer.medium.bucket), Key: aws.String(writer.key), - Body: bytes.NewReader(writer.data), + Body: core.NewReader(string(writer.data)), }) if err != nil { return core.E("s3.writeCloser.Close", "failed to upload on close", err) diff --git a/s3/s3_test.go b/s3/s3_test.go index bd4bc15..b76b60a 100644 --- a/s3/s3_test.go +++ b/s3/s3_test.go @@ -6,7 +6,7 @@ import ( goio "io" "io/fs" "sort" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural per RFC §5.1 "testing" "time" diff --git a/sigil/crypto_sigil.go b/sigil/crypto_sigil.go index 71ecdc9..f9c0e39 100644 --- a/sigil/crypto_sigil.go +++ b/sigil/crypto_sigil.go @@ -4,6 +4,7 @@ package sigil import ( + "crypto/cipher" "crypto/rand" "crypto/sha256" "encoding/binary" @@ -17,6 +18,9 @@ var ( // Example: errors.Is(err, sigil.InvalidKeyError) InvalidKeyError = 
core.E("sigil.InvalidKeyError", "invalid key size, must be 32 bytes", nil) + // Example: errors.Is(err, sigil.InvalidNonceError) + InvalidNonceError = core.E("sigil.InvalidNonceError", "invalid nonce argument; use PreObfuscator or nil", nil) + // Example: errors.Is(err, sigil.CiphertextTooShortError) CiphertextTooShortError = core.E("sigil.CiphertextTooShortError", "ciphertext too short", nil) @@ -183,6 +187,7 @@ func (obfuscator *ShuffleMaskObfuscator) deriveMask(entropy []byte, length int) // Example: ) type ChaChaPolySigil struct { key []byte + nonceSize int obfuscator PreObfuscator randomReader goio.Reader } @@ -194,6 +199,12 @@ func (s *ChaChaPolySigil) Key() []byte { return result } +// Nonce returns nil. Encryption nonces are generated per message by In and +// prepended to the ciphertext. +func (s *ChaChaPolySigil) Nonce() []byte { + return nil +} + // Example: ob := cipherSigil.Obfuscator() func (s *ChaChaPolySigil) Obfuscator() PreObfuscator { return s.obfuscator @@ -204,10 +215,17 @@ func (s *ChaChaPolySigil) SetObfuscator(obfuscator PreObfuscator) { s.obfuscator = obfuscator } -// Example: cipherSigil, _ := sigil.NewChaChaPolySigil([]byte("0123456789abcdef0123456789abcdef"), nil) -// Example: ciphertext, _ := cipherSigil.In([]byte("payload")) -// Example: plaintext, _ := cipherSigil.Out(ciphertext) -func NewChaChaPolySigil(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, error) { +// NewChaChaPolySigil creates a ChaCha20-Poly1305 sigil. The nonce argument is +// retained for API compatibility; pass nil for the default pre-obfuscator or a +// PreObfuscator for custom pre-obfuscation. Fixed []byte nonces are rejected: +// ChaCha20-Poly1305 catastrophically fails under nonce reuse, leaking plaintext +// relationships and enabling authenticated forgeries. In always generates a +// fresh random nonce and prepends it to the ciphertext. +// +// WARNING: when using a custom PreObfuscator, nonce uniqueness remains the +// caller's responsibility. 
The PreObfuscator must treat the supplied entropy as +// a per-message nonce and must not introduce deterministic nonce reuse. +func NewChaChaPolySigil(key []byte, nonce any) (*ChaChaPolySigil, error) { if len(key) != 32 { return nil, InvalidKeyError } @@ -215,15 +233,28 @@ func NewChaChaPolySigil(key []byte, obfuscator PreObfuscator) (*ChaChaPolySigil, keyCopy := make([]byte, 32) copy(keyCopy, key) - if obfuscator == nil { - obfuscator = &XORObfuscator{} - } - - return &ChaChaPolySigil{ + sigil := &ChaChaPolySigil{ key: keyCopy, - obfuscator: obfuscator, + nonceSize: chacha20poly1305.NonceSizeX, randomReader: rand.Reader, - }, nil + } + + switch value := nonce.(type) { + case nil: + sigil.obfuscator = &XORObfuscator{} + case []byte: + return nil, core.E("sigil.NewChaChaPolySigil", "fixed-nonce []byte path removed; use PreObfuscator or nil", InvalidNonceError) + case PreObfuscator: + if value == nil { + sigil.obfuscator = &XORObfuscator{} + return sigil, nil + } + sigil.obfuscator = value + default: + return nil, core.E("sigil.NewChaChaPolySigil", "nonce must be PreObfuscator or nil", InvalidNonceError) + } + + return sigil, nil } func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { @@ -234,7 +265,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { return nil, nil } - aead, err := chacha20poly1305.NewX(s.key) + aead, err := s.newAEAD() if err != nil { return nil, core.E("sigil.ChaChaPolySigil.In", "create cipher", err) } @@ -250,7 +281,7 @@ func (s *ChaChaPolySigil) In(data []byte) ([]byte, error) { obfuscated := data if s.obfuscator != nil { - obfuscated = s.obfuscator.Obfuscate(data, nonce) + obfuscated = s.obfuscator.Obfuscate(data, cloneBytes(nonce)) } ciphertext := aead.Seal(nonce, nonce, obfuscated, nil) @@ -266,7 +297,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return nil, nil } - aead, err := chacha20poly1305.NewX(s.key) + aead, err := s.newAEAD() if err != nil { return nil, core.E("sigil.ChaChaPolySigil.Out", 
"create cipher", err) } @@ -288,7 +319,7 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { plaintext := obfuscated if s.obfuscator != nil { - plaintext = s.obfuscator.Deobfuscate(obfuscated, nonce) + plaintext = s.obfuscator.Deobfuscate(obfuscated, cloneBytes(nonce)) } if len(plaintext) == 0 { @@ -298,6 +329,30 @@ func (s *ChaChaPolySigil) Out(data []byte) ([]byte, error) { return plaintext, nil } +func (s *ChaChaPolySigil) newAEAD() (cipher.AEAD, error) { + switch s.activeNonceSize() { + case chacha20poly1305.NonceSize: + return chacha20poly1305.New(s.key) + case chacha20poly1305.NonceSizeX: + return chacha20poly1305.NewX(s.key) + default: + return nil, InvalidNonceError + } +} + +func (s *ChaChaPolySigil) activeNonceSize() int { + if s.nonceSize != 0 { + return s.nonceSize + } + return chacha20poly1305.NonceSizeX +} + +func cloneBytes(data []byte) []byte { + result := make([]byte, len(data)) + copy(result, data) + return result +} + // Example: nonce, _ := sigil.NonceFromCiphertext(ciphertext) func NonceFromCiphertext(ciphertext []byte) ([]byte, error) { nonceSize := chacha20poly1305.NonceSizeX diff --git a/sigil/crypto_sigil_test.go b/sigil/crypto_sigil_test.go index d4f96a8..fd9121e 100644 --- a/sigil/crypto_sigil_test.go +++ b/sigil/crypto_sigil_test.go @@ -177,6 +177,39 @@ func TestCryptoSigil_NewChaChaPolySigil_EmptyKey_Bad(t *testing.T) { assert.ErrorIs(t, err, InvalidKeyError) } +func TestCryptoSigil_NewChaChaPolySigil_FixedNonceBytes_Bad(t *testing.T) { + key := make([]byte, 32) + _, _ = rand.Read(key) + + cases := map[string][]byte{ + "non-empty": []byte("0123456789abcdef01234567"), + "empty": []byte{}, + "typed nil": nil, + } + for name, nonce := range cases { + t.Run(name, func(t *testing.T) { + _, err := NewChaChaPolySigil(key, nonce) + assert.ErrorIs(t, err, InvalidNonceError) + if err == nil { + t.Fatal("expected invalid nonce error") + } + assert.Contains(t, err.Error(), "fixed-nonce []byte path removed; use PreObfuscator or nil") + 
}) + } +} + +func TestCryptoSigil_NewChaChaPolySigil_StringNonce_Bad(t *testing.T) { + key := make([]byte, 32) + _, _ = rand.Read(key) + + _, err := NewChaChaPolySigil(key, "fixed nonce") + assert.ErrorIs(t, err, InvalidNonceError) + if err == nil { + t.Fatal("expected invalid nonce error") + } + assert.Contains(t, err.Error(), "nonce must be PreObfuscator or nil") +} + func TestCryptoSigil_NewChaChaPolySigil_CustomObfuscator_Good(t *testing.T) { key := make([]byte, 32) _, _ = rand.Read(key) @@ -273,11 +306,17 @@ func TestCryptoSigil_ChaChaPolySigil_DifferentCiphertextsPerCall_Good(t *testing cipherSigil, err := NewChaChaPolySigil(key, nil) require.NoError(t, err) + cipherSigil.randomReader = &limitReader{ + data: append(bytes.Repeat([]byte{0x01}, 24), bytes.Repeat([]byte{0x02}, 24)...), + } plaintext := []byte("same input") - ct1, _ := cipherSigil.In(plaintext) - ct2, _ := cipherSigil.In(plaintext) + ct1, err := cipherSigil.In(plaintext) + require.NoError(t, err) + ct2, err := cipherSigil.In(plaintext) + require.NoError(t, err) + assert.NotEqual(t, ct1[:24], ct2[:24]) assert.NotEqual(t, ct1, ct2) } diff --git a/sigil/sigil_test.go b/sigil/sigil_test.go index 93565b9..e838ded 100644 --- a/sigil/sigil_test.go +++ b/sigil/sigil_test.go @@ -284,6 +284,21 @@ func TestSigil_NewSigil_Bad(t *testing.T) { assert.Contains(t, err.Error(), "unknown sigil name") } +func TestSigil_NewSigil_KeylessScheme_Good(t *testing.T) { + sigilValue, err := NewSigil("hex") + require.NoError(t, err) + assert.NotNil(t, sigilValue) +} + +func TestSigil_NewSigil_ChaChaPoly1305RequiresKey_Bad(t *testing.T) { + _, err := NewSigil("chacha20poly1305") + assert.Error(t, err) + if err == nil { + t.Fatal("expected key material error") + } + assert.Contains(t, err.Error(), "scheme requires key material; use NewChaChaPolySigil") +} + func TestSigil_NewSigil_EmptyName_Bad(t *testing.T) { _, err := NewSigil("") assert.Error(t, err) diff --git a/sigil/sigils.go b/sigil/sigils.go index ec313cc..c56bce2 
100644 --- a/sigil/sigils.go +++ b/sigil/sigils.go @@ -1,8 +1,7 @@ package sigil import ( - "bytes" - "compress/gzip" + "compress/gzip" // AX-6-exception: gzip transport encoding has no core equivalent. "crypto" "crypto/md5" "crypto/sha1" @@ -10,8 +9,7 @@ import ( "crypto/sha512" "encoding/base64" "encoding/hex" - goio "io" - "io/fs" + "io/fs" // AX-6-exception: fs sentinel errors have no core equivalent. core "dappco.re/go/core" "golang.org/x/crypto/blake2b" @@ -21,6 +19,11 @@ import ( "golang.org/x/crypto/sha3" ) +const ( + opGzipOut = "sigil.GzipSigil.Out" + errReadGzipPayload = "read gzip payload" +) + // Example: reverseSigil, _ := sigil.NewSigil("reverse") type ReverseSigil struct{} @@ -83,14 +86,38 @@ func (sigil *Base64Sigil) Out(data []byte) ([]byte, error) { // Example: gzipSigil, _ := sigil.NewSigil("gzip") type GzipSigil struct { - outputWriter goio.Writer + outputWriter sigilWriter +} + +type sigilWriter interface { + Write([]byte) (int, error) +} + +type sigilHash interface { + sigilWriter + Sum([]byte) []byte +} + +// AX-6-exception: core.NewBuffer is unavailable in the pinned core module; this is +// the minimal intrinsic writer needed by compress/gzip. +type sigilBuffer struct { + data []byte +} + +func (buffer *sigilBuffer) Write(data []byte) (int, error) { + buffer.data = append(buffer.data, data...) 
+ return len(data), nil +} + +func (buffer *sigilBuffer) Bytes() []byte { + return buffer.data } func (sigil *GzipSigil) In(data []byte) ([]byte, error) { if data == nil { return nil, nil } - var buffer bytes.Buffer + var buffer sigilBuffer outputWriter := sigil.outputWriter if outputWriter == nil { outputWriter = &buffer @@ -114,16 +141,19 @@ func (sigil *GzipSigil) Out(data []byte) ([]byte, error) { if data == nil { return nil, nil } - gzipReader, err := gzip.NewReader(bytes.NewReader(data)) + gzipReader, err := gzip.NewReader(core.NewReader(string(data))) if err != nil { - return nil, core.E("sigil.GzipSigil.Out", "open gzip reader", err) + return nil, core.E(opGzipOut, "open gzip reader", err) } defer gzipReader.Close() - out, err := goio.ReadAll(gzipReader) - if err != nil { - return nil, core.E("sigil.GzipSigil.Out", "read gzip payload", err) + out := core.ReadAll(gzipReader) + if !out.OK { + if err, ok := out.Value.(error); ok { + return nil, core.E(opGzipOut, errReadGzipPayload, err) + } + return nil, core.E(opGzipOut, errReadGzipPayload, fs.ErrInvalid) } - return out, nil + return []byte(out.Value.(string)), nil } // Example: jsonSigil := &sigil.JSONSigil{Indent: true} @@ -166,7 +196,7 @@ func NewHashSigil(hashAlgorithm crypto.Hash) *HashSigil { } func (sigil *HashSigil) In(data []byte) ([]byte, error) { - var hasher goio.Writer + var hasher sigilHash switch sigil.Hash { case crypto.MD4: hasher = md4.New() @@ -211,17 +241,25 @@ func (sigil *HashSigil) In(data []byte) ([]byte, error) { if _, err := hasher.Write(data); err != nil { return nil, core.E("sigil.HashSigil.In", "write hash input", err) } - return hasher.(interface{ Sum([]byte) []byte }).Sum(nil), nil + return hasher.Sum(nil), nil } func (sigil *HashSigil) Out(data []byte) ([]byte, error) { return data, nil } +// NewSigil constructs sigils that do not require caller-provided construction +// material. ChaCha20-Poly1305 requires key material at construction; use +// NewChaChaPolySigil instead. 
+// // Example: hexSigil, _ := sigil.NewSigil("hex") // Example: gzipSigil, _ := sigil.NewSigil("gzip") // Example: transformed, _ := sigil.Transmute([]byte("payload"), []sigil.Sigil{hexSigil, gzipSigil}) func NewSigil(sigilName string) (Sigil, error) { + if sigilName == "chacha20poly1305" { + return nil, core.E("sigil.NewSigil", "chacha20poly1305 scheme requires key material; use NewChaChaPolySigil", fs.ErrInvalid) + } + switch sigilName { case "reverse": return &ReverseSigil{}, nil diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go index 220a620..ba733b8 100644 --- a/sqlite/sqlite.go +++ b/sqlite/sqlite.go @@ -3,15 +3,13 @@ package sqlite import ( - "bytes" "database/sql" - goio "io" - "io/fs" - "path" - "time" + goio "io" // AX-6-exception: io interface types have no core equivalent; io.EOF preserves stream semantics. + "io/fs" // AX-6-exception: fs interface types have no core equivalent. + "time" // AX-6-exception: filesystem metadata timestamps have no core equivalent. core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" _ "modernc.org/sqlite" ) @@ -108,7 +106,7 @@ func (medium *Medium) Close() error { } func normaliseEntryPath(filePath string) string { - clean := path.Clean("/" + filePath) + clean := core.CleanPath("/"+filePath, "/") if clean == "/" { return "" } @@ -452,7 +450,7 @@ func (medium *Medium) Stat(filePath string) (fs.FileInfo, error) { return nil, core.E("sqlite.Stat", core.Concat("query failed: ", key), err) } - name := path.Base(key) + name := core.PathBase(key) return &fileInfo{ name: name, size: int64(len(content)), @@ -487,7 +485,7 @@ func (medium *Medium) Open(filePath string) (fs.File, error) { } return &sqliteFile{ - name: path.Base(key), + name: core.PathBase(key), content: content, mode: fs.FileMode(mode), modTime: mtime, @@ -550,7 +548,10 @@ func (medium *Medium) ReadStream(filePath string) (goio.ReadCloser, error) { return nil, core.E("sqlite.ReadStream", core.Concat("path is a directory: ", key), 
fs.ErrInvalid) } - return goio.NopCloser(bytes.NewReader(content)), nil + return &sqliteFile{ + name: core.PathBase(key), + content: content, + }, nil } // Example: writer, _ := medium.WriteStream("logs/app.log") diff --git a/store/medium.go b/store/medium.go index e085abd..bea16b0 100644 --- a/store/medium.go +++ b/store/medium.go @@ -8,7 +8,7 @@ import ( "time" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" + coreio "dappco.re/go/io" ) // ErrNotDirectory is returned by List when the path resolves to a key rather than a group. @@ -345,9 +345,9 @@ func (file *keyValueFile) Close() error { return nil } type keyValueWriteCloser struct { keyValueStore *KeyValueStore - group string - key string - data []byte + group string + key string + data []byte } func (writer *keyValueWriteCloser) Write(data []byte) (int, error) { diff --git a/store/store.go b/store/store.go index 31550bc..26c5251 100644 --- a/store/store.go +++ b/store/store.go @@ -4,7 +4,7 @@ import ( "database/sql" "errors" "io/fs" - "text/template" + "text/template" // Note: AX-6 intrinsic - structural for KeyValueStore.Render templating; core exposes no template primitive. core "dappco.re/go/core" _ "modernc.org/sqlite" @@ -14,7 +14,7 @@ import ( // Callers test for it with errors.Is. It is defined with errors.New so that // identity comparison works correctly across package boundaries. // Example: _, err := keyValueStore.Get("app", "theme"); errors.Is(err, store.NotFoundError) -var NotFoundError = errors.New("key not found") +var NotFoundError = errors.New("store: key not found") // Example: keyValueStore, _ := store.New(store.Options{Path: ":memory:"}) type KeyValueStore struct { diff --git a/tests/cli/io/Taskfile.yaml b/tests/cli/io/Taskfile.yaml new file mode 100644 index 0000000..d817e25 --- /dev/null +++ b/tests/cli/io/Taskfile.yaml @@ -0,0 +1,38 @@ +version: "3" + +tasks: + build: + dir: ../../.. + cmds: + - go build ./... + + test: + dir: ../../.. + cmds: + - go test -count=1 -race ./... 
+ + vet: + dir: ../../.. + cmds: + - go vet ./... + + test-unit: + dir: ../../.. + cmds: + - go test -count=1 -race ./... -run Unit + + test-integration: + dir: ../../.. + cmds: + - | + if [ -z "${S3_ENDPOINT:-}" ] && [ -z "${AWS_ENDPOINT_URL_S3:-}" ] && [ -z "${AWS_ENDPOINT_URL:-}" ]; then + echo "Skipping integration tests: S3 endpoint not set" + exit 0 + fi + go test -count=1 -tags integration ./... -run Integration + + default: + deps: + - build + - test + - vet diff --git a/workspace/command.go b/workspace/command.go new file mode 100644 index 0000000..44ebfd2 --- /dev/null +++ b/workspace/command.go @@ -0,0 +1,40 @@ +package workspace + +// Action values for WorkspaceCommand. +const ( + WorkspaceCreateAction = "create" + WorkspaceSwitchAction = "switch" + WorkspaceReadAction = "read" + WorkspaceWriteAction = "write" + WorkspaceListAction = "list" +) + +const ( + legacyWorkspaceCreateAction = "workspace.create" + legacyWorkspaceSwitchAction = "workspace.switch" +) + +// WorkspaceCommand is the RFC §5 DTO for workspace command dispatch. +type WorkspaceCommand struct { + Action string + Workspace string + Path string + Content string + + // Legacy fields are kept so the existing encrypted workspace service can + // continue handling its current Core IPC messages while this package exposes + // the RFC §5 command shape. 
+ Identifier string + Password string + WorkspaceID string +} + +func (command WorkspaceCommand) workspaceName() string { + if command.Workspace != "" { + return command.Workspace + } + if command.WorkspaceID != "" { + return command.WorkspaceID + } + return command.Identifier +} diff --git a/workspace/command_test.go b/workspace/command_test.go new file mode 100644 index 0000000..5389f07 --- /dev/null +++ b/workspace/command_test.go @@ -0,0 +1,37 @@ +package workspace + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWorkspaceCommand_Good(t *testing.T) { + command := WorkspaceCommand{ + Action: WorkspaceWriteAction, + Workspace: "alpha", + Path: "notes/todo.txt", + Content: "ship it", + } + + assert.Equal(t, "write", command.Action) + assert.Equal(t, "alpha", command.workspaceName()) + assert.Equal(t, "notes/todo.txt", command.Path) + assert.Equal(t, "ship it", command.Content) +} + +func TestWorkspaceCommand_Bad_EmptyWorkspace(t *testing.T) { + command := WorkspaceCommand{Action: WorkspaceReadAction} + + assert.Empty(t, command.workspaceName()) +} + +func TestWorkspaceCommand_Ugly_LegacyWorkspaceFields(t *testing.T) { + assert.Equal(t, "legacy-id", WorkspaceCommand{WorkspaceID: "legacy-id"}.workspaceName()) + assert.Equal(t, "identifier", WorkspaceCommand{Identifier: "identifier"}.workspaceName()) + assert.Equal(t, "workspace", WorkspaceCommand{ + Workspace: "workspace", + WorkspaceID: "legacy-id", + Identifier: "identifier", + }.workspaceName()) +} diff --git a/workspace/service.go b/workspace/service.go index 975a701..c672d75 100644 --- a/workspace/service.go +++ b/workspace/service.go @@ -2,19 +2,21 @@ package workspace import ( "crypto/sha256" - goio "io" + "encoding/hex" + "hash" + goio "io" // Note: AX-6 intrinsic — io.ReadFull for HKDF key derivation; no core wrapper for ReadFull semantics. 
"io/fs" - "sync" + "sync" // Note: AX-6 — internal concurrency primitive; structural per RFC §5.1 core "dappco.re/go/core" "golang.org/x/crypto/hkdf" - "dappco.re/go/core/io" - "dappco.re/go/core/io/sigil" + "dappco.re/go/io" + "dappco.re/go/io/sigil" ) // Example: service, _ := workspace.New(workspace.Options{KeyPairProvider: keyPairProvider}) -type Workspace interface { +type EncryptedWorkspace interface { CreateWorkspace(identifier, passphrase string) (string, error) SwitchWorkspace(workspaceID string) error ReadWorkspaceFile(workspaceFilePath string) (string, error) @@ -26,17 +28,44 @@ type KeyPairProvider interface { CreateKeyPair(identifier, passphrase string) (string, error) } -const ( - WorkspaceCreateAction = "workspace.create" - WorkspaceSwitchAction = "workspace.switch" -) +// newWorkspaceSHA256Hash adapts core.SHA256 for HKDF's hash.Hash API. +func newWorkspaceSHA256Hash() hash.Hash { + return &workspaceSHA256Hash{} +} + +func workspaceSHA256(data []byte) [32]byte { + return sha256.Sum256(data) +} + +func workspaceSHA256Hex(data []byte) string { + sum := workspaceSHA256(data) + return hex.EncodeToString(sum[:]) +} + +type workspaceSHA256Hash struct { + data []byte +} + +func (hash *workspaceSHA256Hash) Write(data []byte) (int, error) { + hash.data = append(hash.data, data...) + return len(data), nil +} + +func (hash *workspaceSHA256Hash) Sum(prefix []byte) []byte { + sum := workspaceSHA256(hash.data) + return append(prefix, sum[:]...) 
+} + +func (hash *workspaceSHA256Hash) Reset() { + hash.data = hash.data[:0] +} -// Example: command := WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"} -type WorkspaceCommand struct { - Action string - Identifier string - Password string - WorkspaceID string +func (hash *workspaceSHA256Hash) Size() int { + return 32 +} + +func (hash *workspaceSHA256Hash) BlockSize() int { + return 64 } // Example: service, _ := workspace.New(workspace.Options{ @@ -62,7 +91,7 @@ type Service struct { stateLock sync.RWMutex } -var _ Workspace = (*Service)(nil) +var _ EncryptedWorkspace = (*Service)(nil) // Example: service, _ := workspace.New(workspace.Options{ // Example: KeyPairProvider: keyPairProvider, @@ -118,8 +147,7 @@ func (service *Service) CreateWorkspace(identifier, passphrase string) (string, return "", core.E("workspace.CreateWorkspace", "key pair provider not available", fs.ErrInvalid) } - hash := sha256.Sum256([]byte(identifier)) - workspaceID := hex.EncodeToString(hash[:]) + workspaceID := workspaceSHA256Hex([]byte(identifier)) workspaceDirectory, err := service.resolveWorkspaceDirectory("workspace.CreateWorkspace", workspaceID) if err != nil { return "", err @@ -191,7 +219,7 @@ func (service *Service) workspaceCipherSigil(operation string) (*sigil.ChaChaPol } // Use HKDF (RFC 5869) for key derivation: it is purpose-bound, domain-separated, // and more resistant to length-extension attacks than a bare SHA-256 hash. 
- hkdfReader := hkdf.New(sha256.New, []byte(rawKey), nil, []byte("workspace-cipher-key")) + hkdfReader := hkdf.New(newWorkspaceSHA256Hash, []byte(rawKey), nil, []byte("workspace-cipher-key")) derived := make([]byte, 32) if _, err := goio.ReadFull(hkdfReader, derived); err != nil { return nil, core.E(operation, "failed to derive workspace key", err) @@ -250,15 +278,22 @@ func (service *Service) WriteWorkspaceFile(workspaceFilePath, content string) er // Example: commandResult := service.HandleWorkspaceCommand(WorkspaceCommand{Action: WorkspaceCreateAction, Identifier: "alice", Password: "pass123"}) func (service *Service) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { switch command.Action { - case WorkspaceCreateAction: - passphrase := command.Password - workspaceID, err := service.CreateWorkspace(command.Identifier, passphrase) + case WorkspaceCreateAction, legacyWorkspaceCreateAction: + identifier := command.workspaceName() + if identifier == "" { + return core.Result{}.New(core.E("workspace.HandleWorkspaceCommand", "workspace identifier is required", fs.ErrInvalid)) + } + workspaceID, err := service.CreateWorkspace(identifier, command.Password) if err != nil { return core.Result{}.New(err) } return core.Result{Value: workspaceID, OK: true} - case WorkspaceSwitchAction: - if err := service.SwitchWorkspace(command.WorkspaceID); err != nil { + case WorkspaceSwitchAction, legacyWorkspaceSwitchAction: + workspaceID := command.workspaceName() + if workspaceID == "" { + return core.Result{}.New(core.E("workspace.HandleWorkspaceCommand", "workspace id is required", fs.ErrInvalid)) + } + if err := service.SwitchWorkspace(workspaceID); err != nil { return core.Result{}.New(err) } return core.Result{OK: true} diff --git a/workspace/service_test.go b/workspace/service_test.go index 5f0a460..2eb5c87 100644 --- a/workspace/service_test.go +++ b/workspace/service_test.go @@ -5,7 +5,7 @@ import ( "testing" core "dappco.re/go/core" - coreio "dappco.re/go/core/io" 
+ coreio "dappco.re/go/io" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/workspace/workspace.go b/workspace/workspace.go new file mode 100644 index 0000000..9276c67 --- /dev/null +++ b/workspace/workspace.go @@ -0,0 +1,421 @@ +package workspace + +import ( + goio "io" + "io/fs" + "path" + "strings" + "sync" + + core "dappco.re/go/core" + coreio "dappco.re/go/io" +) + +const ( + opNewWorkspace = "workspace.NewWorkspace" + opCreateWorkspace = "workspace.CreateWorkspace" + opSwitchWorkspace = "workspace.SwitchWorkspace" + + errWorkspaceServiceNotConfigured = "workspace service is not configured" +) + +// Workspace is the RFC §5 medium-backed workspace service. +type Workspace struct { + medium coreio.Medium + base string + current string + mu sync.RWMutex +} + +// NewWorkspace creates a workspace service backed by medium under baseSubpath. +func NewWorkspace(medium coreio.Medium, baseSubpath string) (*Workspace, error) { + if medium == nil { + return nil, core.E(opNewWorkspace, "storage medium is required", fs.ErrInvalid) + } + base, err := cleanMediumSubpath(opNewWorkspace, baseSubpath, true) + if err != nil { + return nil, err + } + if base != "" { + if err := medium.EnsureDir(base); err != nil { + return nil, core.E(opNewWorkspace, "failed to ensure base workspace directory", err) + } + } + return &Workspace{ + medium: medium, + base: base, + }, nil +} + +// CreateWorkspace creates a named workspace directory and returns a medium scoped to it. 
+func (workspace *Workspace) CreateWorkspace(name string) (coreio.Medium, error) { + if workspace == nil { + return nil, core.E(opCreateWorkspace, errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspace.mu.Lock() + defer workspace.mu.Unlock() + + workspacePath, err := workspace.workspacePath(opCreateWorkspace, name) + if err != nil { + return nil, err + } + if workspace.medium.IsDir(workspacePath) { + return nil, core.E(opCreateWorkspace, core.Concat("workspace already exists: ", name), fs.ErrExist) + } + if workspace.medium.Exists(workspacePath) { + return nil, core.E(opCreateWorkspace, core.Concat("workspace path is not a directory: ", name), fs.ErrExist) + } + if err := workspace.medium.EnsureDir(workspacePath); err != nil { + return nil, core.E(opCreateWorkspace, "failed to create workspace directory", err) + } + return workspace.scopedMedium(workspacePath), nil +} + +// SwitchWorkspace records the named workspace as the current workspace. +func (workspace *Workspace) SwitchWorkspace(name string) error { + if workspace == nil { + return core.E(opSwitchWorkspace, errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspace.mu.Lock() + defer workspace.mu.Unlock() + + workspaceName, workspacePath, err := workspace.workspaceNameAndPath(opSwitchWorkspace, name) + if err != nil { + return err + } + if !workspace.medium.IsDir(workspacePath) { + return core.E(opSwitchWorkspace, core.Concat("workspace not found: ", workspaceName), fs.ErrNotExist) + } + workspace.current = workspaceName + return nil +} + +// CurrentWorkspace returns the workspace selected by SwitchWorkspace. +func (workspace *Workspace) CurrentWorkspace() string { + if workspace == nil { + return "" + } + workspace.mu.RLock() + defer workspace.mu.RUnlock() + return workspace.current +} + +// ReadWorkspaceFile reads a file from the named workspace. 
+func (workspace *Workspace) ReadWorkspaceFile(name, filePath string) (string, error) { + if workspace == nil { + return "", core.E("workspace.ReadWorkspaceFile", errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspace.mu.RLock() + defer workspace.mu.RUnlock() + + mediumPath, err := workspace.workspaceFilePath("workspace.ReadWorkspaceFile", name, filePath, false) + if err != nil { + return "", err + } + return workspace.medium.Read(mediumPath) +} + +// WriteWorkspaceFile writes a file into the named workspace. +func (workspace *Workspace) WriteWorkspaceFile(name, filePath, content string) error { + if workspace == nil { + return core.E("workspace.WriteWorkspaceFile", errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspace.mu.Lock() + defer workspace.mu.Unlock() + + mediumPath, err := workspace.workspaceFilePath("workspace.WriteWorkspaceFile", name, filePath, false) + if err != nil { + return err + } + return workspace.medium.Write(mediumPath, content) +} + +// ListWorkspaceFiles lists entries under a workspace-relative directory. +func (workspace *Workspace) ListWorkspaceFiles(name, directoryPath string) ([]fs.DirEntry, error) { + if workspace == nil { + return nil, core.E("workspace.ListWorkspaceFiles", errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspace.mu.RLock() + defer workspace.mu.RUnlock() + + mediumPath, err := workspace.workspaceFilePath("workspace.ListWorkspaceFiles", name, directoryPath, true) + if err != nil { + return nil, err + } + return workspace.medium.List(mediumPath) +} + +// HandleWorkspaceCommand dispatches an RFC §5 WorkspaceCommand. 
+func (workspace *Workspace) HandleWorkspaceCommand(command WorkspaceCommand) core.Result { + switch strings.ToLower(strings.TrimSpace(command.Action)) { + case WorkspaceCreateAction, legacyWorkspaceCreateAction: + medium, err := workspace.CreateWorkspace(command.workspaceName()) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: medium, OK: true} + case WorkspaceSwitchAction, legacyWorkspaceSwitchAction: + if err := workspace.SwitchWorkspace(command.workspaceName()); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} + case WorkspaceReadAction: + content, err := workspace.ReadWorkspaceFile(command.workspaceName(), command.Path) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: content, OK: true} + case WorkspaceWriteAction: + if err := workspace.WriteWorkspaceFile(command.workspaceName(), command.Path, command.Content); err != nil { + return core.Result{}.New(err) + } + return core.Result{OK: true} + case WorkspaceListAction: + entries, err := workspace.ListWorkspaceFiles(command.workspaceName(), command.Path) + if err != nil { + return core.Result{}.New(err) + } + return core.Result{Value: entries, OK: true} + default: + return core.Result{}.New(core.E("workspace.HandleWorkspaceCommand", core.Concat("unsupported action: ", command.Action), fs.ErrInvalid)) + } +} + +func (workspace *Workspace) workspaceFilePath(operation, name, filePath string, allowEmptyPath bool) (string, error) { + _, workspacePath, err := workspace.workspaceNameAndPath(operation, name) + if err != nil { + return "", err + } + if !workspace.medium.IsDir(workspacePath) { + return "", core.E(operation, core.Concat("workspace not found: ", name), fs.ErrNotExist) + } + cleanFilePath, err := cleanMediumSubpath(operation, filePath, allowEmptyPath) + if err != nil { + return "", err + } + return joinMediumSubpaths(workspacePath, cleanFilePath), nil +} + +func (workspace *Workspace) workspacePath(operation, name 
string) (string, error) { + _, workspacePath, err := workspace.workspaceNameAndPath(operation, name) + return workspacePath, err +} + +func (workspace *Workspace) workspaceNameAndPath(operation, name string) (string, string, error) { + if workspace == nil || workspace.medium == nil { + return "", "", core.E(operation, errWorkspaceServiceNotConfigured, fs.ErrInvalid) + } + workspaceName, err := cleanWorkspaceName(operation, name) + if err != nil { + return "", "", err + } + return workspaceName, joinMediumSubpaths(workspace.base, workspaceName), nil +} + +func (workspace *Workspace) scopedMedium(root string) coreio.Medium { + return &scopedMedium{ + medium: workspace.medium, + root: root, + } +} + +func cleanWorkspaceName(operation, name string) (string, error) { + name = strings.TrimSpace(name) + if name == "" || name == "." || name == ".." { + return "", core.E(operation, "workspace name is required", fs.ErrInvalid) + } + if strings.Contains(name, "/") || strings.Contains(name, "\\") { + return "", core.E(operation, core.Concat("workspace name contains path separator: ", name), fs.ErrPermission) + } + return name, nil +} + +func cleanMediumSubpath(operation, subpath string, allowEmpty bool) (string, error) { + subpath = strings.TrimSpace(strings.ReplaceAll(subpath, "\\", "/")) + if subpath == "" || subpath == "." { + if allowEmpty { + return "", nil + } + return "", core.E(operation, "path is required", fs.ErrInvalid) + } + if strings.HasPrefix(subpath, "/") { + return "", core.E(operation, core.Concat("absolute path rejected: ", subpath), fs.ErrPermission) + } + for _, part := range strings.Split(subpath, "/") { + if part == ".." { + return "", core.E(operation, core.Concat("path traversal rejected: ", subpath), fs.ErrPermission) + } + } + cleaned := path.Clean(subpath) + if cleaned == "." 
{ + if allowEmpty { + return "", nil + } + return "", core.E(operation, "path is required", fs.ErrInvalid) + } + return cleaned, nil +} + +func joinMediumSubpaths(parts ...string) string { + filtered := make([]string, 0, len(parts)) + for _, part := range parts { + if part != "" { + filtered = append(filtered, part) + } + } + if len(filtered) == 0 { + return "" + } + return path.Join(filtered...) +} + +type scopedMedium struct { + medium coreio.Medium + root string +} + +var _ coreio.Medium = (*scopedMedium)(nil) + +func (medium *scopedMedium) scopedPath(operation, entryPath string, allowRoot bool) (string, error) { + cleanPath, err := cleanMediumSubpath(operation, entryPath, allowRoot) + if err != nil { + return "", err + } + return joinMediumSubpaths(medium.root, cleanPath), nil +} + +func (medium *scopedMedium) Read(entryPath string) (string, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.Read", entryPath, false) + if err != nil { + return "", err + } + return medium.medium.Read(scopedPath) +} + +func (medium *scopedMedium) Write(entryPath, content string) error { + return medium.WriteMode(entryPath, content, 0644) +} + +func (medium *scopedMedium) WriteMode(entryPath, content string, mode fs.FileMode) error { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.WriteMode", entryPath, false) + if err != nil { + return err + } + return medium.medium.WriteMode(scopedPath, content, mode) +} + +func (medium *scopedMedium) EnsureDir(entryPath string) error { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.EnsureDir", entryPath, true) + if err != nil { + return err + } + return medium.medium.EnsureDir(scopedPath) +} + +func (medium *scopedMedium) IsFile(entryPath string) bool { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.IsFile", entryPath, false) + if err != nil { + return false + } + return medium.medium.IsFile(scopedPath) +} + +func (medium *scopedMedium) Delete(entryPath string) error { + 
scopedPath, err := medium.scopedPath("workspace.scopedMedium.Delete", entryPath, false) + if err != nil { + return err + } + return medium.medium.Delete(scopedPath) +} + +func (medium *scopedMedium) DeleteAll(entryPath string) error { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.DeleteAll", entryPath, false) + if err != nil { + return err + } + return medium.medium.DeleteAll(scopedPath) +} + +func (medium *scopedMedium) Rename(oldPath, newPath string) error { + scopedOldPath, err := medium.scopedPath("workspace.scopedMedium.Rename", oldPath, false) + if err != nil { + return err + } + scopedNewPath, err := medium.scopedPath("workspace.scopedMedium.Rename", newPath, false) + if err != nil { + return err + } + return medium.medium.Rename(scopedOldPath, scopedNewPath) +} + +func (medium *scopedMedium) List(entryPath string) ([]fs.DirEntry, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.List", entryPath, true) + if err != nil { + return nil, err + } + return medium.medium.List(scopedPath) +} + +func (medium *scopedMedium) Stat(entryPath string) (fs.FileInfo, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.Stat", entryPath, true) + if err != nil { + return nil, err + } + return medium.medium.Stat(scopedPath) +} + +func (medium *scopedMedium) Open(entryPath string) (fs.File, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.Open", entryPath, false) + if err != nil { + return nil, err + } + return medium.medium.Open(scopedPath) +} + +func (medium *scopedMedium) Create(entryPath string) (goio.WriteCloser, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.Create", entryPath, false) + if err != nil { + return nil, err + } + return medium.medium.Create(scopedPath) +} + +func (medium *scopedMedium) Append(entryPath string) (goio.WriteCloser, error) { + scopedPath, err := medium.scopedPath("workspace.scopedMedium.Append", entryPath, false) + if err != nil { + return 
nil, err
+	}
+	return medium.medium.Append(scopedPath)
+}
+
+func (medium *scopedMedium) ReadStream(entryPath string) (goio.ReadCloser, error) {
+	scopedPath, err := medium.scopedPath("workspace.scopedMedium.ReadStream", entryPath, false)
+	if err != nil {
+		return nil, err
+	}
+	return medium.medium.ReadStream(scopedPath)
+}
+
+func (medium *scopedMedium) WriteStream(entryPath string) (goio.WriteCloser, error) {
+	scopedPath, err := medium.scopedPath("workspace.scopedMedium.WriteStream", entryPath, false)
+	if err != nil {
+		return nil, err
+	}
+	return medium.medium.WriteStream(scopedPath)
+}
+
+func (medium *scopedMedium) Exists(entryPath string) bool {
+	scopedPath, err := medium.scopedPath("workspace.scopedMedium.Exists", entryPath, true)
+	if err != nil {
+		return false
+	}
+	return medium.medium.Exists(scopedPath)
+}
+
+func (medium *scopedMedium) IsDir(entryPath string) bool {
+	scopedPath, err := medium.scopedPath("workspace.scopedMedium.IsDir", entryPath, true)
+	if err != nil {
+		return false
+	}
+	return medium.medium.IsDir(scopedPath)
+}
diff --git a/workspace/workspace_test.go b/workspace/workspace_test.go
new file mode 100644
index 0000000..8262d4d
--- /dev/null
+++ b/workspace/workspace_test.go
@@ -0,0 +1,244 @@
+package workspace
+
+import (
+	"io/fs"
+	"testing"
+
+	coreio "dappco.re/go/io"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func newTestWorkspace(t *testing.T) (*Workspace, *coreio.MemoryMedium) {
+	t.Helper()
+
+	medium := coreio.NewMemoryMedium()
+	workspaceService, err := NewWorkspace(medium, "workspaces")
+	require.NoError(t, err)
+	return workspaceService, medium
+}
+
+func TestWorkspace_NewWorkspace_Good(t *testing.T) {
+	medium := coreio.NewMemoryMedium()
+
+	workspaceService, err := NewWorkspace(medium, "root")
+	require.NoError(t, err)
+
+	assert.NotNil(t, workspaceService)
+	assert.True(t, medium.IsDir("root"))
+}
+
+func TestWorkspace_NewWorkspace_Bad_InvalidBase(t *testing.T) {
+	workspaceService, err := NewWorkspace(coreio.NewMemoryMedium(), "../escape")
+
+	require.Error(t, err)
+	assert.Nil(t, workspaceService)
+}
+
+func TestWorkspace_NewWorkspace_Ugly_NilMediumRejected(t *testing.T) {
+	workspaceService, err := NewWorkspace(nil, "")
+
+	require.Error(t, err)
+	assert.Nil(t, workspaceService)
+}
+
+func TestWorkspace_CreateWorkspace_Good(t *testing.T) {
+	workspaceService, medium := newTestWorkspace(t)
+
+	scoped, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+	require.NotNil(t, scoped)
+
+	assert.True(t, medium.IsDir("workspaces/alpha"))
+	require.NoError(t, scoped.Write("note.txt", "hello"))
+	content, err := medium.Read("workspaces/alpha/note.txt")
+	require.NoError(t, err)
+	assert.Equal(t, "hello", content)
+}
+
+func TestWorkspace_CreateWorkspace_Bad_Duplicate(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	_, err = workspaceService.CreateWorkspace("alpha")
+
+	require.Error(t, err)
+}
+
+func TestWorkspace_CreateWorkspace_Ugly_TraversalName(t *testing.T) {
+	workspaceService, medium := newTestWorkspace(t)
+
+	_, err := workspaceService.CreateWorkspace("../escape")
+
+	require.Error(t, err)
+	assert.False(t, medium.IsDir("escape"))
+}
+
+func TestWorkspace_SwitchWorkspace_Good(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	err = workspaceService.SwitchWorkspace("alpha")
+
+	require.NoError(t, err)
+	assert.Equal(t, "alpha", workspaceService.CurrentWorkspace())
+}
+
+func TestWorkspace_SwitchWorkspace_Bad_Missing(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+
+	err := workspaceService.SwitchWorkspace("missing")
+
+	require.Error(t, err)
+	assert.Empty(t, workspaceService.CurrentWorkspace())
+}
+
+func TestWorkspace_SwitchWorkspace_Ugly_TraversalName(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+	require.NoError(t, workspaceService.SwitchWorkspace("alpha"))
+
+	err = workspaceService.SwitchWorkspace("alpha/../beta")
+
+	require.Error(t, err)
+	assert.Equal(t, "alpha", workspaceService.CurrentWorkspace())
+}
+
+func TestWorkspace_ReadWorkspaceFile_Good(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+	require.NoError(t, workspaceService.WriteWorkspaceFile("alpha", "notes/todo.txt", "hello"))
+
+	content, err := workspaceService.ReadWorkspaceFile("alpha", "notes/todo.txt")
+
+	require.NoError(t, err)
+	assert.Equal(t, "hello", content)
+}
+
+func TestWorkspace_ReadWorkspaceFile_Bad_MissingFile(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	_, err = workspaceService.ReadWorkspaceFile("alpha", "missing.txt")
+
+	require.Error(t, err)
+}
+
+func TestWorkspace_ReadWorkspaceFile_Ugly_TraversalPath(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	_, err = workspaceService.ReadWorkspaceFile("alpha", "../alpha/secret.txt")
+
+	require.Error(t, err)
+}
+
+func TestWorkspace_WriteWorkspaceFile_Good(t *testing.T) {
+	workspaceService, medium := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	err = workspaceService.WriteWorkspaceFile("alpha", "notes/todo.txt", "hello")
+
+	require.NoError(t, err)
+	content, err := medium.Read("workspaces/alpha/notes/todo.txt")
+	require.NoError(t, err)
+	assert.Equal(t, "hello", content)
+}
+
+func TestWorkspace_WriteWorkspaceFile_Bad_MissingWorkspace(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+
+	err := workspaceService.WriteWorkspaceFile("missing", "notes/todo.txt", "hello")
+
+	require.Error(t, err)
+}
+
+func TestWorkspace_WriteWorkspaceFile_Ugly_TraversalPath(t *testing.T) {
+	workspaceService, medium := newTestWorkspace(t)
+	_, err := workspaceService.CreateWorkspace("alpha")
+	require.NoError(t, err)
+
+	err = workspaceService.WriteWorkspaceFile("alpha", "../outside.txt", "secret")
+
+	require.Error(t, err)
+	assert.False(t, medium.IsFile("workspaces/outside.txt"))
+}
+
+func TestWorkspace_HandleWorkspaceCommand_Good(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+
+	create := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceCreateAction,
+		Workspace: "alpha",
+	})
+	require.True(t, create.OK)
+	_, ok := create.Value.(coreio.Medium)
+	require.True(t, ok)
+
+	write := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceWriteAction,
+		Workspace: "alpha",
+		Path:      "notes/todo.txt",
+		Content:   "hello",
+	})
+	require.True(t, write.OK)
+
+	read := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceReadAction,
+		Workspace: "alpha",
+		Path:      "notes/todo.txt",
+	})
+	require.True(t, read.OK)
+	assert.Equal(t, "hello", read.Value)
+
+	list := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceListAction,
+		Workspace: "alpha",
+		Path:      "notes",
+	})
+	require.True(t, list.OK)
+	entries, ok := list.Value.([]fs.DirEntry)
+	require.True(t, ok)
+	require.Len(t, entries, 1)
+	assert.Equal(t, "todo.txt", entries[0].Name())
+
+	switchResult := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceSwitchAction,
+		Workspace: "alpha",
+	})
+	require.True(t, switchResult.OK)
+	assert.Equal(t, "alpha", workspaceService.CurrentWorkspace())
+}
+
+func TestWorkspace_HandleWorkspaceCommand_Bad_UnknownAction(t *testing.T) {
+	workspaceService, _ := newTestWorkspace(t)
+
+	result := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{Action: "noop"})
+
+	assert.False(t, result.OK)
+}
+
+func TestWorkspace_HandleWorkspaceCommand_Ugly_TraversalPath(t *testing.T) {
+	workspaceService, medium := newTestWorkspace(t)
+	require.True(t, workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceCreateAction,
+		Workspace: "alpha",
+	}).OK)
+
+	result := workspaceService.HandleWorkspaceCommand(WorkspaceCommand{
+		Action:    WorkspaceWriteAction,
+		Workspace: "alpha",
+		Path:      "../outside.txt",
+		Content:   "secret",
+	})
+
+	assert.False(t, result.OK)
+	assert.False(t, medium.IsFile("workspaces/outside.txt"))
+}