diff --git a/atomic.go b/atomic.go new file mode 100644 index 0000000..e2b4ad7 --- /dev/null +++ b/atomic.go @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Typed atomic primitives — wrappers over sync/atomic typed types. +// +// Each type is a value-type wrapper; zero value is the natural zero +// (false / 0 / nil). Must not be copied after first use. +// +// Mirrors Go 1.19+ stdlib typed atomics one-for-one. Method names and +// semantics are pass-through. +// +// Usage: +// +// var ready core.AtomicBool +// ready.Store(true) +// if ready.Load() { /* signal received */ } +// +// var counter core.AtomicInt64 +// counter.Add(1) +// value := counter.Load() +// +// var current core.AtomicPointer[Config] +// current.Store(&Config{...}) +// cfg := current.Load() + +package core + +import "sync/atomic" + +// AtomicBool is a race-free atomic boolean. Zero value is false. +type AtomicBool struct{ inner atomic.Bool } + +// Load returns the current value. +func (a *AtomicBool) Load() bool { return a.inner.Load() } + +// Store sets the value. +func (a *AtomicBool) Store(val bool) { a.inner.Store(val) } + +// Swap stores new and returns the previous value. +func (a *AtomicBool) Swap(new bool) bool { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current value equals old. +// +// if a.CompareAndSwap(false, true) { /* claimed */ } +func (a *AtomicBool) CompareAndSwap(old, new bool) bool { + return a.inner.CompareAndSwap(old, new) +} + +// AtomicInt32 is a race-free atomic int32. Zero value is 0. +type AtomicInt32 struct{ inner atomic.Int32 } + +// Load returns the current value. +func (a *AtomicInt32) Load() int32 { return a.inner.Load() } + +// Store sets the value. +func (a *AtomicInt32) Store(val int32) { a.inner.Store(val) } + +// Add atomically adds delta and returns the new value. +// +// new := a.Add(1) +func (a *AtomicInt32) Add(delta int32) int32 { return a.inner.Add(delta) } + +// Swap stores new and returns the previous value. 
+func (a *AtomicInt32) Swap(new int32) int32 { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current value equals old. +func (a *AtomicInt32) CompareAndSwap(old, new int32) bool { + return a.inner.CompareAndSwap(old, new) +} + +// AtomicInt64 is a race-free atomic int64. Zero value is 0. +type AtomicInt64 struct{ inner atomic.Int64 } + +// Load returns the current value. +func (a *AtomicInt64) Load() int64 { return a.inner.Load() } + +// Store sets the value. +func (a *AtomicInt64) Store(val int64) { a.inner.Store(val) } + +// Add atomically adds delta and returns the new value. +func (a *AtomicInt64) Add(delta int64) int64 { return a.inner.Add(delta) } + +// Swap stores new and returns the previous value. +func (a *AtomicInt64) Swap(new int64) int64 { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current value equals old. +func (a *AtomicInt64) CompareAndSwap(old, new int64) bool { + return a.inner.CompareAndSwap(old, new) +} + +// AtomicUint32 is a race-free atomic uint32. Zero value is 0. +type AtomicUint32 struct{ inner atomic.Uint32 } + +// Load returns the current value. +func (a *AtomicUint32) Load() uint32 { return a.inner.Load() } + +// Store sets the value. +func (a *AtomicUint32) Store(val uint32) { a.inner.Store(val) } + +// Add atomically adds delta and returns the new value. +func (a *AtomicUint32) Add(delta uint32) uint32 { return a.inner.Add(delta) } + +// Swap stores new and returns the previous value. +func (a *AtomicUint32) Swap(new uint32) uint32 { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current value equals old. +func (a *AtomicUint32) CompareAndSwap(old, new uint32) bool { + return a.inner.CompareAndSwap(old, new) +} + +// AtomicUint64 is a race-free atomic uint64. Zero value is 0. +type AtomicUint64 struct{ inner atomic.Uint64 } + +// Load returns the current value. +func (a *AtomicUint64) Load() uint64 { return a.inner.Load() } + +// Store sets the value. 
+func (a *AtomicUint64) Store(val uint64) { a.inner.Store(val) } + +// Add atomically adds delta and returns the new value. +func (a *AtomicUint64) Add(delta uint64) uint64 { return a.inner.Add(delta) } + +// Swap stores new and returns the previous value. +func (a *AtomicUint64) Swap(new uint64) uint64 { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current value equals old. +func (a *AtomicUint64) CompareAndSwap(old, new uint64) bool { + return a.inner.CompareAndSwap(old, new) +} + +// AtomicPointer is a race-free atomic *T. Zero value is nil. +// +// var current core.AtomicPointer[Config] +// current.Store(&Config{...}) +// cfg := current.Load() +type AtomicPointer[T any] struct{ inner atomic.Pointer[T] } + +// Load returns the current pointer. +func (a *AtomicPointer[T]) Load() *T { return a.inner.Load() } + +// Store sets the pointer. +func (a *AtomicPointer[T]) Store(val *T) { a.inner.Store(val) } + +// Swap stores new and returns the previous pointer. +func (a *AtomicPointer[T]) Swap(new *T) *T { return a.inner.Swap(new) } + +// CompareAndSwap stores new only if the current pointer equals old. +func (a *AtomicPointer[T]) CompareAndSwap(old, new *T) bool { + return a.inner.CompareAndSwap(old, new) +} diff --git a/atomic_example_test.go b/atomic_example_test.go new file mode 100644 index 0000000..41a255c --- /dev/null +++ b/atomic_example_test.go @@ -0,0 +1,58 @@ +package core_test + +import ( + . 
"dappco.re/go/core" +) + +func ExampleAtomicBool() { + var ready AtomicBool + if !ready.Load() { + Println("not ready") + } + ready.Store(true) + if ready.Load() { + Println("ready") + } + // Output: + // not ready + // ready +} + +func ExampleAtomicInt64() { + var counter AtomicInt64 + counter.Add(1) + counter.Add(1) + counter.Add(1) + Println(counter.Load()) + // Output: + // 3 +} + +func ExampleAtomicInt32_CompareAndSwap() { + var state AtomicInt32 + if state.CompareAndSwap(0, 1) { + Println("claimed") + } + if !state.CompareAndSwap(0, 2) { + Println("already claimed") + } + // Output: + // claimed + // already claimed +} + +type config struct { + name string +} + +func ExampleAtomicPointer() { + var current AtomicPointer[config] + current.Store(&config{name: "v1"}) + cfg := current.Load() + Println(cfg.name) + current.Store(&config{name: "v2"}) + Println(current.Load().name) + // Output: + // v1 + // v2 +} diff --git a/atomic_test.go b/atomic_test.go new file mode 100644 index 0000000..0fc85ab --- /dev/null +++ b/atomic_test.go @@ -0,0 +1,222 @@ +package core_test + +import ( + "testing" + + . "dappco.re/go/core" + "github.com/stretchr/testify/assert" +) + +// --- AtomicBool --- + +func TestAtomic_Bool_Good(t *testing.T) { + var a AtomicBool + assert.False(t, a.Load()) + a.Store(true) + assert.True(t, a.Load()) +} + +func TestAtomic_Bool_Bad(t *testing.T) { + // Bad: CompareAndSwap with wrong old returns false, no change. + var a AtomicBool + a.Store(true) + swapped := a.CompareAndSwap(false, false) + assert.False(t, swapped) + assert.True(t, a.Load(), "CAS with wrong old must not mutate") +} + +func TestAtomic_Bool_Ugly(t *testing.T) { + // Ugly: 100 goroutines racing CompareAndSwap to claim a one-shot flag. + // Exactly one must win. 
+ var a AtomicBool + var wins AtomicInt32 + var wg WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if a.CompareAndSwap(false, true) { + wins.Add(1) + } + }() + } + wg.Wait() + assert.Equal(t, int32(1), wins.Load(), + "exactly one goroutine must win the CAS race") +} + +// --- AtomicInt32 --- + +func TestAtomic_Int32_Good(t *testing.T) { + var a AtomicInt32 + a.Store(5) + assert.Equal(t, int32(5), a.Load()) + got := a.Add(3) + assert.Equal(t, int32(8), got) +} + +func TestAtomic_Int32_Bad(t *testing.T) { + // Bad: Swap returns previous value, not new. + var a AtomicInt32 + a.Store(10) + prev := a.Swap(20) + assert.Equal(t, int32(10), prev) + assert.Equal(t, int32(20), a.Load()) +} + +func TestAtomic_Int32_Ugly(t *testing.T) { + // Ugly: 1000 concurrent Adds. Final value must be exact (race-free). + var a AtomicInt32 + var wg WaitGroup + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + defer wg.Done() + a.Add(1) + }() + } + wg.Wait() + assert.Equal(t, int32(1000), a.Load()) +} + +// --- AtomicInt64 --- + +func TestAtomic_Int64_Good(t *testing.T) { + var a AtomicInt64 + a.Store(1 << 40) + assert.Equal(t, int64(1<<40), a.Load()) +} + +func TestAtomic_Int64_Bad(t *testing.T) { + var a AtomicInt64 + a.Store(100) + swapped := a.CompareAndSwap(99, 200) + assert.False(t, swapped) + assert.Equal(t, int64(100), a.Load()) +} + +func TestAtomic_Int64_Ugly(t *testing.T) { + var a AtomicInt64 + var wg WaitGroup + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + defer wg.Done() + a.Add(1) + }() + } + wg.Wait() + assert.Equal(t, int64(1000), a.Load()) +} + +// --- AtomicUint32 --- + +func TestAtomic_Uint32_Good(t *testing.T) { + var a AtomicUint32 + a.Store(7) + assert.Equal(t, uint32(7), a.Load()) + a.Add(3) + assert.Equal(t, uint32(10), a.Load()) +} + +func TestAtomic_Uint32_Bad(t *testing.T) { + var a AtomicUint32 + a.Store(5) + swapped := a.CompareAndSwap(99, 10) + assert.False(t, swapped) + assert.Equal(t, uint32(5), a.Load()) 
+} + +func TestAtomic_Uint32_Ugly(t *testing.T) { + var a AtomicUint32 + var wg WaitGroup + for i := 0; i < 500; i++ { + wg.Add(1) + go func() { + defer wg.Done() + a.Add(2) + }() + } + wg.Wait() + assert.Equal(t, uint32(1000), a.Load()) +} + +// --- AtomicUint64 --- + +func TestAtomic_Uint64_Good(t *testing.T) { + var a AtomicUint64 + a.Store(1 << 50) + assert.Equal(t, uint64(1<<50), a.Load()) +} + +func TestAtomic_Uint64_Bad(t *testing.T) { + var a AtomicUint64 + a.Store(100) + prev := a.Swap(200) + assert.Equal(t, uint64(100), prev) +} + +func TestAtomic_Uint64_Ugly(t *testing.T) { + var a AtomicUint64 + var wg WaitGroup + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + defer wg.Done() + a.Add(1) + }() + } + wg.Wait() + assert.Equal(t, uint64(1000), a.Load()) +} + +// --- AtomicPointer --- + +type pointerVal struct { + n int +} + +func TestAtomic_Pointer_Good(t *testing.T) { + var a AtomicPointer[pointerVal] + assert.Nil(t, a.Load()) + v := &pointerVal{n: 42} + a.Store(v) + assert.Equal(t, 42, a.Load().n) +} + +func TestAtomic_Pointer_Bad(t *testing.T) { + // Bad: Swap returns nil if no prior value. + var a AtomicPointer[pointerVal] + prev := a.Swap(&pointerVal{n: 1}) + assert.Nil(t, prev) +} + +func TestAtomic_Pointer_Ugly(t *testing.T) { + // Ugly: 100 goroutines racing Store; Load at the end returns one of them. 
+ var a AtomicPointer[pointerVal] + var wg WaitGroup + pvs := make([]*pointerVal, 100) + for i := 0; i < 100; i++ { + pvs[i] = &pointerVal{n: i} + wg.Add(1) + go func(pv *pointerVal) { + defer wg.Done() + a.Store(pv) + }(pvs[i]) + } + wg.Wait() + final := a.Load() + assert.NotNil(t, final, "after 100 stores, Load must return non-nil") + assert.GreaterOrEqual(t, final.n, 0) + assert.Less(t, final.n, 100) +} + +func TestAtomic_Pointer_CompareAndSwap_Good(t *testing.T) { + var a AtomicPointer[pointerVal] + old := &pointerVal{n: 1} + new := &pointerVal{n: 2} + a.Store(old) + swapped := a.CompareAndSwap(old, new) + assert.True(t, swapped) + assert.Equal(t, 2, a.Load().n) +} diff --git a/core.go b/core.go index 21f13c1..0c623d8 100644 --- a/core.go +++ b/core.go @@ -7,7 +7,6 @@ package core import ( "context" - "os" "sync" "sync/atomic" ) @@ -156,14 +155,16 @@ func (c *Core) RunE() error { } // Run starts all services, runs the CLI, then shuts down. -// Calls os.Exit(1) on failure. For error handling use RunE(). +// Calls c.ExitNow(1) on failure because RunE already shut down via defer. +// For error handling use RunE(). // // c := core.New(core.WithService(myService.Register)) // c.Run() func (c *Core) Run() { if err := c.RunE(); err != nil { Error(err.Error()) - os.Exit(1) + // ExitNow because RunE's defer already ran ServiceShutdown. + c.ExitNow(1) } } diff --git a/exit.go b/exit.go new file mode 100644 index 0000000..9bd7baf --- /dev/null +++ b/exit.go @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Process termination with graceful shutdown. +// +// Always prefer returning errors from RunE() over calling Exit. Use Exit only +// when you cannot return: signal handlers, panic recovery, or fatal errors deep +// in callbacks where the caller chain has no place for an error. 
+// +// Surface (in order of preference): +// +// c.Exit(0) // graceful, runs ServiceShutdown, 30s timeout +// c.ExitWith(opts) // graceful, custom timeout +// c.ExitNow(2) // skip shutdown, immediate (panic recovery only) +// core.Exit(1) // package-level, no *Core in scope (cli error helpers) +// +// All four call the unexported osExit() — the singular boundary in core/go where +// the os.Exit syscall is invoked. Consumers never import "os" for this. + +package core + +import ( + "context" + "os" + "time" +) + +// osExit is the singular call to os.Exit in core/go. +// Tests override via the testExitCode hook; production wires straight through. +var osExit = os.Exit + +// ExitOptions configures graceful exit behaviour. +// +// c.ExitWith(core.ExitOptions{Code: 1, Timeout: 5 * time.Second}) +type ExitOptions struct { + // Code is the process exit code passed to os.Exit. + Code int + // Timeout bounds how long ServiceShutdown may run before the process + // terminates anyway. Zero means wait forever (legacy behaviour). + // Negative is invalid — ExitWith warns and falls back to a 30s default + // rather than silently waiting forever. + Timeout time.Duration +} + +// Exit terminates the process with the given code, after running shutdown hooks. +// +// Default 30s timeout — long enough for graceful database shutdown / file +// flushes, short enough that ops can SIGKILL after waiting (matches systemd +// TimeoutStopSec). +// +// // fatal error in a signal handler +// c.Action("signal.received", func(ctx context.Context, opts core.Options) core.Result { +// if opts.String("name") == "SIGINT" { c.Exit(0) } +// return core.Result{OK: true} +// }) +func (c *Core) Exit(code int) { + c.ExitWith(ExitOptions{Code: code, Timeout: 30 * time.Second}) +} + +// ExitWith runs ServiceShutdown with the given timeout, then exits. +// If shutdown does not complete within Timeout, the process exits anyway and +// a warning is logged. 
+// +// // daemon with a tighter shutdown budget +// c.ExitWith(core.ExitOptions{Code: 0, Timeout: 5 * time.Second}) +func (c *Core) ExitWith(opts ExitOptions) { + ctx := context.Background() + switch { + case opts.Timeout < 0: + // Negative timeout is invalid — log and reject; treat as 30s default. + Warn("invalid negative ExitOptions.Timeout, using 30s default", "timeout", opts.Timeout) + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + case opts.Timeout > 0: + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.Timeout) + defer cancel() + } + // Timeout == 0 → no context, wait forever (legacy behaviour preserved per RFC §15B) + done := make(chan struct{}) + go func() { + _ = c.ServiceShutdown(ctx) + close(done) + }() + select { + case <-done: + // shutdown completed + case <-ctx.Done(): + Warn("exit timeout, forcing immediate termination", + "timeout", opts.Timeout, "code", opts.Code) + } + osExit(opts.Code) +} + +// ExitNow terminates immediately without running shutdown hooks. +// Use only when shutdown is hung or unsafe (e.g. inside a panic the shutdown +// chain may have caused). +// Also valid when shutdown has already run via a defer chain. +// +// defer func() { +// if r := recover(); r != nil { c.ExitNow(2) } +// }() +func (c *Core) ExitNow(code int) { osExit(code) } + +// Exit (package level) terminates immediately without running shutdown hooks. +// For callsites that do not have a *Core reference (e.g. cli error helpers). +// Equivalent to calling ExitNow on a Core instance. +// +// // cli/pkg/cli/errors.go — no *Core in scope +// func Fatal(msg string) { +// Error(msg) +// core.Exit(1) +// } +func Exit(code int) { osExit(code) } diff --git a/exit_example_test.go b/exit_example_test.go new file mode 100644 index 0000000..164d5ff --- /dev/null +++ b/exit_example_test.go @@ -0,0 +1,44 @@ +package core_test + +import ( + "time" + + . 
"dappco.re/go/core" +) + +// ExampleCore_Exit demonstrates the call shape. The osExit hook is overridden +// in tests; in production this would terminate the process. +func ExampleCore_Exit() { + c := New() + _ = c // c.Exit(0) terminates the process in production + Println("ready to exit") + // Output: + // ready to exit +} + +// ExampleCore_ExitWith demonstrates a custom shutdown timeout. +func ExampleCore_ExitWith() { + c := New() + _ = c // c.ExitWith(core.ExitOptions{Code: 0, Timeout: 5*time.Second}) + _ = ExitOptions{Code: 0, Timeout: 5 * time.Second} + Println("configured") + // Output: + // configured +} + +// ExampleCore_ExitNow demonstrates the immediate-termination escape hatch. +func ExampleCore_ExitNow() { + c := New() + _ = c // c.ExitNow(2) terminates without running ServiceShutdown + Println("emergency") + // Output: + // emergency +} + +// ExampleExit demonstrates the package-level form for callsites without a *Core. +func ExampleExit() { + // core.Exit(1) terminates the process in production + Println("package-level") + // Output: + // package-level +} diff --git a/exit_test.go b/exit_test.go new file mode 100644 index 0000000..fa56f68 --- /dev/null +++ b/exit_test.go @@ -0,0 +1,230 @@ +package core + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// captureExit swaps the package-level osExit hook for the duration of the test. +// Returns (captured-code, restore-func). The captured code defaults to -1 so +// tests can distinguish "not called" from "called with 0". 
+func captureExit(t *testing.T) (codePtr *int, restore func()) { + t.Helper() + captured := -1 + codePtr = &captured + prev := osExit + osExit = func(code int) { captured = code } + return codePtr, func() { osExit = prev } +} + +func TestExit_Exit_Good(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + c := New() + c.Exit(0) + + assert.Equal(t, 0, *got) +} + +func TestExit_Exit_Bad(t *testing.T) { + // Bad: caller passes a non-zero code via a fatal error path. + // Recoverable boundary: we observe the captured code, no process death. + got, restore := captureExit(t) + defer restore() + + c := New() + c.Exit(127) + + assert.Equal(t, 127, *got) +} + +func TestExit_Exit_Ugly(t *testing.T) { + // Ugly: Exit called twice (e.g. signal handler races user-triggered exit). + // Both calls land; second wins. ServiceShutdown is idempotent. + got, restore := captureExit(t) + defer restore() + + c := New() + c.Exit(1) + c.Exit(2) + + assert.Equal(t, 2, *got) +} + +func TestExit_ExitWith_Good(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + c := New() + c.ExitWith(ExitOptions{Code: 5, Timeout: 100 * time.Millisecond}) + + assert.Equal(t, 5, *got) +} + +func TestExit_ExitWith_Bad(t *testing.T) { + // Bad: zero timeout = wait forever. With a registered service whose OnStop + // returns immediately, ServiceShutdown completes; Exit lands. + got, restore := captureExit(t) + defer restore() + + c := New() + c.ExitWith(ExitOptions{Code: 9, Timeout: 0}) + + assert.Equal(t, 9, *got) +} + +func TestExit_ExitWithNegativeTimeout_Bad(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + // Service whose OnStop returns immediately so we don't wait the full 30s. 
+ c := New() + c.Service("fast", Service{ + OnStop: func() Result { + return Result{OK: true} + }, + }) + + start := time.Now() + c.ExitWith(ExitOptions{Code: 7, Timeout: -1}) + elapsed := time.Since(start) + + assert.Equal(t, 7, *got, "ExitWith should still call osExit with the requested code") + assert.Less(t, elapsed, 5*time.Second, + "negative timeout must NOT fall through to wait-forever; default fallback should bound shutdown") +} + +func TestExit_ExitWithZeroTimeout_Good(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + c := New() + c.Service("instant", Service{OnStop: func() Result { return Result{OK: true} }}) + + c.ExitWith(ExitOptions{Code: 4, Timeout: 0}) + + // Timeout==0 → wait forever — but the service's OnStop returns + // immediately so we shouldn't actually wait. The test asserts the + // documented zero-means-wait-forever-but-returns-when-ready semantics. + assert.Equal(t, 4, *got) +} + +func TestExit_ExitWith_Ugly(t *testing.T) { + // Ugly: shutdown takes longer than the timeout. Service blocks for 200ms, + // timeout is 10ms — process exits with the warning logged, no panic. + got, restore := captureExit(t) + defer restore() + + c := New() + c.Service("slow", Service{OnStop: func() Result { + time.Sleep(200 * time.Millisecond) + return Result{OK: true} + }}) + start := time.Now() + c.ExitWith(ExitOptions{Code: 3, Timeout: 10 * time.Millisecond}) + elapsed := time.Since(start) + + assert.Equal(t, 3, *got) + assert.Less(t, elapsed, 200*time.Millisecond, + "ExitWith must respect the timeout, not wait for slow shutdown") +} + +func TestExit_ExitNow_Good(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + c := New() + c.ExitNow(0) + + assert.Equal(t, 0, *got) +} + +func TestExit_ExitNow_Bad(t *testing.T) { + // Bad: ExitNow called from a panic recovery path with non-zero code. 
+ got, restore := captureExit(t) + defer restore() + + c := New() + defer func() { + if r := recover(); r != nil { + c.ExitNow(2) + } + }() + func() { panic(errors.New("boom")) }() + + assert.Equal(t, 2, *got) +} + +func TestExit_ExitNow_Ugly(t *testing.T) { + // Ugly: ExitNow does NOT run shutdown — verify the OnStop hook is NOT called. + got, restore := captureExit(t) + defer restore() + + stopped := false + c := New() + c.Service("hook", Service{OnStop: func() Result { + stopped = true + return Result{OK: true} + }}) + c.ExitNow(4) + + assert.Equal(t, 4, *got) + assert.False(t, stopped, + "ExitNow must skip the shutdown chain — OnStop must not run") +} + +func TestRun_FailurePath_ShutdownFiresOnce_Bad(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + shutdownCount := 0 + c := New() + c.Service("failing", Service{ + OnStart: func() Result { + return Result{Value: errors.New("simulated startup failure"), OK: false} + }, + OnStop: func() Result { + shutdownCount++ + return Result{OK: true} + }, + }) + c.Run() + + assert.Equal(t, 1, *got, "Run should call osExit(1) on failure") + assert.Equal(t, 1, shutdownCount, "OnStop should fire exactly once, not twice") +} + +func TestExit_PackageExit_Good(t *testing.T) { + got, restore := captureExit(t) + defer restore() + + Exit(0) + + assert.Equal(t, 0, *got) +} + +func TestExit_PackageExit_Bad(t *testing.T) { + // Bad: package-level Exit called with non-zero code from cli error helper. + got, restore := captureExit(t) + defer restore() + + Exit(1) + + assert.Equal(t, 1, *got) +} + +func TestExit_PackageExit_Ugly(t *testing.T) { + // Ugly: package-level Exit called repeatedly. Each call lands. + got, restore := captureExit(t) + defer restore() + + Exit(1) + Exit(2) + Exit(3) + + assert.Equal(t, 3, *got) +} diff --git a/lock.go b/lock.go index a963278..a496820 100644 --- a/lock.go +++ b/lock.go @@ -9,6 +9,11 @@ import ( ) // Lock is the DTO for a named mutex. +// +// Mutex is the backing sync.RWMutex. 
+//
+// Direct access to Mutex is deprecated — it forces consumers to import
+// "sync". Use Lock/Unlock/RLock/RUnlock/TryLock instead; removal in v0.9.0.
 type Lock struct {
 	Name string
 	Mutex *sync.RWMutex
 }
@@ -17,6 +22,9 @@
 // Lock returns a named Lock, creating the mutex if needed.
 // Locks are per-Core — separate Core instances do not share mutexes.
+//
+//	l := c.Lock("drain")
+//	l.Lock(); defer l.Unlock()
 func (c *Core) Lock(name string) *Lock {
 	r := c.lock.locks.Get(name)
 	if r.OK {
@@ -27,6 +35,42 @@
 	return &Lock{Name: name, Mutex: m}
 }
+// Lock acquires the named mutex for write.
+//
+//	c.Lock("drain").Lock()
+//	defer c.Lock("drain").Unlock()
+func (l *Lock) Lock() { l.Mutex.Lock() }
+
+// Unlock releases the named mutex from write.
+//
+//	c.Lock("drain").Unlock()
+func (l *Lock) Unlock() { l.Mutex.Unlock() }
+
+// RLock acquires the named mutex for read.
+//
+//	c.Lock("cache").RLock()
+//	defer c.Lock("cache").RUnlock()
+func (l *Lock) RLock() { l.Mutex.RLock() }
+
+// RUnlock releases the named mutex from read.
+//
+//	c.Lock("cache").RUnlock()
+func (l *Lock) RUnlock() { l.Mutex.RUnlock() }
+
+// TryLock attempts to acquire the write mutex without blocking.
+// Returns Result{OK: true} when acquired, Result{OK: false} when held.
+//
+//	if c.Lock("drain").TryLock().OK {
+//		defer c.Lock("drain").Unlock()
+//		// ...
+//	}
+func (l *Lock) TryLock() Result {
+	if l.Mutex.TryLock() {
+		return Result{OK: true}
+	}
+	return Result{OK: false}
+}
+
 // LockEnable marks that the service lock should be applied after initialisation.
func (c *Core) LockEnable(name ...string) { c.services.lockEnabled = true diff --git a/lock_example_test.go b/lock_example_test.go index 61497b0..2294ec5 100644 --- a/lock_example_test.go +++ b/lock_example_test.go @@ -8,11 +8,34 @@ import ( func ExampleCore_Lock() { c := New() lock := c.Lock("drain") - lock.Mutex.Lock() + lock.Lock() Println("locked") - lock.Mutex.Unlock() + lock.Unlock() Println("unlocked") // Output: // locked // unlocked } + +func ExampleLock_RLock() { + c := New() + lock := c.Lock("cache") + lock.RLock() + Println("read-locked") + lock.RUnlock() + Println("read-unlocked") + // Output: + // read-locked + // read-unlocked +} + +func ExampleLock_TryLock() { + c := New() + lock := c.Lock("drain") + if lock.TryLock().OK { + Println("acquired") + lock.Unlock() + } + // Output: + // acquired +} diff --git a/lock_test.go b/lock_test.go index ef0ba86..666d4dd 100644 --- a/lock_test.go +++ b/lock_test.go @@ -53,3 +53,83 @@ func TestLock_Stoppables_Good(t *testing.T) { assert.True(t, r.OK) assert.Len(t, r.Value.([]*Service), 1) } + +func TestLock_LockUnlock_Good(t *testing.T) { + c := New() + l := c.Lock("a") + l.Lock() + l.Unlock() +} + +func TestLock_LockUnlock_Bad(t *testing.T) { + c := New() + l := c.Lock("held") + l.Lock() + defer l.Unlock() + r := l.TryLock() + assert.False(t, r.OK, "TryLock on already-held lock must report not-acquired") +} + +func TestLock_LockUnlock_Ugly(t *testing.T) { + c := New() + l := c.Lock("reentry") + l.Lock() + l.Unlock() + l.Lock() + l.Unlock() +} + +func TestLock_RLockRUnlock_Good(t *testing.T) { + c := New() + l := c.Lock("a") + l.RLock() + l.RUnlock() +} + +func TestLock_RLockRUnlock_Bad(t *testing.T) { + c := New() + l := c.Lock("write-held") + l.Lock() + defer l.Unlock() + r := l.TryLock() + assert.False(t, r.OK, "TryLock when write-held must fail (readers also blocked)") +} + +func TestLock_RLockRUnlock_Ugly(t *testing.T) { + c := New() + l := c.Lock("a") + l.RLock() + l.RLock() + l.RUnlock() + l.RUnlock() +} + 
+func TestLock_TryLock_Good(t *testing.T) { + c := New() + l := c.Lock("a") + r := l.TryLock() + assert.True(t, r.OK) + l.Unlock() +} + +func TestLock_TryLock_Bad(t *testing.T) { + c := New() + l := c.Lock("held") + l.Lock() + defer l.Unlock() + r := l.TryLock() + assert.False(t, r.OK) +} + +func TestLock_TryLock_Ugly(t *testing.T) { + c := New() + l := c.Lock("a") + r1 := l.TryLock() + assert.True(t, r1.OK) + r2 := l.TryLock() + assert.False(t, r2.OK) + l.Unlock() + r3 := l.TryLock() + assert.True(t, r3.OK) + l.Unlock() +} diff --git a/signal.go b/signal.go new file mode 100644 index 0000000..94c7b15 --- /dev/null +++ b/signal.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// OS signal handling — consumer-facing Core surface. +// +// Signal events are emitted as actions. Consumers subscribe via c.Action(): +// +// c.Action("signal.received", func(ctx context.Context, opts core.Options) core.Result { +// name := opts.String("name") // "SIGINT", "SIGTERM", "SIGHUP" +// switch name { +// case "SIGINT", "SIGTERM": +// c.Exit(0) +// case "SIGHUP": +// c.Config().Reload() +// } +// return core.Result{OK: true} +// }) +// +// If no signal service is registered (typically by go-process), no actions fire +// and consumers do not observe signals — permission-by-registration, mirroring +// the Process accessor pattern. +// +// Action contract: +// +// signal.received service → consumers {name: string, value: int} +// signal.start consumer → service {signals: []string} +// signal.stop consumer → service {} + +package core + +// Signal is the Core primitive for OS signal handling. +// +// if c.Signal().Exists() { /* signals will be observed */ } +type Signal struct { + core *Core +} + +// Signal returns the signal-handling primitive. +// +// c.Signal().Stop() +func (c *Core) Signal() *Signal { return &Signal{core: c} } + +// Stop instructs the signal service to unsubscribe from OS notifications. +// Idempotent. The service shutdown chain calls this automatically. 
+// +// c.Signal().Stop() +func (s *Signal) Stop() Result { + return s.core.Action("signal.stop").Run(s.core.Context(), NewOptions()) +} + +// Exists reports whether a signal service is registered (and therefore whether +// consumers can expect signal.received broadcasts). +// +// if !c.Signal().Exists() { +// Warn("signal handling unavailable — go-process not registered") +// } +func (s *Signal) Exists() bool { + return s.core.Action("signal.received").Exists() +} diff --git a/signal_example_test.go b/signal_example_test.go new file mode 100644 index 0000000..9dde89b --- /dev/null +++ b/signal_example_test.go @@ -0,0 +1,35 @@ +package core_test + +import ( + "context" + + . "dappco.re/go/core" +) + +// ExampleCore_Signal_exists shows the registration check. +func ExampleCore_Signal_exists() { + c := New() + if c.Signal().Exists() { + Println("signal handling available") + } else { + Println("no signal service registered") + } + // Output: + // no signal service registered +} + +// ExampleCore_Signal_subscribe shows the action-subscription pattern. In +// production the go-process service registers signal.received and broadcasts +// on each OS signal; here we register a stub action to demonstrate the surface. +func ExampleCore_Signal_subscribe() { + c := New() + c.Action("signal.received", func(_ context.Context, opts Options) Result { + Println("got", opts.String("name")) + return Result{OK: true} + }) + // In production this fires on SIGINT/SIGTERM/SIGHUP: + c.Action("signal.received").Run(c.Context(), + NewOptions(Option{Key: "name", Value: "SIGINT"})) + // Output: + // got SIGINT +} diff --git a/signal_test.go b/signal_test.go new file mode 100644 index 0000000..23d8485 --- /dev/null +++ b/signal_test.go @@ -0,0 +1,68 @@ +package core_test + +import ( + "context" + "testing" + + . "dappco.re/go/core" + "github.com/stretchr/testify/assert" +) + +func TestSignal_Exists_Good(t *testing.T) { + // Good: with a registered signal.received action, Exists is true. 
+ c := New() + c.Action("signal.received", func(_ context.Context, _ Options) Result { + return Result{OK: true} + }) + assert.True(t, c.Signal().Exists()) +} + +func TestSignal_Exists_Bad(t *testing.T) { + // Bad: no signal service registered. Exists returns false. + c := New() + assert.False(t, c.Signal().Exists()) +} + +func TestSignal_Exists_Ugly(t *testing.T) { + // Ugly: a signal.start action is registered but signal.received is not. + // Exists keys off signal.received specifically — partial registration + // reports as no service available. + c := New() + c.Action("signal.start", func(_ context.Context, _ Options) Result { + return Result{OK: true} + }) + assert.False(t, c.Signal().Exists(), + "Exists must key off signal.received, not just any signal.* action") +} + +func TestSignal_Stop_Good(t *testing.T) { + // Good: signal.stop registered, Stop emits and returns OK. + c := New() + called := false + c.Action("signal.stop", func(_ context.Context, _ Options) Result { + called = true + return Result{OK: true} + }) + r := c.Signal().Stop() + assert.True(t, r.OK) + assert.True(t, called) +} + +func TestSignal_Stop_Bad(t *testing.T) { + // Bad: no signal.stop registered. Stop returns Result{OK: false} + // (permission-by-registration — no handler = no capability). + c := New() + r := c.Signal().Stop() + assert.False(t, r.OK) +} + +func TestSignal_Stop_Ugly(t *testing.T) { + // Ugly: signal.stop registered but handler returns OK: false (refusal). + // Caller observes the refusal verbatim. + c := New() + c.Action("signal.stop", func(_ context.Context, _ Options) Result { + return Result{OK: false} + }) + r := c.Signal().Stop() + assert.False(t, r.OK) +} diff --git a/sync.go b/sync.go new file mode 100644 index 0000000..c9c4195 --- /dev/null +++ b/sync.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Per-instance synchronisation primitives — embed in your structs to avoid +// importing "sync" directly. 
+// +// These are 1:1 wrappers around stdlib sync types. Methods are pass-through; +// the only purpose is to confine the "sync" import to this file so consumers +// can satisfy the banned-imports rule without losing concurrency primitives. +// +// For per-Core named coordinators (drain, service-registry, etc.) use +// c.Lock(name) — that's a different shape (registry-style, named). +// +// Usage: +// +// type Counter struct { +// mu core.Mutex // protects value +// value int +// } +// +// func (c *Counter) Inc() { +// c.mu.Lock(); defer c.mu.Unlock() +// c.value++ +// } + +package core + +import "sync" + +// Mutex is a mutual exclusion lock. Embed in your struct to protect internal state. +// +// type Counter struct { +// mu core.Mutex +// value int +// } +// +// func (c *Counter) Inc() { +// c.mu.Lock(); defer c.mu.Unlock() +// c.value++ +// } +type Mutex struct{ inner sync.Mutex } + +// Lock acquires the mutex. +func (m *Mutex) Lock() { m.inner.Lock() } + +// Unlock releases the mutex. +func (m *Mutex) Unlock() { m.inner.Unlock() } + +// TryLock attempts to acquire the mutex without blocking. +// Returns Result{OK: true} on acquire, Result{OK: false} if already held. +// +// if c.mu.TryLock().OK { +// defer c.mu.Unlock() +// // ... +// } +func (m *Mutex) TryLock() Result { + if m.inner.TryLock() { + return Result{OK: true} + } + return Result{OK: false} +} + +// RWMutex is a read-write mutex. Many readers OR one writer. +// +// type Cache struct { +// mu core.RWMutex +// data map[string]string +// } +// +// func (c *Cache) Get(k string) string { +// c.mu.RLock(); defer c.mu.RUnlock() +// return c.data[k] +// } +// +// func (c *Cache) Set(k, v string) { +// c.mu.Lock(); defer c.mu.Unlock() +// c.data[k] = v +// } +type RWMutex struct{ inner sync.RWMutex } + +// Lock acquires the mutex for write (exclusive). +func (m *RWMutex) Lock() { m.inner.Lock() } + +// Unlock releases the mutex from write. 
+func (m *RWMutex) Unlock() { m.inner.Unlock() } + +// RLock acquires the mutex for read (shared). +func (m *RWMutex) RLock() { m.inner.RLock() } + +// RUnlock releases the mutex from read. +func (m *RWMutex) RUnlock() { m.inner.RUnlock() } + +// TryLock attempts to acquire the write mutex without blocking. +// +// if c.mu.TryLock().OK { defer c.mu.Unlock() } +func (m *RWMutex) TryLock() Result { + if m.inner.TryLock() { + return Result{OK: true} + } + return Result{OK: false} +} + +// TryRLock attempts to acquire the read mutex without blocking. +// +// if c.mu.TryRLock().OK { defer c.mu.RUnlock() } +func (m *RWMutex) TryRLock() Result { + if m.inner.TryRLock() { + return Result{OK: true} + } + return Result{OK: false} +} + +// Once runs a function exactly once across all callers. +// +// type Service struct { +// initOnce core.Once +// ready bool +// } +// +// func (s *Service) ensure() { +// s.initOnce.Do(func() { s.ready = true }) +// } +type Once struct{ inner sync.Once } + +// Do calls the function fn if and only if Do is being called for the first +// time for this instance of Once. +func (o *Once) Do(fn func()) { o.inner.Do(fn) } + +// Reset clears the once so Do can fire again. Use for re-initialisation +// patterns where the resource is closed and re-opened. +// +// s.closeStateStore() +// s.initOnce.Reset() // next Do() runs the init function again +// +// Semantics match stdlib sync.Once{} reset: the previous Once is replaced +// outright. If a Do(fn) is concurrently in flight, behaviour is undefined — +// callers must serialise Reset against any concurrent Do calls. +func (o *Once) Reset() { o.inner = sync.Once{} } + +// WaitGroup waits for a collection of goroutines to finish. 
+// +// var wg core.WaitGroup +// for _, item := range items { +// wg.Add(1) +// go func(it Item) { +// defer wg.Done() +// process(it) +// }(item) +// } +// wg.Wait() +type WaitGroup struct{ inner sync.WaitGroup } + +// Add adds delta, which may be negative, to the WaitGroup counter. +func (w *WaitGroup) Add(delta int) { w.inner.Add(delta) } + +// Done decrements the WaitGroup counter by one. +func (w *WaitGroup) Done() { w.inner.Done() } + +// Wait blocks until the WaitGroup counter is zero. +func (w *WaitGroup) Wait() { w.inner.Wait() } diff --git a/sync_example_test.go b/sync_example_test.go new file mode 100644 index 0000000..d23f486 --- /dev/null +++ b/sync_example_test.go @@ -0,0 +1,59 @@ +package core_test + +import ( + . "dappco.re/go/core" +) + +func ExampleMutex() { + var mu Mutex + mu.Lock() + Println("locked") + mu.Unlock() + Println("unlocked") + // Output: + // locked + // unlocked +} + +func ExampleRWMutex() { + var mu RWMutex + mu.RLock() + Println("read-locked") + mu.RUnlock() + mu.Lock() + Println("write-locked") + mu.Unlock() + // Output: + // read-locked + // write-locked +} + +func ExampleOnce() { + var o Once + o.Do(func() { Println("ran") }) + o.Do(func() { Println("not printed") }) + // Output: + // ran +} + +func ExampleOnce_Reset() { + var o Once + o.Do(func() { Println("first") }) + o.Reset() + o.Do(func() { Println("again") }) + // Output: + // first + // again +} + +func ExampleWaitGroup() { + var wg WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + Println("worker done") + }() + wg.Wait() + // Output: + // worker done +} diff --git a/sync_test.go b/sync_test.go new file mode 100644 index 0000000..f42b2a8 --- /dev/null +++ b/sync_test.go @@ -0,0 +1,184 @@ +package core_test + +import ( + "testing" + "time" + + . 
"dappco.re/go/core" + "github.com/stretchr/testify/assert" +) + +// --- Mutex --- + +func TestSync_Mutex_Good(t *testing.T) { + var m Mutex + m.Lock() + m.Unlock() +} + +func TestSync_Mutex_Bad(t *testing.T) { + // Bad: TryLock on already-held mutex returns Result{OK: false}. + var m Mutex + m.Lock() + defer m.Unlock() + r := m.TryLock() + assert.False(t, r.OK) +} + +func TestSync_Mutex_Ugly(t *testing.T) { + // Ugly: contention. Two goroutines incrementing under the same Mutex + // must produce 1000 increments without races (-race must pass). + var m Mutex + count := 0 + var wg WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 500; j++ { + m.Lock() + count++ + m.Unlock() + } + }() + } + wg.Wait() + assert.Equal(t, 1000, count) +} + +// --- RWMutex --- + +func TestSync_RWMutex_Good(t *testing.T) { + var m RWMutex + m.Lock() + m.Unlock() + m.RLock() + m.RUnlock() +} + +func TestSync_RWMutex_Bad(t *testing.T) { + // Bad: TryLock fails when write-held. + var m RWMutex + m.Lock() + defer m.Unlock() + r := m.TryLock() + assert.False(t, r.OK) +} + +func TestSync_RWMutex_Ugly(t *testing.T) { + // Ugly: many readers + occasional writer; -race must remain clean. 
+ var m RWMutex + value := 0 + var wg WaitGroup + // 5 readers + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 100; j++ { + m.RLock() + _ = value + m.RUnlock() + } + }() + } + // 2 writers + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < 50; j++ { + m.Lock() + value++ + m.Unlock() + } + }() + } + wg.Wait() + assert.Equal(t, 100, value) +} + +func TestSync_RWMutex_TryRLock_Good(t *testing.T) { + var m RWMutex + r := m.TryRLock() + assert.True(t, r.OK) + m.RUnlock() +} + +// --- Once --- + +func TestSync_Once_Good(t *testing.T) { + var o Once + count := 0 + o.Do(func() { count++ }) + o.Do(func() { count++ }) + o.Do(func() { count++ }) + assert.Equal(t, 1, count, "Once.Do must execute the function exactly once") +} + +func TestSync_Once_Bad(t *testing.T) { + // Bad: caller passes nil. Stdlib Once panics on nil; we pass through. + var o Once + assert.Panics(t, func() { o.Do(nil) }) +} + +func TestSync_Once_Ugly(t *testing.T) { + // Ugly: Reset between invocations re-arms the Once. + var o Once + count := 0 + o.Do(func() { count++ }) + o.Do(func() { count++ }) + assert.Equal(t, 1, count) + o.Reset() + o.Do(func() { count++ }) + o.Do(func() { count++ }) + assert.Equal(t, 2, count, "After Reset, Do must fire once more") +} + +// --- WaitGroup --- + +func TestSync_WaitGroup_Good(t *testing.T) { + var wg WaitGroup + var mu Mutex + done := false + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(10 * time.Millisecond) + mu.Lock() + done = true + mu.Unlock() + }() + wg.Wait() + mu.Lock() + defer mu.Unlock() + assert.True(t, done) +} + +func TestSync_WaitGroup_Bad(t *testing.T) { + // Bad: Done called more times than Add. Stdlib panics; we pass through. + var wg WaitGroup + wg.Add(1) + wg.Done() + assert.Panics(t, func() { wg.Done() }) +} + +func TestSync_WaitGroup_Ugly(t *testing.T) { + // Ugly: many goroutines, all must complete before Wait returns. 
+ var wg WaitGroup + var mu Mutex + counter := 0 + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + mu.Lock() + counter++ + mu.Unlock() + }() + } + wg.Wait() + mu.Lock() + defer mu.Unlock() + assert.Equal(t, 100, counter) +} diff --git a/syncmap.go b/syncmap.go new file mode 100644 index 0000000..9f31d93 --- /dev/null +++ b/syncmap.go @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: EUPL-1.2 + +// Concurrent map primitive — wrapper over sync.Map. +// +// For most use cases, prefer map[K]V + core.RWMutex (type-safe, easier to +// reason about). Reach for SyncMap only in two patterns: +// (1) entries written once, read many times (caches that only grow), or +// (2) goroutines read/write/overwrite entries for disjoint key sets. +// See sync.Map's package docs for the full memory-model guarantees. +// +// Zero value is empty and ready for use. Must not be copied after first use. +// +// Usage: +// +// var cache core.SyncMap +// cache.Store("key", value) +// if v, ok := cache.Load("key"); ok { +// use(v) +// } + +package core + +import "sync" + +// SyncMap is a concurrent map. Same semantics and memory model as sync.Map. +type SyncMap struct{ inner sync.Map } + +// Load returns the value stored for key, or nil and ok=false if not present. +// +// if v, ok := m.Load("key"); ok { /* use v */ } +func (m *SyncMap) Load(key any) (value any, ok bool) { return m.inner.Load(key) } + +// Store sets the value for key. +// +// m.Store("key", value) +func (m *SyncMap) Store(key, value any) { m.inner.Store(key, value) } + +// LoadOrStore returns the existing value if present, otherwise stores and +// returns the given value. loaded is true if value was loaded, false if stored. +// +// actual, loaded := m.LoadOrStore("key", defaultValue) +func (m *SyncMap) LoadOrStore(key, value any) (actual any, loaded bool) { + return m.inner.LoadOrStore(key, value) +} + +// LoadAndDelete deletes the value for key, returning the previous value if any. 
+// +// v, loaded := m.LoadAndDelete("key") +func (m *SyncMap) LoadAndDelete(key any) (value any, loaded bool) { + return m.inner.LoadAndDelete(key) +} + +// Delete removes the value for key. +// +// m.Delete("key") +func (m *SyncMap) Delete(key any) { m.inner.Delete(key) } + +// Swap stores value for key and returns the previous value, if any. +// +// previous, loaded := m.Swap("key", new) +func (m *SyncMap) Swap(key, value any) (previous any, loaded bool) { + return m.inner.Swap(key, value) +} + +// CompareAndSwap stores new for key only if the current value equals old. +// +// if m.CompareAndSwap("key", old, new) { /* swapped */ } +func (m *SyncMap) CompareAndSwap(key, old, new any) (swapped bool) { + return m.inner.CompareAndSwap(key, old, new) +} + +// CompareAndDelete deletes the entry for key only if the current value equals old. +// +// if m.CompareAndDelete("key", expected) { /* deleted */ } +func (m *SyncMap) CompareAndDelete(key, old any) (deleted bool) { + return m.inner.CompareAndDelete(key, old) +} + +// Range calls f sequentially for each key/value present. If f returns false, +// Range stops. f may be called concurrently with other operations on the map. +// +// m.Range(func(k, v any) bool { +// Println(k, v) +// return true +// }) +func (m *SyncMap) Range(f func(key, value any) bool) { m.inner.Range(f) } + +// Clear deletes all entries. +// +// m.Clear() +func (m *SyncMap) Clear() { m.inner.Clear() } diff --git a/syncmap_example_test.go b/syncmap_example_test.go new file mode 100644 index 0000000..692db37 --- /dev/null +++ b/syncmap_example_test.go @@ -0,0 +1,36 @@ +package core_test + +import ( + . 
"dappco.re/go/core" +) + +func ExampleSyncMap() { + var m SyncMap + m.Store("greeting", "hello") + if v, ok := m.Load("greeting"); ok { + Println(v) + } + // Output: + // hello +} + +func ExampleSyncMap_LoadOrStore() { + var m SyncMap + actual, loaded := m.LoadOrStore("k", "first") + Println(actual, loaded) + actual, loaded = m.LoadOrStore("k", "second") + Println(actual, loaded) + // Output: + // first false + // first true +} + +func ExampleSyncMap_CompareAndSwap() { + var m SyncMap + m.Store("flag", false) + if m.CompareAndSwap("flag", false, true) { + Println("claimed") + } + // Output: + // claimed +} diff --git a/syncmap_test.go b/syncmap_test.go new file mode 100644 index 0000000..3748cdb --- /dev/null +++ b/syncmap_test.go @@ -0,0 +1,141 @@ +package core_test + +import ( + "testing" + + . "dappco.re/go/core" + "github.com/stretchr/testify/assert" +) + +func TestSyncMap_Store_Good(t *testing.T) { + var m SyncMap + m.Store("k", 42) + v, ok := m.Load("k") + assert.True(t, ok) + assert.Equal(t, 42, v) +} + +func TestSyncMap_Store_Bad(t *testing.T) { + // Bad: Load on absent key returns nil, false. + var m SyncMap + v, ok := m.Load("missing") + assert.False(t, ok) + assert.Nil(t, v) +} + +func TestSyncMap_Store_Ugly(t *testing.T) { + // Ugly: overwrite via Store, then via Swap, observe the chain. 
+ var m SyncMap + m.Store("k", "first") + m.Store("k", "second") + prev, loaded := m.Swap("k", "third") + assert.True(t, loaded) + assert.Equal(t, "second", prev) + v, _ := m.Load("k") + assert.Equal(t, "third", v) +} + +func TestSyncMap_LoadOrStore_Good(t *testing.T) { + var m SyncMap + actual, loaded := m.LoadOrStore("k", 1) + assert.False(t, loaded) + assert.Equal(t, 1, actual) + actual, loaded = m.LoadOrStore("k", 2) + assert.True(t, loaded) + assert.Equal(t, 1, actual, "second LoadOrStore returns existing value") +} + +func TestSyncMap_LoadAndDelete_Good(t *testing.T) { + var m SyncMap + m.Store("k", "v") + v, loaded := m.LoadAndDelete("k") + assert.True(t, loaded) + assert.Equal(t, "v", v) + _, ok := m.Load("k") + assert.False(t, ok) +} + +func TestSyncMap_CompareAndSwap_Good(t *testing.T) { + var m SyncMap + m.Store("k", 1) + swapped := m.CompareAndSwap("k", 1, 2) + assert.True(t, swapped) + v, _ := m.Load("k") + assert.Equal(t, 2, v) +} + +func TestSyncMap_CompareAndSwap_Bad(t *testing.T) { + // Bad: CompareAndSwap with mismatched old returns false, no change. + var m SyncMap + m.Store("k", 1) + swapped := m.CompareAndSwap("k", 99, 2) + assert.False(t, swapped) + v, _ := m.Load("k") + assert.Equal(t, 1, v) +} + +func TestSyncMap_Range_Good(t *testing.T) { + var m SyncMap + m.Store("a", 1) + m.Store("b", 2) + count := 0 + m.Range(func(_, _ any) bool { + count++ + return true + }) + assert.Equal(t, 2, count) +} + +func TestSyncMap_Range_Bad(t *testing.T) { + // Bad: Range on empty map fires zero callbacks. + var m SyncMap + count := 0 + m.Range(func(_, _ any) bool { + count++ + return true + }) + assert.Equal(t, 0, count) +} + +func TestSyncMap_Range_Ugly(t *testing.T) { + // Ugly: Range with early termination — function returns false. 
+	var m SyncMap
+	for i := 0; i < 10; i++ {
+		m.Store(i, i*10)
+	}
+	count := 0
+	m.Range(func(_, _ any) bool {
+		count++
+		return false // stop after first
+	})
+	assert.Equal(t, 1, count)
+}
+
+func TestSyncMap_Clear_Good(t *testing.T) {
+	var m SyncMap
+	m.Store("a", 1)
+	m.Store("b", 2)
+	m.Clear()
+	_, ok := m.Load("a")
+	assert.False(t, ok)
+}
+
+func TestSyncMap_Concurrent_Ugly(t *testing.T) {
+	// Ugly: 100 goroutines storing disjoint keys; Range must see all.
+	var m SyncMap
+	var wg WaitGroup
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(k int) {
+			defer wg.Done()
+			m.Store(k, k*10)
+		}(i)
+	}
+	wg.Wait()
+	count := 0
+	m.Range(func(_, _ any) bool {
+		count++
+		return true
+	})
+	assert.Equal(t, 100, count)
+}
diff --git a/tests/cli/core/Taskfile.yaml b/tests/cli/core/Taskfile.yaml
new file mode 100644
index 0000000..8f27748
--- /dev/null
+++ b/tests/cli/core/Taskfile.yaml
@@ -0,0 +1,26 @@
+version: "3"
+
+tasks:
+  default:
+    deps:
+      - build
+      - vet
+      - test
+
+  build:
+    desc: Compile every package in the core foundation library.
+    dir: ../../..
+    cmds:
+      - GOWORK=off go build ./...
+
+  vet:
+    desc: Run go vet across the module.
+    dir: ../../..
+    cmds:
+      - GOWORK=off go vet ./...
+
+  test:
+    desc: Run unit tests under the race detector (the concurrency tests document that -race must pass).
+    dir: ../../..
+    cmds:
+      - GOWORK=off go test -race -count=1 ./...