diff --git a/README.md b/README.md
index a376bed..eacdcce 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@ Implementations are as simple as possible to be predictable in max latency, memo
 * `MapCache` is a very simple map-based thread-safe cache, that is not limited from growing. Can be used when you have relatively small number of distinct keys that does not grow significantly, and you do not need the values to expire automatically. E.g. if your keys are country codes, timezones etc, this cache type is ok to use.
 * `MapTTLCache` is map-based thread-safe cache with support for TTL (values automatically expire). If you don't want to read value from cache that is older than some threshold (e.g. 1 sec), you set this TTL when initializing the cache object and obsolete rows will be removed from cache automatically.
 * `RingBuffer` is a predefined size cache that allocates all memory from the start and will not grow above it. It keeps constant size by overwriting the oldest values in the cache with new ones. Use this cache when you need speed and fixed memory footprint, and your key cardinality is predictable (or you are ok with having cache misses if cardinality suddenly grows above your cache size).
+* `KVCache` is a specialized cache designed for efficient prefix-based key lookups. It uses a trie data structure to store keys, enabling lexicographical ordering and fast retrieval of all values whose keys start with a given prefix.
 
 ## Examples
 
@@ -37,14 +38,14 @@ func main() {
 		fmt.Println(err)
 		return
 	}
-
+	
 	// will update the value associated to key 1
 	previousVal, updated := c.SetIfPresent(1, "two")
 	// will print "one"
 	fmt.Println(previousVal)
 	// will print "true"
 	fmt.Println(updated)
-
+	
 	// will not have any effect
 	c.SetIfPresent(2, "dua")
 
@@ -110,7 +111,7 @@ if err != nil {
 fmt.Println(v)
 ```
 
-`Updater` provides `ListByPrefix` function, but it can be used only if underlying cache supports it (is a `KV` wrapper).
+`Updater` provides a `ListByPrefix` function, but it can only be used if the underlying cache supports it. Otherwise it will panic.
 
 ### Sharding
 
@@ -208,45 +209,59 @@ allow rollback, but it provides atomicity and isolation guarantees.
 
 ## Benchmarks
 
-Test suite contains a couple of benchmarks to compare the speed difference between old-school generic implementation using `interface{}` or `any` to hold cache values versus using generics.
-
-TL/DR: generics are faster than `interface{}` but slower than hardcoded type implementation. Ring buffer is 2x+ faster than map-based TTL cache.
-
-There are two types of benchmarks:
-* `BenchmarkSet` only times the `Set` operation that allocates all the memory, and usually is the most resource intensive.
-* `BenchmarkEverything` repeatedly does one of three operations (Get/Set/Del). The probability for each type of operation to be executed is 0.9/0.05/0.05 respectively. Each operation is executed on randomly generated key, there are totally 1 million distinct keys, so total cache size will be limited too.
-
-Another benchmark `BenchmarkKVListByPrefix` lists `KV` wrapper's `ListByPrefix` operation. It times getting all values matching particular prefix in a cache with 1 million keys. Benchmark is arranged so each call returns 10 records.
-
-Benchmarking four simple cache implementations shows that generic cache (`MapCache`) is faster than cache that uses an empty interface to store any type of values (`AnyCache`), but slower than implementations that use concrete types (`StringCache`) and skip on thread safety (`UnsafeCache`).
-Generic `MapTTLCache` is on par with `AnyCache` but it is to be expected as it does more work keeping linked list for fast invalidation. `RingBuffer` performs the best because all the space it needs is preallocated during the initialization, and actual cache size is limited. - -Note that `stringCache`, `unsafeCache`, `anyCache` implementations are unexported. These implementations exist only to compare Go generic implementation with other approaches. - -The results below are not to be treated as absolute values. Actual cache operation latency will depend on many variables such as CPU speed, key cardinality, number of concurrent operations, whether the allocation happen during the operation or underlying structure already has the allocated space and so on. +Benchmarks are designed to compare basic operations of different cache implementations in this library. ```shell $ go test -bench=. -benchmem -benchtime=10s . goos: linux goarch: amd64 pkg: github.com/c-pro/geche -cpu: Intel(R) Xeon(R) Platinum 8358 CPU @ 2.60GHz -BenchmarkSet/MapCache-32 41473179 284.4 ns/op 1 B/op 0 allocs/op -BenchmarkSet/StringCache-32 64817786 182.5 ns/op 1 B/op 0 allocs/op -BenchmarkSet/UnsafeCache-32 80224212 125.2 ns/op 1 B/op 0 allocs/op -BenchmarkSet/MapTTLCache-32 14296934 758.3 ns/op 15 B/op 0 allocs/op -BenchmarkSet/RingBuffer-32 64152157 244.9 ns/op 0 B/op 0 allocs/op -BenchmarkSet/KVMapCache-32 10701508 1152 ns/op 10 B/op 0 allocs/op -BenchmarkSet/AnyCache-32 67699846 288.9 ns/op 2 B/op 0 allocs/op -BenchmarkEverything/MapCache-32 100000000 106.7 ns/op 0 B/op 0 allocs/op -BenchmarkEverything/StringCache-32 100000000 100.3 ns/op 0 B/op 0 allocs/op -BenchmarkEverything/UnsafeCache-32 135556000 87.31 ns/op 0 B/op 0 allocs/op -BenchmarkEverything/MapTTLCache-32 100000000 175.6 ns/op 0 B/op 0 allocs/op -BenchmarkEverything/RingBuffer-32 121507983 94.82 ns/op 0 B/op 0 allocs/op -BenchmarkEverything/ShardedRingBufferUpdater-32 32976999 371.6 ns/op 18 B/op 0 allocs/op -BenchmarkEverything/KVMapCache-32 90192560 199.9 ns/op 1 B/op 0 allocs/op -BenchmarkEverything/AnyCache-32 100000000 231.1 ns/op 8 B/op 1 allocs/op -BenchmarkKVListByPrefix-32 3167788 3720 ns/op 131 B/op 3 allocs/op +cpu: AMD Ryzen 7 PRO 8840U w/ Radeon 780M Graphics +BenchmarkSet/MapCache-16 65541076 182.1 ns/op 0 B/op 0 allocs/op +BenchmarkSet/MapTTLCache-16 19751806 754.7 ns/op 10 B/op 0 allocs/op +BenchmarkSet/RingBuffer-16 51921265 365.9 ns/op 0 B/op 0 allocs/op +BenchmarkSet/KVMapCache-16 3876873 3461 ns/op 804 B/op 11 allocs/op +BenchmarkSet/KVCache-16 14983084 1025 ns/op 54 B/op 1 allocs/op +BenchmarkSetIfPresentOnlyHits/MapCache-16 79179759 187.6 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyHits/MapTTLCache-16 37620368 371.8 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyHits/RingBuffer-16 100000000 110.3 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyHits/KVMapCache-16 39745081 345.1 ns/op 8 B/op 1 allocs/op +BenchmarkSetIfPresentOnlyMisses/MapCache-16 786898237 15.04 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyMisses/MapTTLCache-16 648632726 18.43 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyMisses/RingBuffer-16 746030799 15.92 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyMisses/KVMapCache-16 625973469 19.00 ns/op 0 B/op 0 allocs/op +BenchmarkSetIfPresentOnlyMisses/KVCache-16 972807471 12.02 ns/op 0 B/op 0 allocs/op +BenchmarkGetHit/MapCache-16 100000000 104.8 ns/op 0 B/op 0 allocs/op +BenchmarkGetHit/MapTTLCache-16 57810127 261.9 ns/op 0 B/op 0 allocs/op +BenchmarkGetHit/RingBuffer-16 121727826 98.63 
ns/op 0 B/op 0 allocs/op +BenchmarkGetHit/KVMapCache-16 100000000 106.3 ns/op 0 B/op 0 allocs/op +BenchmarkGetHit/KVCache-16 158599485 78.32 ns/op 0 B/op 0 allocs/op +BenchmarkGetMiss/MapCache-16 1000000000 11.01 ns/op 0 B/op 0 allocs/op +BenchmarkGetMiss/MapTTLCache-16 749231084 15.85 ns/op 0 B/op 0 allocs/op +BenchmarkGetMiss/RingBuffer-16 676585886 17.73 ns/op 0 B/op 0 allocs/op +BenchmarkGetMiss/KVMapCache-16 1000000000 11.64 ns/op 0 B/op 0 allocs/op +BenchmarkGetMiss/KVCache-16 297815424 39.80 ns/op 0 B/op 0 allocs/op +BenchmarkDelHit/MapCache-16 1000000000 10.84 ns/op 0 B/op 0 allocs/op +BenchmarkDelHit/MapTTLCache-16 756901813 14.37 ns/op 0 B/op 0 allocs/op +BenchmarkDelHit/RingBuffer-16 1000000000 10.28 ns/op 0 B/op 0 allocs/op +BenchmarkDelHit/KVMapCache-16 358719861 28.27 ns/op 1 B/op 0 allocs/op +BenchmarkDelHit/KVCache-16 366528763 31.60 ns/op 17 B/op 1 allocs/op +BenchmarkDelMiss/MapCache-16 792498559 15.05 ns/op 0 B/op 0 allocs/op +BenchmarkDelMiss/MapTTLCache-16 735312480 16.18 ns/op 0 B/op 0 allocs/op +BenchmarkDelMiss/RingBuffer-16 364969610 32.75 ns/op 0 B/op 0 allocs/op +BenchmarkDelMiss/KVMapCache-16 78108807 153.3 ns/op 64 B/op 1 allocs/op +BenchmarkDelMiss/KVCache-16 49184259 233.0 ns/op 352 B/op 4 allocs/op +BenchmarkEverything/MapCache-16 67129406 175.8 ns/op 0 B/op 0 allocs/op +BenchmarkEverything/MapTTLCache-16 24496364 650.0 ns/op 8 B/op 0 allocs/op +BenchmarkEverything/RingBuffer-16 60798320 304.7 ns/op 0 B/op 0 allocs/op +BenchmarkEverything/ShardedRingBufferUpdater-16 44071453 427.5 ns/op 19 B/op 0 allocs/op +BenchmarkEverything/KVMapCache-16 5465926 2695 ns/op 745 B/op 10 allocs/op +BenchmarkEverything/KVCache-16 17036607 1020 ns/op 60 B/op 0 allocs/op +BenchmarkEverything/LockerMapCache-16 48808772 209.0 ns/op 1 B/op 0 allocs/op +BenchmarkKVListByPrefix-16 3052804 3905 ns/op 1008 B/op 15 allocs/op +BenchmarkKVCacheListByPrefix-16 8222977 1438 ns/op 580 B/op 8 allocs/op +PASS +ok github.com/c-pro/geche 956.205s ``` # Parallel benchmarks @@ -256,53 +271,31 @@ BenchmarkKVListByPrefix-32 3167788 I implemented sharding anyway because why not. But it is a separate wrapper, so does not complicate existing codebase. ```shell -$ go test -benchtime=10s -benchmem -bench . +go test -benchtime=10s -benchmem -bench . 
goos: linux goarch: amd64 pkg: cache_bench -cpu: Intel(R) Xeon(R) Platinum 8358 CPU @ 2.60GHz -BenchmarkEverythingParallel/MapCache-32 100000000 170.1 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/MapTTLCache-32 90510988 198.9 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/RingBuffer-32 85731428 196.8 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedMapCache-32 273706551 43.51 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedMapTTLCache-32 282491904 44.37 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedRingBuffer-32 284756061 40.78 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/Code-Hex/go-generics-cache-32 43165059 294.2 ns/op 7 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/Yiling-J/theine-go-32 186976719 64.51 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/jellydator/ttlcache-32 29943469 376.3 ns/op 43 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/erni27/imcache-32 531496862 23.35 ns/op 50 B/op 1 allocs/op -BenchmarkEverythingParallel/github.com/dgraph-io/ristretto-32 100000000 108.5 ns/op 27 B/op 1 allocs/op -BenchmarkEverythingParallel/github.com/hashicorp/golang-lru/v2-32 43857675 307.1 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/egregors/kesh-32 33866130 428.7 ns/op 83 B/op 2 allocs/op -BenchmarkEverythingParallel/KVMapCache-32 43328151 401.2 ns/op 112 B/op 0 allocs/op -``` - -And now on 32 CPU machine we clearly see performance degradation due to lock contention. Sharded implementations are about 4 times faster. -Notice the Imcache result. Crazy fast! 😅 - -KV wrapper result is worse then other caches, but it is expected as it keeps key index allowing prefix search with deterministic order, that other caches do not allow. It updates trie structure on `Set` and does extra work to cleanup the key on `Del`. - -```shell -$ go test -benchtime=10s -benchmem -bench . 
-goos: linux -goarch: amd64 -pkg: cache_bench -cpu: Intel(R) Xeon(R) Platinum 8280 CPU @ 2.70GHz -BenchmarkEverythingParallel/MapCache-32 64085875 248.9 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/MapTTLCache-32 58598002 279.8 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/RingBuffer-32 48229945 315.9 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedMapCache-32 234258486 53.16 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedMapTTLCache-32 231177732 53.63 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/ShardedRingBuffer-32 236979438 48.98 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/Code-Hex/go-generics-cache-32 39842918 345.9 ns/op 7 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/Yiling-J/theine-go-32 150612642 81.82 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/jellydator/ttlcache-32 29333647 433.9 ns/op 43 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/erni27/imcache-32 345577933 35.63 ns/op 50 B/op 1 allocs/op -BenchmarkEverythingParallel/github.com/dgraph-io/ristretto-32 83293519 142.1 ns/op 27 B/op 1 allocs/op -BenchmarkEverythingParallel/github.com/hashicorp/golang-lru/v2-32 35763888 378.9 ns/op 0 B/op 0 allocs/op -BenchmarkEverythingParallel/github.com/egregors/kesh-32 25860772 524.1 ns/op 84 B/op 2 allocs/op -BenchmarkEverythingParallel/KVMapCache-32 33802629 478.4 ns/op 109 B/op 0 allocs/op +cpu: AMD Ryzen 7 PRO 8840U w/ Radeon 780M Graphics +BenchmarkEverythingParallel/MapCache-16 100000000 173.9 ns/op 1 B/op 0 allocs/op +BenchmarkEverythingParallel/MapTTLCache-16 65010415 382.5 ns/op 2 B/op 0 allocs/op +BenchmarkEverythingParallel/RingBuffer-16 100000000 225.9 ns/op 0 B/op 0 allocs/op +BenchmarkEverythingParallel/ShardedMapCache-16 198813898 56.77 ns/op 0 B/op 0 allocs/op +BenchmarkEverythingParallel/ShardedMapTTLCache-16 122482419 97.60 ns/op 0 B/op 0 allocs/op +BenchmarkEverythingParallel/ShardedRingBuffer-16 188570131 63.23 ns/op 0 B/op 0 allocs/op +BenchmarkEverythingParallel/github.com/Code-Hex/go-generics-cache-16 55945956 474.2 ns/op 91 B/op 2 allocs/op +BenchmarkEverythingParallel/github.com/Yiling-J/theine-go-16 71100289 172.2 ns/op 1 B/op 0 allocs/op +BenchmarkEverythingParallel/github.com/jellydator/ttlcache-16 58265994 924.8 ns/op 30 B/op 0 allocs/op +BenchmarkEverythingParallel/github.com/erni27/imcache-16 236973852 45.84 ns/op 33 B/op 0 allocs/op +BenchmarkEverythingParallel/github.com/dgraph-io/ristretto-16 88618468 141.9 ns/op 94 B/op 2 allocs/op +BenchmarkEverythingParallel/github.com/hashicorp/golang-lru/v2-16 78454165 399.1 ns/op 2 B/op 0 allocs/op +BenchmarkEverythingParallel/github.com/egregors/kesh-16 68416022 337.8 ns/op 7 B/op 0 allocs/op +BenchmarkEverythingParallel/KVMapCache-16 7607014 2050 ns/op 254 B/op 3 allocs/op +BenchmarkEverythingParallel/KVCache-16 52397652 902.8 ns/op 53 B/op 0 allocs/op +BenchmarkEverythingParallel/ShardedKVCache-16 100000000 150.9 ns/op 45 B/op 0 allocs/op PASS +ok cache_bench 390.130s ``` +KV and KVCache results are worse then other caches, but it is expected as it keeps key index allowing prefix search with deterministic order, that other caches do not allow. It updates trie structure on `Set` and does extra work to cleanup the key on `Del`. + Concurrent comparison benchmark is located in a [separate repository](https://github.com/C-Pro/cache-benchmarks) to avoid pulling unnecessary dependencies in the library. 
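The `KVCache` type described in the README changes above is exercised by `ExampleNewKVCache` later in this diff. As a quick illustration of the API this change introduces (`NewKVCache`, `Set`, `Get`, `ListByPrefix`, `AllByPrefix`), here is a minimal usage sketch; the keys and values are invented for the example, and the calls mirror the signatures in `kv_cache.go`.

```go
package main

import (
	"fmt"

	"github.com/c-pro/geche"
)

func main() {
	// Keys must be string-like (string or []byte); values can be of any type.
	c := geche.NewKVCache[string, string]()

	c.Set("user:1:email", "alice@example.com")
	c.Set("user:1:name", "Alice")
	c.Set("user:2:name", "Bob")

	// Get returns ErrNotFound for a missing key.
	if name, err := c.Get("user:1:name"); err == nil {
		fmt.Println(name) // Alice
	}

	// ListByPrefix returns values ordered lexicographically by key.
	vals, _ := c.ListByPrefix("user:1:")
	fmt.Println(vals) // [alice@example.com Alice]

	// AllByPrefix iterates key/value pairs without building a slice (Go 1.23+ range-over-func).
	for k, v := range c.AllByPrefix("user:") {
		fmt.Println(k, v)
	}
}
```

The existing `KV` wrapper remains the way to add prefix listing on top of another cache implementation (benchmarked above as `KVMapCache`), while `KVCache` is the standalone trie-backed container added by this change.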
diff --git a/bench_test.go b/bench_test.go index 6e34c6f..a082db9 100644 --- a/bench_test.go +++ b/bench_test.go @@ -2,14 +2,15 @@ package geche import ( "context" + "fmt" + "maps" "math/rand" + "slices" "strconv" "testing" "time" ) -const keyCardinality = 1000000 - type testCase struct { key string op int @@ -22,15 +23,51 @@ const ( ) func genTestData(N int) []testCase { + // Generate composite keys with common real-life pattern. + // {keyType}:{userID}:{objectID} + // This is mostly irrelevant for map-backed caches, but makes a world of difference + // for trie-based KV cache. + var ( + numKeyTypes = 20 + numUsers = N / 1000 + // Number of distinct keys controls how much hits/misses we have in the benchmark. + // When number of distinct keys >= N, we have mostly misses. + distinctKeys = N / 10 + ) + keyTypesMap := make(map[string]struct{}, numKeyTypes) + for len(keyTypesMap) < numKeyTypes { + keyTypesMap[genRandomString(5)] = struct{}{} + } + keyTypes := slices.Collect(maps.Keys(keyTypesMap)) + + usersMap := make(map[string]struct{}, numUsers) + for len(usersMap) < numUsers { + usersMap[genRandomString(16)] = struct{}{} + } + users := slices.Collect(maps.Keys(usersMap)) + + keysMap := make(map[string]struct{}, distinctKeys) + for len(keysMap) < distinctKeys { + key := fmt.Sprintf("%s:%s:%s", + keyTypes[rand.Intn(len(keyTypes))], + users[rand.Intn(len(users))], + genRandomString(16), + ) + keysMap[key] = struct{}{} + } + + keys := slices.Collect(maps.Keys(keysMap)) + d := make([]testCase, N) for i := range d { - d[i].key = strconv.Itoa(rand.Intn(keyCardinality)) + d[i].key = keys[rand.Intn(len(keys))] r := rand.Float64() switch { - case r < 0.9: - d[i].op = OPGet - case r >= 0.9 && r < 0.95: + // Write heavy, because with read heavy, most of the reads would be misses. 
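+		// The resulting operation mix is roughly 70% Set, 25% Get and 5% Del.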
+ case r < 0.7: d[i].op = OPSet + case r >= 0.7 && r < 0.95: + d[i].op = OPGet case r >= 0.95: d[i].op = OPDel } @@ -51,6 +88,18 @@ func benchmarkSetIfPresent(c Geche[string, string], testKeys []string, b *testin } } +func benchmarkGet(c Geche[string, string], testKeys []string, b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = c.Get(testKeys[i%len(testKeys)]) + } +} + +func benchmarkDel(c Geche[string, string], testKeys []string, b *testing.B) { + for i := 0; i < b.N; i++ { + _ = c.Del(testKeys[i%len(testKeys)]) + } +} + func benchmarkFuzz( c Geche[string, string], testData []testCase, @@ -80,14 +129,6 @@ func BenchmarkSet(b *testing.B) { "MapCache", NewMapCache[string, string](), }, - { - "StringCache", - newStringCache(), - }, - { - "UnsafeCache", - newUnsafeCache(), - }, { "MapTTLCache", NewMapTTLCache[string, string](ctx, time.Minute, time.Minute), @@ -98,7 +139,11 @@ func BenchmarkSet(b *testing.B) { }, { "KVMapCache", - NewKV[string](NewMapCache[string, string]()), + NewKV(NewMapCache[string, string]()), + }, + { + "KVCache", + NewKVCache[string, string](), }, } @@ -109,16 +154,51 @@ func BenchmarkSet(b *testing.B) { benchmarkSet(c.imp, data, b) }) } +} + +func BenchmarkSetIfPresentOnlyHits(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tab := []struct { + name string + imp Geche[string, string] + }{ + { + "MapCache", + NewMapCache[string, string](), + }, + { + "MapTTLCache", + NewMapTTLCache[string, string](ctx, time.Minute, time.Minute), + }, + { + "RingBuffer", + NewRingBuffer[string, string](1000000), + }, + { + "KVMapCache", + NewKV[string](NewMapCache[string, string]()), + }, + } + + testKeys := make([]string, 10_000_000) + for i := 0; i < len(testKeys); i++ { + testKeys[i] = strconv.Itoa(i) + } - b.Run("AnyCache", func(b *testing.B) { - c := newAnyCache() - for i := 0; i < b.N; i++ { - c.Set(data[i%len(data)].key, "value") + for _, c := range tab { + for k := 0; k < len(testKeys); k++ { + c.imp.Set(testKeys[k], "value") } - }) + + b.Run(c.name, func(b *testing.B) { + benchmarkSetIfPresent(c.imp, testKeys, b) + }) + } } -func BenchmarkSetIfPresentOnlyHits(b *testing.B) { +func BenchmarkSetIfPresentOnlyMisses(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -131,12 +211,46 @@ func BenchmarkSetIfPresentOnlyHits(b *testing.B) { NewMapCache[string, string](), }, { - "StringCache", - newStringCache(), + "MapTTLCache", + NewMapTTLCache[string, string](ctx, time.Minute, time.Minute), }, { - "UnsafeCache", - newUnsafeCache(), + "RingBuffer", + NewRingBuffer[string, string](1000000), + }, + { + "KVMapCache", + NewKV(NewMapCache[string, string]()), + }, + { + "KVCache", + NewKVCache[string, string](), + }, + } + + for _, c := range tab { + for k := 0; k < 10_000_000; k++ { + c.imp.Set(strconv.Itoa(k), "value") + } + b.Run(c.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + c.imp.SetIfPresent("absent", "never set") + } + }) + } +} + +func BenchmarkGetHit(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tab := []struct { + name string + imp Geche[string, string] + }{ + { + "MapCache", + NewMapCache[string, string](), }, { "MapTTLCache", @@ -148,7 +262,11 @@ func BenchmarkSetIfPresentOnlyHits(b *testing.B) { }, { "KVMapCache", - NewKV[string](NewMapCache[string, string]()), + NewKV(NewMapCache[string, string]()), + }, + { + "KVCache", + NewKVCache[string, string](), }, } @@ -157,16 +275,18 @@ func BenchmarkSetIfPresentOnlyHits(b *testing.B) { 
testKeys[i] = strconv.Itoa(i) } - b.ResetTimer() - for _, c := range tab { + for k := 0; k < len(testKeys); k++ { + c.imp.Set(testKeys[k], "value") + } + b.Run(c.name, func(b *testing.B) { - benchmarkSetIfPresent(c.imp, testKeys, b) + benchmarkGet(c.imp, testKeys, b) }) } } -func BenchmarkSetIfPresentOnlyMisses(b *testing.B) { +func BenchmarkGetMiss(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -179,12 +299,46 @@ func BenchmarkSetIfPresentOnlyMisses(b *testing.B) { NewMapCache[string, string](), }, { - "StringCache", - newStringCache(), + "MapTTLCache", + NewMapTTLCache[string, string](ctx, time.Minute, time.Minute), + }, + { + "RingBuffer", + NewRingBuffer[string, string](1000000), + }, + { + "KVMapCache", + NewKV(NewMapCache[string, string]()), }, { - "UnsafeCache", - newUnsafeCache(), + "KVCache", + NewKVCache[string, string](), + }, + } + + for _, c := range tab { + for k := 0; k < 10_000_000; k++ { + c.imp.Set(strconv.Itoa(k), "value") + } + b.Run(c.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = c.imp.Get("1234567890absent") + } + }) + } +} + +func BenchmarkDelHit(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tab := []struct { + name string + imp Geche[string, string] + }{ + { + "MapCache", + NewMapCache[string, string](), }, { "MapTTLCache", @@ -196,24 +350,32 @@ func BenchmarkSetIfPresentOnlyMisses(b *testing.B) { }, { "KVMapCache", - NewKV[string](NewMapCache[string, string]()), + NewKV(NewMapCache[string, string]()), + }, + { + "KVCache", + NewKVCache[string, string](), }, } - b.ResetTimer() + testKeys := make([]string, 10_000_000) + for i := 0; i < len(testKeys); i++ { + testKeys[i] = strconv.Itoa(i) + } for _, c := range tab { b.Run(c.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - c.imp.SetIfPresent("absent", "never set") + // Populate cache before each benchmark + for k := 0; k < len(testKeys); k++ { + c.imp.Set(testKeys[k], "value") } + b.ResetTimer() + benchmarkDel(c.imp, testKeys, b) }) } } -// BenchmarkEverything performs different operations randomly. -// Ratio for get/set/del is 90/5/5 -func BenchmarkEverything(b *testing.B) { +func BenchmarkDelMiss(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -226,12 +388,48 @@ func BenchmarkEverything(b *testing.B) { NewMapCache[string, string](), }, { - "StringCache", - newStringCache(), + "MapTTLCache", + NewMapTTLCache[string, string](ctx, time.Minute, time.Minute), }, { - "UnsafeCache", - newUnsafeCache(), + "RingBuffer", + NewRingBuffer[string, string](1000000), + }, + { + "KVMapCache", + NewKV(NewMapCache[string, string]()), + }, + { + "KVCache", + NewKVCache[string, string](), + }, + } + + for _, c := range tab { + for k := 0; k < 10_000_000; k++ { + c.imp.Set(strconv.Itoa(k), "value") + } + b.Run(c.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = c.imp.Del("1234567890absent") + } + }) + } +} + +// BenchmarkEverything performs different operations randomly. 
+// Ratio for get/set/del is 90/5/5 +func BenchmarkEverything(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tab := []struct { + name string + imp Geche[string, string] + }{ + { + "MapCache", + NewMapCache[string, string](), }, { "MapTTLCache", @@ -257,6 +455,10 @@ func BenchmarkEverything(b *testing.B) { "KVMapCache", NewKV[string](NewMapCache[string, string]()), }, + { + "KVCache", + NewKVCache[string, string](), + }, { "LockerMapCache", NewLocker[string, string](NewMapCache[string, string]()).Lock(), @@ -270,22 +472,6 @@ func BenchmarkEverything(b *testing.B) { benchmarkFuzz(c.imp, data, b) }) } - - b.Run("AnyCache", func(b *testing.B) { - c := newAnyCache() - for i := 0; i < b.N; i++ { - key := strconv.Itoa(rand.Intn(keyCardinality)) - r := rand.Float64() - switch { - case r < 0.9: - _, _ = c.Get(key) - case r >= 0.9 && r < 0.95: - _ = c.Del(key) - case r >= 0.95: - c.Set(key, "value") - } - } - }) } func randomString(n int) string { @@ -321,3 +507,27 @@ func BenchmarkKVListByPrefix(b *testing.B) { } } } + +func BenchmarkKVCacheListByPrefix(b *testing.B) { + c := NewKVCache[string, string]() + keys := make([]string, 100_000) + for i := 0; i < 100_000; i++ { + l := rand.Intn(15) + 15 + unique := randomString(l) + keys[i] = unique + for j := 0; j < 10; j++ { + c.Set(unique+randomString(l), randomString(l)) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + res, err := c.ListByPrefix(keys[i%len(keys)]) + if err != nil { + b.Errorf("unexpected error in ListByPrefix: %v", err) + } + if len(res) != 10 { + b.Errorf("expected len 10, but got %d", len(res)) + } + } +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000..3380cf9 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + target: 80% + threshold: 1% diff --git a/common_test.go b/common_test.go index 960e15d..4355a63 100644 --- a/common_test.go +++ b/common_test.go @@ -2,7 +2,9 @@ package geche import ( "context" + "math/rand" "strconv" + "sync" "testing" "time" ) @@ -243,3 +245,60 @@ func TestCommon(t *testing.T) { } } } + +func TestConcurrentFuzz(t *testing.T) { + // This test does not do any specific checks but rather relies on race detector to find something. 
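+	// Note: the race detector is only active when the tests are run with the -race flag.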
+ testData := genTestData(100000) + numWorkers := 8 + numIterations := 10000 + + for _, ci := range []struct { + name string + factory func() Geche[string, string] + }{ + {"MapCache", func() Geche[string, string] { return NewMapCache[string, string]() }}, + {"MapTTLCache", func() Geche[string, string] { + return NewMapTTLCache[string, string](context.Background(), time.Millisecond*10, time.Millisecond*50) + }}, + {"RingBuffer", func() Geche[string, string] { return NewRingBuffer[string, string](100000) }}, + {"KVMapCache", func() Geche[string, string] { return NewKV[string](NewMapCache[string, string]()) }}, + {"KVCache", func() Geche[string, string] { return NewKVCache[string, string]() }}, + {"LockerMapCache", func() Geche[string, string] { + return NewLocker[string, string](NewMapCache[string, string]()).Lock() + }}, + { + "ShardedMapCache", func() Geche[string, string] { + return NewSharded[string]( + func() Geche[string, string] { return NewMapCache[string, string]() }, + numWorkers, + &StringMapper{}, + ) + }, + }, + } { + t.Run(ci.name, func(t *testing.T) { + imp := ci.factory() + wg := sync.WaitGroup{} + for range numWorkers { + wg.Add(1) + go func() { + defer wg.Done() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < numIterations; i++ { + tc := testData[r.Intn(len(testData))] + switch tc.op { + case OPGet: + _, _ = imp.Get(tc.key) + case OPSet: + imp.Set(tc.key, "value") + case OPDel: + _ = imp.Del(tc.key) + } + } + }() + } + + wg.Wait() + }) + } +} diff --git a/dummy_test.go b/dummy_test.go deleted file mode 100644 index 4763b81..0000000 --- a/dummy_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package geche - -// This file contains several simple map cache implementations for benchmark purposes. -// 1) Non generic version with hardcoded types. -// 2) Non thread-safe non generic version with hardcoded types. -// 3) interface{} based version. 
- -import ( - "sync" -) - -type stringCache struct { - data map[string]string - mux sync.RWMutex -} - -func newStringCache() *stringCache { - return &stringCache{ - data: make(map[string]string), - } -} - -func (s *stringCache) Set(key, value string) { - s.mux.Lock() - defer s.mux.Unlock() - - s.data[key] = value -} - -func (s *stringCache) SetIfPresent(key, value string) (string, bool) { - s.mux.Lock() - defer s.mux.Unlock() - - old, ok := s.data[key] - if !ok { - return "", false - } - - s.data[key] = value - return old, true -} - -func (s *stringCache) Get(key string) (string, error) { - s.mux.RLock() - defer s.mux.RUnlock() - - v, ok := s.data[key] - if !ok { - return v, ErrNotFound - } - - return v, nil -} - -func (s *stringCache) Del(key string) error { - s.mux.Lock() - defer s.mux.Unlock() - - delete(s.data, key) - - return nil -} - -func (s *stringCache) Snapshot() map[string]string { return nil } - -func (s *stringCache) Len() int { - s.mux.RLock() - defer s.mux.RUnlock() - - return len(s.data) -} - -type unsafeCache struct { - data map[string]string -} - -func newUnsafeCache() *unsafeCache { - return &unsafeCache{ - data: make(map[string]string), - } -} - -func (u *unsafeCache) Set(key, value string) { - u.data[key] = value -} - -func (u *unsafeCache) SetIfPresent(key, value string) (string, bool) { - old, err := u.Get(key) - if err != nil { - return "", false - } - - u.Set(key, value) - return old, true -} - -func (u *unsafeCache) Get(key string) (string, error) { - v, ok := u.data[key] - if !ok { - return v, ErrNotFound - } - - return v, nil -} - -func (u *unsafeCache) Del(key string) error { - delete(u.data, key) - - return nil -} - -func (u *unsafeCache) Snapshot() map[string]string { return nil } - -func (u *unsafeCache) Len() int { - return len(u.data) -} - -type anyCache struct { - data map[string]any - mux sync.RWMutex -} - -func newAnyCache() *anyCache { - return &anyCache{ - data: make(map[string]any), - } -} - -func (a *anyCache) Set(key string, value any) { - a.mux.Lock() - defer a.mux.Unlock() - - a.data[key] = value -} - -func (a *anyCache) Get(key string) (any, error) { - a.mux.RLock() - defer a.mux.RUnlock() - - v, ok := a.data[key] - if !ok { - return v, ErrNotFound - } - - return v, nil -} - -func (a *anyCache) Del(key string) error { - delete(a.data, key) - - return nil -} - -func (a *anyCache) Snapshot() map[string]any { return nil } - -func (a *anyCache) Len() int { - a.mux.RLock() - defer a.mux.RUnlock() - - return len(a.data) -} diff --git a/kv_cache.go b/kv_cache.go new file mode 100644 index 0000000..6e5f26c --- /dev/null +++ b/kv_cache.go @@ -0,0 +1,552 @@ +package geche + +import ( + "iter" + "sort" + "sync" +) + +type byteSlice interface { + ~string | ~[]byte +} + +// trieCacheNode is a compact node for the radix tree. +// It uses a sorted slice of values (not pointers) for children. +// This reduces the number of objects on the heap significantly, reducing GC pressure. +type trieCacheNode struct { + // b is the path segment this node represents. + b []byte + // terminal indicates if this node represents the end of a valid key. + terminal bool + // children is a list of child nodes, sorted by the first byte of their 'b' segment. + children []trieCacheNode + // index of the value in the values slice of the KVCache. + // Only valid if terminal is true. + valueIndex int +} + +// KVCache is a container that stores the values ordered by their keys using a trie index. +// It allows in order listing of values by prefix. 
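+// Values are stored in a flat slice and referenced from trie nodes by index;
+// deleted slots are tracked in a freelist and reused, keeping the number of
+// heap objects (and GC pressure) low.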
+type KVCache[K byteSlice, V any] struct { + values []V + freelist []int + trie *trieCacheNode + mux sync.RWMutex + zero V +} + +// NewKVCache creates a new KVCache. +func NewKVCache[K byteSlice, V any]() *KVCache[K, V] { + return &KVCache[K, V]{ + trie: &trieCacheNode{}, + } +} + +// Set sets the value for the key. +func (kv *KVCache[K, V]) Set(key K, value V) { + kv.mux.Lock() + defer kv.mux.Unlock() + + kv.insert(key, value) +} + +// SetIfPresent sets the value only if the key already exists. +func (kv *KVCache[K, V]) SetIfPresent(key K, value V) (V, bool) { + kv.mux.Lock() + defer kv.mux.Unlock() + + if old, found := kv.get(key); found { + kv.insert(key, value) + return old, true + } + + return kv.zero, false +} + +// Get retrieves a value by key. +func (kv *KVCache[K, V]) Get(key K) (V, error) { + kv.mux.RLock() + defer kv.mux.RUnlock() + + v, ok := kv.get(key) + if !ok { + return kv.zero, ErrNotFound + } + + return v, nil +} + +// Del removes the record by key. +// Return value is always nil. +func (kv *KVCache[K, V]) Del(key string) error { + kv.mux.Lock() + defer kv.mux.Unlock() + + _ = kv.delete(key) + + return nil +} + +// ListByPrefix returns all values with keys starting with the given prefix. +func (kv *KVCache[K, V]) ListByPrefix(prefix string) ([]V, error) { + kv.mux.RLock() + defer kv.mux.RUnlock() + + node := kv.trie + searchKey := []byte(prefix) + + var path []byte + + for len(searchKey) > 0 { + idx, found := node.findChild(searchKey[0]) + if !found { + return nil, nil + } + + // Taking the address of the child is safe here because we hold RLock + // and we don't modify the slice. + child := &node.children[idx] + + common := commonPrefixLen(child.b, searchKey) + path = append(path, child.b[:common]...) + + if common < len(searchKey) { + if common < len(child.b) { + return nil, nil + } + searchKey = searchKey[common:] + node = child + } else { + // Matched prefix, reconstruct path to current node and descend + remainingNodeSegment := child.b[common:] + path = append(path, remainingNodeSegment...) + return kv.dfs(child, path) + } + } + + return kv.dfs(kv.trie, []byte{}) +} + +// AllByPrefix returns an (read only) iterator over values with keys starting with the given prefix. +// The iterator yields key-value pairs. +// Attempting to modify the cache while iterating will lead to a deadlock. +func (kv *KVCache[K, V]) AllByPrefix(prefix string) iter.Seq2[string, V] { + return func(yield func(string, V) bool) { + kv.mux.RLock() + defer kv.mux.RUnlock() + + node := kv.trie + searchKey := []byte(prefix) + + // path is the reconstructed key for the DFS traversal starting node. + var path []byte + + if len(prefix) > 0 { + var pathPrefix []byte + for len(searchKey) > 0 { + idx, found := node.findChild(searchKey[0]) + if !found { + return // No keys with this prefix. + } + + child := &node.children[idx] + common := commonPrefixLen(child.b, searchKey) + + if common < len(searchKey) { + if common < len(child.b) { + // e.g., search "ax", child has "ay...". No match. + return + } + // e.g., search "abc", child has "ab". Continue search in child. + pathPrefix = append(pathPrefix, child.b...) + searchKey = searchKey[common:] + node = child + } else { // common == len(searchKey) + // Matched prefix. The node for the next part of the key is `child`. + // The full path to `child` is `pathPrefix` + `child.b`. + path = append(pathPrefix, child.b...) + node = child + goto start_dfs + } + } + // This is for when the prefix matches a node path exactly + path = pathPrefix + } + + start_dfs: + // 2. 
Stack-based DFS from the found node. + if node.terminal { + if !yield(string(path), kv.values[node.valueIndex]) { + return + } + } + + type stackEntry struct { + node *trieCacheNode + pathLength int + } + + stack := make([]stackEntry, 0, 64) + + for i := len(node.children) - 1; i >= 0; i-- { + stack = append(stack, stackEntry{ + node: &node.children[i], + pathLength: len(path), + }) + } + + // Re-use path slice for building paths of descendants + for len(stack) > 0 { + top := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + path = path[:top.pathLength] + path = append(path, top.node.b...) + + if top.node.terminal { + if !yield(string(path), kv.values[top.node.valueIndex]) { + return + } + } + + for i := len(top.node.children) - 1; i >= 0; i-- { + stack = append(stack, stackEntry{ + node: &top.node.children[i], + pathLength: len(path), + }) + } + } + } +} + +// Snapshot returns a copy of the cache. +func (kv *KVCache[K, V]) Snapshot() map[string]V { + kv.mux.RLock() + defer kv.mux.RUnlock() + + res := make(map[string]V, kv.Len()) + + seq := kv.AllByPrefix("") + seq(func(k string, v V) bool { + res[k] = v + return true + }) + return res +} + +// Len returns the number of the values in the cache. +func (kv *KVCache[K, V]) Len() int { + return max(0, len(kv.values)-len(kv.freelist)) +} + +// --- Internal Trie Helpers --- +// Internal helpers are not thread-safe. Caller must hold appropriate lock. + +func (kv *KVCache[K, V]) get(key K) (V, bool) { + node := kv.trie + keyBytes := []byte(key) + + for len(keyBytes) > 0 { + idx, found := node.findChild(keyBytes[0]) + if !found { + return kv.zero, false + } + + child := &node.children[idx] + common := commonPrefixLen(child.b, keyBytes) + + if common != len(child.b) { + return kv.zero, false + } + + keyBytes = keyBytes[common:] + node = child + } + + if node.terminal { + return kv.values[node.valueIndex], true + } + + return kv.zero, false +} + +func (kv *KVCache[K, V]) addValue(value V) int { + if len(kv.freelist) > 0 { + idx := kv.freelist[len(kv.freelist)-1] + kv.freelist = kv.freelist[:len(kv.freelist)-1] + kv.values[idx] = value + return idx + } + + kv.values = append(kv.values, value) + return len(kv.values) - 1 +} + +func (kv *KVCache[K, V]) insert(key K, value V) { + node := kv.trie + keyBytes := []byte(key) + + if len(keyBytes) == 0 { + if !node.terminal { + node.valueIndex = kv.addValue(value) + node.terminal = true + } else { + kv.values[node.valueIndex] = value + } + return + } + + for len(keyBytes) > 0 { + idx, found := node.findChild(keyBytes[0]) + + if !found { + // Create value struct + newNode := trieCacheNode{ + b: keyBytes, + terminal: true, + valueIndex: kv.addValue(value), + } + node.addChildAt(newNode, idx) + return + } + + // We take the address of the child element in the slice. + // This pointer is stable as long as we don't resize 'node.children'. + // We only resize 'node.children' when adding a new child to 'node', + // which we don't do in this loop branch (we already found the child). + child := &node.children[idx] + common := commonPrefixLen(child.b, keyBytes) + + // We found exact match of the child node's segment. + if common == len(child.b) { + keyBytes = keyBytes[common:] + node = child + if len(keyBytes) == 0 { + // We found the full key, update value if node is terminal, + // otherwise mark node as terminal and insert value. + if !node.terminal { + node.valueIndex = kv.addValue(value) + node.terminal = true + } else { + kv.values[node.valueIndex] = value + } + return + } + continue + } + + // Split required. 
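+		// For example, if child.b is "abra" and the remaining key is "abc", common is 2:
+		// the child is shortened to "ab" and gets two children, one carrying the old
+		// suffix "ra" (with the original value and children) and one for the new suffix "c".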
+ origSuffix := child.b[common:] + newSuffix := keyBytes[common:] + + // Create a node representing the rest of the original child. + // We copy the children slice from the original child. + restNode := trieCacheNode{ + b: origSuffix, + children: child.children, + terminal: child.terminal, + valueIndex: child.valueIndex, + } + + // Reset current child to be the branch. + child.b = child.b[:common] + child.children = nil // Release the old slice (ownership moved to restNode) + child.terminal = false + + child.addChild(restNode) + + if len(newSuffix) == 0 { + child.terminal = true + child.valueIndex = kv.addValue(value) + } else { + newNode := trieCacheNode{ + b: newSuffix, + terminal: true, + valueIndex: kv.addValue(value), + } + child.addChild(newNode) + } + return + } +} + +func (kv *KVCache[K, V]) deleteValueAtIndex(idx int) { + // Clear the value, so if it is a pointer or contains pointers, + // GC can collect the memory. + kv.values[idx] = kv.zero + // Add index to the freelist, so it can be reused. + kv.freelist = append(kv.freelist, idx) +} + +func (kv *KVCache[K, V]) delete(key string) error { + keyBytes := []byte(key) + + // Stack-based deletion to avoid recursion + type stackEntry struct { + node *trieCacheNode + keyPart []byte + childIdx int // index of child to check, -1 if not yet determined + parent *trieCacheNode + } + + stack := []stackEntry{{ + node: kv.trie, + keyPart: keyBytes, + childIdx: -1, + parent: nil, + }} + + // Track path for cleanup phase + type pathEntry struct { + node *trieCacheNode + parent *trieCacheNode + childIdx int + } + var path []pathEntry + + // Phase 1: Navigate to the target node + for len(stack) > 0 { + top := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if len(top.keyPart) == 0 { + // Reached the target node + if top.node.terminal { + top.node.terminal = false + kv.deleteValueAtIndex(top.node.valueIndex) + + // Phase 2: Cleanup - walk back and remove childless non-terminal nodes + for i := len(path) - 1; i >= 0; i-- { + node := path[i].node + parent := path[i].parent + childIdx := path[i].childIdx + + if len(node.children) == 0 && !node.terminal { + // Case 1: Delete empty non-terminal node + // Remove child from slice + copy(parent.children[childIdx:], parent.children[childIdx+1:]) + parent.children[len(parent.children)-1] = trieCacheNode{} + parent.children = parent.children[:len(parent.children)-1] + } else if len(node.children) == 1 && !node.terminal { + // Case 2: Merge node with its single child + child := node.children[0] + + node.b = append(node.b, child.b...) 
+ node.terminal = child.terminal + node.valueIndex = child.valueIndex + node.children = child.children + } else { + // Node is stable (has >1 children or is terminal), stop cleanup + break + } + } + } + return nil + } + + idx, found := top.node.findChild(top.keyPart[0]) + if !found { + // Key doesn't exist, nothing to delete + return nil + } + + child := &top.node.children[idx] + common := commonPrefixLen(child.b, top.keyPart) + + if common != len(child.b) { + // Partial match, key doesn't exist + return nil + } + + // Record path for cleanup + path = append(path, pathEntry{ + node: child, + parent: top.node, + childIdx: idx, + }) + + // Continue with remaining key + stack = append(stack, stackEntry{ + node: child, + keyPart: top.keyPart[common:], + childIdx: -1, + parent: top.node, + }) + } + + return nil +} + +func (kv *KVCache[K, V]) dfs(node *trieCacheNode, currentPath []byte) ([]V, error) { + var res []V + + if node.terminal { + res = append(res, kv.values[node.valueIndex]) + } + + // If the node has no children, return early + if len(node.children) == 0 { + return res, nil + } + + // Stack-based DFS to avoid recursion + type stackEntry struct { + node *trieCacheNode + pathLength int // length of path before this node + } + + stack := make([]stackEntry, 0, maxKeyLength) + + // Push all children of the starting node in reverse order + for i := len(node.children) - 1; i >= 0; i-- { + stack = append(stack, stackEntry{ + node: &node.children[i], + pathLength: len(currentPath), + }) + } + + for len(stack) > 0 { + // Pop from stack + top := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + // Restore path to parent length and append current node's segment + currentPath = currentPath[:top.pathLength] + currentPath = append(currentPath, top.node.b...) 
+ + if top.node.terminal { + res = append(res, kv.values[top.node.valueIndex]) + } + + // Push children in reverse order so they are processed in correct order + for i := len(top.node.children) - 1; i >= 0; i-- { + stack = append(stack, stackEntry{ + node: &top.node.children[i], + pathLength: len(currentPath), + }) + } + } + + return res, nil +} + +func (n *trieCacheNode) findChild(c byte) (int, bool) { + idx := sort.Search(len(n.children), func(i int) bool { + return n.children[i].b[0] >= c + }) + if idx < len(n.children) && n.children[idx].b[0] == c { + return idx, true + } + return idx, false +} + +func (n *trieCacheNode) addChild(child trieCacheNode) { + idx, _ := n.findChild(child.b[0]) + n.addChildAt(child, idx) +} + +func (n *trieCacheNode) addChildAt(child trieCacheNode, idx int) { + n.children = append(n.children, trieCacheNode{}) + copy(n.children[idx+1:], n.children[idx:]) + n.children[idx] = child +} diff --git a/kv_cache_test.go b/kv_cache_test.go new file mode 100644 index 0000000..0bee3de --- /dev/null +++ b/kv_cache_test.go @@ -0,0 +1,1903 @@ +package geche + +import ( + "fmt" + "math/rand" + "runtime" + "sort" + "strings" + "testing" + "time" +) + +func ExampleNewKVCache() { + cache := NewKVCache[string, string]() + + cache.Set("foo", "bar") + cache.Set("foo2", "bar2") + cache.Set("foo3", "bar3") + cache.Set("foo1", "bar1") + + res, _ := cache.ListByPrefix("foo") + fmt.Println(res) + // Output: [bar bar1 bar2 bar3] +} + +func TestKVCache(t *testing.T) { + cache := NewKVCache[string, string]() + + for i := 999; i >= 0; i-- { + key := fmt.Sprintf("%03d", i) + if key == "008" { + cache.Set(key, key) + } + cache.Set(key, key) + } + + expected := []string{ + "000", "001", "002", "003", "004", "005", "006", "007", "008", "009", + } + + got, err := cache.ListByPrefix("00") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + compareSlice(t, expected, got) + + expected = []string{ + "120", "121", "122", "123", "124", "125", "126", "127", "128", "129", + } + + got, err = cache.ListByPrefix("12") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + compareSlice(t, expected, got) + + expected = []string{"888"} + + got, err = cache.ListByPrefix("888") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + compareSlice(t, expected, got) + + _ = cache.Del("777") + _ = cache.Del("779") + + if _, err := cache.Get("777"); err != ErrNotFound { + t.Fatalf("expected error %v, got %v", ErrNotFound, err) + } + + expected = []string{ + "770", "771", "772", "773", "774", "775", "776", "778", + } + + got, err = cache.ListByPrefix("77") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + cache.Set("777", "777") + cache.Set("779", "779") + + compareSlice(t, expected, got) + + expected = []string{ + "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", + } + + got, err = cache.ListByPrefix("77") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) + + cache.Set("77", "77") + + expected = []string{ + "77", "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", + } + + got, err = cache.ListByPrefix("77") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) +} + +func TestKVCacheEmptyPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + expected := []string{} + for i := 0; i < 100; i++ { + key := fmt.Sprintf("%02d", i) + expected = 
append(expected, key) + cache.Set(key, key) + } + + got, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) +} + +func TestKVCacheGetEmptyKey(t *testing.T) { + cache := NewKVCache[string, string]() + t.Run("ValueAtEmptyKey", func(t *testing.T) { + cache.Set("", "empty") + val, err := cache.Get("") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != "empty" { + t.Errorf("expected %q, got %q", "empty", val) + } + }) + + t.Run("ValueAtEmptyKeyWithChildren", func(t *testing.T) { + cache.Set("a", "a") + val, err := cache.Get("") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != "empty" { + t.Errorf("expected %q, got %q", "empty", val) + } + }) + + t.Run("EmptyKeyMiss", func(t *testing.T) { + _ = cache.Del("") + val, err := cache.Get("") + if err != ErrNotFound { + t.Fatalf("expected error %v, got %v", ErrNotFound, err) + } + if val != "" { + t.Errorf("expected %q, got %q", "", val) + } + }) +} + +func TestKVCacheAllByPrefixBreak(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("abc", "abc") + cache.Set("a", "a") + cache.Set("abcd", "abcd") + cache.Set("ab", "ab") + + cnt := 0 + gotKeys := []string{} + for k, v := range cache.AllByPrefix("a") { + cnt++ + if v != k { + t.Errorf("expected %q, got %q", k, v) + } + gotKeys = append(gotKeys, k) + if k == "ab" { + break + } + } + + expectedKeys := []string{"a", "ab"} + compareSlice(t, expectedKeys, gotKeys) + + for k, v := range cache.AllByPrefix("a") { + if k != "a" { + t.Errorf("expected %q, got %q", "a", k) + } + if v != k { + t.Errorf("expected %q, got %q", k, v) + } + break + } + + for k, v := range cache.AllByPrefix("abcd") { + if k != "abcd" { + t.Errorf("expected %q, got %q", "abcd", k) + } + if v != k { + t.Errorf("expected %q, got %q", k, v) + } + break + } + + // Case when last node prefix is matched but full node is not. 
+ cache.Set("abcdefg", "abcdefg") + for range cache.AllByPrefix("abcdez") { + t.Errorf("expected no results") + } +} + +func TestKVCacheEmptyPrefixDiffLen(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("12345", "12345") + cache.Set("123", "123") + cache.Set("3", "3") + cache.Set("2", "2") + cache.Set("33333", "33333") + cache.Set("1", "1") + + expected := []string{"1", "123", "12345", "2", "3", "33333"} + + got, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) +} + +func TestKVCacheEmptyPrefixFuzz(t *testing.T) { + cache := NewKVCache[string, string]() + + set := map[string]struct{}{} + for i := 0; i < 10000; i++ { + key := genRandomString(rand.Intn(300) + 1) + set[key] = struct{}{} + cache.Set(key, key) + } + + expected := []string{} + for key := range set { + expected = append(expected, key) + } + sort.Strings(expected) + + got, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) +} + +func TestKVCachePrefixFuzz(t *testing.T) { + prefixes := []string{} + for i := 0; i < 10; i++ { + prefixes = append(prefixes, genRandomString(rand.Intn(20)+1)) + } + cache := NewKVCache[string, string]() + + set := map[string]struct{}{} + for i := 0; i < 10000; i++ { + prefix := prefixes[rand.Intn(len(prefixes))] + pl := rand.Intn(len(prefix)) + key := prefix[:pl] + genRandomString(rand.Intn(300)+1) + set[key] = struct{}{} + cache.Set(key, key) + } + + // Delete 10% of keys. + for key := range set { + if rand.Float64() < 0.1 { + delete(set, key) + _ = cache.Del(key) + } + } + + expected := []string{} + for key := range set { + expected = append(expected, key) + } + sort.Strings(expected) + + got, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected, got) + + for i := 1; i < len(prefixes); i++ { + prefix := prefixes[i] + for j := 1; j < len(prefix); j++ { + q := prefix[:j] + expected2 := make([]string, 0, len(expected)) + for _, key := range expected { + if strings.HasPrefix(key, q) { + expected2 = append(expected2, key) + } + } + + got, err := cache.ListByPrefix(q) + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + compareSlice(t, expected2, got) + } + } +} + +func TestKVCacheNonexist(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("test", "best") + + got, err := cache.ListByPrefix("nonexistent") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + if len(got) > 0 { + t.Errorf("unexpected len %d", len(got)) + } + + if err := cache.Del("nonexistent"); err != nil { + t.Errorf("unexpected error in Del: %v", err) + } +} + +func TestKVCacheEmptyKey(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("", "0") + cache.Set("foo1", "1") + cache.Set("foo2", "2") + + got, err := cache.ListByPrefix("fo") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + if len(got) > 3 { + t.Errorf("unexpected len %d", len(got)) + } +} + +func TestKVCacheAlloc(t *testing.T) { + cache := NewKVCache[string, string]() + + var ( + mBefore, mAfter runtime.MemStats + rawDataLen int64 + ) + runtime.GC() + runtime.ReadMemStats(&mBefore) + + for i := 0; i < 1_000_000; i++ { + key := genRandomString(rand.Intn(300) + 1) + rawDataLen += int64(len(key) * 2) + + cache.Set(key, key) + } + + runtime.GC() + 
runtime.ReadMemStats(&mAfter) + t.Logf("rawDataLen: %d", rawDataLen) + t.Logf("memIncrease: %d", mAfter.HeapAlloc-mBefore.HeapAlloc) + t.Logf("memIncreaseRatio: %0.1f", float64(mAfter.HeapAlloc-mBefore.HeapAlloc)/float64(rawDataLen)) + + keys, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + for _, key := range keys { + _ = cache.Del(key) + } + + runtime.GC() + runtime.ReadMemStats(&mAfter) + t.Logf("memIncreaseAfterDel: %d", mAfter.HeapAlloc-mBefore.HeapAlloc) + + if mAfter.HeapAlloc > mBefore.HeapAlloc { + if mAfter.HeapAlloc-mBefore.HeapAlloc > uint64(rawDataLen) { + t.Errorf("memory increase is too big") + } + } + + if len(cache.trie.children) > 0 { + t.Log(cache.trie.children) + t.Errorf("trie is not empty") + } + + if cache.Len() > 0 { + t.Errorf("data is not empty") + } +} + +func TestKVCacheDel(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("foo", "bar") + _ = cache.Del("foo") + + if _, err := cache.Get("foo"); err == nil { + t.Error("expected error after deleting a key, got nil") + } + + cache.Set("fo", "bar") + cache.Set("food", "bar") + _ = cache.Del("food") + + res, err := cache.ListByPrefix("foo") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + if len(res) != 0 { + t.Errorf("expected 0 results, got %d", len(res)) + } + + res, err = cache.ListByPrefix("fo") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + if len(res) != 1 { + t.Errorf("expected 1 results, got %d", len(res)) + } +} + +func TestKVCacheSetEmptyKey(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("", "test") + + got, err := cache.Get("") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if got != "test" { + t.Errorf("expected %q, got %q", "test", got) + } + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + if len(values) != 1 { + t.Errorf("expected len %d, got %d", 1, len(values)) + } + if values[0] != "test" { + t.Errorf("expected %q, got %q", "test", values[0]) + } +} + +func TestKVCacheTwoSingleChar(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "test1") + cache.Set("b", "test2") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + expected := []string{"test1", "test2"} + compareSlice(t, expected, values) +} + +func TestKVCacheSetTwoDepth(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "test1") + cache.Set("ab", "test2") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + expected := []string{"test1", "test2"} + compareSlice(t, expected, values) +} + +func TestKVCacheSetTwoDepthReverseOrder(t *testing.T) { + cache := NewKVCache[string, string]() + // When the order of Set is reversed, first Set will add 2-symbol node, + // and second set will split it into two. 
+ cache.Set("ab", "test2") + cache.Set("a", "test1") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + expected := []string{"test1", "test2"} + compareSlice(t, expected, values) +} + +func TestKVCacheSetAppendTail(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + expected := []string{"test2", "test1"} + compareSlice(t, expected, values) +} + +func TestKVCacheSet3(t *testing.T) { + // Some tests like this here are white-box ones to cover specific code paths, + // or to check for regressions of fixed issues found by fuzzing. + cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + cache.Set("a", "test4") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + expected := []string{ + "test4", "test2", "test1", + } + + compareSlice(t, expected, values) +} + +func TestKVCacheSet4(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + cache.Set("abz", "test4") + + values, err := cache.ListByPrefix("") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + expected := []string{ + "test2", "test1", "test4", + } + + t.Log(values) + compareSlice(t, expected, values) +} + +func TestKVCacheSet5(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("abra", "test2") + cache.Set("cadabra", "test1") + cache.Set("abracadabra", "test4") + + values, err := cache.ListByPrefix("cad") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + expected := []string{ + "test1", + } + + t.Log(values) + compareSlice(t, expected, values) +} + +func TestKVCacheSetIfPresent(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "test2") + cache.Set("b", "test1") + cache.Set("c", "test4") + + old, inserted := cache.SetIfPresent("a", "test5") + if !inserted { + t.Errorf("key \"a\" is present in cache, SetIfPresent should return true") + } + + if old != "test2" { + t.Errorf("expected %q, got %q", "test2", old) + } + + old, inserted = cache.SetIfPresent("a", "test6") + if !inserted { + t.Errorf("key \"a\" is present in cache, SetIfPresent should return true") + } + + if old != "test5" { + t.Errorf("expected %q, got %q", "test5", old) + } + + if _, inserted := cache.SetIfPresent("d", "test3"); inserted { + t.Errorf("key \"d\" is not present in cache, SetIfPresent should return false") + } + + if _, inserted := cache.SetIfPresent("d", "test3"); inserted { + t.Errorf("key \"d\" is still not present in cache, SetIfPresent should return false") + } + + val, err := cache.Get("a") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + if val != "test6" { + t.Errorf("expected %q, got %q", "test6", val) + } + + _, err = cache.Get("d") + if err == nil { + t.Errorf("expected key \"d\" to not be present in the cache") + } +} + +func TestKVCacheSetIfPresentConcurrent(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "startA") + cache.Set("b", "startB") + + for i := 0; i < 1000; i++ { + go func() { + switch rand.Intn(6) { + case 0: + cache.SetIfPresent("a", "a") + case 1: + cache.SetIfPresent("b", "b") + case 2: + cache.SetIfPresent("c", "c") + case 3: + _, _ = cache.Get("a") + case 4: + _, _ = 
cache.Get("b")
+			case 5:
+				_, _ = cache.Get("c")
+			}
+		}()
+	}
+
+	time.Sleep(10 * time.Millisecond)
+
+	if val, _ := cache.Get("a"); val != "a" {
+		t.Errorf("expected %q, got %q", "a", val)
+	}
+
+	if val, _ := cache.Get("b"); val != "b" {
+		t.Errorf("expected %q, got %q", "b", val)
+	}
+
+	if _, err := cache.Get("c"); err == nil {
+		t.Errorf("expected key \"c\" to not be present in the cache")
+	}
+}
+
+func FuzzKVCacheSetListByPrefix(f *testing.F) {
+	// Simple fuzzing test adding 3 keys then listing by prefix.
+	examples := [][]string{
+		{"", "", "", ""},
+		{"a", "a", "a", ""},
+		{"a", "a", "a", "b"},
+		{"ab", "ac", "abc", "a"},
+		{"abra", "cadabra", "abracadabra", "cad"},
+		{"abra", "cadabra", "abracadabra", "ab"},
+		{"abcd", "abz", "ac", "a"},
+		{"a", "abc", "abcd", "a"},
+	}
+
+	for _, example := range examples {
+		f.Add(example[0], example[1], example[2], example[3])
+	}
+
+	f.Fuzz(func(t *testing.T, k1, k2, k3, prefix string) {
+		golden := []string{
+			k1, k2, k3,
+		}
+		cache := NewKVCache[string, string]()
+		for _, key := range golden {
+			cache.Set(key, key)
+		}
+
+		sort.Strings(golden)
+
+		expect := make([]string, 0, len(golden))
+
+		seen := map[string]struct{}{}
+		for _, s := range golden {
+			if _, ok := seen[s]; !ok && strings.HasPrefix(s, prefix) {
+				expect = append(expect, s)
+				seen[s] = struct{}{}
+			}
+		}
+
+		got, err := cache.ListByPrefix(prefix)
+		if err != nil {
+			t.Fatalf("unexpected error in ListByPrefix: %v", err)
+		}
+
+		t.Logf("params: (%q, %q, %q, %q)", k1, k2, k3, prefix)
+		t.Logf("got: %v", got)
+
+		compareSlice(t, expect, got)
+	})
+}
+
+func TestKVCacheDelNoprefix(t *testing.T) {
+	cache := NewKVCache[string, string]()
+	cache.Set("hu", "hu")
+	_ = cache.Del("h")
+	res, err := cache.Get("hu")
+	if err != nil {
+		t.Errorf("unexpected error in Get: %v", err)
+	}
+	if res != "hu" {
+		t.Errorf("expected %q, got %q", "hu", res)
+	}
+	l, err := cache.ListByPrefix("")
+	if err != nil {
+		t.Errorf("unexpected error in ListByPrefix: %v", err)
+	}
+	if len(l) != 1 {
+		t.Fatalf("expected len 1, got %d", len(l))
+	}
+
+	if l[0] != "hu" {
+		t.Errorf("expected %q, got %q", "hu", l[0])
+	}
+}
+
+func FuzzKVCacheMonkey(f *testing.F) {
+	// More elaborate fuzzing test. It creates a random task of 50 Set/Del
+	// commands to be executed on a KVCache. Then it checks that ListByPrefix
+	// returns correct results.
+	examples := []struct {
+		seed   int64
+		prefix string
+	}{
+		{0, ""},
+		{439, "x"},
+		{2, "ab"},
+		{4928589, " "},
+		{93, "1"},
+		{1994, ""},
+		{185, "P"},
+	}
+	for _, example := range examples {
+		f.Add(example.seed, example.prefix)
+	}
+
+	f.Fuzz(func(t *testing.T, seed int64, prefix string) {
+		cache := NewKVCache[string, string]()
+		task := randTask(seed)
+		golden := make(map[string]struct{}, len(task))
+		for _, cmd := range task {
+			switch cmd.action {
+			case "Set":
+				cache.Set(cmd.key, cmd.key)
+				golden[cmd.key] = struct{}{}
+			case "Del":
+				// Since keys are random we expect a lot of Del to fail.
+ _ = cache.Del(cmd.key) + delete(golden, cmd.key) + } + } + + goldenFiltered := make([]string, 0, len(golden)) + for k := range golden { + if strings.HasPrefix(k, prefix) { + goldenFiltered = append(goldenFiltered, k) + } + } + sort.Strings(goldenFiltered) + + got, err := cache.ListByPrefix(prefix) + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + + if cache.Len() != len(golden) { + t.Errorf("expected len %d, got %d", len(golden), cache.Len()) + } + + for _, key := range goldenFiltered { + val, err := cache.Get(key) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != key { + t.Errorf("expected %q, got %q", key, val) + } + } + + t.Logf("seed: %d, task %v, prefix: %q", seed, task, prefix) + compareSlice(t, goldenFiltered, got) + }) +} + +func TestKVCacheLen(t *testing.T) { + cache := NewKVCache[string, string]() + + if cache.Len() != 0 { + t.Errorf("expected len 0, got %d", cache.Len()) + } + + cache.Set("a", "1") + if cache.Len() != 1 { + t.Errorf("expected len 1, got %d", cache.Len()) + } + + cache.Set("b", "2") + cache.Set("c", "3") + if cache.Len() != 3 { + t.Errorf("expected len 3, got %d", cache.Len()) + } + + cache.Set("a", "10") + if cache.Len() != 3 { + t.Errorf("expected len 3 after update, got %d", cache.Len()) + } + + _ = cache.Del("b") + if cache.Len() != 2 { + t.Errorf("expected len 2 after delete, got %d", cache.Len()) + } + + _ = cache.Del("nonexistent") + if cache.Len() != 2 { + t.Errorf("expected len 2 after deleting nonexistent key, got %d", cache.Len()) + } +} + +func TestKVCacheFreelist(t *testing.T) { + cache := NewKVCache[string, string]() + + // Add some values + cache.Set("a", "1") + cache.Set("b", "2") + cache.Set("c", "3") + + if len(cache.freelist) != 0 { + t.Errorf("expected freelist len 0, got %d", len(cache.freelist)) + } + + // Delete a value - should add to freelist + _ = cache.Del("b") + + if len(cache.freelist) != 1 { + t.Errorf("expected freelist len 1 after delete, got %d", len(cache.freelist)) + } + + // Add a new value - should reuse from freelist + cache.Set("d", "4") + + if len(cache.freelist) != 0 { + t.Errorf("expected freelist len 0 after reuse, got %d", len(cache.freelist)) + } + + // Verify values + val, err := cache.Get("d") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != "4" { + t.Errorf("expected %q, got %q", "4", val) + } + + // Old deleted value should not be accessible + if _, err := cache.Get("b"); err == nil { + t.Errorf("expected error for deleted key, got nil") + } +} + +func TestKVCacheConcurrent(t *testing.T) { + cache := NewKVCache[string, int]() + + // Concurrent writes + done := make(chan bool) + for i := 0; i < 10; i++ { + go func(id int) { + for j := 0; j < 100; j++ { + key := fmt.Sprintf("key-%d-%d", id, j) + cache.Set(key, id*100+j) + } + done <- true + }(i) + } + + // Wait for all writes to complete + for i := 0; i < 10; i++ { + <-done + } + + // Concurrent reads + for i := 0; i < 10; i++ { + go func(id int) { + for j := 0; j < 100; j++ { + key := fmt.Sprintf("key-%d-%d", id, j) + val, err := cache.Get(key) + if err != nil { + t.Errorf("unexpected error in Get: %v", err) + } + expected := id*100 + j + if val != expected { + t.Errorf("expected %d, got %d", expected, val) + } + } + done <- true + }(i) + } + + // Wait for all reads to complete + for i := 0; i < 10; i++ { + <-done + } +} + +func TestKVCacheWithDifferentTypes(t *testing.T) { + // Test with int values + intCache := NewKVCache[string, int]() + intCache.Set("one", 1) + 
intCache.Set("two", 2) + + val, err := intCache.Get("one") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != 1 { + t.Errorf("expected 1, got %d", val) + } + + // Test with struct values + type testStruct struct { + Name string + Age int + } + + structCache := NewKVCache[string, testStruct]() + structCache.Set("person1", testStruct{Name: "Alice", Age: 30}) + structCache.Set("person2", testStruct{Name: "Bob", Age: 25}) + + person, err := structCache.Get("person1") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if person.Name != "Alice" || person.Age != 30 { + t.Errorf("expected Alice/30, got %s/%d", person.Name, person.Age) + } + + people, err := structCache.ListByPrefix("person") + if err != nil { + t.Fatalf("unexpected error in ListByPrefix: %v", err) + } + if len(people) != 2 { + t.Errorf("expected 2 people, got %d", len(people)) + } +} + +func ExampleKVCache_AllByPrefix() { + cache := NewKVCache[string, string]() + + cache.Set("foo", "bar") + cache.Set("foo2", "bar2") + cache.Set("foo3", "bar3") + cache.Set("foo1", "bar1") + + for k, v := range cache.AllByPrefix("foo") { + fmt.Println(k, v) + } + // Output: + // foo bar + // foo1 bar1 + // foo2 bar2 + // foo3 bar3 +} + +func TestKVCache_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + for i := 999; i >= 0; i-- { + key := fmt.Sprintf("%03d", i) + if key == "008" { + cache.Set(key, key) + } + cache.Set(key, key) + } + + expected := []string{ + "000", "001", "002", "003", "004", "005", "006", "007", "008", "009", + } + + var got []string + var gotKeys []string + seq := cache.AllByPrefix("00") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) + + expected = []string{ + "120", "121", "122", "123", "124", "125", "126", "127", "128", "129", + } + + got = nil + gotKeys = nil + seq = cache.AllByPrefix("12") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) + + expected = []string{"888"} + + got = nil + gotKeys = nil + seq = cache.AllByPrefix("888") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) + + _ = cache.Del("777") + _ = cache.Del("779") + + if _, err := cache.Get("777"); err != ErrNotFound { + t.Fatalf("expected error %v, got %v", ErrNotFound, err) + } + + expected = []string{ + "770", "771", "772", "773", "774", "775", "776", "778", + } + expectedKeys := []string{ + "770", "771", "772", "773", "774", "775", "776", "778", + } + + got = nil + gotKeys = nil + seq = cache.AllByPrefix("77") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + cache.Set("777", "777") + cache.Set("779", "779") + + compareSlice(t, expected, got) + compareSlice(t, expectedKeys, gotKeys) + + expected = []string{ + "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", + } + + got = nil + gotKeys = nil + seq = cache.AllByPrefix("77") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) + + cache.Set("77", "77") + + expected = []string{ + "77", "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", + } + + got = nil + 
gotKeys = nil + seq = cache.AllByPrefix("77") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) +} + +func TestKVCacheEmptyPrefix_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + expected := []string{} + for i := 0; i < 100; i++ { + key := fmt.Sprintf("%02d", i) + expected = append(expected, key) + cache.Set(key, key) + } + + var got []string + var gotKeys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) +} + +func TestKVCacheExactMatch_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("exact", "exact_value") + + expected := []string{"exact_value"} + expectedKeys := []string{"exact"} + + var got []string + var gotKeys []string + seq := cache.AllByPrefix("exact") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expectedKeys, gotKeys) +} + +func TestKVCacheEmptyPrefixDiffLen_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + cache.Set("12345", "12345") + cache.Set("123", "123") + cache.Set("3", "3") + cache.Set("2", "2") + cache.Set("33333", "33333") + cache.Set("1", "1") + + expected := []string{"1", "123", "12345", "2", "3", "33333"} + + var got []string + var gotKeys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) +} + +func TestKVCacheEmptyPrefixFuzz_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + + set := map[string]struct{}{} + for i := 0; i < 10000; i++ { + key := genRandomString(rand.Intn(300) + 1) + set[key] = struct{}{} + cache.Set(key, key) + } + + expected := []string{} + for key := range set { + expected = append(expected, key) + } + sort.Strings(expected) + + var got []string + var gotKeys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + got = append(got, v) + return true + }) + + compareSlice(t, expected, got) + compareSlice(t, expected, gotKeys) +} + +func TestKVCachePrefixFuzz_AllByPrefix(t *testing.T) { + prefixes := []string{} + for i := 0; i < 10; i++ { + prefixes = append(prefixes, genRandomString(rand.Intn(20)+1)) + } + cache := NewKVCache[string, string]() + + set := map[string]struct{}{} + for i := 0; i < 10000; i++ { + prefix := prefixes[rand.Intn(len(prefixes))] + pl := rand.Intn(len(prefix)) + key := prefix[:pl] + genRandomString(rand.Intn(300)+1) + set[key] = struct{}{} + cache.Set(key, key) + } + + // Delete 10% of keys. 
+	for key := range set {
+		if rand.Float64() < 0.1 {
+			delete(set, key)
+			_ = cache.Del(key)
+		}
+	}
+
+	expected := []string{}
+	for key := range set {
+		expected = append(expected, key)
+	}
+	sort.Strings(expected)
+
+	var got []string
+	var gotKeys []string
+	seq := cache.AllByPrefix("")
+	seq(func(k, v string) bool {
+		gotKeys = append(gotKeys, k)
+		got = append(got, v)
+		return true
+	})
+
+	compareSlice(t, expected, got)
+	compareSlice(t, expected, gotKeys)
+
+	for i := 1; i < len(prefixes); i++ {
+		prefix := prefixes[i]
+		for j := 1; j < len(prefix); j++ {
+			q := prefix[:j]
+			expected2 := make([]string, 0, len(expected))
+			for _, key := range expected {
+				if strings.HasPrefix(key, q) {
+					expected2 = append(expected2, key)
+				}
+			}
+
+			got = nil
+			gotKeys = nil
+			seq = cache.AllByPrefix(q)
+			seq(func(k, v string) bool {
+				gotKeys = append(gotKeys, k)
+				got = append(got, v)
+				return true
+			})
+
+			compareSlice(t, expected2, got)
+			compareSlice(t, expected2, gotKeys)
+		}
+	}
+}
+
+func TestKVCacheNonexist_AllByPrefix(t *testing.T) {
+	cache := NewKVCache[string, string]()
+
+	cache.Set("test", "best")
+
+	var got []string
+	seq := cache.AllByPrefix("nonexistent")
+	seq(func(k, v string) bool {
+		got = append(got, v)
+		return true
+	})
+
+	if len(got) > 0 {
+		t.Errorf("unexpected len %d", len(got))
+	}
+
+	if err := cache.Del("nonexistent"); err != nil {
+		t.Errorf("unexpected error in Del: %v", err)
+	}
+}
+
+func TestKVCacheEmptyKey_AllByPrefix(t *testing.T) {
+	cache := NewKVCache[string, string]()
+
+	cache.Set("", "0")
+	cache.Set("foo1", "1")
+	cache.Set("foo2", "2")
+
+	var got []string
+	seq := cache.AllByPrefix("fo")
+	seq(func(k, v string) bool {
+		got = append(got, v)
+		return true
+	})
+
+	if len(got) != 2 {
+		t.Errorf("unexpected len %d", len(got))
+	}
+}
+
+func TestKVCacheAlloc_AllByPrefix(t *testing.T) {
+	cache := NewKVCache[string, string]()
+
+	var (
+		mBefore, mAfter runtime.MemStats
+		rawDataLen      int64
+	)
+	runtime.GC()
+	runtime.ReadMemStats(&mBefore)
+
+	for i := 0; i < 1_000_000; i++ {
+		key := genRandomString(rand.Intn(300) + 1)
+		rawDataLen += int64(len(key) * 2)
+
+		cache.Set(key, key)
+	}
+
+	runtime.GC()
+	runtime.ReadMemStats(&mAfter)
+	t.Logf("rawDataLen: %d", rawDataLen)
+	t.Logf("memIncrease: %d", mAfter.HeapAlloc-mBefore.HeapAlloc)
+	t.Logf("memIncreaseRatio: %0.1f", float64(mAfter.HeapAlloc-mBefore.HeapAlloc)/float64(rawDataLen))
+
+	var keys []string
+	seq := cache.AllByPrefix("")
+	seq(func(k, v string) bool {
+		keys = append(keys, k)
+		return true
+	})
+
+	for _, key := range keys {
+		_ = cache.Del(key)
+	}
+
+	runtime.GC()
+	runtime.ReadMemStats(&mAfter)
+	t.Logf("memIncreaseAfterDel: %d", mAfter.HeapAlloc-mBefore.HeapAlloc)
+
+	if mAfter.HeapAlloc > mBefore.HeapAlloc {
+		if mAfter.HeapAlloc-mBefore.HeapAlloc > uint64(rawDataLen) {
+			t.Errorf("memory increase is too big")
+		}
+	}
+
+	if len(cache.trie.children) > 0 {
+		t.Log(cache.trie.children)
+		t.Errorf("trie is not empty")
+	}
+
+	if cache.Len() > 0 {
+		t.Errorf("data is not empty")
+	}
+}
+
+func TestKVCacheDel_AllByPrefix(t *testing.T) {
+	cache := NewKVCache[string, string]()
+
+	cache.Set("foo", "bar")
+	_ = cache.Del("foo")
+
+	if _, err := cache.Get("foo"); err == nil {
+		t.Error("expected error after deleting a key, got nil")
+	}
+
+	cache.Set("fo", "bar")
+	cache.Set("food", "bar")
+	_ = cache.Del("food")
+
+	var res []string
+	seq := cache.AllByPrefix("foo")
+	seq(func(k, v string) bool {
+		res = append(res, v)
+		return true
+	})
+	if len(res) != 0 {
+		t.Errorf("expected 0 results, got
%d", len(res)) + } + + res = nil + seq = cache.AllByPrefix("fo") + seq(func(k, v string) bool { + res = append(res, v) + return true + }) + if len(res) != 1 { + t.Errorf("expected 1 results, got %d", len(res)) + } +} + +func TestKVCacheSetEmptyKey_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("", "test") + + got, err := cache.Get("") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if got != "test" { + t.Errorf("expected %q, got %q", "test", got) + } + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + if len(values) != 1 { + t.Errorf("expected len %d, got %d", 1, len(values)) + } + if values[0] != "test" { + t.Errorf("expected %q, got %q", "test", values[0]) + } + if keys[0] != "" { + t.Errorf("expected empty key, got %q", keys[0]) + } +} + +func TestKVCacheTwoSingleChar_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "test1") + cache.Set("b", "test2") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + expectedValues := []string{"test1", "test2"} + expectedKeys := []string{"a", "b"} + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSetTwoDepth_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("a", "test1") + cache.Set("ab", "test2") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + expectedValues := []string{"test1", "test2"} + expectedKeys := []string{"a", "ab"} + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSetTwoDepthReverseOrder_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + // When the order of Set is reversed, first Set will add 2-symbol node, + // and second set will split it into two. + cache.Set("ab", "test2") + cache.Set("a", "test1") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + expectedValues := []string{"test1", "test2"} + expectedKeys := []string{"a", "ab"} + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSetAppendTail_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + expectedValues := []string{"test2", "test1"} + expectedKeys := []string{"ab", "abc"} + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSet3_AllByPrefix(t *testing.T) { + // Some tests like this here are white-box ones to cover specific code paths, + // or to check for regressions of fixed issues found by fuzzing. 
+ cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + cache.Set("a", "test4") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + + expectedValues := []string{ + "test4", "test2", "test1", + } + expectedKeys := []string{ + "a", "ab", "abc", + } + + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSet4_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("ab", "test2") + cache.Set("abc", "test1") + cache.Set("abz", "test4") + + var values []string + var keys []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + + expectedValues := []string{ + "test2", "test1", "test4", + } + expectedKeys := []string{ + "ab", "abc", "abz", + } + + t.Log(values) + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSet5_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("abra", "test2") + cache.Set("cadabra", "test1") + cache.Set("abracadabra", "test4") + + var values []string + var keys []string + seq := cache.AllByPrefix("cad") + seq(func(k, v string) bool { + keys = append(keys, k) + values = append(values, v) + return true + }) + + expectedValues := []string{ + "test1", + } + expectedKeys := []string{ + "cadabra", + } + + t.Log(values) + compareSlice(t, expectedValues, values) + compareSlice(t, expectedKeys, keys) +} + +func FuzzKVCacheSetAllByPrefix(f *testing.F) { + // Simple fuzzing test adding 3 keys then listing by prefix. + examples := [][]string{ + {"", "", "", ""}, + {"a", "a", "a", ""}, + {"a", "a", "a", "b"}, + {"ab", "ac", "abc", "a"}, + {"abra", "cadabra", "abracadabra", "cad"}, + {"abra", "cadabra", "abracadabra", "ab"}, + {"abcd", "abz", "ac", "a"}, + {"a", "abc", "abcd", "a"}, + } + + for _, example := range examples { + f.Add(example[0], example[1], example[2], example[3]) + } + + f.Fuzz(func(t *testing.T, k1, k2, k3, prefix string) { + golden := []string{ + k1, k2, k3, + } + cache := NewKVCache[string, string]() + for _, key := range golden { + cache.Set(key, key) + } + + sort.Strings(golden) + + expectValues := make([]string, 0, len(golden)) + expectKeys := make([]string, 0, len(golden)) + + seen := map[string]struct{}{} + for _, s := range golden { + if _, ok := seen[s]; !ok && strings.HasPrefix(s, prefix) { + expectValues = append(expectValues, s) + expectKeys = append(expectKeys, s) + seen[s] = struct{}{} + } + } + + var gotValues []string + var gotKeys []string + seq := cache.AllByPrefix(prefix) + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + gotValues = append(gotValues, v) + return true + }) + + t.Logf("params: (%q, %q, %q, %q)", k1, k2, k3, prefix) + t.Logf("got: %v", gotValues) + + compareSlice(t, expectValues, gotValues) + compareSlice(t, expectKeys, gotKeys) + }) +} + +func TestKVCacheDelNoprefix_AllByPrefix(t *testing.T) { + cache := NewKVCache[string, string]() + cache.Set("hu", "hu") + _ = cache.Del("h") + res, err := cache.Get("hu") + if err != nil { + t.Errorf("unexpected error in Get: %v", err) + } + if res != "hu" { + t.Errorf("expected %q, got %q", "hu", res) + } + + var l []string + seq := cache.AllByPrefix("") + seq(func(k, v string) bool { + l = append(l, v) + return true + }) + + if len(l) != 1 { + t.Fatalf("expected len 1, got %d", 
len(l)) + } + + if l[0] != "hu" { + t.Errorf("expected %q, got %q", "hu", res) + } +} + +func FuzzKVCacheMonkey_AllByPrefix(f *testing.F) { + // More elaborate fuzzing test. It creates a random task of 50 Set/Del + // commands to be executed on a KVCache. Then it checks that AllByPrefix + // returns correct results. + examples := []struct { + seed int64 + prefix string + }{ + {0, ""}, + {439, "x"}, + {2, "ab"}, + {4928589, " "}, + {93, "1"}, + {1994, ""}, + {185, "P"}, + } + for _, example := range examples { + f.Add(example.seed, example.prefix) + } + + f.Fuzz(func(t *testing.T, seed int64, prefix string) { + cache := NewKVCache[string, string]() + task := randTask(seed) + golden := make(map[string]struct{}, len(task)) + for _, cmd := range task { + switch cmd.action { + case "Set": + cache.Set(cmd.key, cmd.key) + golden[cmd.key] = struct{}{} + case "Del": + // Since keys are random we expect a lot of Del to fail. + _ = cache.Del(cmd.key) + delete(golden, cmd.key) + } + } + + goldenFiltered := make([]string, 0, len(golden)) + for k := range golden { + if strings.HasPrefix(k, prefix) { + goldenFiltered = append(goldenFiltered, k) + } + } + sort.Strings(goldenFiltered) + + var gotValues []string + var gotKeys []string + seq := cache.AllByPrefix(prefix) + seq(func(k, v string) bool { + gotKeys = append(gotKeys, k) + gotValues = append(gotValues, v) + return true + }) + + if cache.Len() != len(golden) { + t.Errorf("expected len %d, got %d", len(golden), cache.Len()) + } + + for _, key := range goldenFiltered { + val, err := cache.Get(key) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != key { + t.Errorf("expected %q, got %q", key, val) + } + } + + t.Logf("seed: %d, task %v, prefix: %q", seed, task, prefix) + compareSlice(t, goldenFiltered, gotValues) + compareSlice(t, goldenFiltered, gotKeys) + }) +} + +func TestKVCacheWithDifferentTypes_AllByPrefix(t *testing.T) { + // Test with int values + intCache := NewKVCache[string, int]() + intCache.Set("one", 1) + intCache.Set("two", 2) + + val, err := intCache.Get("one") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if val != 1 { + t.Errorf("expected 1, got %d", val) + } + + // Test with struct values + type testStruct struct { + Name string + Age int + } + + structCache := NewKVCache[string, testStruct]() + structCache.Set("person1", testStruct{Name: "Alice", Age: 30}) + structCache.Set("person2", testStruct{Name: "Bob", Age: 25}) + + person, err := structCache.Get("person1") + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + if person.Name != "Alice" || person.Age != 30 { + t.Errorf("expected Alice/30, got %s/%d", person.Name, person.Age) + } + + var people []testStruct + var keys []string + seq := structCache.AllByPrefix("person") + seq(func(k string, v testStruct) bool { + keys = append(keys, k) + people = append(people, v) + return true + }) + + if len(people) != 2 { + t.Errorf("expected 2 people, got %d", len(people)) + } + expectedKeys := []string{"person1", "person2"} + compareSlice(t, expectedKeys, keys) +} + +func TestKVCacheSnapshot(t *testing.T) { + cache := NewKVCache[string, string]() + + // 1. Add some data + cache.Set("key1", "value1") + cache.Set("key2", "value2") + cache.Set("prefix/key3", "value3") + + // 2. Take a snapshot + snapshotMap := cache.Snapshot() + + // 3. 
Verify content of the snapshot + expected := map[string]string{ + "key1": "value1", + "key2": "value2", + "prefix/key3": "value3", + } + + if len(snapshotMap) != len(expected) { + t.Errorf("Snapshot map has unexpected length. Expected %d, got %d", len(expected), len(snapshotMap)) + } + + for k, v := range expected { + if gotV, ok := snapshotMap[k]; !ok || gotV != v { + t.Errorf("Snapshot missing key %q or has wrong value. Expected %q, got %q", k, v, gotV) + } + } + + // 4. Verify independence: modify original cache, snapshot should be unchanged + cache.Set("key4", "value4") // Add a new item + _ = cache.Del("key1") // Delete an existing item + cache.Set("key2", "newValue2") // Update an existing item + + // Snapshot should still reflect the state at the time it was taken + if len(snapshotMap) != len(expected) { // Length should still be 3 + t.Errorf("Snapshot map length changed after original cache modification. Expected %d, got %d", len(expected), len(snapshotMap)) + } + if _, ok := snapshotMap["key4"]; ok { // new key4 should not be in snapshot + t.Errorf("Snapshot contains new key 'key4' which was added after snapshot") + } + if gotV, ok := snapshotMap["key1"]; !ok || gotV != "value1" { // key1 should still be in snapshot with old value + t.Errorf("Snapshot's 'key1' was affected by deletion in original. Expected %q, got %q", "value1", gotV) + } + if gotV, ok := snapshotMap["key2"]; !ok || gotV != "value2" { // key2 should still have old value + t.Errorf("Snapshot's 'key2' was affected by update in original. Expected %q, got %q", "value2", gotV) + } + + // Also test an empty cache snapshot + emptyCache := NewKVCache[string, string]() + emptySnapshot := emptyCache.Snapshot() + if len(emptySnapshot) != 0 { + t.Errorf("Empty cache snapshot should be empty, got length %d", len(emptySnapshot)) + } +} + +func TestKVCache_DeleteMerge(t *testing.T) { + kv := NewKVCache[string, string]() + kv.Set("apple", "fruit") + kv.Set("apply", "verb") + + // Initial state: + // root -> "appl" -> "e" (val=fruit) + // -> "y" (val=verb) + + // Delete "apple" + _ = kv.Del("apple") + + // Expected state if merged: + // root -> "apply" (val=verb) + + // Verify "apply" is still accessible + val, err := kv.Get("apply") + if err != nil { + t.Fatalf("Get('apply') failed: %v", err) + } + if val != "verb" { + t.Errorf("Expected 'verb', got '%s'", val) + } + + // Verify "apple" is gone + _, err = kv.Get("apple") + if err != ErrNotFound { + t.Errorf("Expected ErrNotFound for 'apple', got %v", err) + } + + // Verify iteration works + items := kv.Snapshot() + if len(items) != 1 { + t.Errorf("Expected 1 item, got %d", len(items)) + } + if items["apply"] != "verb" { + t.Errorf("Snapshot content mismatch") + } +} + +func TestKVCache_DeleteEmpty(t *testing.T) { + kv := NewKVCache[string, string]() + kv.Set("a", "val_a") + kv.Set("b", "val_b") + + // Delete "a" - this should leave an empty node for "a" which needs cleanup + _ = kv.Del("a") + + val, err := kv.Get("b") + if err != nil { + t.Fatalf("Get('b') failed: %v", err) + } + if val != "val_b" { + t.Errorf("Expected 'val_b', got '%s'", val) + } + + _, err = kv.Get("a") + if err != ErrNotFound { + t.Errorf("Expected ErrNotFound for 'a', got %v", err) + } +}
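The `_AllByPrefix` tests above all collect results with the same inline closure over the sequence returned by `AllByPrefix`. As a minimal sketch, a small test helper could factor that pattern out; the name `collectAllByPrefix` is hypothetical and not part of the library, and the only assumption is the call shape already used in these tests, i.e. that `AllByPrefix` returns something assignable to `func(func(string, string) bool)`.

```go
// collectAllByPrefix is a hypothetical helper, sketched only to illustrate
// how the repeated seq(func(k, v string) bool { ... }) collection blocks in
// the tests above could be deduplicated. It assumes AllByPrefix yields
// (key, value) pairs through a func(yield func(string, string) bool)
// sequence, as ExampleKVCache_AllByPrefix and the tests suggest.
func collectAllByPrefix(seq func(yield func(string, string) bool)) (keys, values []string) {
	seq(func(k, v string) bool {
		keys = append(keys, k)
		values = append(values, v)
		return true
	})
	return keys, values
}
```

With such a helper, a typical assertion block would shrink to `gotKeys, got := collectAllByPrefix(cache.AllByPrefix("77"))` followed by the usual `compareSlice` checks; the same idea extends to non-string value types by adding a type parameter.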