Commit dec9502

chore: cover concurrent cache
1 parent d1b5a29 commit dec9502

6 files changed: +270 -34 lines changed

2-race-in-cache/main.go

Lines changed: 129 additions & 31 deletions
@@ -1,65 +1,163 @@
-//////////////////////////////////////////////////////////////////////
-//
-// Given is some code to cache key-value pairs from a database into
-// the main memory (to reduce access time). Note that golang's map are
-// not entirely thread safe. Multiple readers are fine, but multiple
-// writers are not. Change the code to make this thread safe.
-//
-//////////////////////////////////////////////////////////////////////
 package main
 
 import (
     "container/list"
+    "fmt"
+    "sync"
     "testing"
 )
 
-// CacheSize determines how big the cache can grow
 const CacheSize = 100
 
-// KeyStoreCacheLoader is an interface for the KeyStoreCache
 type KeyStoreCacheLoader interface {
     // Load implements a function where the cache should gets it's content from
     Load(string) string
 }
 
+// page represents an item in our cache.
 type page struct {
     Key   string
     Value string
 }
 
-// KeyStoreCache is a LRU cache for string key-value pairs
+// Future represents a pending or completed result for a key.
+// It allows multiple goroutines to wait for the result of a single load operation.
+type Future struct {
+    wg     sync.WaitGroup          // Used to wait for the load to complete
+    result *list.Element           // Pointer to the list element when done
+    err    error                   // Any error during loading
+    once   sync.Once               // Ensures load is called only once
+    loader func() (string, error)  // The function to perform the actual load
+}
+
+func newFuture(loader func() (string, error)) *Future {
+    f := &Future{
+        loader: loader,
+    }
+    f.wg.Add(1) // Initialize wait group for 1 completion
+    return f
+}
+
+// Do performs the actual loading operation exactly once.
+func (f *Future) Do() {
+    f.once.Do(func() {
+        // Simulate a time-consuming load operation
+        val, err := f.loader()
+        if err != nil {
+            f.err = err
+        } else {
+            f.result = &list.Element{Value: &page{"", val}}
+        }
+        f.wg.Done() // Signal that loading is complete
+    })
+}
+
+// Wait blocks until the future's operation is complete and returns the result.
+func (f *Future) Wait() (*list.Element, error) {
+    f.wg.Wait()
+    return f.result, f.err
+}
+
+// SetResult sets the list.Element once the loading is done and added to the list.
+func (f *Future) SetResult(e *list.Element) {
+    f.result = e
+}
+
+// KeyStoreCache implements a concurrent LRU cache.
 type KeyStoreCache struct {
-    cache map[string]*list.Element
-    pages list.List
-    load  func(string) string
+    mu    sync.RWMutex            // Guards access to cache and pages
+    cache map[string]*Future      // Maps key to its Future (pending or completed)
+    pages *list.List              // Doubly linked list for LRU eviction
+    load  func(key string) string // The actual resource loading function
 }
 
-// New creates a new KeyStoreCache
+// NewKeyStoreCache creates a new concurrent LRU cache.
 func New(load KeyStoreCacheLoader) *KeyStoreCache {
     return &KeyStoreCache{
+        cache: make(map[string]*Future),
+        pages: list.New(),
         load:  load.Load,
-        cache: make(map[string]*list.Element),
     }
 }
 
-// Get gets the key from cache, loads it from the source if needed
+// Get retrieves a value from the cache, loading it if necessary.
 func (k *KeyStoreCache) Get(key string) string {
-    if e, ok := k.cache[key]; ok {
-        k.pages.MoveToFront(e)
-        return e.Value.(page).Value
+    // --- Phase 1: Check for existing entry (read-locked) ---
+    k.mu.RLock() // Acquire a read lock
+    f, ok := k.cache[key]
+    k.mu.RUnlock() // Release read lock quickly
+
+    if ok {
+        elem, err := f.Wait() // This blocks if the future is not yet done
+        if err != nil {
+            // Handle load error here if you want to propagate it
+            fmt.Printf("Error loading key '%s': %v\n", key, err)
+            return "" // Or re-attempt load, or return a specific error
+        }
+
+        k.mu.Lock()
+        k.pages.MoveToFront(elem)
+        k.mu.Unlock()
+
+        return elem.Value.(*page).Value
     }
-    // Miss - load from database and save it in cache
-    p := page{key, k.load(key)}
-    // if cache is full remove the least used item
-    if len(k.cache) >= CacheSize {
-        end := k.pages.Back()
-        // remove from map
-        delete(k.cache, end.Value.(page).Key)
-        // remove from list
-        k.pages.Remove(end)
+
+    k.mu.Lock()
+    f, ok = k.cache[key]
+    if ok {
+        // Another goroutine beat us to it. Release lock and wait for its result.
+        k.mu.Unlock()
+        elem, err := f.Wait()
+        if err != nil {
+            fmt.Printf("Error loading key '%s': %v\n", key, err)
+            return ""
+        }
+        k.mu.Lock() // Re-acquire lock to move to front
+        k.pages.MoveToFront(elem)
+        k.mu.Unlock()
+        return elem.Value.(*page).Value
     }
-    k.pages.PushFront(p)
-    k.cache[key] = k.pages.Front()
+
+    // It's genuinely not in the cache. Create a new future.
+    newF := newFuture(func() (string, error) {
+        // The actual load operation that will be called by Do()
+        val := k.load(key)
+        return val, nil // Assuming k.load doesn't return an error, adjust if it does
+    })
+    k.cache[key] = newF
+    k.mu.Unlock() // Release the write lock *before* calling Do()
+
+    newF.Do() // This will call the loader function for this key exactly once.
+
+    // Now that loading is complete, acquire write lock again to update LRU and set result.
+    k.mu.Lock()
+    defer k.mu.Unlock() // Ensure lock is released
+
+    // Check for eviction before adding the new item
+    if k.pages.Len() >= CacheSize {
+        oldest := k.pages.Back()
+        if oldest != nil {
+            pToDelete := oldest.Value.(*page)
+            delete(k.cache, pToDelete.Key) // Remove from map
+            k.pages.Remove(oldest)         // Remove from list
+            fmt.Printf("Evicting key: %s\n", pToDelete.Key)
+        }
+    }
+
+    // Get the loaded result from the future
+    loadedElem, err := newF.Wait() // This should return immediately now as Do() just completed.
+    if err != nil {
+        // Handle the error (e.g., remove from cache if load failed permanently)
+        delete(k.cache, key)
+        fmt.Printf("Final error after load for key '%s': %v\n", key, err)
+        return ""
+    }
+
+    // Add the new page to the front of the list and set its result in the future.
+    p := &page{key, loadedElem.Value.(*page).Value} // Re-create page to get its value
+    elem := k.pages.PushFront(p)
+    newF.SetResult(elem) // Set the actual list.Element in the future for future lookups
+
     return p.Value
 }

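The hand-rolled Future above plays the same role as golang.org/x/sync/singleflight, which this commit pins in playground/go.mod: only one goroutine per key runs the loader, and concurrent callers wait for and share its result. Below is a minimal standalone sketch of that approach (not part of the commit); the sfCache type, newSFCache constructor, and field names are illustrative only.

package sketch // standalone sketch, not part of the commit

import (
    "container/list"
    "sync"

    "golang.org/x/sync/singleflight"
)

const CacheSize = 100 // mirrors the constant in 2-race-in-cache/main.go

type page struct{ Key, Value string } // mirrors the page type above

type sfCache struct {
    mu    sync.Mutex
    group singleflight.Group // deduplicates concurrent loads per key
    items map[string]*list.Element
    pages *list.List
    load  func(string) string
}

func newSFCache(load func(string) string) *sfCache {
    return &sfCache{
        items: make(map[string]*list.Element),
        pages: list.New(),
        load:  load,
    }
}

func (c *sfCache) Get(key string) string {
    c.mu.Lock()
    if e, ok := c.items[key]; ok {
        c.pages.MoveToFront(e)
        v := e.Value.(*page).Value
        c.mu.Unlock()
        return v
    }
    c.mu.Unlock()

    // Only one goroutine per key runs the loader; concurrent callers share its result.
    v, _, _ := c.group.Do(key, func() (interface{}, error) {
        val := c.load(key)
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.pages.Len() >= CacheSize {
            if oldest := c.pages.Back(); oldest != nil {
                delete(c.items, oldest.Value.(*page).Key)
                c.pages.Remove(oldest)
            }
        }
        c.items[key] = c.pages.PushFront(&page{Key: key, Value: val})
        return val, nil
    })
    return v.(string)
}

The LRU bookkeeping still happens under a plain mutex; singleflight only removes the duplicate loads.
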
7-lfu-cache/cache_test.go

Lines changed: 80 additions & 0 deletions
@@ -1,9 +1,15 @@
 package main
 
 import (
+    "errors"
     "fmt"
+    "math/rand"
+    "os"
     "slices"
+    "strings"
+    "sync"
     "testing"
+    "time"
 )
 
 func TestCache(t *testing.T) {
@@ -76,3 +82,77 @@ func TestCache1(t *testing.T) {
         t.Errorf("keys should not contain luan")
     }
 }
+
+const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
+func generaterandomstringMathrand(length int) string {
+    if length <= 0 {
+        return ""
+    }
+
+    // Use strings.Builder for efficient string concatenation.
+    // It pre-allocates memory, avoiding multiple re-allocations.
+    var sb strings.Builder
+    sb.Grow(length) // Pre-allocate capacity for efficiency
+
+    charsetLen := len(charset)
+    for i := 0; i < length; i++ {
+        // Pick a random index from the charset
+        randomIndex := rand.Intn(charsetLen)
+        // Append the character at that index
+        sb.WriteByte(charset[randomIndex])
+    }
+
+    return sb.String()
+}
+
+// --- Test Main for Global Setup ---
+func TestMain(m *testing.M) {
+    // Seed the global random number generator once for all tests in this package.
+    // This is CRUCIAL for reproducible random behavior across test runs.
+    rand.New(rand.NewSource(time.Now().UnixNano()))
+
+    // Run all tests
+    code := m.Run()
+
+    // Exit with the test result code
+    os.Exit(code)
+}
+
+func TestCacheConcurrency(t *testing.T) {
+    cache, _ := NewLFUCache(5, func(key string) (string, error) {
+        return "", errors.New("Loader hasn't been implemented yet")
+    })
+
+    keyValueMap := []string{"vu", "nghia", "luan", "xanh", "orange", "thuong",
+        "tien", "lemon", "durian", "rambutant", "pear", "mango", "apple"}
+
+    var wg sync.WaitGroup
+    maxSetOperations := 10000
+    maxGetOperations := 5000
+    // Setter
+    for i := 0; i < 3; i++ {
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            for i := 0; i < maxSetOperations; i++ {
+                randomNumber := rand.Intn(len(keyValueMap)) + 0
+                cache.Set(keyValueMap[randomNumber], generaterandomstringMathrand(5))
+            }
+        }()
+    }
+
+    // 5 getters
+    for i := 0; i < 5; i++ {
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            for j := 0; j < maxGetOperations; j++ {
+                randomNumber := rand.Intn(len(keyValueMap)) + 0
+                cache.Get(keyValueMap[randomNumber])
+            }
+        }()
+    }
+
+    wg.Wait()
+}

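One note on TestMain above: rand.New(rand.NewSource(...)) builds a new *rand.Rand and its result is discarded here, so the package-level rand.Intn calls in the tests keep using the default global source, which Go 1.20 and later seeds automatically. Below is a minimal standalone sketch (not part of the commit) of the explicit-generator variant with a fixed seed for reproducible sequences; rng and randomStringSeeded are illustrative names, and because a *rand.Rand is not safe for concurrent use, the goroutines in TestCacheConcurrency should keep calling the package-level functions.

package sketch // standalone sketch, not part of the commit

import (
    "math/rand"
    "strings"
)

// mirrors the charset constant in the test file above
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// rng is an explicit generator with a fixed seed, for reproducible sequences.
// Use it from a single goroutine only.
var rng = rand.New(rand.NewSource(42))

// randomStringSeeded is an illustrative variant of generaterandomstringMathrand
// that draws from rng instead of the package-level generator.
func randomStringSeeded(length int) string {
    var sb strings.Builder
    sb.Grow(length)
    for i := 0; i < length; i++ {
        sb.WriteByte(charset[rng.Intn(len(charset))])
    }
    return sb.String()
}
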
7-lfu-cache/main.go

Lines changed: 9 additions & 2 deletions
@@ -18,7 +18,7 @@ type (
 )
 
 type baseCache struct {
-    mu         sync.RWMutex
+    mu         sync.Mutex
     size       int
     loaderFunc LoaderFunc
     loadGroup  LoadGroup
@@ -71,15 +71,19 @@ func (cache *LFUCache) GetFreq(buckets *list.Element) int {
 }
 
 func (cache *LFUCache) Get(key string) (string, error) {
+    cache.mu.Lock()
     if item, ok := cache.cache[key]; ok {
         // Move item to the higher bucket
+        v := item.value
         err := cache.moveToHigherBucket(item)
         if err != nil {
             return "", err
         }
+        cache.mu.Unlock()
 
-        return item.value, nil
+        return v, nil
     }
+    cache.mu.Unlock()
 
     // Miss, so load value
     value, err := cache.loaderFunc(key)
@@ -105,6 +109,9 @@ func (cache *LFUCache) GetKeys() []string {
 }
 
 func (cache *LFUCache) Set(key, value string) error {
+    cache.mu.Lock()
+    defer cache.mu.Unlock()
+
    if item, ok := cache.cache[key]; ok {
        item.value = value
        return nil

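In the Get change above the mutex is unlocked manually on each path, and the early return "", err after moveToHigherBucket fails happens while the lock is still held. Below is a minimal sketch (not part of the commit) of the hit path factored into a helper whose Lock/Unlock are paired via defer, assuming the LFUCache fields and moveToHigherBucket shown in this file; getCached is an illustrative name.

// Standalone sketch, not part of the commit. Assumes the LFUCache type, its
// cache map, item.value field, and moveToHigherBucket from 7-lfu-cache/main.go.
func (cache *LFUCache) getCached(key string) (string, bool, error) {
    cache.mu.Lock()
    defer cache.mu.Unlock() // released on every return path, including the error one

    item, ok := cache.cache[key]
    if !ok {
        return "", false, nil
    }
    v := item.value
    if err := cache.moveToHigherBucket(item); err != nil {
        return "", true, err
    }
    return v, true, nil
}

Get would call getCached first and fall through to loaderFunc only when the bool is false, keeping the load outside the lock as the diff already does.
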
playground/go.mod

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+module go/playground
+
+go 1.24.2
+
+require golang.org/x/sync v0.15.0

playground/go.sum

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=

playground/main.go

Lines changed: 45 additions & 1 deletion
@@ -1 +1,45 @@
-package playground
+package main
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "time"
+
+    "golang.org/x/sync/errgroup"
+)
+
+func hello() error { // hello needs to return error to be compatible with errgroup.Go
+    fmt.Println("hello world")
+    time.Sleep(time.Second * 10)
+    fmt.Println("hello world ended")
+    return nil // Return nil or an actual error
+}
+
+func hello1() error {
+    fmt.Println("hello world 1")
+    time.Sleep(time.Second * 1)
+    return errors.New("hello world 1 goes wrong")
+}
+
+func main() {
+    g, cancel := errgroup.WithContext(context.Background())
+    g.SetLimit(2) // Set a limit of 2 concurrent goroutines
+
+    // Pass the functions as values, not the result of their execution
+    g.Go(hello)  // Correct way: pass the function hello
+    g.Go(hello1) // Correct way: pass the function hello1
+
+    select {
+    case <-cancel.Done():
+        {
+            err := cancel.Err()
+            if err != nil {
+                fmt.Printf("Error occurred %s", err.Error())
+            }
+        }
+    }
+
+    _ = g.Wait()
+    fmt.Println("Done")
+}

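On playground/main.go: errgroup.WithContext returns a context that is cancelled when the first function passed to Go returns a non-nil error or when Wait returns, and Wait itself blocks until every function has returned and reports that first error, so checking Wait's result usually replaces the select on the context. A minimal sketch (not part of the commit) of main written that way, assuming the same imports and the hello/hello1 helpers from the file above:

func main() {
    g, ctx := errgroup.WithContext(context.Background())
    g.SetLimit(2) // at most 2 of the submitted functions run concurrently

    // ctx would normally be passed into the workers so they can stop early
    // once another worker has failed; it is unused in this tiny example.
    _ = ctx

    g.Go(hello)
    g.Go(hello1)

    // Wait blocks until hello and hello1 have both returned and yields the
    // first non-nil error (here, the one from hello1).
    if err := g.Wait(); err != nil {
        fmt.Printf("Error occurred %s\n", err.Error())
    }
    fmt.Println("Done")
}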