|
1 | | -////////////////////////////////////////////////////////////////////// |
2 | | -// |
3 | | -// Given is some code to cache key-value pairs from a database into |
4 | | -// the main memory (to reduce access time). Note that Go's maps are |
5 | | -// not entirely thread-safe. Multiple readers are fine, but multiple |
6 | | -// writers are not. Change the code to make this thread safe. |
7 | | -// |
8 | | - |
9 | 1 | package main |
10 | 2 |
|
11 | 3 | import ( |
12 | 4 | "container/list" |
| 5 | + "fmt" |
| 6 | + "sync" |
13 | 7 | "testing" |
14 | 8 | ) |
15 | 9 |
|
// CacheSize is the maximum number of entries the cache may hold before
// the least-recently-used entry is evicted.
const CacheSize = 100

// KeyStoreCacheLoader is the backing store a KeyStoreCache falls back to
// on a cache miss.
type KeyStoreCacheLoader interface {
	// Load fetches the value for a key from the backing store.
	Load(string) string
}

// page is a single key/value entry; it is the payload of each list element.
type page struct {
	Key   string
	Value string
}

// Future represents a pending or completed load for a single key. It lets
// any number of goroutines block until the one goroutine performing the
// load has finished, so the loader runs at most once per key.
//
// Publication order is the key invariant: result/err/value are written
// before SetResult calls wg.Done, so anything returned by Wait may be read
// without further locking.
type Future struct {
	wg     sync.WaitGroup         // released (wg.Done) only after the entry is in the LRU list
	result *list.Element          // the entry's element in pages; written before wg.Done
	err    error                  // load error, if any; written before wg.Done
	once   sync.Once              // guarantees the loader runs at most once
	loader func() (string, error) // performs the actual load
	value  string                 // loader's result; read only by the goroutine that ran Do
}

// newFuture returns a Future whose waiters are released by SetResult.
func newFuture(loader func() (string, error)) *Future {
	f := &Future{loader: loader}
	f.wg.Add(1) // matched by the single wg.Done in SetResult
	return f
}

// Do runs the loader at most once and records its result. It deliberately
// does NOT release waiters; that happens in SetResult, after the caller has
// inserted the loaded entry into the LRU list. This ordering is what makes
// result and err safe to read after Wait without extra synchronization.
func (f *Future) Do() {
	f.once.Do(func() {
		f.value, f.err = f.loader()
	})
}

// Wait blocks until SetResult has published the entry, then returns the
// entry's list element and any load error.
func (f *Future) Wait() (*list.Element, error) {
	f.wg.Wait()
	return f.result, f.err
}

// SetResult publishes the list element for this future and releases all
// waiters. It must be called exactly once, after Do has completed.
func (f *Future) SetResult(e *list.Element) {
	f.result = e
	f.wg.Done() // happens-after the write above, so waiters observe it
}

// KeyStoreCache is a thread-safe LRU cache for string key-value pairs.
// Concurrent Gets for the same missing key share a single Load call.
type KeyStoreCache struct {
	mu    sync.RWMutex            // guards cache and pages
	cache map[string]*Future      // key -> pending or completed load
	pages *list.List              // LRU order; front = most recently used
	load  func(key string) string // fallback loader for misses
}

// New creates an empty KeyStoreCache backed by the given loader.
func New(load KeyStoreCacheLoader) *KeyStoreCache {
	return &KeyStoreCache{
		cache: make(map[string]*Future),
		pages: list.New(),
		load:  load.Load,
	}
}

// Get returns the value for key, loading it from the backing store on a
// miss. Safe for concurrent use.
func (k *KeyStoreCache) Get(key string) string {
	// Fast path: the key is already cached or being loaded.
	k.mu.RLock()
	f, ok := k.cache[key]
	k.mu.RUnlock()
	if ok {
		return k.await(f, key)
	}

	// Slow path: re-check under the write lock, since another goroutine
	// may have inserted a future between our RUnlock and Lock.
	k.mu.Lock()
	if f, ok = k.cache[key]; ok {
		k.mu.Unlock()
		return k.await(f, key)
	}

	// Genuinely missing: publish a future so later callers wait on us,
	// then perform the load outside the lock.
	f = newFuture(func() (string, error) { return k.load(key), nil })
	k.cache[key] = f
	k.mu.Unlock()

	f.Do() // runs the loader once; waiters stay blocked until SetResult below

	k.mu.Lock()
	defer k.mu.Unlock()

	if f.err != nil {
		// Load failed: drop the future so a later Get can retry, then
		// release waiters; they observe the error via Wait.
		delete(k.cache, key)
		f.SetResult(nil)
		fmt.Printf("Error loading key '%s': %v\n", key, f.err)
		return ""
	}

	// Evict the least-recently-used entry if the cache is full.
	if k.pages.Len() >= CacheSize {
		if oldest := k.pages.Back(); oldest != nil {
			delete(k.cache, oldest.Value.(*page).Key)
			k.pages.Remove(oldest)
		}
	}

	elem := k.pages.PushFront(&page{Key: key, Value: f.value})
	f.SetResult(elem) // publish the real element and release waiters
	return f.value
}

// await blocks on f, bumps the entry to the front of the LRU list, and
// returns its value (or "" if the load failed).
func (k *KeyStoreCache) await(f *Future, key string) string {
	elem, err := f.Wait()
	if err != nil {
		fmt.Printf("Error loading key '%s': %v\n", key, err)
		return ""
	}
	k.mu.Lock()
	k.pages.MoveToFront(elem) // no-op if elem was evicted in the meantime
	k.mu.Unlock()
	return elem.Value.(*page).Value
}
65 | 163 |
|
|
0 commit comments