From 7d3670508322911be7f286a06c00a5b699d4cd4c Mon Sep 17 00:00:00 2001 From: Neha Sherpa Date: Wed, 11 Feb 2026 13:15:44 -0800 Subject: [PATCH 1/5] feat: Prefix keys with strategy name --- cmd/cachew/main.go | 12 ++-- internal/cache/api.go | 8 +-- internal/cache/cachetest/soak.go | 6 +- internal/cache/cachetest/suite.go | 48 ++++++++-------- internal/cache/disk.go | 68 ++++++++++++----------- internal/cache/disk_metadb.go | 45 +++++++++++---- internal/cache/http.go | 4 +- internal/cache/memory.go | 8 +-- internal/cache/noop.go | 8 +-- internal/cache/remote.go | 8 +-- internal/cache/s3.go | 63 ++++++++++++--------- internal/cache/tiered.go | 16 +++--- internal/snapshot/snapshot.go | 8 +-- internal/snapshot/snapshot_test.go | 42 +++++++------- internal/strategy/apiv1.go | 11 ++-- internal/strategy/artifactory.go | 5 +- internal/strategy/git/git.go | 4 +- internal/strategy/git/snapshot.go | 3 +- internal/strategy/git/snapshot_test.go | 2 +- internal/strategy/github_releases.go | 7 ++- internal/strategy/github_releases_test.go | 2 +- internal/strategy/gomod/cacher.go | 6 +- internal/strategy/handler/handler.go | 17 ++++-- internal/strategy/hermit.go | 8 ++- internal/strategy/hermit_test.go | 2 +- internal/strategy/host.go | 5 +- internal/strategy/host_test.go | 2 +- 27 files changed, 243 insertions(+), 175 deletions(-) diff --git a/cmd/cachew/main.go b/cmd/cachew/main.go index 547a167..513a13f 100644 --- a/cmd/cachew/main.go +++ b/cmd/cachew/main.go @@ -57,7 +57,7 @@ type GetCmd struct { func (c *GetCmd) Run(ctx context.Context, cache cache.Cache) error { defer c.Output.Close() - rc, headers, err := cache.Open(ctx, c.Key.Key()) + rc, headers, err := cache.Open(ctx, "", c.Key.Key()) if err != nil { return errors.Wrap(err, "failed to open object") } @@ -78,7 +78,7 @@ type StatCmd struct { } func (c *StatCmd) Run(ctx context.Context, cache cache.Cache) error { - headers, err := cache.Stat(ctx, c.Key.Key()) + headers, err := cache.Stat(ctx, "", c.Key.Key()) if err != nil { 
return errors.Wrap(err, "failed to stat object") } @@ -111,7 +111,7 @@ func (c *PutCmd) Run(ctx context.Context, cache cache.Cache) error { headers.Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(filename))) //nolint:perfsprint } - wc, err := cache.Create(ctx, c.Key.Key(), headers, c.TTL) + wc, err := cache.Create(ctx, "", c.Key.Key(), headers, c.TTL) if err != nil { return errors.Wrap(err, "failed to create object") } @@ -128,7 +128,7 @@ type DeleteCmd struct { } func (c *DeleteCmd) Run(ctx context.Context, cache cache.Cache) error { - return errors.Wrap(cache.Delete(ctx, c.Key.Key()), "failed to delete object") + return errors.Wrap(cache.Delete(ctx, "", c.Key.Key()), "failed to delete object") } type SnapshotCmd struct { @@ -140,7 +140,7 @@ type SnapshotCmd struct { func (c *SnapshotCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Archiving %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Create(ctx, cache, c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { + if err := snapshot.Create(ctx, cache, "", c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { return errors.Wrap(err, "failed to create snapshot") } @@ -155,7 +155,7 @@ type RestoreCmd struct { func (c *RestoreCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Restoring to %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Restore(ctx, cache, c.Key.Key(), c.Directory); err != nil { + if err := snapshot.Restore(ctx, cache, "", c.Key.Key(), c.Directory); err != nil { return errors.Wrap(err, "failed to restore snapshot") } diff --git a/internal/cache/api.go b/internal/cache/api.go index 6f13121..6aaa142 100644 --- a/internal/cache/api.go +++ b/internal/cache/api.go @@ -152,13 +152,13 @@ type Cache interface { // // Expired files MUST not be returned. // Must return os.ErrNotExist if the file does not exist. 
- Stat(ctx context.Context, key Key) (http.Header, error) + Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) // Open an existing file in the cache. // // Expired files MUST NOT be returned. // The returned headers MUST include a Last-Modified header. // Must return os.ErrNotExist if the file does not exist. - Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) + Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) // Create a new file in the cache. // // If "ttl" is zero, a maximum TTL MUST be used by the implementation. @@ -166,11 +166,11 @@ type Cache interface { // The file MUST NOT be available for read until completely written and closed. // // If the context is cancelled the object MUST NOT be made available in the cache. - Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) + Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) // Delete a file from the cache. // // MUST be atomic. - Delete(ctx context.Context, key Key) error + Delete(ctx context.Context, strategyName string, key Key) error // Stats returns health and usage statistics for the cache. Stats(ctx context.Context) (Stats, error) // Close the Cache. 
diff --git a/internal/cache/cachetest/soak.go b/internal/cache/cachetest/soak.go index 5490060..9b78107 100644 --- a/internal/cache/cachetest/soak.go +++ b/internal/cache/cachetest/soak.go @@ -194,7 +194,7 @@ func doWrite( } key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - writer, err := c.Create(ctx, key, nil, config.TTL) + writer, err := c.Create(ctx, "", key, nil, config.TTL) if err != nil { t.Errorf("failed to create cache entry: %+v", err) return @@ -248,7 +248,7 @@ func doRead( keyIdx := rng.IntN(config.NumObjects) key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - reader, _, err := c.Open(ctx, key) + reader, _, err := c.Open(ctx, "", key) if err != nil { if errors.Is(err, os.ErrNotExist) { atomic.AddInt64(&result.ReadMisses, 1) @@ -299,7 +299,7 @@ func doDelete( keyIdx := rng.IntN(config.NumObjects) key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - if err := c.Delete(ctx, key); err != nil { + if err := c.Delete(ctx, "", key); err != nil { if errors.Is(err, os.ErrNotExist) { return } diff --git a/internal/cache/cachetest/suite.go b/internal/cache/cachetest/suite.go index db43d3c..142847f 100644 --- a/internal/cache/cachetest/suite.go +++ b/internal/cache/cachetest/suite.go @@ -63,7 +63,7 @@ func testCreateAndOpen(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, time.Hour) + writer, err := c.Create(ctx, "", key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("hello world")) @@ -72,7 +72,7 @@ func testCreateAndOpen(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, key) + reader, _, err := c.Open(ctx, "", key) assert.NoError(t, err) defer reader.Close() @@ -87,7 +87,7 @@ func testNotFound(t *testing.T, c cache.Cache) { key := cache.NewKey("nonexistent") - _, _, err := c.Open(ctx, key) + _, _, err := c.Open(ctx, "", key) assert.IsError(t, err, os.ErrNotExist) } @@ -97,7 +97,7 @@ func testExpiration(t 
*testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, time.Millisecond*250) + writer, err := c.Create(ctx, "", key, nil, time.Millisecond*250) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -106,13 +106,13 @@ func testExpiration(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, key) + reader, _, err := c.Open(ctx, "", key) assert.NoError(t, err) assert.NoError(t, reader.Close()) time.Sleep(500 * time.Millisecond) - _, _, err = c.Open(ctx, key) + _, _, err = c.Open(ctx, "", key) assert.IsError(t, err, os.ErrNotExist) } @@ -122,7 +122,7 @@ func testDefaultTTL(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, 0) + writer, err := c.Create(ctx, "", key, nil, 0) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -131,7 +131,7 @@ func testDefaultTTL(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, key) + reader, _, err := c.Open(ctx, "", key) assert.NoError(t, err) assert.NoError(t, reader.Close()) } @@ -142,7 +142,7 @@ func testDelete(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, time.Hour) + writer, err := c.Create(ctx, "", key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -151,10 +151,10 @@ func testDelete(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - err = c.Delete(ctx, key) + err = c.Delete(ctx, "", key) assert.NoError(t, err) - _, _, err = c.Open(ctx, key) + _, _, err = c.Open(ctx, "", key) assert.IsError(t, err, os.ErrNotExist) } @@ -164,7 +164,7 @@ func testMultipleWrites(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, time.Hour) + writer, err := c.Create(ctx, "", key, nil, time.Hour) assert.NoError(t, err) _, err = 
writer.Write([]byte("hello ")) @@ -176,7 +176,7 @@ func testMultipleWrites(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, key) + reader, _, err := c.Open(ctx, "", key) assert.NoError(t, err) defer reader.Close() @@ -191,19 +191,19 @@ func testNotAvailableUntilClosed(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, key, nil, time.Hour) + writer, err := c.Create(ctx, "", key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) assert.NoError(t, err) - _, _, err = c.Open(ctx, key) + _, _, err = c.Open(ctx, "", key) assert.IsError(t, err, os.ErrNotExist) err = writer.Close() assert.NoError(t, err) - _, _, err = c.Open(ctx, key) + _, _, err = c.Open(ctx, "", key) assert.NoError(t, err) } @@ -220,7 +220,7 @@ func testHeaders(t *testing.T, c cache.Cache) { "X-Custom-Field": []string{"custom-value"}, } - writer, err := c.Create(ctx, key, headers, time.Hour) + writer, err := c.Create(ctx, "", key, headers, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data with headers")) @@ -230,7 +230,7 @@ func testHeaders(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Open and verify headers are returned - reader, returnedHeaders, err := c.Open(ctx, key) + reader, returnedHeaders, err := c.Open(ctx, "", key) assert.NoError(t, err) defer reader.Close() @@ -257,7 +257,7 @@ func testContextCancellation(t *testing.T, c cache.Cache) { // Create an object with the cancellable context key := cache.NewKey("test-cancelled") - writer, err := c.Create(cancelledCtx, key, http.Header{}, time.Hour) + writer, err := c.Create(cancelledCtx, "", key, http.Header{}, time.Hour) assert.NoError(t, err) // Write some data @@ -273,7 +273,7 @@ func testContextCancellation(t *testing.T, c cache.Cache) { assert.Contains(t, err.Error(), "cancel") // Object should not be in cache - _, _, err = c.Open(ctx, key) + _, _, err = c.Open(ctx, "", key) 
assert.IsError(t, err, os.ErrNotExist) } @@ -284,7 +284,7 @@ func testLastModified(t *testing.T, c cache.Cache) { key := cache.NewKey("test-last-modified") // Create an object without specifying Last-Modified - writer, err := c.Create(ctx, key, nil, time.Hour) + writer, err := c.Create(ctx, "", key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -294,7 +294,7 @@ func testLastModified(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Open and verify Last-Modified header is present - reader, headers, err := c.Open(ctx, key) + reader, headers, err := c.Open(ctx, "", key) assert.NoError(t, err) defer reader.Close() @@ -313,7 +313,7 @@ func testLastModified(t *testing.T, c cache.Cache) { "Last-Modified": []string{explicitTime.Format(http.TimeFormat)}, } - writer2, err := c.Create(ctx, key2, explicitHeaders, time.Hour) + writer2, err := c.Create(ctx, "", key2, explicitHeaders, time.Hour) assert.NoError(t, err) _, err = writer2.Write([]byte("test data 2")) @@ -323,7 +323,7 @@ func testLastModified(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Verify explicit Last-Modified is preserved - reader2, headers2, err := c.Open(ctx, key2) + reader2, headers2, err := c.Open(ctx, "", key2) assert.NoError(t, err) defer reader2.Close() diff --git a/internal/cache/disk.go b/internal/cache/disk.go index 7d28ba0..7a7ed56 100644 --- a/internal/cache/disk.go +++ b/internal/cache/disk.go @@ -151,7 +151,7 @@ func (d *Disk) Stats(_ context.Context) (Stats, error) { }, nil } -func (d *Disk) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (d *Disk) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl > d.config.MaxTTL || ttl == 0 { ttl = d.config.MaxTTL } @@ -164,7 +164,7 @@ func (d *Disk) Create(ctx context.Context, key Key, headers http.Header, ttl tim clonedHeaders.Set("Last-Modified", 
now.UTC().Format(http.TimeFormat)) } - path := d.keyToPath(key) + path := d.keyToPath(strategyName, key) fullPath := filepath.Join(d.config.Root, path) dir := filepath.Dir(fullPath) @@ -180,19 +180,20 @@ func (d *Disk) Create(ctx context.Context, key Key, headers http.Header, ttl tim expiresAt := now.Add(ttl) return &diskWriter{ - disk: d, - file: f, - key: key, - path: fullPath, - tempPath: f.Name(), - expiresAt: expiresAt, - headers: clonedHeaders, - ctx: ctx, + disk: d, + file: f, + key: key, + strategyName: strategyName, + path: fullPath, + tempPath: f.Name(), + expiresAt: expiresAt, + headers: clonedHeaders, + ctx: ctx, }, nil } -func (d *Disk) Delete(_ context.Context, key Key) error { - path := d.keyToPath(key) +func (d *Disk) Delete(_ context.Context, strategyName string, key Key) error { + path := d.keyToPath(strategyName, key) fullPath := filepath.Join(d.config.Root, path) // Check if file is expired @@ -224,8 +225,8 @@ func (d *Disk) Delete(_ context.Context, key Key) error { return nil } -func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { - path := d.keyToPath(key) +func (d *Disk) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { + path := d.keyToPath(strategyName, key) fullPath := filepath.Join(d.config.Root, path) if _, err := os.Stat(fullPath); err != nil { @@ -238,7 +239,7 @@ func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { } if time.Now().After(expiresAt) { - return nil, errors.Join(fs.ErrNotExist, d.Delete(ctx, key)) + return nil, errors.Join(fs.ErrNotExist, d.Delete(ctx, strategyName, key)) } headers, err := d.db.getHeaders(key) @@ -249,8 +250,8 @@ func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { return headers, nil } -func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { - path := d.keyToPath(key) +func (d *Disk) Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) { + path := 
d.keyToPath(strategyName, key) fullPath := filepath.Join(d.config.Root, path) f, err := os.Open(fullPath) @@ -265,7 +266,7 @@ func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, e now := time.Now() if now.After(expiresAt) { - return nil, nil, errors.Join(fs.ErrNotExist, f.Close(), d.Delete(ctx, key)) + return nil, nil, errors.Join(fs.ErrNotExist, f.Close(), d.Delete(ctx, strategyName, key)) } headers, err := d.db.getHeaders(key) @@ -284,9 +285,13 @@ func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, e return f, headers, nil } -func (d *Disk) keyToPath(key Key) string { +func (d *Disk) keyToPath(strategyName string, key Key) string { hexKey := key.String() + // Use first two hex digits as directory, full hex as filename + if strategyName != "" { + return filepath.Join(strategyName, hexKey[:2], hexKey) + } return filepath.Join(hexKey[:2], hexKey) } @@ -325,8 +330,8 @@ func (d *Disk) evict() error { var expiredKeys []Key now := time.Now() - err := d.db.walk(func(key Key, expiresAt time.Time) error { - path := d.keyToPath(key) + err := d.db.walk(func(key Key, strategyName string, expiresAt time.Time) error { + path := d.keyToPath(strategyName, key) fullPath := filepath.Join(d.config.Root, path) info, err := os.Stat(fullPath) @@ -394,15 +399,16 @@ func (d *Disk) evict() error { } type diskWriter struct { - disk *Disk - file *os.File - key Key - path string - tempPath string - expiresAt time.Time - headers http.Header - size int64 - ctx context.Context + disk *Disk + file *os.File + key Key + strategyName string + path string + tempPath string + expiresAt time.Time + headers http.Header + size int64 + ctx context.Context } func (w *diskWriter) Write(p []byte) (int, error) { @@ -437,7 +443,7 @@ func (w *diskWriter) Close() error { return errors.Errorf("failed to rename temp file: %w", err) } - if err := w.disk.db.set(w.key, w.expiresAt, w.headers); err != nil { + if err := w.disk.db.set(w.key, w.strategyName, 
w.expiresAt, w.headers); err != nil { return errors.Join(errors.Errorf("failed to set metadata: %w", err), os.Remove(w.path)) } diff --git a/internal/cache/disk_metadb.go b/internal/cache/disk_metadb.go index 9349aa7..d118be4 100644 --- a/internal/cache/disk_metadb.go +++ b/internal/cache/disk_metadb.go @@ -12,8 +12,9 @@ import ( //nolint:gochecknoglobals var ( - ttlBucketName = []byte("ttl") - headersBucketName = []byte("headers") + ttlBucketName = []byte("ttl") + headersBucketName = []byte("headers") + strategyBucketName = []byte("strategy") ) // diskMetaDB manages expiration times and headers for cache entries using bbolt. @@ -37,6 +38,9 @@ func newDiskMetaDB(dbPath string) (*diskMetaDB, error) { if _, err := tx.CreateBucketIfNotExists(headersBucketName); err != nil { return errors.WithStack(err) } + if _, err := tx.CreateBucketIfNotExists(strategyBucketName); err != nil { + return errors.WithStack(err) + } return nil }); err != nil { return nil, errors.Join(errors.Errorf("failed to create buckets: %w", err), db.Close()) @@ -57,7 +61,7 @@ func (s *diskMetaDB) setTTL(key Key, expiresAt time.Time) error { })) } -func (s *diskMetaDB) set(key Key, expiresAt time.Time, headers http.Header) error { +func (s *diskMetaDB) set(key Key, strategyName string, expiresAt time.Time, headers http.Header) error { ttlBytes, err := expiresAt.MarshalBinary() if err != nil { return errors.Errorf("failed to marshal TTL: %w", err) @@ -75,7 +79,12 @@ func (s *diskMetaDB) set(key Key, expiresAt time.Time, headers http.Header) erro } headersBucket := tx.Bucket(headersBucketName) - return errors.WithStack(headersBucket.Put(key[:], headersBytes)) + if err := headersBucket.Put(key[:], headersBytes); err != nil { + return errors.WithStack(err) + } + + strategyBucket := tx.Bucket(strategyBucketName) + return errors.WithStack(strategyBucket.Put(key[:], []byte(strategyName))) })) } @@ -113,7 +122,12 @@ func (s *diskMetaDB) delete(key Key) error { } headersBucket := tx.Bucket(headersBucketName) 
- return errors.WithStack(headersBucket.Delete(key[:])) + if err := headersBucket.Delete(key[:]); err != nil { + return errors.WithStack(err) + } + + strategyBucket := tx.Bucket(strategyBucketName) + return errors.WithStack(strategyBucket.Delete(key[:])) })) } @@ -124,6 +138,7 @@ func (s *diskMetaDB) deleteAll(keys []Key) error { return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) headersBucket := tx.Bucket(headersBucketName) + strategyBucket := tx.Bucket(strategyBucketName) for _, key := range keys { if err := ttlBucket.Delete(key[:]); err != nil { @@ -132,18 +147,22 @@ func (s *diskMetaDB) deleteAll(keys []Key) error { if err := headersBucket.Delete(key[:]); err != nil { return errors.Errorf("failed to delete headers: %w", err) } + if err := strategyBucket.Delete(key[:]); err != nil { + return errors.Errorf("failed to delete strategy: %w", err) + } } return nil })) } -func (s *diskMetaDB) walk(fn func(key Key, expiresAt time.Time) error) error { +func (s *diskMetaDB) walk(fn func(key Key, strategyName string, expiresAt time.Time) error) error { return errors.WithStack(s.db.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(ttlBucketName) - if bucket == nil { + ttlBucket := tx.Bucket(ttlBucketName) + if ttlBucket == nil { return nil } - return bucket.ForEach(func(k, v []byte) error { + strategyBucket := tx.Bucket(strategyBucketName) + return ttlBucket.ForEach(func(k, v []byte) error { if len(k) != 32 { return nil } @@ -153,7 +172,13 @@ func (s *diskMetaDB) walk(fn func(key Key, expiresAt time.Time) error) error { if err := expiresAt.UnmarshalBinary(v); err != nil { return nil //nolint:nilerr } - return fn(key, expiresAt) + strategyName := "" + if strategyBucket != nil { + if strategyBytes := strategyBucket.Get(k); strategyBytes != nil { + strategyName = string(strategyBytes) + } + } + return fn(key, strategyName, expiresAt) }) })) } diff --git a/internal/cache/http.go b/internal/cache/http.go index 
ec2ba01..f21240d 100644 --- a/internal/cache/http.go +++ b/internal/cache/http.go @@ -18,7 +18,7 @@ func Fetch(client *http.Client, r *http.Request, c Cache) (*http.Response, error url := r.URL.String() key := NewKey(url) - cr, headers, err := c.Open(r.Context(), key) + cr, headers, err := c.Open(r.Context(), "", key) if err == nil { return &http.Response{ Status: "200 OK", @@ -53,7 +53,7 @@ func FetchDirect(client *http.Client, r *http.Request, c Cache, key Key) (*http. } responseHeaders := maps.Clone(resp.Header) - cw, err := c.Create(r.Context(), key, responseHeaders, 0) + cw, err := c.Create(r.Context(), "", key, responseHeaders, 0) if err != nil { _ = resp.Body.Close() return nil, httputil.Errorf(http.StatusInternalServerError, "failed to create cache entry: %w", err) diff --git a/internal/cache/memory.go b/internal/cache/memory.go index ced3d7a..dcf3ca8 100644 --- a/internal/cache/memory.go +++ b/internal/cache/memory.go @@ -53,7 +53,7 @@ func NewMemory(ctx context.Context, config MemoryConfig) (*Memory, error) { func (m *Memory) String() string { return fmt.Sprintf("memory:%dMB", m.config.LimitMB) } -func (m *Memory) Stat(_ context.Context, key Key) (http.Header, error) { +func (m *Memory) Stat(_ context.Context, _ string, key Key) (http.Header, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -69,7 +69,7 @@ func (m *Memory) Stat(_ context.Context, key Key) (http.Header, error) { return entry.headers, nil } -func (m *Memory) Open(_ context.Context, key Key) (io.ReadCloser, http.Header, error) { +func (m *Memory) Open(_ context.Context, _ string, key Key) (io.ReadCloser, http.Header, error) { m.mu.RLock() defer m.mu.RUnlock() @@ -85,7 +85,7 @@ func (m *Memory) Open(_ context.Context, key Key) (io.ReadCloser, http.Header, e return io.NopCloser(bytes.NewReader(entry.data)), entry.headers, nil } -func (m *Memory) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (m *Memory) Create(ctx context.Context, _ 
string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl == 0 { ttl = m.config.MaxTTL } @@ -110,7 +110,7 @@ func (m *Memory) Create(ctx context.Context, key Key, headers http.Header, ttl t return writer, nil } -func (m *Memory) Delete(_ context.Context, key Key) error { +func (m *Memory) Delete(_ context.Context, _ string, key Key) error { m.mu.Lock() defer m.mu.Unlock() diff --git a/internal/cache/noop.go b/internal/cache/noop.go index ad645ab..7d667af 100644 --- a/internal/cache/noop.go +++ b/internal/cache/noop.go @@ -22,20 +22,20 @@ func NoOpCache() Cache { func (n *noOpCache) String() string { return "noop" } -func (n *noOpCache) Stat(_ context.Context, _ Key) (http.Header, error) { +func (n *noOpCache) Stat(_ context.Context, _ string, _ Key) (http.Header, error) { return nil, os.ErrNotExist } -func (n *noOpCache) Open(_ context.Context, _ Key) (io.ReadCloser, http.Header, error) { +func (n *noOpCache) Open(_ context.Context, _ string, _ Key) (io.ReadCloser, http.Header, error) { return nil, nil, os.ErrNotExist } -func (n *noOpCache) Create(_ context.Context, _ Key, _ http.Header, _ time.Duration) (io.WriteCloser, error) { +func (n *noOpCache) Create(_ context.Context, _ string, _ Key, _ http.Header, _ time.Duration) (io.WriteCloser, error) { // Return a discard writer that does nothing return &noOpWriter{}, nil } -func (n *noOpCache) Delete(_ context.Context, _ Key) error { +func (n *noOpCache) Delete(_ context.Context, _ string, _ Key) error { return nil } diff --git a/internal/cache/remote.go b/internal/cache/remote.go index 05dac25..f3d2a78 100644 --- a/internal/cache/remote.go +++ b/internal/cache/remote.go @@ -36,7 +36,7 @@ func NewRemote(baseURL string) *Remote { func (c *Remote) String() string { return "remote:" + c.baseURL } // Open retrieves an object from the remote. 
-func (c *Remote) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { +func (c *Remote) Open(ctx context.Context, _ string, key Key) (io.ReadCloser, http.Header, error) { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -65,7 +65,7 @@ func (c *Remote) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, } // Stat retrieves headers for an object from the remote. -func (c *Remote) Stat(ctx context.Context, key Key) (http.Header, error) { +func (c *Remote) Stat(ctx context.Context, _ string, key Key) (http.Header, error) { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) if err != nil { @@ -93,7 +93,7 @@ func (c *Remote) Stat(ctx context.Context, key Key) (http.Header, error) { } // Create stores a new object in the remote. -func (c *Remote) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (c *Remote) Create(ctx context.Context, _ string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { pr, pw := io.Pipe() url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) @@ -135,7 +135,7 @@ func (c *Remote) Create(ctx context.Context, key Key, headers http.Header, ttl t } // Delete removes an object from the remote. 
-func (c *Remote) Delete(ctx context.Context, key Key) error { +func (c *Remote) Delete(ctx context.Context, _ string, key Key) error { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { diff --git a/internal/cache/s3.go b/internal/cache/s3.go index cbb6734..ad3a9ce 100644 --- a/internal/cache/s3.go +++ b/internal/cache/s3.go @@ -156,14 +156,21 @@ func (s *S3) Close() error { return nil } -func (s *S3) keyToPath(key Key) string { +func (s *S3) keyToPath(strategyName string, key Key) string { hexKey := key.String() + prefix := "" + + // Add strategy name as prefix if available + if strategyName != "" { + prefix = strategyName + "/" + } + // Use first two hex digits as directory, full hex as filename - return hexKey[:2] + "/" + hexKey + return prefix + hexKey[:2] + "/" + hexKey } -func (s *S3) Stat(ctx context.Context, key Key) (http.Header, error) { - objectName := s.keyToPath(key) +func (s *S3) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { + objectName := s.keyToPath(strategyName, key) // Get object info to check metadata objInfo, err := s.client.StatObject(ctx, s.config.Bucket, objectName, minio.StatObjectOptions{}) @@ -183,7 +190,7 @@ func (s *S3) Stat(ctx context.Context, key Key) (http.Header, error) { if err := expiresAt.UnmarshalText([]byte(expiresAtStr)); err == nil { if time.Now().After(expiresAt) { // Object expired, delete it and return not found - return nil, errors.Join(os.ErrNotExist, s.Delete(ctx, key)) + return nil, errors.Join(os.ErrNotExist, s.Delete(ctx, strategyName, key)) } } } @@ -205,8 +212,8 @@ func (s *S3) Stat(ctx context.Context, key Key) (http.Header, error) { return headers, nil } -func (s *S3) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { - objectName := s.keyToPath(key) +func (s *S3) Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) { + 
objectName := s.keyToPath(strategyName, key) // Get object info to retrieve metadata and check expiration objInfo, err := s.client.StatObject(ctx, s.config.Bucket, objectName, minio.StatObjectOptions{}) @@ -224,7 +231,7 @@ func (s *S3) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, err var expiresAt time.Time if err := expiresAt.UnmarshalText([]byte(expiresAtStr)); err == nil { if time.Now().After(expiresAt) { - return nil, nil, errors.Join(os.ErrNotExist, s.Delete(ctx, key)) + return nil, nil, errors.Join(os.ErrNotExist, s.Delete(ctx, strategyName, key)) } } } @@ -275,7 +282,7 @@ func (r *s3Reader) Close() error { return errors.WithStack(r.obj.Close()) } -func (s *S3) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (s *S3) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl > s.config.MaxTTL || ttl == 0 { ttl = s.config.MaxTTL } @@ -289,13 +296,14 @@ func (s *S3) Create(ctx context.Context, key Key, headers http.Header, ttl time. pr, pw := io.Pipe() writer := &s3Writer{ - s3: s, - key: key, - pipe: pw, - expiresAt: expiresAt, - headers: clonedHeaders, - ctx: ctx, - errCh: make(chan error, 1), + s3: s, + key: key, + strategyName: strategyName, + pipe: pw, + expiresAt: expiresAt, + headers: clonedHeaders, + ctx: ctx, + errCh: make(chan error, 1), } // Start upload in background goroutine @@ -304,8 +312,8 @@ func (s *S3) Create(ctx context.Context, key Key, headers http.Header, ttl time. 
return writer, nil } -func (s *S3) Delete(ctx context.Context, key Key) error { - objectName := s.keyToPath(key) +func (s *S3) Delete(ctx context.Context, strategyName string, key Key) error { + objectName := s.keyToPath(strategyName, key) err := s.client.RemoveObject(ctx, s.config.Bucket, objectName, minio.RemoveObjectOptions{}) if err != nil { @@ -322,14 +330,15 @@ func (s *S3) Stats(_ context.Context) (Stats, error) { } type s3Writer struct { - s3 *S3 - key Key - pipe *io.PipeWriter - expiresAt time.Time - headers http.Header - ctx context.Context - errCh chan error - uploadErr error + s3 *S3 + key Key + strategyName string + pipe *io.PipeWriter + expiresAt time.Time + headers http.Header + ctx context.Context + errCh chan error + uploadErr error } func (w *s3Writer) Write(p []byte) (int, error) { @@ -376,7 +385,7 @@ func (w *s3Writer) upload(pr *io.PipeReader) { _ = pr.CloseWithError(uploadErr) }() - objectName := w.s3.keyToPath(w.key) + objectName := w.s3.keyToPath(w.strategyName, w.key) // Prepare user metadata userMetadata := make(map[string]string) diff --git a/internal/cache/tiered.go b/internal/cache/tiered.go index 6466a6a..60e5149 100644 --- a/internal/cache/tiered.go +++ b/internal/cache/tiered.go @@ -50,7 +50,7 @@ func (t Tiered) Close() error { } // Create a new object. All underlying caches will be written to in sequence. -func (t Tiered) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (t Tiered) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { // The first error will cancel all outstanding writes. 
ctx, cancel := context.WithCancelCause(ctx) @@ -59,7 +59,7 @@ func (t Tiered) Create(ctx context.Context, key Key, headers http.Header, ttl ti wg := sync.WaitGroup{} for i, cache := range t.caches { wg.Go(func() { - w, err := cache.Create(ctx, key, headers, ttl) + w, err := cache.Create(ctx, strategyName, key, headers, ttl) if err != nil { cancel(err) } @@ -78,11 +78,11 @@ func (t Tiered) Create(ctx context.Context, key Key, headers http.Header, ttl ti } // Delete from all underlying caches. All errors are returned. -func (t Tiered) Delete(ctx context.Context, key Key) error { +func (t Tiered) Delete(ctx context.Context, strategyName string, key Key) error { wg := sync.WaitGroup{} errs := make([]error, len(t.caches)) for i, cache := range t.caches { - wg.Go(func() { errs[i] = errors.WithStack(cache.Delete(ctx, key)) }) + wg.Go(func() { errs[i] = errors.WithStack(cache.Delete(ctx, strategyName, key)) }) } wg.Wait() return errors.Join(errs...) @@ -91,10 +91,10 @@ func (t Tiered) Delete(ctx context.Context, key Key) error { // Stat returns headers from the first cache that succeeds. // // If all caches fail, all errors are returned. -func (t Tiered) Stat(ctx context.Context, key Key) (http.Header, error) { +func (t Tiered) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { errs := make([]error, len(t.caches)) for i, c := range t.caches { - headers, err := c.Stat(ctx, key) + headers, err := c.Stat(ctx, strategyName, key) errs[i] = err if errors.Is(err, os.ErrNotExist) { continue @@ -109,10 +109,10 @@ func (t Tiered) Stat(ctx context.Context, key Key) (http.Header, error) { // Open returns a reader from the first cache that succeeds. // // If all caches fail, all errors are returned. 
-func (t Tiered) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { +func (t Tiered) Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) { errs := make([]error, len(t.caches)) for i, c := range t.caches { - r, headers, err := c.Open(ctx, key) + r, headers, err := c.Open(ctx, strategyName, key) errs[i] = err if errors.Is(err, os.ErrNotExist) { continue diff --git a/internal/snapshot/snapshot.go b/internal/snapshot/snapshot.go index 2e8f5bb..e3ce0fe 100644 --- a/internal/snapshot/snapshot.go +++ b/internal/snapshot/snapshot.go @@ -21,7 +21,7 @@ import ( // The archive preserves all file permissions, ownership, and symlinks. // The operation is fully streaming - no temporary files are created. // Exclude patterns use tar's --exclude syntax. -func Create(ctx context.Context, remote cache.Cache, key cache.Key, directory string, ttl time.Duration, excludePatterns []string) error { +func Create(ctx context.Context, remote cache.Cache, strategyName string, key cache.Key, directory string, ttl time.Duration, excludePatterns []string) error { // Verify directory exists if info, err := os.Stat(directory); err != nil { return errors.Wrap(err, "failed to stat directory") @@ -33,7 +33,7 @@ func Create(ctx context.Context, remote cache.Cache, key cache.Key, directory st headers.Set("Content-Type", "application/zstd") headers.Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(directory)+".tar.zst")) - wc, err := remote.Create(ctx, key, headers, ttl) + wc, err := remote.Create(ctx, strategyName, key, headers, ttl) if err != nil { return errors.Wrap(err, "failed to create object") } @@ -90,8 +90,8 @@ func Create(ctx context.Context, remote cache.Cache, key cache.Key, directory st // The archive is decompressed with zstd and extracted with tar, preserving // all file permissions, ownership, and symlinks. // The operation is fully streaming - no temporary files are created. 
-func Restore(ctx context.Context, remote cache.Cache, key cache.Key, directory string) error { - rc, _, err := remote.Open(ctx, key) +func Restore(ctx context.Context, remote cache.Cache, strategyName string, key cache.Key, directory string) error { + rc, _, err := remote.Open(ctx, strategyName, key) if err != nil { return errors.Wrap(err, "failed to open object") } diff --git a/internal/snapshot/snapshot_test.go b/internal/snapshot/snapshot_test.go index b691947..efe7dea 100644 --- a/internal/snapshot/snapshot_test.go +++ b/internal/snapshot/snapshot_test.go @@ -30,15 +30,15 @@ func TestCreateAndRestoreRoundTrip(t *testing.T) { assert.NoError(t, os.Mkdir(filepath.Join(srcDir, "subdir"), 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "subdir", "file3.txt"), []byte("content3"), 0o644)) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) - headers, err := mem.Stat(ctx, key) + headers, err := mem.Stat(ctx, "", key) assert.NoError(t, err) assert.Equal(t, "application/zstd", headers.Get("Content-Type")) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) content1, err := os.ReadFile(filepath.Join(dstDir, "file1.txt")) @@ -71,11 +71,11 @@ func TestCreateWithExcludePatterns(t *testing.T) { assert.NoError(t, os.Mkdir(filepath.Join(srcDir, "logs"), 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "logs", "app.log"), []byte("excluded"), 0o644)) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, []string{"*.log", "logs"}) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, []string{"*.log", "logs"}) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) _, err = os.Stat(filepath.Join(dstDir, "include.txt")) @@ -99,11 +99,11 @@ func 
TestCreatePreservesSymlinks(t *testing.T) { assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "target.txt"), []byte("target"), 0o644)) assert.NoError(t, os.Symlink("target.txt", filepath.Join(srcDir, "link.txt"))) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) info, err := os.Lstat(filepath.Join(dstDir, "link.txt")) @@ -122,7 +122,7 @@ func TestCreateNonexistentDirectory(t *testing.T) { defer mem.Close() key := cache.Key{1, 2, 3} - err = snapshot.Create(ctx, mem, key, "/nonexistent/directory", time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, "/nonexistent/directory", time.Hour, nil) assert.Error(t, err) } @@ -136,7 +136,7 @@ func TestCreateNotADirectory(t *testing.T) { tmpFile := filepath.Join(t.TempDir(), "file.txt") assert.NoError(t, os.WriteFile(tmpFile, []byte("content"), 0o644)) - err = snapshot.Create(ctx, mem, key, tmpFile, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, tmpFile, time.Hour, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "not a directory") } @@ -158,7 +158,7 @@ func TestCreateContextCancellation(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) cancel() - err = snapshot.Create(cancelCtx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(cancelCtx, mem, "", key, srcDir, time.Hour, nil) assert.Error(t, err) } @@ -170,7 +170,7 @@ func TestRestoreNonexistentKey(t *testing.T) { key := cache.Key{1, 2, 3} dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.Error(t, err) } @@ -184,11 +184,11 @@ func TestRestoreCreatesTargetDirectory(t *testing.T) { srcDir := t.TempDir() assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "file.txt"), []byte("content"), 
0o644)) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := filepath.Join(t.TempDir(), "nested", "target") - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) content, err := os.ReadFile(filepath.Join(dstDir, "file.txt")) @@ -210,14 +210,14 @@ func TestRestoreContextCancellation(t *testing.T) { assert.NoError(t, os.WriteFile(filename, content, 0o644)) } - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) cancelCtx, cancel := context.WithCancel(context.Background()) cancel() dstDir := t.TempDir() - err = snapshot.Restore(cancelCtx, mem, key, dstDir) + err = snapshot.Restore(cancelCtx, mem, "", key, dstDir) assert.Error(t, err) } @@ -230,11 +230,11 @@ func TestCreateEmptyDirectory(t *testing.T) { srcDir := t.TempDir() - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) entries, err := os.ReadDir(dstDir) @@ -254,11 +254,11 @@ func TestCreateWithNestedDirectories(t *testing.T) { assert.NoError(t, os.MkdirAll(deepPath, 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(deepPath, "deep.txt"), []byte("deep content"), 0o644)) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, key, dstDir) + err = snapshot.Restore(ctx, mem, "", key, dstDir) assert.NoError(t, err) content, err := os.ReadFile(filepath.Join(dstDir, "a", "b", "c", "d", "e", "deep.txt")) @@ -276,10 +276,10 @@ func 
TestCreateSetsCorrectHeaders(t *testing.T) { srcDir := t.TempDir() assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "file.txt"), []byte("content"), 0o644)) - err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) assert.NoError(t, err) - headers, err := mem.Stat(ctx, key) + headers, err := mem.Stat(ctx, "", key) assert.NoError(t, err) assert.Equal(t, "application/zstd", headers.Get("Content-Type")) assert.Contains(t, headers.Get("Content-Disposition"), "attachment") diff --git a/internal/strategy/apiv1.go b/internal/strategy/apiv1.go index d92b0b9..44201b0 100644 --- a/internal/strategy/apiv1.go +++ b/internal/strategy/apiv1.go @@ -15,6 +15,9 @@ import ( "github.com/block/cachew/internal/logging" ) +// APIV1Name is the strategy name used for cache prefixing. +const APIV1Name = "apiv1" + func RegisterAPIV1(r *Registry) { Register(r, "apiv1", "The stable API of the cache server.", NewAPIV1) } @@ -49,7 +52,7 @@ func (d *APIV1) statObject(w http.ResponseWriter, r *http.Request) { return } - headers, err := d.cache.Stat(r.Context(), key) + headers, err := d.cache.Stat(r.Context(), APIV1Name, key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -70,7 +73,7 @@ func (d *APIV1) getObject(w http.ResponseWriter, r *http.Request) { return } - cr, headers, err := d.cache.Open(r.Context(), key) + cr, headers, err := d.cache.Open(r.Context(), APIV1Name, key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -111,7 +114,7 @@ func (d *APIV1) putObject(w http.ResponseWriter, r *http.Request) { // Extract and filter headers from request headers := cache.FilterTransportHeaders(r.Header) - cw, err := d.cache.Create(r.Context(), key, headers, ttl) + cw, err := d.cache.Create(r.Context(), APIV1Name, key, headers, ttl) if err != nil { d.httpError(w, http.StatusInternalServerError, 
err, "Failed to create cache writer", slog.String("key", key.String())) return @@ -135,7 +138,7 @@ func (d *APIV1) deleteObject(w http.ResponseWriter, r *http.Request) { return } - err = d.cache.Delete(r.Context(), key) + err = d.cache.Delete(r.Context(), APIV1Name, key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) diff --git a/internal/strategy/artifactory.go b/internal/strategy/artifactory.go index 08c105c..d801131 100644 --- a/internal/strategy/artifactory.go +++ b/internal/strategy/artifactory.go @@ -14,8 +14,10 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) +const artifactoryStrategyName = "artifactory" + func RegisterArtifactory(r *Registry) { - Register(r, "artifactory", "Caches artifacts from an Artifactory server.", NewArtifactory) + Register(r, artifactoryStrategyName, "Caches artifacts from an Artifactory server.", NewArtifactory) } // ArtifactoryConfig represents the configuration for the Artifactory strategy. @@ -66,6 +68,7 @@ func NewArtifactory(ctx context.Context, config ArtifactoryConfig, cache cache.C } hdlr := handler.New(a.client, cache). + StrategyName(artifactoryStrategyName). CacheKey(func(r *http.Request) string { return a.buildTargetURL(r).String() }). 
diff --git a/internal/strategy/git/git.go b/internal/strategy/git/git.go index 7bd38f5..7e539b2 100644 --- a/internal/strategy/git/git.go +++ b/internal/strategy/git/git.go @@ -26,6 +26,8 @@ import ( "github.com/block/cachew/internal/strategy" ) +const StrategyName = "git" + func Register(r *strategy.Registry, scheduler jobscheduler.Scheduler, cloneManagerProvider gitclone.ManagerProvider, tokenManagerProvider githubapp.TokenManagerProvider) { strategy.Register(r, "git", "Caches Git repositories, including tarball snapshots.", func(ctx context.Context, config Config, cache cache.Cache, mux strategy.Mux) (*Strategy, error) { return New(ctx, config, scheduler, cache, mux, cloneManagerProvider, tokenManagerProvider) @@ -366,7 +368,7 @@ func (s *Strategy) serveCachedArtifact(w http.ResponseWriter, r *http.Request, h upstreamURL := "https://" + host + "/" + repoPath cacheKey := cache.NewKey(upstreamURL + "." + artifact) - reader, headers, err := s.cache.Open(ctx, cacheKey) + reader, headers, err := s.cache.Open(ctx, StrategyName, cacheKey) if err != nil { if errors.Is(err, os.ErrNotExist) { logger.DebugContext(ctx, artifact+" not found in cache", diff --git a/internal/strategy/git/snapshot.go b/internal/strategy/git/snapshot.go index a027d2c..6217800 100644 --- a/internal/strategy/git/snapshot.go +++ b/internal/strategy/git/snapshot.go @@ -57,13 +57,12 @@ func (s *Strategy) generateAndUploadSnapshot(ctx context.Context, repo *gitclone ttl := 7 * 24 * time.Hour excludePatterns := []string{"*.lock"} - err = snapshot.Create(ctx, s.cache, cacheKey, snapshotDir, ttl, excludePatterns) + err = snapshot.Create(ctx, s.cache, StrategyName, cacheKey, snapshotDir, ttl, excludePatterns) // Always clean up the snapshot working directory. 
if rmErr := os.RemoveAll(snapshotDir); rmErr != nil { logger.WarnContext(ctx, "Failed to clean up snapshot dir", slog.String("error", rmErr.Error())) } - if err != nil { logger.ErrorContext(ctx, "Snapshot generation failed", slog.String("upstream", upstream), slog.String("error", err.Error())) return errors.Wrap(err, "create snapshot") diff --git a/internal/strategy/git/snapshot_test.go b/internal/strategy/git/snapshot_test.go index d59566e..1982726 100644 --- a/internal/strategy/git/snapshot_test.go +++ b/internal/strategy/git/snapshot_test.go @@ -43,7 +43,7 @@ func TestSnapshotHTTPEndpoint(t *testing.T) { headers := make(map[string][]string) headers["Content-Type"] = []string{"application/zstd"} - writer, err := memCache.Create(ctx, cacheKey, headers, 24*time.Hour) + writer, err := memCache.Create(ctx, "", cacheKey, headers, 24*time.Hour) assert.NoError(t, err) _, err = writer.Write(snapshotData) assert.NoError(t, err) diff --git a/internal/strategy/github_releases.go b/internal/strategy/github_releases.go index 5add55b..fe1dbdd 100644 --- a/internal/strategy/github_releases.go +++ b/internal/strategy/github_releases.go @@ -17,8 +17,10 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) +const githubReleasesStrategyName = "github-releases" + func RegisterGitHubReleases(r *Registry, tokenManagerProvider githubapp.TokenManagerProvider) { - Register(r, "github-releases", "Caches public and authenticated GitHub releases.", func(ctx context.Context, config GitHubReleasesConfig, cache cache.Cache, mux Mux) (*GitHubReleases, error) { + Register(r, githubReleasesStrategyName, "Caches public and authenticated GitHub releases.", func(ctx context.Context, config GitHubReleasesConfig, cache cache.Cache, mux Mux) (*GitHubReleases, error) { return NewGitHubReleases(ctx, config, cache, mux, tokenManagerProvider) }) } @@ -59,6 +61,7 @@ func NewGitHubReleases(ctx context.Context, config GitHubReleasesConfig, cache c } // eg. 
https://github.com/alecthomas/chroma/releases/download/v2.21.1/chroma-2.21.1-darwin-amd64.tar.gz h := handler.New(s.client, cache). + StrategyName(githubReleasesStrategyName). CacheKey(func(r *http.Request) string { org := r.PathValue("org") repo := r.PathValue("repo") @@ -79,7 +82,7 @@ func NewGitHubReleases(ctx context.Context, config GitHubReleasesConfig, cache c var _ Strategy = (*GitHubReleases)(nil) -func (g *GitHubReleases) String() string { return "github-releases" } +func (g *GitHubReleases) String() string { return githubReleasesStrategyName } // newGitHubRequest creates a new HTTP request with GitHub API headers and authentication. func (g *GitHubReleases) newGitHubRequest(ctx context.Context, url, accept, org string) (*http.Request, error) { diff --git a/internal/strategy/github_releases_test.go b/internal/strategy/github_releases_test.go index ef8e795..25e3ada 100644 --- a/internal/strategy/github_releases_test.go +++ b/internal/strategy/github_releases_test.go @@ -231,7 +231,7 @@ func TestGitHubReleasesPublicRepoNotFound(t *testing.T) { assert.NoError(t, err) defer memCache.Close() - _, _, err = memCache.Open(context.Background(), key) + _, _, err = memCache.Open(context.Background(), "", key) assert.Error(t, err, "non-OK responses should not be cached") } diff --git a/internal/strategy/gomod/cacher.go b/internal/strategy/gomod/cacher.go index 16e91d7..7cd53a2 100644 --- a/internal/strategy/gomod/cacher.go +++ b/internal/strategy/gomod/cacher.go @@ -10,6 +10,8 @@ import ( "github.com/block/cachew/internal/cache" ) +const StrategyName = "gomod" + type goproxyCacher struct { cache cache.Cache } @@ -17,7 +19,7 @@ type goproxyCacher struct { func (g *goproxyCacher) Get(ctx context.Context, name string) (io.ReadCloser, error) { key := cache.NewKey(name) - rc, _, err := g.cache.Open(ctx, key) + rc, _, err := g.cache.Open(ctx, StrategyName, key) if err != nil { return nil, fs.ErrNotExist } @@ -32,7 +34,7 @@ func (g *goproxyCacher) Put(ctx context.Context, 
name string, content io.ReadSee key := cache.NewKey(name) - wc, err := g.cache.Create(ctx, key, nil, 0) + wc, err := g.cache.Create(ctx, StrategyName, key, nil, 0) if err != nil { return fmt.Errorf("create cache entry: %w", err) } diff --git a/internal/strategy/handler/handler.go b/internal/strategy/handler/handler.go index 4056329..06d5475 100644 --- a/internal/strategy/handler/handler.go +++ b/internal/strategy/handler/handler.go @@ -20,6 +20,7 @@ import ( // Example usage: // // h := handler.New(client, cache). +// StrategyName("my-strategy"). // CacheKey(func(r *http.Request) string { // return "custom-key" // }). @@ -30,6 +31,7 @@ import ( type Handler struct { client *http.Client cache cache.Cache + strategyName string cacheKeyFunc func(*http.Request) string transformFunc func(*http.Request) (*http.Request, error) errorHandler func(error, http.ResponseWriter, *http.Request) @@ -58,6 +60,11 @@ func New(client *http.Client, c cache.Cache) *Handler { } } +func (h *Handler) StrategyName(name string) *Handler { + h.strategyName = name + return h +} + // CacheKey sets the function used to determine the cache key for a request. // The function receives the original incoming request. func (h *Handler) CacheKey(f func(*http.Request) string) *Handler { @@ -97,12 +104,14 @@ func (h *Handler) TTL(f func(*http.Request) time.Duration) *Handler { // 4. If not cached, transform the request and fetch from upstream // 5. Cache the response while streaming to the client. 
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - logger := logging.FromContext(r.Context()) + ctx := r.Context() + + logger := logging.FromContext(ctx) cacheKeyStr := h.cacheKeyFunc(r) key := cache.NewKey(cacheKeyStr) - logger.DebugContext(r.Context(), "Processing request", slog.String("cache_key", cacheKeyStr)) + logger.DebugContext(ctx, "Processing request", slog.String("cache_key", cacheKeyStr)) if h.serveCached(w, r, key, logger) { return @@ -112,7 +121,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveCached(w http.ResponseWriter, r *http.Request, key cache.Key, logger *slog.Logger) bool { - cr, headers, err := h.cache.Open(r.Context(), key) + cr, headers, err := h.cache.Open(r.Context(), h.strategyName, key) if err != nil { if !errors.Is(err, os.ErrNotExist) { h.errorHandler(httputil.Errorf(http.StatusInternalServerError, "failed to open cache: %w", err), w, r) @@ -169,7 +178,7 @@ func (h *Handler) streamNonOKResponse(w http.ResponseWriter, resp *http.Response func (h *Handler) streamAndCache(w http.ResponseWriter, r *http.Request, key cache.Key, resp *http.Response, logger *slog.Logger) { ttl := h.ttlFunc(r) responseHeaders := maps.Clone(resp.Header) - cw, err := h.cache.Create(r.Context(), key, responseHeaders, ttl) + cw, err := h.cache.Create(r.Context(), h.strategyName, key, responseHeaders, ttl) if err != nil { h.errorHandler(httputil.Errorf(http.StatusInternalServerError, "failed to create cache entry: %w", err), w, r) return diff --git a/internal/strategy/hermit.go b/internal/strategy/hermit.go index 52f1fbc..9fedbb7 100644 --- a/internal/strategy/hermit.go +++ b/internal/strategy/hermit.go @@ -15,8 +15,10 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) +const hermitStrategyName = "hermit" + func RegisterHermit(r *Registry) { - Register(r, "hermit", "Caches Hermit package downloads.", func(ctx context.Context, config HermitConfig, c cache.Cache, mux Mux) 
(*Hermit, error) { + Register(r, hermitStrategyName, "Caches Hermit package downloads.", func(ctx context.Context, config HermitConfig, c cache.Cache, mux Mux) (*Hermit, error) { return NewHermit(ctx, config, nil, c, mux) }) } @@ -70,10 +72,11 @@ func NewHermit(ctx context.Context, config HermitConfig, _ jobscheduler.Schedule return s, nil } -func (s *Hermit) String() string { return "hermit" } +func (s *Hermit) String() string { return hermitStrategyName } func (s *Hermit) createDirectHandler(c cache.Cache) http.Handler { return handler.New(s.client, c). + StrategyName(hermitStrategyName). CacheKey(func(r *http.Request) string { return s.buildOriginalURL(r) }). @@ -91,6 +94,7 @@ func (s *Hermit) createRedirectHandler(isInternalRedirect bool, c cache.Cache) h } return handler.New(s.client, cacheBackend). + StrategyName(hermitStrategyName). CacheKey(func(r *http.Request) string { return s.buildGitHubURL(r) }). diff --git a/internal/strategy/hermit_test.go b/internal/strategy/hermit_test.go index e815f28..1c5b51b 100644 --- a/internal/strategy/hermit_test.go +++ b/internal/strategy/hermit_test.go @@ -130,7 +130,7 @@ func TestHermitNonOKStatus(t *testing.T) { assert.Equal(t, "not found", w.Body.String()) key := cache.NewKey("https://example.com/missing.tar.gz") - _, _, err := memCache.Open(context.Background(), key) + _, _, err := memCache.Open(context.Background(), "", key) assert.Error(t, err, "non-OK responses should not be cached") } diff --git a/internal/strategy/host.go b/internal/strategy/host.go index afa4f8a..b06e360 100644 --- a/internal/strategy/host.go +++ b/internal/strategy/host.go @@ -12,8 +12,10 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) +const hostStrategyName = "host" + func RegisterHost(r *Registry) { - Register(r, "host", "A generic host-based proxying strategy.", NewHost) + Register(r, hostStrategyName, "A generic host-based proxying strategy.", NewHost) } // HostConfig represents the configuration for the Host strategy. 
@@ -55,6 +57,7 @@ func NewHost(ctx context.Context, config HostConfig, cache cache.Cache, mux Mux) } hdlr := handler.New(h.client, cache). + StrategyName(hostStrategyName). CacheKey(func(r *http.Request) string { return h.buildTargetURL(r).String() }). diff --git a/internal/strategy/host_test.go b/internal/strategy/host_test.go index 1f8442b..9db0ead 100644 --- a/internal/strategy/host_test.go +++ b/internal/strategy/host_test.go @@ -83,7 +83,7 @@ func TestHostNonOKStatus(t *testing.T) { assert.Equal(t, "not found", w.Body.String()) key := cache.NewKey(backend.URL + "/missing") - _, _, err = memCache.Open(context.Background(), key) + _, _, err = memCache.Open(context.Background(), "", key) assert.Error(t, err, "non-OK responses should not be cached") } From edff0ed8adaec2ab570aa366a3ea47b5f8b6923e Mon Sep 17 00:00:00 2001 From: Neha Sherpa Date: Thu, 19 Feb 2026 16:37:51 -0800 Subject: [PATCH 2/5] fix: Update the api with namespace --- cmd/cachew/main.go | 51 ++++++--- internal/cache/api.go | 13 ++- internal/cache/cachetest/soak.go | 6 +- internal/cache/cachetest/suite.go | 48 ++++----- internal/cache/disk.go | 87 ++++++++------- internal/cache/disk_eviction_test.go | 125 ++++++++++++++++++++++ internal/cache/disk_metadb.go | 63 +++++++---- internal/cache/http.go | 4 +- internal/cache/memory.go | 94 ++++++++++++---- internal/cache/noop.go | 18 +++- internal/cache/remote.go | 21 +++- internal/cache/s3.go | 80 ++++++++------ internal/cache/tiered.go | 46 ++++++-- internal/snapshot/snapshot.go | 8 +- internal/snapshot/snapshot_test.go | 42 ++++---- internal/strategy/api.go | 4 +- internal/strategy/apiv1.go | 11 +- internal/strategy/artifactory.go | 5 +- internal/strategy/git/git.go | 4 +- internal/strategy/git/snapshot.go | 2 +- internal/strategy/git/snapshot_test.go | 2 +- internal/strategy/github_releases.go | 7 +- internal/strategy/github_releases_test.go | 2 +- internal/strategy/gomod/cacher.go | 6 +- internal/strategy/handler/handler.go | 11 +- 
internal/strategy/hermit.go | 8 +- internal/strategy/hermit_test.go | 2 +- internal/strategy/host.go | 5 +- internal/strategy/host_test.go | 2 +- 29 files changed, 532 insertions(+), 245 deletions(-) create mode 100644 internal/cache/disk_eviction_test.go diff --git a/cmd/cachew/main.go b/cmd/cachew/main.go index 513a13f..8b181c4 100644 --- a/cmd/cachew/main.go +++ b/cmd/cachew/main.go @@ -21,15 +21,17 @@ import ( type CLI struct { LoggingConfig logging.Config `embed:"" prefix:"log-"` - URL string `help:"Remote cache server URL." default:"http://127.0.0.1:8080"` - Platform bool `help:"Prefix keys with platform ($${os}-$${arch}-)."` - Daily bool `help:"Prefix keys with date ($${YYYY}-$${MM}-$${DD}-). Mutually exclusive with --hourly." xor:"timeprefix"` - Hourly bool `help:"Prefix keys with date and hour ($${YYYY}-$${MM}-$${DD}-$${HH}-). Mutually exclusive with --daily." xor:"timeprefix"` - - Get GetCmd `cmd:"" help:"Download object from cache." group:"Operations:"` - Stat StatCmd `cmd:"" help:"Show metadata for cached object." group:"Operations:"` - Put PutCmd `cmd:"" help:"Upload object to cache." group:"Operations:"` - Delete DeleteCmd `cmd:"" help:"Remove object from cache." group:"Operations:"` + URL string `help:"Remote cache server URL." default:"http://127.0.0.1:8080"` + Namespace string `help:"Namespace for organizing cache objects." default:""` + Platform bool `help:"Prefix keys with platform ($${os}-$${arch}-)."` + Daily bool `help:"Prefix keys with date ($${YYYY}-$${MM}-$${DD}-). Mutually exclusive with --hourly." xor:"timeprefix"` + Hourly bool `help:"Prefix keys with date and hour ($${YYYY}-$${MM}-$${DD}-$${HH}-). Mutually exclusive with --daily." xor:"timeprefix"` + + Get GetCmd `cmd:"" help:"Download object from cache." group:"Operations:"` + Stat StatCmd `cmd:"" help:"Show metadata for cached object." group:"Operations:"` + Put PutCmd `cmd:"" help:"Upload object to cache." 
group:"Operations:"` + Delete DeleteCmd `cmd:"" help:"Remove object from cache." group:"Operations:"` + ListNamespaces ListNamespacesCmd `cmd:"" help:"List available namespaces in cache." group:"Operations:"` Snapshot SnapshotCmd `cmd:"" help:"Create compressed archive of directory and upload." group:"Snapshots:"` Restore RestoreCmd `cmd:"" help:"Download and extract archive to directory." group:"Snapshots:"` @@ -57,7 +59,7 @@ type GetCmd struct { func (c *GetCmd) Run(ctx context.Context, cache cache.Cache) error { defer c.Output.Close() - rc, headers, err := cache.Open(ctx, "", c.Key.Key()) + rc, headers, err := cache.Open(ctx, c.Key.Key()) if err != nil { return errors.Wrap(err, "failed to open object") } @@ -78,7 +80,7 @@ type StatCmd struct { } func (c *StatCmd) Run(ctx context.Context, cache cache.Cache) error { - headers, err := cache.Stat(ctx, "", c.Key.Key()) + headers, err := cache.Stat(ctx, c.Key.Key()) if err != nil { return errors.Wrap(err, "failed to stat object") } @@ -111,7 +113,7 @@ func (c *PutCmd) Run(ctx context.Context, cache cache.Cache) error { headers.Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(filename))) //nolint:perfsprint } - wc, err := cache.Create(ctx, "", c.Key.Key(), headers, c.TTL) + wc, err := cache.Create(ctx, c.Key.Key(), headers, c.TTL) if err != nil { return errors.Wrap(err, "failed to create object") } @@ -128,7 +130,26 @@ type DeleteCmd struct { } func (c *DeleteCmd) Run(ctx context.Context, cache cache.Cache) error { - return errors.Wrap(cache.Delete(ctx, "", c.Key.Key()), "failed to delete object") + return errors.Wrap(cache.Delete(ctx, c.Key.Key()), "failed to delete object") +} + +type ListNamespacesCmd struct{} + +func (c *ListNamespacesCmd) Run(ctx context.Context, cache cache.Cache) error { + namespaces, err := cache.ListNamespaces(ctx) + if err != nil { + return errors.Wrap(err, "failed to list namespaces") + } + + if len(namespaces) == 0 { + fmt.Println("No namespaces found") 
//nolint:forbidigo + return nil + } + + for _, ns := range namespaces { + fmt.Println(ns) //nolint:forbidigo + } + return nil } type SnapshotCmd struct { @@ -140,7 +161,7 @@ type SnapshotCmd struct { func (c *SnapshotCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Archiving %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Create(ctx, cache, "", c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { + if err := snapshot.Create(ctx, cache, c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { return errors.Wrap(err, "failed to create snapshot") } @@ -155,7 +176,7 @@ type RestoreCmd struct { func (c *RestoreCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Restoring to %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Restore(ctx, cache, "", c.Key.Key(), c.Directory); err != nil { + if err := snapshot.Restore(ctx, cache, c.Key.Key(), c.Directory); err != nil { return errors.Wrap(err, "failed to restore snapshot") } diff --git a/internal/cache/api.go b/internal/cache/api.go index 6aaa142..48c533b 100644 --- a/internal/cache/api.go +++ b/internal/cache/api.go @@ -148,17 +148,20 @@ type Stats struct { type Cache interface { // String describes the Cache implementation. String() string + // Namespace creates a namespaced view of this cache. + // All operations on the returned cache will use the given namespace prefix. + Namespace(namespace string) Cache // Stat returns the headers of an existing object in the cache. // // Expired files MUST not be returned. // Must return os.ErrNotExist if the file does not exist. - Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) + Stat(ctx context.Context, key Key) (http.Header, error) // Open an existing file in the cache. // // Expired files MUST NOT be returned. // The returned headers MUST include a Last-Modified header. // Must return os.ErrNotExist if the file does not exist. 
- Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) + Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) // Create a new file in the cache. // // If "ttl" is zero, a maximum TTL MUST be used by the implementation. @@ -166,13 +169,15 @@ type Cache interface { // The file MUST NOT be available for read until completely written and closed. // // If the context is cancelled the object MUST NOT be made available in the cache. - Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) + Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) // Delete a file from the cache. // // MUST be atomic. - Delete(ctx context.Context, strategyName string, key Key) error + Delete(ctx context.Context, key Key) error // Stats returns health and usage statistics for the cache. Stats(ctx context.Context) (Stats, error) + // ListNamespaces returns all unique namespaces in the cache. + ListNamespaces(ctx context.Context) ([]string, error) // Close the Cache. 
Close() error } diff --git a/internal/cache/cachetest/soak.go b/internal/cache/cachetest/soak.go index 9b78107..5490060 100644 --- a/internal/cache/cachetest/soak.go +++ b/internal/cache/cachetest/soak.go @@ -194,7 +194,7 @@ func doWrite( } key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - writer, err := c.Create(ctx, "", key, nil, config.TTL) + writer, err := c.Create(ctx, key, nil, config.TTL) if err != nil { t.Errorf("failed to create cache entry: %+v", err) return @@ -248,7 +248,7 @@ func doRead( keyIdx := rng.IntN(config.NumObjects) key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - reader, _, err := c.Open(ctx, "", key) + reader, _, err := c.Open(ctx, key) if err != nil { if errors.Is(err, os.ErrNotExist) { atomic.AddInt64(&result.ReadMisses, 1) @@ -299,7 +299,7 @@ func doDelete( keyIdx := rng.IntN(config.NumObjects) key := cache.NewKey(fmt.Sprintf("soak-key-%d", keyIdx)) - if err := c.Delete(ctx, "", key); err != nil { + if err := c.Delete(ctx, key); err != nil { if errors.Is(err, os.ErrNotExist) { return } diff --git a/internal/cache/cachetest/suite.go b/internal/cache/cachetest/suite.go index 142847f..db43d3c 100644 --- a/internal/cache/cachetest/suite.go +++ b/internal/cache/cachetest/suite.go @@ -63,7 +63,7 @@ func testCreateAndOpen(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, time.Hour) + writer, err := c.Create(ctx, key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("hello world")) @@ -72,7 +72,7 @@ func testCreateAndOpen(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, "", key) + reader, _, err := c.Open(ctx, key) assert.NoError(t, err) defer reader.Close() @@ -87,7 +87,7 @@ func testNotFound(t *testing.T, c cache.Cache) { key := cache.NewKey("nonexistent") - _, _, err := c.Open(ctx, "", key) + _, _, err := c.Open(ctx, key) assert.IsError(t, err, os.ErrNotExist) } @@ -97,7 +97,7 @@ func 
testExpiration(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, time.Millisecond*250) + writer, err := c.Create(ctx, key, nil, time.Millisecond*250) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -106,13 +106,13 @@ func testExpiration(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, "", key) + reader, _, err := c.Open(ctx, key) assert.NoError(t, err) assert.NoError(t, reader.Close()) time.Sleep(500 * time.Millisecond) - _, _, err = c.Open(ctx, "", key) + _, _, err = c.Open(ctx, key) assert.IsError(t, err, os.ErrNotExist) } @@ -122,7 +122,7 @@ func testDefaultTTL(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, 0) + writer, err := c.Create(ctx, key, nil, 0) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -131,7 +131,7 @@ func testDefaultTTL(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, "", key) + reader, _, err := c.Open(ctx, key) assert.NoError(t, err) assert.NoError(t, reader.Close()) } @@ -142,7 +142,7 @@ func testDelete(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, time.Hour) + writer, err := c.Create(ctx, key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -151,10 +151,10 @@ func testDelete(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - err = c.Delete(ctx, "", key) + err = c.Delete(ctx, key) assert.NoError(t, err) - _, _, err = c.Open(ctx, "", key) + _, _, err = c.Open(ctx, key) assert.IsError(t, err, os.ErrNotExist) } @@ -164,7 +164,7 @@ func testMultipleWrites(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, time.Hour) + writer, err := c.Create(ctx, key, nil, time.Hour) assert.NoError(t, err) _, err 
= writer.Write([]byte("hello ")) @@ -176,7 +176,7 @@ func testMultipleWrites(t *testing.T, c cache.Cache) { err = writer.Close() assert.NoError(t, err) - reader, _, err := c.Open(ctx, "", key) + reader, _, err := c.Open(ctx, key) assert.NoError(t, err) defer reader.Close() @@ -191,19 +191,19 @@ func testNotAvailableUntilClosed(t *testing.T, c cache.Cache) { key := cache.NewKey("test-key") - writer, err := c.Create(ctx, "", key, nil, time.Hour) + writer, err := c.Create(ctx, key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) assert.NoError(t, err) - _, _, err = c.Open(ctx, "", key) + _, _, err = c.Open(ctx, key) assert.IsError(t, err, os.ErrNotExist) err = writer.Close() assert.NoError(t, err) - _, _, err = c.Open(ctx, "", key) + _, _, err = c.Open(ctx, key) assert.NoError(t, err) } @@ -220,7 +220,7 @@ func testHeaders(t *testing.T, c cache.Cache) { "X-Custom-Field": []string{"custom-value"}, } - writer, err := c.Create(ctx, "", key, headers, time.Hour) + writer, err := c.Create(ctx, key, headers, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data with headers")) @@ -230,7 +230,7 @@ func testHeaders(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Open and verify headers are returned - reader, returnedHeaders, err := c.Open(ctx, "", key) + reader, returnedHeaders, err := c.Open(ctx, key) assert.NoError(t, err) defer reader.Close() @@ -257,7 +257,7 @@ func testContextCancellation(t *testing.T, c cache.Cache) { // Create an object with the cancellable context key := cache.NewKey("test-cancelled") - writer, err := c.Create(cancelledCtx, "", key, http.Header{}, time.Hour) + writer, err := c.Create(cancelledCtx, key, http.Header{}, time.Hour) assert.NoError(t, err) // Write some data @@ -273,7 +273,7 @@ func testContextCancellation(t *testing.T, c cache.Cache) { assert.Contains(t, err.Error(), "cancel") // Object should not be in cache - _, _, err = c.Open(ctx, "", key) + _, _, err = c.Open(ctx, key) 
assert.IsError(t, err, os.ErrNotExist) } @@ -284,7 +284,7 @@ func testLastModified(t *testing.T, c cache.Cache) { key := cache.NewKey("test-last-modified") // Create an object without specifying Last-Modified - writer, err := c.Create(ctx, "", key, nil, time.Hour) + writer, err := c.Create(ctx, key, nil, time.Hour) assert.NoError(t, err) _, err = writer.Write([]byte("test data")) @@ -294,7 +294,7 @@ func testLastModified(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Open and verify Last-Modified header is present - reader, headers, err := c.Open(ctx, "", key) + reader, headers, err := c.Open(ctx, key) assert.NoError(t, err) defer reader.Close() @@ -313,7 +313,7 @@ func testLastModified(t *testing.T, c cache.Cache) { "Last-Modified": []string{explicitTime.Format(http.TimeFormat)}, } - writer2, err := c.Create(ctx, "", key2, explicitHeaders, time.Hour) + writer2, err := c.Create(ctx, key2, explicitHeaders, time.Hour) assert.NoError(t, err) _, err = writer2.Write([]byte("test data 2")) @@ -323,7 +323,7 @@ func testLastModified(t *testing.T, c cache.Cache) { assert.NoError(t, err) // Verify explicit Last-Modified is preserved - reader2, headers2, err := c.Open(ctx, "", key2) + reader2, headers2, err := c.Open(ctx, key2) assert.NoError(t, err) defer reader2.Close() diff --git a/internal/cache/disk.go b/internal/cache/disk.go index 7a7ed56..481f664 100644 --- a/internal/cache/disk.go +++ b/internal/cache/disk.go @@ -38,8 +38,9 @@ type DiskConfig struct { type Disk struct { logger *slog.Logger config DiskConfig + namespace string db *diskMetaDB - size atomic.Int64 + size *atomic.Int64 runEviction chan struct{} stop context.CancelFunc evictionDone chan struct{} @@ -113,6 +114,7 @@ func NewDisk(ctx context.Context, config DiskConfig) (*Disk, error) { logger: logger, config: config, db: db, + size: &atomic.Int64{}, runEviction: make(chan struct{}), stop: stop, evictionDone: make(chan struct{}), @@ -151,7 +153,7 @@ func (d *Disk) Stats(_ context.Context) (Stats, 
error) { }, nil } -func (d *Disk) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (d *Disk) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl > d.config.MaxTTL || ttl == 0 { ttl = d.config.MaxTTL } @@ -164,7 +166,7 @@ func (d *Disk) Create(ctx context.Context, strategyName string, key Key, headers clonedHeaders.Set("Last-Modified", now.UTC().Format(http.TimeFormat)) } - path := d.keyToPath(strategyName, key) + path := d.keyToPath(d.namespace, key) fullPath := filepath.Join(d.config.Root, path) dir := filepath.Dir(fullPath) @@ -180,20 +182,20 @@ func (d *Disk) Create(ctx context.Context, strategyName string, key Key, headers expiresAt := now.Add(ttl) return &diskWriter{ - disk: d, - file: f, - key: key, - strategyName: strategyName, - path: fullPath, - tempPath: f.Name(), - expiresAt: expiresAt, - headers: clonedHeaders, - ctx: ctx, + disk: d, + file: f, + key: key, + namespace: d.namespace, + path: fullPath, + tempPath: f.Name(), + expiresAt: expiresAt, + headers: clonedHeaders, + ctx: ctx, }, nil } -func (d *Disk) Delete(_ context.Context, strategyName string, key Key) error { - path := d.keyToPath(strategyName, key) +func (d *Disk) Delete(_ context.Context, key Key) error { + path := d.keyToPath(d.namespace, key) fullPath := filepath.Join(d.config.Root, path) // Check if file is expired @@ -225,8 +227,8 @@ func (d *Disk) Delete(_ context.Context, strategyName string, key Key) error { return nil } -func (d *Disk) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { - path := d.keyToPath(strategyName, key) +func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { + path := d.keyToPath(d.namespace, key) fullPath := filepath.Join(d.config.Root, path) if _, err := os.Stat(fullPath); err != nil { @@ -239,7 +241,7 @@ func (d *Disk) Stat(ctx context.Context, strategyName string, key Key) 
(http.Hea } if time.Now().After(expiresAt) { - return nil, errors.Join(fs.ErrNotExist, d.Delete(ctx, strategyName, key)) + return nil, errors.Join(fs.ErrNotExist, d.Delete(ctx, key)) } headers, err := d.db.getHeaders(key) @@ -250,8 +252,8 @@ func (d *Disk) Stat(ctx context.Context, strategyName string, key Key) (http.Hea return headers, nil } -func (d *Disk) Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) { - path := d.keyToPath(strategyName, key) +func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { + path := d.keyToPath(d.namespace, key) fullPath := filepath.Join(d.config.Root, path) f, err := os.Open(fullPath) @@ -266,7 +268,7 @@ func (d *Disk) Open(ctx context.Context, strategyName string, key Key) (io.ReadC now := time.Now() if now.After(expiresAt) { - return nil, nil, errors.Join(fs.ErrNotExist, f.Close(), d.Delete(ctx, strategyName, key)) + return nil, nil, errors.Join(fs.ErrNotExist, f.Close(), d.Delete(ctx, key)) } headers, err := d.db.getHeaders(key) @@ -285,12 +287,12 @@ func (d *Disk) Open(ctx context.Context, strategyName string, key Key) (io.ReadC return f, headers, nil } -func (d *Disk) keyToPath(strategyName string, key Key) string { +func (d *Disk) keyToPath(namespace string, key Key) string { hexKey := key.String() // Use first two hex digits as directory, full hex as filename - if strategyName != "" { - return filepath.Join(strategyName, hexKey[:2], hexKey) + if namespace != "" { + return filepath.Join(namespace, hexKey[:2], hexKey) } return filepath.Join(hexKey[:2], hexKey) } @@ -330,8 +332,8 @@ func (d *Disk) evict() error { var expiredKeys []Key now := time.Now() - err := d.db.walk(func(key Key, strategyName string, expiresAt time.Time) error { - path := d.keyToPath(strategyName, key) + err := d.db.walk(func(key Key, namespace string, expiresAt time.Time) error { + path := d.keyToPath(namespace, key) fullPath := filepath.Join(d.config.Root, path) info, err := 
os.Stat(fullPath) @@ -399,16 +401,16 @@ func (d *Disk) evict() error { } type diskWriter struct { - disk *Disk - file *os.File - key Key - strategyName string - path string - tempPath string - expiresAt time.Time - headers http.Header - size int64 - ctx context.Context + disk *Disk + file *os.File + key Key + namespace string + path string + tempPath string + expiresAt time.Time + headers http.Header + size int64 + ctx context.Context } func (w *diskWriter) Write(p []byte) (int, error) { @@ -443,7 +445,7 @@ func (w *diskWriter) Close() error { return errors.Errorf("failed to rename temp file: %w", err) } - if err := w.disk.db.set(w.key, w.strategyName, w.expiresAt, w.headers); err != nil { + if err := w.disk.db.set(w.key, w.namespace, w.expiresAt, w.headers); err != nil { return errors.Join(errors.Errorf("failed to set metadata: %w", err), os.Remove(w.path)) } @@ -456,3 +458,16 @@ func (w *diskWriter) Close() error { return nil } + +// Namespace creates a namespaced view of the disk cache. +func (d *Disk) Namespace(namespace string) Cache { + // Create a shallow copy with the namespace set + c := *d + c.namespace = namespace + return &c +} + +// ListNamespaces returns all unique namespaces in the disk cache. 
+func (d *Disk) ListNamespaces(_ context.Context) ([]string, error) { + return d.db.listNamespaces() +} diff --git a/internal/cache/disk_eviction_test.go b/internal/cache/disk_eviction_test.go new file mode 100644 index 0000000..5a6409d --- /dev/null +++ b/internal/cache/disk_eviction_test.go @@ -0,0 +1,125 @@ +package cache_test + +import ( + "log/slog" + "testing" + "time" + + "github.com/alecthomas/assert/v2" + + "github.com/block/cachew/internal/cache" + "github.com/block/cachew/internal/logging" +) + +func TestDiskEvictionBySize(t *testing.T) { + dir := t.TempDir() + _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) + + // Create cache with 1MB limit and fast eviction + c, err := cache.NewDisk(ctx, cache.DiskConfig{ + Root: dir, + LimitMB: 1, + MaxTTL: time.Hour, + EvictInterval: 50 * time.Millisecond, + }) + assert.NoError(t, err) + defer c.Close() + + // Create 3 entries of ~500KB each (total 1.5MB, exceeding 1MB limit) + data := make([]byte, 500*1024) + keys := []cache.Key{ + cache.NewKey("key1"), + cache.NewKey("key2"), + cache.NewKey("key3"), + } + + for _, key := range keys { + w, err := c.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write(data) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + time.Sleep(10 * time.Millisecond) // Ensure different access times + } + + // Wait for eviction to run + time.Sleep(200 * time.Millisecond) + + // key1 (oldest) should be evicted + _, _, err = c.Open(ctx, keys[0]) + assert.Error(t, err) + + // key2 and key3 should still exist + r2, _, err := c.Open(ctx, keys[1]) + assert.NoError(t, err) + assert.NoError(t, r2.Close()) + + r3, _, err := c.Open(ctx, keys[2]) + assert.NoError(t, err) + assert.NoError(t, r3.Close()) +} + +func TestDiskEvictionAcrossNamespaces(t *testing.T) { + dir := t.TempDir() + _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) + + // Create cache with 1MB limit + baseCache, err := cache.NewDisk(ctx, 
cache.DiskConfig{ + Root: dir, + LimitMB: 1, + MaxTTL: time.Hour, + EvictInterval: 50 * time.Millisecond, + }) + assert.NoError(t, err) + defer baseCache.Close() + + // Create namespace views + gitCache := baseCache.Namespace("git") + gomodCache := baseCache.Namespace("gomod") + + // Create entries in different namespaces + data := make([]byte, 500*1024) + + // git namespace + gitKey := cache.NewKey("git-key") + w, err := gitCache.Create(ctx, gitKey, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write(data) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + time.Sleep(10 * time.Millisecond) + + // gomod namespace + gomodKey := cache.NewKey("gomod-key") + w, err = gomodCache.Create(ctx, gomodKey, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write(data) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + time.Sleep(10 * time.Millisecond) + + // Another git entry to exceed limit + gitKey2 := cache.NewKey("git-key2") + w, err = gitCache.Create(ctx, gitKey2, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write(data) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Wait for eviction + time.Sleep(200 * time.Millisecond) + + // First git entry (oldest) should be evicted + _, _, err = gitCache.Open(ctx, gitKey) + assert.Error(t, err) + + // gomod entry should still exist + r, _, err := gomodCache.Open(ctx, gomodKey) + assert.NoError(t, err) + assert.NoError(t, r.Close()) + + // Newer git entry should still exist + r, _, err = gitCache.Open(ctx, gitKey2) + assert.NoError(t, err) + assert.NoError(t, r.Close()) +} diff --git a/internal/cache/disk_metadb.go b/internal/cache/disk_metadb.go index d118be4..9b00768 100644 --- a/internal/cache/disk_metadb.go +++ b/internal/cache/disk_metadb.go @@ -12,9 +12,9 @@ import ( //nolint:gochecknoglobals var ( - ttlBucketName = []byte("ttl") - headersBucketName = []byte("headers") - strategyBucketName = []byte("strategy") + ttlBucketName = []byte("ttl") + headersBucketName = 
[]byte("headers") + namespaceBucketName = []byte("namespace") ) // diskMetaDB manages expiration times and headers for cache entries using bbolt. @@ -38,7 +38,7 @@ func newDiskMetaDB(dbPath string) (*diskMetaDB, error) { if _, err := tx.CreateBucketIfNotExists(headersBucketName); err != nil { return errors.WithStack(err) } - if _, err := tx.CreateBucketIfNotExists(strategyBucketName); err != nil { + if _, err := tx.CreateBucketIfNotExists(namespaceBucketName); err != nil { return errors.WithStack(err) } return nil @@ -61,7 +61,7 @@ func (s *diskMetaDB) setTTL(key Key, expiresAt time.Time) error { })) } -func (s *diskMetaDB) set(key Key, strategyName string, expiresAt time.Time, headers http.Header) error { +func (s *diskMetaDB) set(key Key, namespace string, expiresAt time.Time, headers http.Header) error { ttlBytes, err := expiresAt.MarshalBinary() if err != nil { return errors.Errorf("failed to marshal TTL: %w", err) @@ -83,8 +83,8 @@ func (s *diskMetaDB) set(key Key, strategyName string, expiresAt time.Time, head return errors.WithStack(err) } - strategyBucket := tx.Bucket(strategyBucketName) - return errors.WithStack(strategyBucket.Put(key[:], []byte(strategyName))) + namespaceBucket := tx.Bucket(namespaceBucketName) + return errors.WithStack(namespaceBucket.Put(key[:], []byte(namespace))) })) } @@ -126,8 +126,8 @@ func (s *diskMetaDB) delete(key Key) error { return errors.WithStack(err) } - strategyBucket := tx.Bucket(strategyBucketName) - return errors.WithStack(strategyBucket.Delete(key[:])) + namespaceBucket := tx.Bucket(namespaceBucketName) + return errors.WithStack(namespaceBucket.Delete(key[:])) })) } @@ -138,7 +138,7 @@ func (s *diskMetaDB) deleteAll(keys []Key) error { return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) headersBucket := tx.Bucket(headersBucketName) - strategyBucket := tx.Bucket(strategyBucketName) + namespaceBucket := tx.Bucket(namespaceBucketName) for _, key := range keys { if err := 
ttlBucket.Delete(key[:]); err != nil { @@ -147,21 +147,21 @@ func (s *diskMetaDB) deleteAll(keys []Key) error { if err := headersBucket.Delete(key[:]); err != nil { return errors.Errorf("failed to delete headers: %w", err) } - if err := strategyBucket.Delete(key[:]); err != nil { - return errors.Errorf("failed to delete strategy: %w", err) + if err := namespaceBucket.Delete(key[:]); err != nil { + return errors.Errorf("failed to delete namespace: %w", err) } } return nil })) } -func (s *diskMetaDB) walk(fn func(key Key, strategyName string, expiresAt time.Time) error) error { +func (s *diskMetaDB) walk(fn func(key Key, namespace string, expiresAt time.Time) error) error { return errors.WithStack(s.db.View(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) if ttlBucket == nil { return nil } - strategyBucket := tx.Bucket(strategyBucketName) + namespaceBucket := tx.Bucket(namespaceBucketName) return ttlBucket.ForEach(func(k, v []byte) error { if len(k) != 32 { return nil @@ -172,13 +172,13 @@ func (s *diskMetaDB) walk(fn func(key Key, strategyName string, expiresAt time.T if err := expiresAt.UnmarshalBinary(v); err != nil { return nil //nolint:nilerr } - strategyName := "" - if strategyBucket != nil { - if strategyBytes := strategyBucket.Get(k); strategyBytes != nil { - strategyName = string(strategyBytes) + namespace := "" + if namespaceBucket != nil { + if namespaceBytes := namespaceBucket.Get(k); namespaceBytes != nil { + namespace = string(namespaceBytes) } } - return fn(key, strategyName, expiresAt) + return fn(key, namespace, expiresAt) }) })) } @@ -202,3 +202,28 @@ func (s *diskMetaDB) close() error { } return nil } + +func (s *diskMetaDB) listNamespaces() ([]string, error) { + namespaceSet := make(map[string]bool) + err := s.db.View(func(tx *bbolt.Tx) error { + namespaceBucket := tx.Bucket(namespaceBucketName) + if namespaceBucket == nil { + return nil + } + return namespaceBucket.ForEach(func(_, v []byte) error { + if len(v) > 0 { + 
namespaceSet[string(v)] = true + } + return nil + }) + }) + if err != nil { + return nil, errors.WithStack(err) + } + + namespaces := make([]string, 0, len(namespaceSet)) + for ns := range namespaceSet { + namespaces = append(namespaces, ns) + } + return namespaces, nil +} diff --git a/internal/cache/http.go b/internal/cache/http.go index f21240d..ec2ba01 100644 --- a/internal/cache/http.go +++ b/internal/cache/http.go @@ -18,7 +18,7 @@ func Fetch(client *http.Client, r *http.Request, c Cache) (*http.Response, error url := r.URL.String() key := NewKey(url) - cr, headers, err := c.Open(r.Context(), "", key) + cr, headers, err := c.Open(r.Context(), key) if err == nil { return &http.Response{ Status: "200 OK", @@ -53,7 +53,7 @@ func FetchDirect(client *http.Client, r *http.Request, c Cache, key Key) (*http. } responseHeaders := maps.Clone(resp.Header) - cw, err := c.Create(r.Context(), "", key, responseHeaders, 0) + cw, err := c.Create(r.Context(), key, responseHeaders, 0) if err != nil { _ = resp.Body.Close() return nil, httputil.Errorf(http.StatusInternalServerError, "failed to create cache entry: %w", err) diff --git a/internal/cache/memory.go b/internal/cache/memory.go index dcf3ca8..2ab8e4d 100644 --- a/internal/cache/memory.go +++ b/internal/cache/memory.go @@ -38,8 +38,9 @@ type memoryEntry struct { type Memory struct { config MemoryConfig + namespace string mu sync.RWMutex - entries map[Key]*memoryEntry + entries map[string]map[Key]*memoryEntry // namespace -> key -> entry currentSize int64 } @@ -47,17 +48,22 @@ func NewMemory(ctx context.Context, config MemoryConfig) (*Memory, error) { logging.FromContext(ctx).InfoContext(ctx, "Constructing in-memory Cache", "limit-mb", config.LimitMB, "max-ttl", config.MaxTTL) return &Memory{ config: config, - entries: make(map[Key]*memoryEntry), + entries: make(map[string]map[Key]*memoryEntry), }, nil } func (m *Memory) String() string { return fmt.Sprintf("memory:%dMB", m.config.LimitMB) } -func (m *Memory) Stat(_ 
context.Context, _ string, key Key) (http.Header, error) { +func (m *Memory) Stat(_ context.Context, key Key) (http.Header, error) { m.mu.RLock() defer m.mu.RUnlock() - entry, exists := m.entries[key] + nsEntries, nsExists := m.entries[m.namespace] + if !nsExists { + return nil, os.ErrNotExist + } + + entry, exists := nsEntries[key] if !exists { return nil, os.ErrNotExist } @@ -69,11 +75,16 @@ func (m *Memory) Stat(_ context.Context, _ string, key Key) (http.Header, error) return entry.headers, nil } -func (m *Memory) Open(_ context.Context, _ string, key Key) (io.ReadCloser, http.Header, error) { +func (m *Memory) Open(_ context.Context, key Key) (io.ReadCloser, http.Header, error) { m.mu.RLock() defer m.mu.RUnlock() - entry, exists := m.entries[key] + nsEntries, nsExists := m.entries[m.namespace] + if !nsExists { + return nil, nil, os.ErrNotExist + } + + entry, exists := nsEntries[key] if !exists { return nil, nil, os.ErrNotExist } @@ -85,7 +96,7 @@ func (m *Memory) Open(_ context.Context, _ string, key Key) (io.ReadCloser, http return io.NopCloser(bytes.NewReader(entry.data)), entry.headers, nil } -func (m *Memory) Create(ctx context.Context, _ string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (m *Memory) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl == 0 { ttl = m.config.MaxTTL } @@ -100,6 +111,7 @@ func (m *Memory) Create(ctx context.Context, _ string, key Key, headers http.Hea writer := &memoryWriter{ cache: m, + namespace: m.namespace, key: key, buf: &bytes.Buffer{}, expiresAt: now.Add(ttl), @@ -110,16 +122,21 @@ func (m *Memory) Create(ctx context.Context, _ string, key Key, headers http.Hea return writer, nil } -func (m *Memory) Delete(_ context.Context, _ string, key Key) error { +func (m *Memory) Delete(_ context.Context, key Key) error { m.mu.Lock() defer m.mu.Unlock() - entry, exists := m.entries[key] + nsEntries, nsExists := m.entries[m.namespace] + 
if !nsExists { + return os.ErrNotExist + } + + entry, exists := nsEntries[key] if !exists { return os.ErrNotExist } m.currentSize -= int64(len(entry.data)) - delete(m.entries, key) + delete(nsEntries, key) return nil } @@ -135,8 +152,13 @@ func (m *Memory) Stats(_ context.Context) (Stats, error) { m.mu.RLock() defer m.mu.RUnlock() + totalObjects := int64(0) + for _, nsEntries := range m.entries { + totalObjects += int64(len(nsEntries)) + } + return Stats{ - Objects: int64(len(m.entries)), + Objects: totalObjects, Size: m.currentSize, Capacity: int64(m.config.LimitMB) * 1024 * 1024, }, nil @@ -144,18 +166,22 @@ func (m *Memory) Stats(_ context.Context) (Stats, error) { func (m *Memory) evictOldest(neededSpace int64) { type entryInfo struct { + namespace string key Key size int64 expiresAt time.Time } var entries []entryInfo - for k, e := range m.entries { - entries = append(entries, entryInfo{ - key: k, - size: int64(len(e.data)), - expiresAt: e.expiresAt, - }) + for ns, nsEntries := range m.entries { + for k, e := range nsEntries { + entries = append(entries, entryInfo{ + namespace: ns, + key: k, + size: int64(len(e.data)), + expiresAt: e.expiresAt, + }) + } } // Sort by expiry time (earliest first) @@ -173,13 +199,14 @@ func (m *Memory) evictOldest(neededSpace int64) { break } m.currentSize -= e.size - delete(m.entries, e.key) + delete(m.entries[e.namespace], e.key) freedSpace += e.size } } type memoryWriter struct { cache *Memory + namespace string key Key buf *bytes.Buffer expiresAt time.Time @@ -212,9 +239,15 @@ func (w *memoryWriter) Close() error { newSize := int64(w.buf.Len()) limitBytes := int64(w.cache.config.LimitMB) * 1024 * 1024 + // Ensure namespace map exists + if w.cache.entries[w.namespace] == nil { + w.cache.entries[w.namespace] = make(map[Key]*memoryEntry) + } + nsEntries := w.cache.entries[w.namespace] + // Remove old entry size if it exists oldSize := int64(0) - if oldEntry, exists := w.cache.entries[w.key]; exists { + if oldEntry, exists := 
nsEntries[w.key]; exists { oldSize = int64(len(oldEntry.data)) } @@ -231,7 +264,7 @@ func (w *memoryWriter) Close() error { data := make([]byte, w.buf.Len()) copy(data, w.buf.Bytes()) w.buf.Reset() - w.cache.entries[w.key] = &memoryEntry{ + nsEntries[w.key] = &memoryEntry{ data: data, expiresAt: w.expiresAt, headers: w.headers, @@ -240,3 +273,24 @@ func (w *memoryWriter) Close() error { return nil } + +// Namespace creates a namespaced view of the memory cache. +func (m *Memory) Namespace(namespace string) Cache { + c := *m + c.namespace = namespace + return &c +} + +// ListNamespaces returns all unique namespaces in the memory cache. +func (m *Memory) ListNamespaces(_ context.Context) ([]string, error) { + m.mu.RLock() + defer m.mu.RUnlock() + + namespaces := make([]string, 0, len(m.entries)) + for ns := range m.entries { + if ns != "" { + namespaces = append(namespaces, ns) + } + } + return namespaces, nil +} diff --git a/internal/cache/noop.go b/internal/cache/noop.go index 7d667af..c77fee1 100644 --- a/internal/cache/noop.go +++ b/internal/cache/noop.go @@ -22,20 +22,20 @@ func NoOpCache() Cache { func (n *noOpCache) String() string { return "noop" } -func (n *noOpCache) Stat(_ context.Context, _ string, _ Key) (http.Header, error) { +func (n *noOpCache) Stat(_ context.Context, _ Key) (http.Header, error) { return nil, os.ErrNotExist } -func (n *noOpCache) Open(_ context.Context, _ string, _ Key) (io.ReadCloser, http.Header, error) { +func (n *noOpCache) Open(_ context.Context, _ Key) (io.ReadCloser, http.Header, error) { return nil, nil, os.ErrNotExist } -func (n *noOpCache) Create(_ context.Context, _ string, _ Key, _ http.Header, _ time.Duration) (io.WriteCloser, error) { +func (n *noOpCache) Create(_ context.Context, _ Key, _ http.Header, _ time.Duration) (io.WriteCloser, error) { // Return a discard writer that does nothing return &noOpWriter{}, nil } -func (n *noOpCache) Delete(_ context.Context, _ string, _ Key) error { +func (n *noOpCache) Delete(_ 
context.Context, _ Key) error { return nil } @@ -60,3 +60,13 @@ func (n *noOpWriter) Close() error { var _ Cache = (*noOpCache)(nil) var _ io.WriteCloser = (*noOpWriter)(nil) + +// Namespace creates a namespaced view (no-op for noop cache). +func (n *noOpCache) Namespace(_ string) Cache { + return n +} + +// ListNamespaces returns empty list for noop cache. +func (n *noOpCache) ListNamespaces(_ context.Context) ([]string, error) { + return []string{}, nil +} diff --git a/internal/cache/remote.go b/internal/cache/remote.go index f3d2a78..9cf25c5 100644 --- a/internal/cache/remote.go +++ b/internal/cache/remote.go @@ -36,7 +36,7 @@ func NewRemote(baseURL string) *Remote { func (c *Remote) String() string { return "remote:" + c.baseURL } // Open retrieves an object from the remote. -func (c *Remote) Open(ctx context.Context, _ string, key Key) (io.ReadCloser, http.Header, error) { +func (c *Remote) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { @@ -65,7 +65,7 @@ func (c *Remote) Open(ctx context.Context, _ string, key Key) (io.ReadCloser, ht } // Stat retrieves headers for an object from the remote. -func (c *Remote) Stat(ctx context.Context, _ string, key Key) (http.Header, error) { +func (c *Remote) Stat(ctx context.Context, key Key) (http.Header, error) { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) if err != nil { @@ -93,7 +93,7 @@ func (c *Remote) Stat(ctx context.Context, _ string, key Key) (http.Header, erro } // Create stores a new object in the remote. 
-func (c *Remote) Create(ctx context.Context, _ string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (c *Remote) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { pr, pw := io.Pipe() url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) @@ -135,7 +135,7 @@ func (c *Remote) Create(ctx context.Context, _ string, key Key, headers http.Hea } // Delete removes an object from the remote. -func (c *Remote) Delete(ctx context.Context, _ string, key Key) error { +func (c *Remote) Delete(ctx context.Context, key Key) error { url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { @@ -223,3 +223,16 @@ func (wc *writeCloser) Close() error { } return nil } + +// Namespace creates a namespaced view (which is ignored by remote cache). +// Remote cache ignores namespaces since the server-side API v1 handles namespacing. +func (c *Remote) Namespace(_ string) Cache { + // Remote cache doesn't use namespacing on client side + return c +} + +// ListNamespaces is not implemented for the remote cache; it always returns ErrStatsUnavailable. 
+func (c *Remote) ListNamespaces(_ context.Context) ([]string, error) { + // TODO: Could add an API endpoint for this + return nil, ErrStatsUnavailable +} diff --git a/internal/cache/s3.go b/internal/cache/s3.go index ad3a9ce..96ed174 100644 --- a/internal/cache/s3.go +++ b/internal/cache/s3.go @@ -41,9 +41,10 @@ type S3Config struct { } type S3 struct { - logger *slog.Logger - config S3Config - client *minio.Client + logger *slog.Logger + config S3Config + namespace string + client *minio.Client } var _ Cache = (*S3)(nil) @@ -156,21 +157,21 @@ func (s *S3) Close() error { return nil } -func (s *S3) keyToPath(strategyName string, key Key) string { +func (s *S3) keyToPath(namespace string, key Key) string { hexKey := key.String() prefix := "" - // Add strategy name as prefix if available + // Add namespace as prefix if available - if strategyName != "" { - prefix = strategyName + "/" + if namespace != "" { + prefix = namespace + "/" } // Use first two hex digits as directory, full hex as filename return prefix + hexKey[:2] + "/" + hexKey } -func (s *S3) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { - objectName := s.keyToPath(strategyName, key) +func (s *S3) Stat(ctx context.Context, key Key) (http.Header, error) { + objectName := s.keyToPath(s.namespace, key) // Get object info to check metadata objInfo, err := s.client.StatObject(ctx, s.config.Bucket, objectName, minio.StatObjectOptions{}) @@ -190,7 +191,7 @@ func (s *S3) Stat(ctx context.Context, strategyName string, key Key) (http.Heade if err := expiresAt.UnmarshalText([]byte(expiresAtStr)); err == nil { if time.Now().After(expiresAt) { // Object expired, delete it and return not found - return nil, errors.Join(os.ErrNotExist, s.Delete(ctx, strategyName, key)) + return nil, errors.Join(os.ErrNotExist, s.Delete(ctx, key)) } } } @@ -212,8 +213,8 @@ func (s *S3) Stat(ctx context.Context, strategyName string, key Key) (http.Heade return headers, nil } -func (s *S3) Open(ctx context.Context, strategyName string, key Key) 
(io.ReadCloser, http.Header, error) { - objectName := s.keyToPath(strategyName, key) +func (s *S3) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { + objectName := s.keyToPath(s.namespace, key) // Get object info to retrieve metadata and check expiration objInfo, err := s.client.StatObject(ctx, s.config.Bucket, objectName, minio.StatObjectOptions{}) @@ -231,7 +232,7 @@ func (s *S3) Open(ctx context.Context, strategyName string, key Key) (io.ReadClo var expiresAt time.Time if err := expiresAt.UnmarshalText([]byte(expiresAtStr)); err == nil { if time.Now().After(expiresAt) { - return nil, nil, errors.Join(os.ErrNotExist, s.Delete(ctx, strategyName, key)) + return nil, nil, errors.Join(os.ErrNotExist, s.Delete(ctx, key)) } } } @@ -282,7 +283,7 @@ func (r *s3Reader) Close() error { return errors.WithStack(r.obj.Close()) } -func (s *S3) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (s *S3) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { if ttl > s.config.MaxTTL || ttl == 0 { ttl = s.config.MaxTTL } @@ -296,14 +297,14 @@ func (s *S3) Create(ctx context.Context, strategyName string, key Key, headers h pr, pw := io.Pipe() writer := &s3Writer{ - s3: s, - key: key, - strategyName: strategyName, - pipe: pw, - expiresAt: expiresAt, - headers: clonedHeaders, - ctx: ctx, - errCh: make(chan error, 1), + s3: s, + key: key, + namespace: s.namespace, + pipe: pw, + expiresAt: expiresAt, + headers: clonedHeaders, + ctx: ctx, + errCh: make(chan error, 1), } // Start upload in background goroutine @@ -312,8 +313,8 @@ func (s *S3) Create(ctx context.Context, strategyName string, key Key, headers h return writer, nil } -func (s *S3) Delete(ctx context.Context, strategyName string, key Key) error { - objectName := s.keyToPath(strategyName, key) +func (s *S3) Delete(ctx context.Context, key Key) error { + objectName := 
s.keyToPath(s.namespace, key) err := s.client.RemoveObject(ctx, s.config.Bucket, objectName, minio.RemoveObjectOptions{}) if err != nil { @@ -330,15 +331,15 @@ func (s *S3) Stats(_ context.Context) (Stats, error) { } type s3Writer struct { - s3 *S3 - key Key - strategyName string - pipe *io.PipeWriter - expiresAt time.Time - headers http.Header - ctx context.Context - errCh chan error - uploadErr error + s3 *S3 + key Key + namespace string + pipe *io.PipeWriter + expiresAt time.Time + headers http.Header + ctx context.Context + errCh chan error + uploadErr error } func (w *s3Writer) Write(p []byte) (int, error) { @@ -385,7 +386,7 @@ func (w *s3Writer) upload(pr *io.PipeReader) { _ = pr.CloseWithError(uploadErr) }() - objectName := w.s3.keyToPath(w.strategyName, w.key) + objectName := w.s3.keyToPath(w.namespace, w.key) // Prepare user metadata userMetadata := make(map[string]string) @@ -439,3 +440,16 @@ func (w *s3Writer) upload(pr *io.PipeReader) { w.errCh <- nil } + +// Namespace creates a namespaced view of the S3 cache. +func (s *S3) Namespace(namespace string) Cache { + c := *s + c.namespace = namespace + return &c +} + +// ListNamespaces returns all unique namespaces in the S3 cache. +// Not implemented for S3 - would require listing all objects. +func (s *S3) ListNamespaces(_ context.Context) ([]string, error) { + return nil, ErrStatsUnavailable +} diff --git a/internal/cache/tiered.go b/internal/cache/tiered.go index 60e5149..d248457 100644 --- a/internal/cache/tiered.go +++ b/internal/cache/tiered.go @@ -50,7 +50,7 @@ func (t Tiered) Close() error { } // Create a new object. All underlying caches will be written to in sequence. -func (t Tiered) Create(ctx context.Context, strategyName string, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { +func (t Tiered) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { // The first error will cancel all outstanding writes. 
ctx, cancel := context.WithCancelCause(ctx) @@ -59,7 +59,7 @@ func (t Tiered) Create(ctx context.Context, strategyName string, key Key, header wg := sync.WaitGroup{} for i, cache := range t.caches { wg.Go(func() { - w, err := cache.Create(ctx, strategyName, key, headers, ttl) + w, err := cache.Create(ctx, key, headers, ttl) if err != nil { cancel(err) } @@ -78,11 +78,11 @@ func (t Tiered) Create(ctx context.Context, strategyName string, key Key, header } // Delete from all underlying caches. All errors are returned. -func (t Tiered) Delete(ctx context.Context, strategyName string, key Key) error { +func (t Tiered) Delete(ctx context.Context, key Key) error { wg := sync.WaitGroup{} errs := make([]error, len(t.caches)) for i, cache := range t.caches { - wg.Go(func() { errs[i] = errors.WithStack(cache.Delete(ctx, strategyName, key)) }) + wg.Go(func() { errs[i] = errors.WithStack(cache.Delete(ctx, key)) }) } wg.Wait() return errors.Join(errs...) @@ -91,10 +91,10 @@ func (t Tiered) Delete(ctx context.Context, strategyName string, key Key) error // Stat returns headers from the first cache that succeeds. // // If all caches fail, all errors are returned. -func (t Tiered) Stat(ctx context.Context, strategyName string, key Key) (http.Header, error) { +func (t Tiered) Stat(ctx context.Context, key Key) (http.Header, error) { errs := make([]error, len(t.caches)) for i, c := range t.caches { - headers, err := c.Stat(ctx, strategyName, key) + headers, err := c.Stat(ctx, key) errs[i] = err if errors.Is(err, os.ErrNotExist) { continue @@ -109,10 +109,10 @@ func (t Tiered) Stat(ctx context.Context, strategyName string, key Key) (http.He // Open returns a reader from the first cache that succeeds. // // If all caches fail, all errors are returned. 
-func (t Tiered) Open(ctx context.Context, strategyName string, key Key) (io.ReadCloser, http.Header, error) { +func (t Tiered) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { errs := make([]error, len(t.caches)) for i, c := range t.caches { - r, headers, err := c.Open(ctx, strategyName, key) + r, headers, err := c.Open(ctx, key) errs[i] = err if errors.Is(err, os.ErrNotExist) { continue @@ -179,3 +179,33 @@ func (t tieredWriter) Write(p []byte) (n int, err error) { } return } + +// Namespace creates a namespaced view of the tiered cache. +// All underlying caches are also namespaced. +func (t Tiered) Namespace(namespace string) Cache { + namespaced := make([]Cache, len(t.caches)) + for i, c := range t.caches { + namespaced[i] = c.Namespace(namespace) + } + return Tiered{caches: namespaced} +} + +// ListNamespaces returns unique namespaces from all underlying caches. +func (t Tiered) ListNamespaces(ctx context.Context) ([]string, error) { + namespaceSet := make(map[string]bool) + for _, c := range t.caches { + namespaces, err := c.ListNamespaces(ctx) + if err != nil && !errors.Is(err, ErrStatsUnavailable) { + return nil, errors.WithStack(err) + } + for _, ns := range namespaces { + namespaceSet[ns] = true + } + } + + namespaces := make([]string, 0, len(namespaceSet)) + for ns := range namespaceSet { + namespaces = append(namespaces, ns) + } + return namespaces, nil +} diff --git a/internal/snapshot/snapshot.go b/internal/snapshot/snapshot.go index e3ce0fe..2e8f5bb 100644 --- a/internal/snapshot/snapshot.go +++ b/internal/snapshot/snapshot.go @@ -21,7 +21,7 @@ import ( // The archive preserves all file permissions, ownership, and symlinks. // The operation is fully streaming - no temporary files are created. // Exclude patterns use tar's --exclude syntax. 
-func Create(ctx context.Context, remote cache.Cache, strategyName string, key cache.Key, directory string, ttl time.Duration, excludePatterns []string) error { +func Create(ctx context.Context, remote cache.Cache, key cache.Key, directory string, ttl time.Duration, excludePatterns []string) error { // Verify directory exists if info, err := os.Stat(directory); err != nil { return errors.Wrap(err, "failed to stat directory") @@ -33,7 +33,7 @@ func Create(ctx context.Context, remote cache.Cache, strategyName string, key ca headers.Set("Content-Type", "application/zstd") headers.Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(directory)+".tar.zst")) - wc, err := remote.Create(ctx, strategyName, key, headers, ttl) + wc, err := remote.Create(ctx, key, headers, ttl) if err != nil { return errors.Wrap(err, "failed to create object") } @@ -90,8 +90,8 @@ func Create(ctx context.Context, remote cache.Cache, strategyName string, key ca // The archive is decompressed with zstd and extracted with tar, preserving // all file permissions, ownership, and symlinks. // The operation is fully streaming - no temporary files are created. 
-func Restore(ctx context.Context, remote cache.Cache, strategyName string, key cache.Key, directory string) error { - rc, _, err := remote.Open(ctx, strategyName, key) +func Restore(ctx context.Context, remote cache.Cache, key cache.Key, directory string) error { + rc, _, err := remote.Open(ctx, key) if err != nil { return errors.Wrap(err, "failed to open object") } diff --git a/internal/snapshot/snapshot_test.go b/internal/snapshot/snapshot_test.go index efe7dea..b691947 100644 --- a/internal/snapshot/snapshot_test.go +++ b/internal/snapshot/snapshot_test.go @@ -30,15 +30,15 @@ func TestCreateAndRestoreRoundTrip(t *testing.T) { assert.NoError(t, os.Mkdir(filepath.Join(srcDir, "subdir"), 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "subdir", "file3.txt"), []byte("content3"), 0o644)) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) - headers, err := mem.Stat(ctx, "", key) + headers, err := mem.Stat(ctx, key) assert.NoError(t, err) assert.Equal(t, "application/zstd", headers.Get("Content-Type")) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) content1, err := os.ReadFile(filepath.Join(dstDir, "file1.txt")) @@ -71,11 +71,11 @@ func TestCreateWithExcludePatterns(t *testing.T) { assert.NoError(t, os.Mkdir(filepath.Join(srcDir, "logs"), 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "logs", "app.log"), []byte("excluded"), 0o644)) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, []string{"*.log", "logs"}) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, []string{"*.log", "logs"}) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) _, err = os.Stat(filepath.Join(dstDir, "include.txt")) @@ -99,11 +99,11 @@ func 
TestCreatePreservesSymlinks(t *testing.T) { assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "target.txt"), []byte("target"), 0o644)) assert.NoError(t, os.Symlink("target.txt", filepath.Join(srcDir, "link.txt"))) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) info, err := os.Lstat(filepath.Join(dstDir, "link.txt")) @@ -122,7 +122,7 @@ func TestCreateNonexistentDirectory(t *testing.T) { defer mem.Close() key := cache.Key{1, 2, 3} - err = snapshot.Create(ctx, mem, "", key, "/nonexistent/directory", time.Hour, nil) + err = snapshot.Create(ctx, mem, key, "/nonexistent/directory", time.Hour, nil) assert.Error(t, err) } @@ -136,7 +136,7 @@ func TestCreateNotADirectory(t *testing.T) { tmpFile := filepath.Join(t.TempDir(), "file.txt") assert.NoError(t, os.WriteFile(tmpFile, []byte("content"), 0o644)) - err = snapshot.Create(ctx, mem, "", key, tmpFile, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, tmpFile, time.Hour, nil) assert.Error(t, err) assert.Contains(t, err.Error(), "not a directory") } @@ -158,7 +158,7 @@ func TestCreateContextCancellation(t *testing.T) { cancelCtx, cancel := context.WithCancel(context.Background()) cancel() - err = snapshot.Create(cancelCtx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(cancelCtx, mem, key, srcDir, time.Hour, nil) assert.Error(t, err) } @@ -170,7 +170,7 @@ func TestRestoreNonexistentKey(t *testing.T) { key := cache.Key{1, 2, 3} dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.Error(t, err) } @@ -184,11 +184,11 @@ func TestRestoreCreatesTargetDirectory(t *testing.T) { srcDir := t.TempDir() assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "file.txt"), []byte("content"), 
0o644)) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := filepath.Join(t.TempDir(), "nested", "target") - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) content, err := os.ReadFile(filepath.Join(dstDir, "file.txt")) @@ -210,14 +210,14 @@ func TestRestoreContextCancellation(t *testing.T) { assert.NoError(t, os.WriteFile(filename, content, 0o644)) } - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) cancelCtx, cancel := context.WithCancel(context.Background()) cancel() dstDir := t.TempDir() - err = snapshot.Restore(cancelCtx, mem, "", key, dstDir) + err = snapshot.Restore(cancelCtx, mem, key, dstDir) assert.Error(t, err) } @@ -230,11 +230,11 @@ func TestCreateEmptyDirectory(t *testing.T) { srcDir := t.TempDir() - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) entries, err := os.ReadDir(dstDir) @@ -254,11 +254,11 @@ func TestCreateWithNestedDirectories(t *testing.T) { assert.NoError(t, os.MkdirAll(deepPath, 0o755)) assert.NoError(t, os.WriteFile(filepath.Join(deepPath, "deep.txt"), []byte("deep content"), 0o644)) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) dstDir := t.TempDir() - err = snapshot.Restore(ctx, mem, "", key, dstDir) + err = snapshot.Restore(ctx, mem, key, dstDir) assert.NoError(t, err) content, err := os.ReadFile(filepath.Join(dstDir, "a", "b", "c", "d", "e", "deep.txt")) @@ -276,10 +276,10 @@ func 
TestCreateSetsCorrectHeaders(t *testing.T) { srcDir := t.TempDir() assert.NoError(t, os.WriteFile(filepath.Join(srcDir, "file.txt"), []byte("content"), 0o644)) - err = snapshot.Create(ctx, mem, "", key, srcDir, time.Hour, nil) + err = snapshot.Create(ctx, mem, key, srcDir, time.Hour, nil) assert.NoError(t, err) - headers, err := mem.Stat(ctx, "", key) + headers, err := mem.Stat(ctx, key) assert.NoError(t, err) assert.Equal(t, "application/zstd", headers.Get("Content-Type")) assert.Contains(t, headers.Get("Content-Disposition"), "attachment") diff --git a/internal/strategy/api.go b/internal/strategy/api.go index c2ef959..c2730c1 100644 --- a/internal/strategy/api.go +++ b/internal/strategy/api.go @@ -87,7 +87,9 @@ func (r *Registry) Create( vars map[string]string, ) (Strategy, error) { if entry, ok := r.registry[name]; ok { - return errors.WithStack2(entry.factory(ctx, config, cache, mux, vars)) + // Create a namespaced view of the cache for this strategy + namespacedCache := cache.Namespace(name) + return errors.WithStack2(entry.factory(ctx, config, namespacedCache, mux, vars)) } return nil, errors.Errorf("%s: %w", name, ErrNotFound) } diff --git a/internal/strategy/apiv1.go b/internal/strategy/apiv1.go index 44201b0..d92b0b9 100644 --- a/internal/strategy/apiv1.go +++ b/internal/strategy/apiv1.go @@ -15,9 +15,6 @@ import ( "github.com/block/cachew/internal/logging" ) -// APIV1Name is the strategy name used for cache prefixing. 
-const APIV1Name = "apiv1" - func RegisterAPIV1(r *Registry) { Register(r, "apiv1", "The stable API of the cache server.", NewAPIV1) } @@ -52,7 +49,7 @@ func (d *APIV1) statObject(w http.ResponseWriter, r *http.Request) { return } - headers, err := d.cache.Stat(r.Context(), APIV1Name, key) + headers, err := d.cache.Stat(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -73,7 +70,7 @@ func (d *APIV1) getObject(w http.ResponseWriter, r *http.Request) { return } - cr, headers, err := d.cache.Open(r.Context(), APIV1Name, key) + cr, headers, err := d.cache.Open(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -114,7 +111,7 @@ func (d *APIV1) putObject(w http.ResponseWriter, r *http.Request) { // Extract and filter headers from request headers := cache.FilterTransportHeaders(r.Header) - cw, err := d.cache.Create(r.Context(), APIV1Name, key, headers, ttl) + cw, err := d.cache.Create(r.Context(), key, headers, ttl) if err != nil { d.httpError(w, http.StatusInternalServerError, err, "Failed to create cache writer", slog.String("key", key.String())) return @@ -138,7 +135,7 @@ func (d *APIV1) deleteObject(w http.ResponseWriter, r *http.Request) { return } - err = d.cache.Delete(r.Context(), APIV1Name, key) + err = d.cache.Delete(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) diff --git a/internal/strategy/artifactory.go b/internal/strategy/artifactory.go index d801131..08c105c 100644 --- a/internal/strategy/artifactory.go +++ b/internal/strategy/artifactory.go @@ -14,10 +14,8 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) -const artifactoryStrategyName = "artifactory" - func RegisterArtifactory(r *Registry) { - Register(r, artifactoryStrategyName, "Caches artifacts from an Artifactory server.", 
NewArtifactory) + Register(r, "artifactory", "Caches artifacts from an Artifactory server.", NewArtifactory) } // ArtifactoryConfig represents the configuration for the Artifactory strategy. @@ -68,7 +66,6 @@ func NewArtifactory(ctx context.Context, config ArtifactoryConfig, cache cache.C } hdlr := handler.New(a.client, cache). - StrategyName(artifactoryStrategyName). CacheKey(func(r *http.Request) string { return a.buildTargetURL(r).String() }). diff --git a/internal/strategy/git/git.go b/internal/strategy/git/git.go index 7e539b2..7bd38f5 100644 --- a/internal/strategy/git/git.go +++ b/internal/strategy/git/git.go @@ -26,8 +26,6 @@ import ( "github.com/block/cachew/internal/strategy" ) -const StrategyName = "git" - func Register(r *strategy.Registry, scheduler jobscheduler.Scheduler, cloneManagerProvider gitclone.ManagerProvider, tokenManagerProvider githubapp.TokenManagerProvider) { strategy.Register(r, "git", "Caches Git repositories, including tarball snapshots.", func(ctx context.Context, config Config, cache cache.Cache, mux strategy.Mux) (*Strategy, error) { return New(ctx, config, scheduler, cache, mux, cloneManagerProvider, tokenManagerProvider) @@ -368,7 +366,7 @@ func (s *Strategy) serveCachedArtifact(w http.ResponseWriter, r *http.Request, h upstreamURL := "https://" + host + "/" + repoPath cacheKey := cache.NewKey(upstreamURL + "." 
+ artifact) - reader, headers, err := s.cache.Open(ctx, StrategyName, cacheKey) + reader, headers, err := s.cache.Open(ctx, cacheKey) if err != nil { if errors.Is(err, os.ErrNotExist) { logger.DebugContext(ctx, artifact+" not found in cache", diff --git a/internal/strategy/git/snapshot.go b/internal/strategy/git/snapshot.go index 6217800..4263d67 100644 --- a/internal/strategy/git/snapshot.go +++ b/internal/strategy/git/snapshot.go @@ -57,7 +57,7 @@ func (s *Strategy) generateAndUploadSnapshot(ctx context.Context, repo *gitclone ttl := 7 * 24 * time.Hour excludePatterns := []string{"*.lock"} - err = snapshot.Create(ctx, s.cache, StrategyName, cacheKey, snapshotDir, ttl, excludePatterns) + err = snapshot.Create(ctx, s.cache, cacheKey, snapshotDir, ttl, excludePatterns) // Always clean up the snapshot working directory. if rmErr := os.RemoveAll(snapshotDir); rmErr != nil { diff --git a/internal/strategy/git/snapshot_test.go b/internal/strategy/git/snapshot_test.go index 1982726..d59566e 100644 --- a/internal/strategy/git/snapshot_test.go +++ b/internal/strategy/git/snapshot_test.go @@ -43,7 +43,7 @@ func TestSnapshotHTTPEndpoint(t *testing.T) { headers := make(map[string][]string) headers["Content-Type"] = []string{"application/zstd"} - writer, err := memCache.Create(ctx, "", cacheKey, headers, 24*time.Hour) + writer, err := memCache.Create(ctx, cacheKey, headers, 24*time.Hour) assert.NoError(t, err) _, err = writer.Write(snapshotData) assert.NoError(t, err) diff --git a/internal/strategy/github_releases.go b/internal/strategy/github_releases.go index fe1dbdd..5add55b 100644 --- a/internal/strategy/github_releases.go +++ b/internal/strategy/github_releases.go @@ -17,10 +17,8 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) -const githubReleasesStrategyName = "github-releases" - func RegisterGitHubReleases(r *Registry, tokenManagerProvider githubapp.TokenManagerProvider) { - Register(r, githubReleasesStrategyName, "Caches public and authenticated 
GitHub releases.", func(ctx context.Context, config GitHubReleasesConfig, cache cache.Cache, mux Mux) (*GitHubReleases, error) { + Register(r, "github-releases", "Caches public and authenticated GitHub releases.", func(ctx context.Context, config GitHubReleasesConfig, cache cache.Cache, mux Mux) (*GitHubReleases, error) { return NewGitHubReleases(ctx, config, cache, mux, tokenManagerProvider) }) } @@ -61,7 +59,6 @@ func NewGitHubReleases(ctx context.Context, config GitHubReleasesConfig, cache c } // eg. https://github.com/alecthomas/chroma/releases/download/v2.21.1/chroma-2.21.1-darwin-amd64.tar.gz h := handler.New(s.client, cache). - StrategyName(githubReleasesStrategyName). CacheKey(func(r *http.Request) string { org := r.PathValue("org") repo := r.PathValue("repo") @@ -82,7 +79,7 @@ func NewGitHubReleases(ctx context.Context, config GitHubReleasesConfig, cache c var _ Strategy = (*GitHubReleases)(nil) -func (g *GitHubReleases) String() string { return githubReleasesStrategyName } +func (g *GitHubReleases) String() string { return "github-releases" } // newGitHubRequest creates a new HTTP request with GitHub API headers and authentication. 
func (g *GitHubReleases) newGitHubRequest(ctx context.Context, url, accept, org string) (*http.Request, error) { diff --git a/internal/strategy/github_releases_test.go b/internal/strategy/github_releases_test.go index 25e3ada..ef8e795 100644 --- a/internal/strategy/github_releases_test.go +++ b/internal/strategy/github_releases_test.go @@ -231,7 +231,7 @@ func TestGitHubReleasesPublicRepoNotFound(t *testing.T) { assert.NoError(t, err) defer memCache.Close() - _, _, err = memCache.Open(context.Background(), "", key) + _, _, err = memCache.Open(context.Background(), key) assert.Error(t, err, "non-OK responses should not be cached") } diff --git a/internal/strategy/gomod/cacher.go b/internal/strategy/gomod/cacher.go index 7cd53a2..16e91d7 100644 --- a/internal/strategy/gomod/cacher.go +++ b/internal/strategy/gomod/cacher.go @@ -10,8 +10,6 @@ import ( "github.com/block/cachew/internal/cache" ) -const StrategyName = "gomod" - type goproxyCacher struct { cache cache.Cache } @@ -19,7 +17,7 @@ type goproxyCacher struct { func (g *goproxyCacher) Get(ctx context.Context, name string) (io.ReadCloser, error) { key := cache.NewKey(name) - rc, _, err := g.cache.Open(ctx, StrategyName, key) + rc, _, err := g.cache.Open(ctx, key) if err != nil { return nil, fs.ErrNotExist } @@ -34,7 +32,7 @@ func (g *goproxyCacher) Put(ctx context.Context, name string, content io.ReadSee key := cache.NewKey(name) - wc, err := g.cache.Create(ctx, StrategyName, key, nil, 0) + wc, err := g.cache.Create(ctx, key, nil, 0) if err != nil { return fmt.Errorf("create cache entry: %w", err) } diff --git a/internal/strategy/handler/handler.go b/internal/strategy/handler/handler.go index 06d5475..73e6201 100644 --- a/internal/strategy/handler/handler.go +++ b/internal/strategy/handler/handler.go @@ -20,7 +20,6 @@ import ( // Example usage: // // h := handler.New(client, cache). -// StrategyName("my-strategy"). // CacheKey(func(r *http.Request) string { // return "custom-key" // }). 
@@ -31,7 +30,6 @@ import ( type Handler struct { client *http.Client cache cache.Cache - strategyName string cacheKeyFunc func(*http.Request) string transformFunc func(*http.Request) (*http.Request, error) errorHandler func(error, http.ResponseWriter, *http.Request) @@ -60,11 +58,6 @@ func New(client *http.Client, c cache.Cache) *Handler { } } -func (h *Handler) StrategyName(name string) *Handler { - h.strategyName = name - return h -} - // CacheKey sets the function used to determine the cache key for a request. // The function receives the original incoming request. func (h *Handler) CacheKey(f func(*http.Request) string) *Handler { @@ -121,7 +114,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveCached(w http.ResponseWriter, r *http.Request, key cache.Key, logger *slog.Logger) bool { - cr, headers, err := h.cache.Open(r.Context(), h.strategyName, key) + cr, headers, err := h.cache.Open(r.Context(), key) if err != nil { if !errors.Is(err, os.ErrNotExist) { h.errorHandler(httputil.Errorf(http.StatusInternalServerError, "failed to open cache: %w", err), w, r) @@ -178,7 +171,7 @@ func (h *Handler) streamNonOKResponse(w http.ResponseWriter, resp *http.Response func (h *Handler) streamAndCache(w http.ResponseWriter, r *http.Request, key cache.Key, resp *http.Response, logger *slog.Logger) { ttl := h.ttlFunc(r) responseHeaders := maps.Clone(resp.Header) - cw, err := h.cache.Create(r.Context(), h.strategyName, key, responseHeaders, ttl) + cw, err := h.cache.Create(r.Context(), key, responseHeaders, ttl) if err != nil { h.errorHandler(httputil.Errorf(http.StatusInternalServerError, "failed to create cache entry: %w", err), w, r) return diff --git a/internal/strategy/hermit.go b/internal/strategy/hermit.go index 9fedbb7..52f1fbc 100644 --- a/internal/strategy/hermit.go +++ b/internal/strategy/hermit.go @@ -15,10 +15,8 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) -const hermitStrategyName = "hermit" - 
func RegisterHermit(r *Registry) { - Register(r, hermitStrategyName, "Caches Hermit package downloads.", func(ctx context.Context, config HermitConfig, c cache.Cache, mux Mux) (*Hermit, error) { + Register(r, "hermit", "Caches Hermit package downloads.", func(ctx context.Context, config HermitConfig, c cache.Cache, mux Mux) (*Hermit, error) { return NewHermit(ctx, config, nil, c, mux) }) } @@ -72,11 +70,10 @@ func NewHermit(ctx context.Context, config HermitConfig, _ jobscheduler.Schedule return s, nil } -func (s *Hermit) String() string { return hermitStrategyName } +func (s *Hermit) String() string { return "hermit" } func (s *Hermit) createDirectHandler(c cache.Cache) http.Handler { return handler.New(s.client, c). - StrategyName(hermitStrategyName). CacheKey(func(r *http.Request) string { return s.buildOriginalURL(r) }). @@ -94,7 +91,6 @@ func (s *Hermit) createRedirectHandler(isInternalRedirect bool, c cache.Cache) h } return handler.New(s.client, cacheBackend). - StrategyName(hermitStrategyName). CacheKey(func(r *http.Request) string { return s.buildGitHubURL(r) }). 
diff --git a/internal/strategy/hermit_test.go b/internal/strategy/hermit_test.go index 1c5b51b..e815f28 100644 --- a/internal/strategy/hermit_test.go +++ b/internal/strategy/hermit_test.go @@ -130,7 +130,7 @@ func TestHermitNonOKStatus(t *testing.T) { assert.Equal(t, "not found", w.Body.String()) key := cache.NewKey("https://example.com/missing.tar.gz") - _, _, err := memCache.Open(context.Background(), "", key) + _, _, err := memCache.Open(context.Background(), key) assert.Error(t, err, "non-OK responses should not be cached") } diff --git a/internal/strategy/host.go b/internal/strategy/host.go index b06e360..afa4f8a 100644 --- a/internal/strategy/host.go +++ b/internal/strategy/host.go @@ -12,10 +12,8 @@ import ( "github.com/block/cachew/internal/strategy/handler" ) -const hostStrategyName = "host" - func RegisterHost(r *Registry) { - Register(r, hostStrategyName, "A generic host-based proxying strategy.", NewHost) + Register(r, "host", "A generic host-based proxying strategy.", NewHost) } // HostConfig represents the configuration for the Host strategy. @@ -57,7 +55,6 @@ func NewHost(ctx context.Context, config HostConfig, cache cache.Cache, mux Mux) } hdlr := handler.New(h.client, cache). - StrategyName(hostStrategyName). CacheKey(func(r *http.Request) string { return h.buildTargetURL(r).String() }). 
diff --git a/internal/strategy/host_test.go b/internal/strategy/host_test.go index 9db0ead..1f8442b 100644 --- a/internal/strategy/host_test.go +++ b/internal/strategy/host_test.go @@ -83,7 +83,7 @@ func TestHostNonOKStatus(t *testing.T) { assert.Equal(t, "not found", w.Body.String()) key := cache.NewKey(backend.URL + "/missing") - _, _, err = memCache.Open(context.Background(), "", key) + _, _, err = memCache.Open(context.Background(), key) assert.Error(t, err, "non-OK responses should not be cached") } From a828d73ac1b877acea3aae5db36b972744fa9c02 Mon Sep 17 00:00:00 2001 From: Neha Sherpa Date: Thu, 19 Feb 2026 19:06:31 -0800 Subject: [PATCH 3/5] fix: Add composite key with namespace to disk_metadatadb --- internal/cache/disk.go | 77 +++++++++----- internal/cache/disk_metadb.go | 87 ++++++++++----- internal/cache/disk_namespace_test.go | 148 ++++++++++++++++++++++++++ internal/cache/memory.go | 23 ++-- internal/cache/remote.go | 27 ++++- internal/config/config.go | 5 +- internal/strategy/apiv1.go | 14 +++ 7 files changed, 311 insertions(+), 70 deletions(-) create mode 100644 internal/cache/disk_namespace_test.go diff --git a/internal/cache/disk.go b/internal/cache/disk.go index 481f664..b668af7 100644 --- a/internal/cache/disk.go +++ b/internal/cache/disk.go @@ -200,7 +200,7 @@ func (d *Disk) Delete(_ context.Context, key Key) error { // Check if file is expired expired := false - expiresAt, err := d.db.getTTL(key) + expiresAt, err := d.db.getTTL(d.namespace, key) if err == nil && time.Now().After(expiresAt) { expired = true } @@ -215,7 +215,7 @@ func (d *Disk) Delete(_ context.Context, key Key) error { } // Remove metadata - if err := d.db.delete(key); err != nil { + if err := d.db.delete(d.namespace, key); err != nil { return errors.Errorf("failed to delete TTL metadata: %w", err) } @@ -235,7 +235,7 @@ func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { return nil, errors.Errorf("failed to stat file: %w", err) } - expiresAt, err := 
d.db.getTTL(key) + expiresAt, err := d.db.getTTL(d.namespace, key) if err != nil { return nil, errors.Errorf("failed to get TTL: %w", err) } @@ -244,7 +244,7 @@ func (d *Disk) Stat(ctx context.Context, key Key) (http.Header, error) { return nil, errors.Join(fs.ErrNotExist, d.Delete(ctx, key)) } - headers, err := d.db.getHeaders(key) + headers, err := d.db.getHeaders(d.namespace, key) if err != nil { return nil, errors.Errorf("failed to get headers: %w", err) } @@ -261,7 +261,7 @@ func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, e return nil, nil, errors.Errorf("failed to open file: %w", err) } - expiresAt, err := d.db.getTTL(key) + expiresAt, err := d.db.getTTL(d.namespace, key) if err != nil { return nil, nil, errors.Join(err, f.Close()) } @@ -271,7 +271,7 @@ func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, e return nil, nil, errors.Join(fs.ErrNotExist, f.Close(), d.Delete(ctx, key)) } - headers, err := d.db.getHeaders(key) + headers, err := d.db.getHeaders(d.namespace, key) if err != nil { return nil, nil, errors.Join(errors.Errorf("failed to get headers: %w", err), f.Close()) } @@ -280,7 +280,7 @@ func (d *Disk) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, e ttl := min(expiresAt.Sub(now), d.config.MaxTTL) newExpiresAt := now.Add(ttl) - if err := d.db.setTTL(key, newExpiresAt); err != nil { + if err := d.db.setTTL(d.namespace, key, newExpiresAt); err != nil { return nil, nil, errors.Join(errors.Errorf("failed to update expiration time: %w", err), f.Close()) } @@ -319,17 +319,23 @@ func (d *Disk) evictionLoop(ctx context.Context) { } } -func (d *Disk) evict() error { - type fileInfo struct { - key Key - path string - size int64 - expiresAt time.Time - accessedAt time.Time - } +type evictFileInfo struct { + namespace string + key Key + path string + size int64 + expiresAt time.Time + accessedAt time.Time +} - var remainingFiles []fileInfo - var expiredKeys []Key +type evictEntryKey 
struct { + namespace string + key Key +} + +func (d *Disk) evict() error { + var remainingFiles []evictFileInfo + var expiredEntries []evictEntryKey now := time.Now() err := d.db.walk(func(key Key, namespace string, expiresAt time.Time) error { @@ -339,7 +345,7 @@ func (d *Disk) evict() error { info, err := os.Stat(fullPath) if err != nil { if errors.Is(err, fs.ErrNotExist) { - expiredKeys = append(expiredKeys, key) + expiredEntries = append(expiredEntries, evictEntryKey{namespace, key}) } return nil } @@ -348,10 +354,11 @@ func (d *Disk) evict() error { if err := os.Remove(fullPath); err != nil && !errors.Is(err, fs.ErrNotExist) { return errors.Errorf("failed to delete expired file %s: %w", path, err) } - expiredKeys = append(expiredKeys, key) + expiredEntries = append(expiredEntries, evictEntryKey{namespace, key}) d.size.Add(-info.Size()) } else { - remainingFiles = append(remainingFiles, fileInfo{ + remainingFiles = append(remainingFiles, evictFileInfo{ + namespace: namespace, key: key, path: path, size: info.Size(), @@ -365,10 +372,24 @@ func (d *Disk) evict() error { return errors.Errorf("failed to walk TTL entries: %w", err) } - if err := d.db.deleteAll(expiredKeys); err != nil { - return errors.Errorf("failed to delete TTL metadata: %w", err) + if err := d.deleteExpiredEntries(expiredEntries); err != nil { + return err } + return d.evictBySize(remainingFiles) +} + +func (d *Disk) deleteExpiredEntries(expiredEntries []evictEntryKey) error { + if len(expiredEntries) == 0 { + return nil + } + if err := d.db.deleteAll(expiredEntries); err != nil { + return errors.Errorf("failed to delete expired metadata: %w", err) + } + return nil +} + +func (d *Disk) evictBySize(remainingFiles []evictFileInfo) error { limitBytes := int64(d.config.LimitMB) * 1024 * 1024 if d.size.Load() <= limitBytes { return nil @@ -379,7 +400,7 @@ func (d *Disk) evict() error { return remainingFiles[i].accessedAt.Before(remainingFiles[j].accessedAt) }) - var sizeEvictedKeys []Key + var 
sizeEvictedEntries []evictEntryKey for _, f := range remainingFiles { if d.size.Load() <= limitBytes { break @@ -389,12 +410,16 @@ func (d *Disk) evict() error { if err := os.Remove(fullPath); err != nil && !errors.Is(err, fs.ErrNotExist) { return errors.Errorf("failed to delete file during size eviction %s: %w", f.path, err) } - sizeEvictedKeys = append(sizeEvictedKeys, f.key) + sizeEvictedEntries = append(sizeEvictedEntries, evictEntryKey{f.namespace, f.key}) d.size.Add(-f.size) } - if err := d.db.deleteAll(sizeEvictedKeys); err != nil { - return errors.Errorf("failed to delete TTL metadata: %w", err) + if len(sizeEvictedEntries) == 0 { + return nil + } + + if err := d.db.deleteAll(sizeEvictedEntries); err != nil { + return errors.Errorf("failed to delete size-evicted metadata: %w", err) } return nil diff --git a/internal/cache/disk_metadb.go b/internal/cache/disk_metadb.go index 9b00768..d27a73b 100644 --- a/internal/cache/disk_metadb.go +++ b/internal/cache/disk_metadb.go @@ -1,6 +1,7 @@ package cache import ( + "bytes" "encoding/json" "io/fs" "net/http" @@ -22,6 +23,16 @@ type diskMetaDB struct { db *bbolt.DB } +// compositeKey creates a unique database key from namespace and cache key. +func compositeKey(namespace string, key Key) []byte { + if namespace == "" { + return key[:] + } + // Format: "namespace/hexkey" + hexKey := key.String() + return []byte(namespace + "/" + hexKey) +} + // newDiskMetaDB creates a new bbolt-backed metadata storage for the disk cache. 
func newDiskMetaDB(dbPath string) (*diskMetaDB, error) { db, err := bbolt.Open(dbPath, 0600, &bbolt.Options{ @@ -49,15 +60,16 @@ func newDiskMetaDB(dbPath string) (*diskMetaDB, error) { return &diskMetaDB{db: db}, nil } -func (s *diskMetaDB) setTTL(key Key, expiresAt time.Time) error { +func (s *diskMetaDB) setTTL(namespace string, key Key, expiresAt time.Time) error { ttlBytes, err := expiresAt.MarshalBinary() if err != nil { return errors.Errorf("failed to marshal TTL: %w", err) } + dbKey := compositeKey(namespace, key) return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) - return errors.WithStack(ttlBucket.Put(key[:], ttlBytes)) + return errors.WithStack(ttlBucket.Put(dbKey, ttlBytes)) })) } @@ -72,27 +84,29 @@ func (s *diskMetaDB) set(key Key, namespace string, expiresAt time.Time, headers return errors.Errorf("failed to encode headers: %w", err) } + dbKey := compositeKey(namespace, key) return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) - if err := ttlBucket.Put(key[:], ttlBytes); err != nil { + if err := ttlBucket.Put(dbKey, ttlBytes); err != nil { return errors.WithStack(err) } headersBucket := tx.Bucket(headersBucketName) - if err := headersBucket.Put(key[:], headersBytes); err != nil { + if err := headersBucket.Put(dbKey, headersBytes); err != nil { return errors.WithStack(err) } namespaceBucket := tx.Bucket(namespaceBucketName) - return errors.WithStack(namespaceBucket.Put(key[:], []byte(namespace))) + return errors.WithStack(namespaceBucket.Put(dbKey, []byte(namespace))) })) } -func (s *diskMetaDB) getTTL(key Key) (time.Time, error) { +func (s *diskMetaDB) getTTL(namespace string, key Key) (time.Time, error) { var expiresAt time.Time + dbKey := compositeKey(namespace, key) err := s.db.View(func(tx *bbolt.Tx) error { bucket := tx.Bucket(ttlBucketName) - ttlBytes := bucket.Get(key[:]) + ttlBytes := bucket.Get(dbKey) if ttlBytes == nil { return fs.ErrNotExist } 
@@ -101,11 +115,12 @@ func (s *diskMetaDB) getTTL(key Key) (time.Time, error) { return expiresAt, errors.WithStack(err) } -func (s *diskMetaDB) getHeaders(key Key) (http.Header, error) { +func (s *diskMetaDB) getHeaders(namespace string, key Key) (http.Header, error) { var headers http.Header + dbKey := compositeKey(namespace, key) err := s.db.View(func(tx *bbolt.Tx) error { bucket := tx.Bucket(headersBucketName) - headersBytes := bucket.Get(key[:]) + headersBytes := bucket.Get(dbKey) if headersBytes == nil { return fs.ErrNotExist } @@ -114,25 +129,26 @@ func (s *diskMetaDB) getHeaders(key Key) (http.Header, error) { return headers, errors.WithStack(err) } -func (s *diskMetaDB) delete(key Key) error { +func (s *diskMetaDB) delete(namespace string, key Key) error { + dbKey := compositeKey(namespace, key) return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) - if err := ttlBucket.Delete(key[:]); err != nil { + if err := ttlBucket.Delete(dbKey); err != nil { return errors.WithStack(err) } headersBucket := tx.Bucket(headersBucketName) - if err := headersBucket.Delete(key[:]); err != nil { + if err := headersBucket.Delete(dbKey); err != nil { return errors.WithStack(err) } namespaceBucket := tx.Bucket(namespaceBucketName) - return errors.WithStack(namespaceBucket.Delete(key[:])) + return errors.WithStack(namespaceBucket.Delete(dbKey)) })) } -func (s *diskMetaDB) deleteAll(keys []Key) error { - if len(keys) == 0 { +func (s *diskMetaDB) deleteAll(entries []evictEntryKey) error { + if len(entries) == 0 { return nil } return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { @@ -140,14 +156,15 @@ func (s *diskMetaDB) deleteAll(keys []Key) error { headersBucket := tx.Bucket(headersBucketName) namespaceBucket := tx.Bucket(namespaceBucketName) - for _, key := range keys { - if err := ttlBucket.Delete(key[:]); err != nil { + for _, entry := range entries { + dbKey := compositeKey(entry.namespace, entry.key) + if err := 
ttlBucket.Delete(dbKey); err != nil { return errors.Errorf("failed to delete TTL: %w", err) } - if err := headersBucket.Delete(key[:]); err != nil { + if err := headersBucket.Delete(dbKey); err != nil { return errors.Errorf("failed to delete headers: %w", err) } - if err := namespaceBucket.Delete(key[:]); err != nil { + if err := namespaceBucket.Delete(dbKey); err != nil { return errors.Errorf("failed to delete namespace: %w", err) } } @@ -161,23 +178,35 @@ func (s *diskMetaDB) walk(fn func(key Key, namespace string, expiresAt time.Time if ttlBucket == nil { return nil } - namespaceBucket := tx.Bucket(namespaceBucketName) return ttlBucket.ForEach(func(k, v []byte) error { - if len(k) != 32 { + var namespace string + var key Key + + // Check format: composite "namespace/hexkey" or raw 32-byte key + slashIdx := bytes.IndexByte(k, '/') + switch { + case slashIdx >= 0: + // Composite key: "namespace/hexkey" + namespace = string(k[:slashIdx]) + hexKey := string(k[slashIdx+1:]) + if len(hexKey) != 64 { + return nil + } + if err := key.UnmarshalText([]byte(hexKey)); err != nil { + return nil //nolint:nilerr + } + case len(k) == 32: + // Raw key (empty namespace) + copy(key[:], k) + default: return nil } - var key Key - copy(key[:], k) + var expiresAt time.Time if err := expiresAt.UnmarshalBinary(v); err != nil { return nil //nolint:nilerr } - namespace := "" - if namespaceBucket != nil { - if namespaceBytes := namespaceBucket.Get(k); namespaceBytes != nil { - namespace = string(namespaceBytes) - } - } + return fn(key, namespace, expiresAt) }) })) diff --git a/internal/cache/disk_namespace_test.go b/internal/cache/disk_namespace_test.go new file mode 100644 index 0000000..597922a --- /dev/null +++ b/internal/cache/disk_namespace_test.go @@ -0,0 +1,148 @@ +package cache_test + +import ( + "log/slog" + "testing" + "time" + + "github.com/alecthomas/assert/v2" + + "github.com/block/cachew/internal/cache" + "github.com/block/cachew/internal/logging" +) + +func 
TestDiskNamespaceIsolation(t *testing.T) { + dir := t.TempDir() + _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) + + // Create base cache + baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ + Root: dir, + MaxTTL: time.Hour, + }) + assert.NoError(t, err) + defer baseCache.Close() + + // Create namespace views + gitCache := baseCache.Namespace("git") + gomodCache := baseCache.Namespace("gomod") + + // Create entries in different namespaces with same key + key := cache.NewKey("same-key") + + // Write to git namespace + w, err := gitCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("git data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Write to gomod namespace + w, err = gomodCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("gomod data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Verify isolation - each namespace returns its own data + r, _, err := gitCache.Open(ctx, key) + assert.NoError(t, err) + gitData := make([]byte, 8) + n, _ := r.Read(gitData) + assert.Equal(t, "git data", string(gitData[:n])) + assert.NoError(t, r.Close()) + + r, _, err = gomodCache.Open(ctx, key) + assert.NoError(t, err) + gomodData := make([]byte, 10) + n, _ = r.Read(gomodData) + assert.Equal(t, "gomod data", string(gomodData[:n])) + assert.NoError(t, r.Close()) +} + +func TestDiskListNamespaces(t *testing.T) { + dir := t.TempDir() + _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) + + baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ + Root: dir, + MaxTTL: time.Hour, + }) + assert.NoError(t, err) + defer baseCache.Close() + + // Initially no namespaces + namespaces, err := baseCache.ListNamespaces(ctx) + assert.NoError(t, err) + assert.Equal(t, 0, len(namespaces)) + + // Create entries in different namespaces + gitCache := baseCache.Namespace("git") + gomodCache := baseCache.Namespace("gomod") + 
hermitCache := baseCache.Namespace("hermit") + + for i, c := range []cache.Cache{gitCache, gomodCache, hermitCache} { + w, err := c.Create(ctx, cache.NewKey(string(rune(i))), nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + } + + // Verify all namespaces are listed + namespaces, err = baseCache.ListNamespaces(ctx) + assert.NoError(t, err) + assert.Equal(t, 3, len(namespaces)) + + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + assert.True(t, nsMap["git"]) + assert.True(t, nsMap["gomod"]) + assert.True(t, nsMap["hermit"]) +} + +func TestDiskNamespaceDelete(t *testing.T) { + dir := t.TempDir() + _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) + + baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ + Root: dir, + MaxTTL: time.Hour, + }) + assert.NoError(t, err) + defer baseCache.Close() + + gitCache := baseCache.Namespace("git") + gomodCache := baseCache.Namespace("gomod") + + key := cache.NewKey("test-key") + + // Create entry in git namespace + w, err := gitCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("git data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Create entry in gomod namespace + w, err = gomodCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("gomod data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Delete from git namespace + err = gitCache.Delete(ctx, key) + assert.NoError(t, err) + + // Verify git entry is gone + _, _, err = gitCache.Open(ctx, key) + assert.Error(t, err) + + // Verify gomod entry still exists + r, _, err := gomodCache.Open(ctx, key) + assert.NoError(t, err) + assert.NoError(t, r.Close()) +} diff --git a/internal/cache/memory.go b/internal/cache/memory.go index 2ab8e4d..10a553f 100644 --- a/internal/cache/memory.go +++ b/internal/cache/memory.go @@ -9,6 
+9,7 @@ import ( "net/http" "os" "sync" + "sync/atomic" "time" "github.com/alecthomas/errors" @@ -39,16 +40,18 @@ type memoryEntry struct { type Memory struct { config MemoryConfig namespace string - mu sync.RWMutex + mu *sync.RWMutex entries map[string]map[Key]*memoryEntry // namespace -> key -> entry - currentSize int64 + currentSize *atomic.Int64 } func NewMemory(ctx context.Context, config MemoryConfig) (*Memory, error) { logging.FromContext(ctx).InfoContext(ctx, "Constructing in-memory Cache", "limit-mb", config.LimitMB, "max-ttl", config.MaxTTL) return &Memory{ - config: config, - entries: make(map[string]map[Key]*memoryEntry), + config: config, + mu: &sync.RWMutex{}, + entries: make(map[string]map[Key]*memoryEntry), + currentSize: &atomic.Int64{}, }, nil } @@ -135,7 +138,7 @@ func (m *Memory) Delete(_ context.Context, key Key) error { if !exists { return os.ErrNotExist } - m.currentSize -= int64(len(entry.data)) + m.currentSize.Add(-int64(len(entry.data))) delete(nsEntries, key) return nil } @@ -159,7 +162,7 @@ func (m *Memory) Stats(_ context.Context) (Stats, error) { return Stats{ Objects: totalObjects, - Size: m.currentSize, + Size: m.currentSize.Load(), Capacity: int64(m.config.LimitMB) * 1024 * 1024, }, nil } @@ -198,7 +201,7 @@ func (m *Memory) evictOldest(neededSpace int64) { if freedSpace >= neededSpace { break } - m.currentSize -= e.size + m.currentSize.Add(-e.size) delete(m.entries[e.namespace], e.key) freedSpace += e.size } @@ -253,13 +256,13 @@ func (w *memoryWriter) Close() error { // Evict entries if needed to make room if limitBytes > 0 { - neededSpace := w.cache.currentSize - oldSize + newSize - limitBytes + neededSpace := w.cache.currentSize.Load() - oldSize + newSize - limitBytes if neededSpace > 0 { w.cache.evictOldest(neededSpace) } } - w.cache.currentSize -= oldSize + w.cache.currentSize.Add(-oldSize) // Copy the buffer data to avoid holding a reference to the buffer's internal slice data := make([]byte, w.buf.Len()) copy(data, 
w.buf.Bytes()) @@ -269,7 +272,7 @@ func (w *memoryWriter) Close() error { expiresAt: w.expiresAt, headers: w.headers, } - w.cache.currentSize += newSize + w.cache.currentSize.Add(newSize) return nil } diff --git a/internal/cache/remote.go b/internal/cache/remote.go index 9cf25c5..7b0cc2d 100644 --- a/internal/cache/remote.go +++ b/internal/cache/remote.go @@ -232,7 +232,28 @@ func (c *Remote) Namespace(_ string) Cache { } // ListNamespaces requests namespace list from the remote server. -func (c *Remote) ListNamespaces(_ context.Context) ([]string, error) { - // TODO: Could add an API endpoint for this - return nil, ErrStatsUnavailable +func (c *Remote) ListNamespaces(ctx context.Context) ([]string, error) { + url := c.baseURL + "/namespaces" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, errors.WithStack(err) + } + + resp, err := c.client.Do(req) + if err != nil { + return nil, errors.WithStack(err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return nil, errors.Errorf("unexpected status %d: %s", resp.StatusCode, body) + } + + var namespaces []string + if err := json.NewDecoder(resp.Body).Decode(&namespaces); err != nil { + return nil, errors.WithStack(err) + } + + return namespaces, nil } diff --git a/internal/config/config.go b/internal/config/config.go index 98241fa..0c512ea 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -132,9 +132,10 @@ func Load( // Second pass, instantiate strategies and bind them to the mux. 
for _, block := range strategyCandidates { - logger := logger.With("strategy", block.Name) + strategy := block.Name + logger := logger.With("strategy", strategy) mlog := &loggingMux{logger: logger, mux: mux} - _, err := sr.Create(ctx, block.Name, block, cache, mlog, vars) + _, err := sr.Create(ctx, strategy, block, cache, mlog, vars) if err != nil { return errors.Errorf("%s: %w", block.Pos, err) } diff --git a/internal/strategy/apiv1.go b/internal/strategy/apiv1.go index d92b0b9..de7d994 100644 --- a/internal/strategy/apiv1.go +++ b/internal/strategy/apiv1.go @@ -37,6 +37,7 @@ func NewAPIV1(ctx context.Context, _ struct{}, cache cache.Cache, mux Mux) (*API mux.Handle("POST /api/v1/object/{key}", http.HandlerFunc(s.putObject)) mux.Handle("DELETE /api/v1/object/{key}", http.HandlerFunc(s.deleteObject)) mux.Handle("GET /api/v1/stats", http.HandlerFunc(s.getStats)) + mux.Handle("GET /api/v1/namespaces", http.HandlerFunc(s.getNamespaces)) return s, nil } @@ -163,6 +164,19 @@ func (d *APIV1) getStats(w http.ResponseWriter, r *http.Request) { } } +func (d *APIV1) getNamespaces(w http.ResponseWriter, r *http.Request) { + namespaces, err := d.cache.ListNamespaces(r.Context()) + if err != nil { + d.httpError(w, http.StatusInternalServerError, err, "Failed to list namespaces") + return + } + + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(namespaces); err != nil { + d.logger.Error("Failed to encode namespaces response", slog.String("error", err.Error())) + } +} + func (d *APIV1) httpError(w http.ResponseWriter, code int, err error, message string, args ...any) { args = append(args, slog.String("error", err.Error())) d.logger.Error(message, args...) 
From 0f1fc8f599c84123072323dab00020903ca0353f Mon Sep 17 00:00:00 2001 From: Neha Sherpa Date: Fri, 20 Feb 2026 11:30:35 -0800 Subject: [PATCH 4/5] fix: Address comments --- cmd/cachew/main.go | 65 ++++++----- internal/cache/cachetest/suite.go | 130 ++++++++++++++++++++++ internal/cache/disk.go | 10 +- internal/cache/disk_metadb.go | 114 +++++++++++--------- internal/cache/disk_namespace_test.go | 148 -------------------------- internal/cache/remote.go | 43 ++++++-- internal/cache/s3.go | 1 - internal/cache/tiered.go | 2 + internal/strategy/apiv1.go | 24 +++-- 9 files changed, 281 insertions(+), 256 deletions(-) delete mode 100644 internal/cache/disk_namespace_test.go diff --git a/cmd/cachew/main.go b/cmd/cachew/main.go index 8b181c4..b454b2f 100644 --- a/cmd/cachew/main.go +++ b/cmd/cachew/main.go @@ -21,17 +21,16 @@ import ( type CLI struct { LoggingConfig logging.Config `embed:"" prefix:"log-"` - URL string `help:"Remote cache server URL." default:"http://127.0.0.1:8080"` - Namespace string `help:"Namespace for organizing cache objects." default:""` - Platform bool `help:"Prefix keys with platform ($${os}-$${arch}-)."` - Daily bool `help:"Prefix keys with date ($${YYYY}-$${MM}-$${DD}-). Mutually exclusive with --hourly." xor:"timeprefix"` - Hourly bool `help:"Prefix keys with date and hour ($${YYYY}-$${MM}-$${DD}-$${HH}-). Mutually exclusive with --daily." xor:"timeprefix"` - - Get GetCmd `cmd:"" help:"Download object from cache." group:"Operations:"` - Stat StatCmd `cmd:"" help:"Show metadata for cached object." group:"Operations:"` - Put PutCmd `cmd:"" help:"Upload object to cache." group:"Operations:"` - Delete DeleteCmd `cmd:"" help:"Remove object from cache." group:"Operations:"` - ListNamespaces ListNamespacesCmd `cmd:"" help:"List available namespaces in cache." group:"Operations:"` + URL string `help:"Remote cache server URL." 
default:"http://127.0.0.1:8080"` + Platform bool `help:"Prefix keys with platform ($${os}-$${arch}-)."` + Daily bool `help:"Prefix keys with date ($${YYYY}-$${MM}-$${DD}-). Mutually exclusive with --hourly." xor:"timeprefix"` + Hourly bool `help:"Prefix keys with date and hour ($${YYYY}-$${MM}-$${DD}-$${HH}-). Mutually exclusive with --daily." xor:"timeprefix"` + + Get GetCmd `cmd:"" help:"Download object from cache." group:"Operations:"` + Stat StatCmd `cmd:"" help:"Show metadata for cached object." group:"Operations:"` + Put PutCmd `cmd:"" help:"Upload object to cache." group:"Operations:"` + Delete DeleteCmd `cmd:"" help:"Remove object from cache." group:"Operations:"` + Namespaces NamespacesCmd `cmd:"" help:"List available namespaces in cache." group:"Operations:"` Snapshot SnapshotCmd `cmd:"" help:"Create compressed archive of directory and upload." group:"Snapshots:"` Restore RestoreCmd `cmd:"" help:"Download and extract archive to directory." group:"Snapshots:"` @@ -52,14 +51,16 @@ func main() { } type GetCmd struct { - Key PlatformKey `arg:"" help:"Object key (hex or string)."` - Output *os.File `short:"o" help:"Output file (default: stdout)." default:"-"` + Namespace string `arg:"" help:"Namespace for organizing cache objects."` + Key PlatformKey `arg:"" help:"Object key (hex or string)."` + Output *os.File `short:"o" help:"Output file (default: stdout)." 
default:"-"` } func (c *GetCmd) Run(ctx context.Context, cache cache.Cache) error { defer c.Output.Close() - rc, headers, err := cache.Open(ctx, c.Key.Key()) + namespacedCache := cache.Namespace(c.Namespace) + rc, headers, err := namespacedCache.Open(ctx, c.Key.Key()) if err != nil { return errors.Wrap(err, "failed to open object") } @@ -76,11 +77,13 @@ func (c *GetCmd) Run(ctx context.Context, cache cache.Cache) error { } type StatCmd struct { - Key PlatformKey `arg:"" help:"Object key (hex or string)."` + Namespace string `arg:"" help:"Namespace for organizing cache objects."` + Key PlatformKey `arg:"" help:"Object key (hex or string)."` } func (c *StatCmd) Run(ctx context.Context, cache cache.Cache) error { - headers, err := cache.Stat(ctx, c.Key.Key()) + namespacedCache := cache.Namespace(c.Namespace) + headers, err := namespacedCache.Stat(ctx, c.Key.Key()) if err != nil { return errors.Wrap(err, "failed to stat object") } @@ -95,10 +98,11 @@ func (c *StatCmd) Run(ctx context.Context, cache cache.Cache) error { } type PutCmd struct { - Key PlatformKey `arg:"" help:"Object key (hex or string)."` - Input *os.File `arg:"" help:"Input file (default: stdin)." default:"-"` - TTL time.Duration `help:"Time to live for the object."` - Headers map[string]string `short:"H" help:"Additional headers (key=value)."` + Namespace string `arg:"" help:"Namespace for organizing cache objects."` + Key PlatformKey `arg:"" help:"Object key (hex or string)."` + Input *os.File `arg:"" help:"Input file (default: stdin)." 
default:"-"` + TTL time.Duration `help:"Time to live for the object."` + Headers map[string]string `short:"H" help:"Additional headers (key=value)."` } func (c *PutCmd) Run(ctx context.Context, cache cache.Cache) error { @@ -113,7 +117,8 @@ func (c *PutCmd) Run(ctx context.Context, cache cache.Cache) error { headers.Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(filename))) //nolint:perfsprint } - wc, err := cache.Create(ctx, c.Key.Key(), headers, c.TTL) + namespacedCache := cache.Namespace(c.Namespace) + wc, err := namespacedCache.Create(ctx, c.Key.Key(), headers, c.TTL) if err != nil { return errors.Wrap(err, "failed to create object") } @@ -126,16 +131,18 @@ func (c *PutCmd) Run(ctx context.Context, cache cache.Cache) error { } type DeleteCmd struct { - Key PlatformKey `arg:"" help:"Object key (hex or string)."` + Namespace string `arg:"" help:"Namespace for organizing cache objects."` + Key PlatformKey `arg:"" help:"Object key (hex or string)."` } func (c *DeleteCmd) Run(ctx context.Context, cache cache.Cache) error { - return errors.Wrap(cache.Delete(ctx, c.Key.Key()), "failed to delete object") + namespacedCache := cache.Namespace(c.Namespace) + return errors.Wrap(namespacedCache.Delete(ctx, c.Key.Key()), "failed to delete object") } -type ListNamespacesCmd struct{} +type NamespacesCmd struct{} -func (c *ListNamespacesCmd) Run(ctx context.Context, cache cache.Cache) error { +func (c *NamespacesCmd) Run(ctx context.Context, cache cache.Cache) error { namespaces, err := cache.ListNamespaces(ctx) if err != nil { return errors.Wrap(err, "failed to list namespaces") @@ -153,6 +160,7 @@ func (c *ListNamespacesCmd) Run(ctx context.Context, cache cache.Cache) error { } type SnapshotCmd struct { + Namespace string `arg:"" help:"Namespace for organizing cache objects."` Key PlatformKey `arg:"" help:"Object key (hex or string)."` Directory string `arg:"" help:"Directory to archive." 
type:"path"` TTL time.Duration `help:"Time to live for the object."` @@ -161,7 +169,8 @@ type SnapshotCmd struct { func (c *SnapshotCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Archiving %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Create(ctx, cache, c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { + namespacedCache := cache.Namespace(c.Namespace) + if err := snapshot.Create(ctx, namespacedCache, c.Key.Key(), c.Directory, c.TTL, c.Exclude); err != nil { return errors.Wrap(err, "failed to create snapshot") } @@ -170,13 +179,15 @@ func (c *SnapshotCmd) Run(ctx context.Context, cache cache.Cache) error { } type RestoreCmd struct { + Namespace string `arg:"" help:"Namespace for organizing cache objects."` Key PlatformKey `arg:"" help:"Object key (hex or string)."` Directory string `arg:"" help:"Target directory for extraction." type:"path"` } func (c *RestoreCmd) Run(ctx context.Context, cache cache.Cache) error { fmt.Fprintf(os.Stderr, "Restoring to %s...\n", c.Directory) //nolint:forbidigo - if err := snapshot.Restore(ctx, cache, c.Key.Key(), c.Directory); err != nil { + namespacedCache := cache.Namespace(c.Namespace) + if err := snapshot.Restore(ctx, namespacedCache, c.Key.Key(), c.Directory); err != nil { return errors.Wrap(err, "failed to restore snapshot") } diff --git a/internal/cache/cachetest/suite.go b/internal/cache/cachetest/suite.go index db43d3c..217e57e 100644 --- a/internal/cache/cachetest/suite.go +++ b/internal/cache/cachetest/suite.go @@ -2,6 +2,7 @@ package cachetest import ( "context" + "errors" "io" "net/http" "os" @@ -55,6 +56,18 @@ func Suite(t *testing.T, newCache func(t *testing.T) cache.Cache) { t.Run("LastModified", func(t *testing.T) { testLastModified(t, newCache(t)) }) + + t.Run("NamespaceIsolation", func(t *testing.T) { + testNamespaceIsolation(t, newCache(t)) + }) + + t.Run("ListNamespaces", func(t *testing.T) { + testListNamespaces(t, newCache(t)) + }) + + 
t.Run("NamespaceDelete", func(t *testing.T) { + testNamespaceDelete(t, newCache(t)) + }) } func testCreateAndOpen(t *testing.T, c cache.Cache) { @@ -329,3 +342,120 @@ func testLastModified(t *testing.T, c cache.Cache) { assert.Equal(t, explicitTime.Format(http.TimeFormat), headers2.Get("Last-Modified")) } + +func testNamespaceIsolation(t *testing.T, c cache.Cache) { + defer c.Close() + ctx := t.Context() + + // Create namespace views + gitCache := c.Namespace("git") + gomodCache := c.Namespace("gomod") + + // Create entries in different namespaces with same key + key := cache.NewKey("same-key") + + // Write to git namespace + w, err := gitCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("git data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Write to gomod namespace + w, err = gomodCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("gomod data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Verify isolation - each namespace returns its own data + r, _, err := gitCache.Open(ctx, key) + assert.NoError(t, err) + gitData, err := io.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, "git data", string(gitData)) + assert.NoError(t, r.Close()) + + r, _, err = gomodCache.Open(ctx, key) + assert.NoError(t, err) + gomodData, err := io.ReadAll(r) + assert.NoError(t, err) + assert.Equal(t, "gomod data", string(gomodData)) + assert.NoError(t, r.Close()) +} + +func testListNamespaces(t *testing.T, c cache.Cache) { + defer c.Close() + ctx := t.Context() + + // Initially no namespaces + namespaces, err := c.ListNamespaces(ctx) + if errors.Is(err, cache.ErrStatsUnavailable) { + t.Skip("Cache does not support ListNamespaces") + } + assert.NoError(t, err) + assert.Equal(t, 0, len(namespaces)) + + // Create entries in different namespaces + gitCache := c.Namespace("git") + gomodCache := c.Namespace("gomod") + hermitCache := c.Namespace("hermit") + + for i, cacheNS 
:= range []cache.Cache{gitCache, gomodCache, hermitCache} { + w, err := cacheNS.Create(ctx, cache.NewKey(string(rune('a'+i))), nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + } + + // Verify all namespaces are listed + namespaces, err = c.ListNamespaces(ctx) + assert.NoError(t, err) + assert.Equal(t, 3, len(namespaces)) + + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + assert.True(t, nsMap["git"]) + assert.True(t, nsMap["gomod"]) + assert.True(t, nsMap["hermit"]) +} + +func testNamespaceDelete(t *testing.T, c cache.Cache) { + defer c.Close() + ctx := t.Context() + + gitCache := c.Namespace("git") + gomodCache := c.Namespace("gomod") + + key := cache.NewKey("test-key") + + // Create entry in git namespace + w, err := gitCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("git data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Create entry in gomod namespace + w, err = gomodCache.Create(ctx, key, nil, time.Hour) + assert.NoError(t, err) + _, err = w.Write([]byte("gomod data")) + assert.NoError(t, err) + assert.NoError(t, w.Close()) + + // Delete from git namespace + err = gitCache.Delete(ctx, key) + assert.NoError(t, err) + + // Verify git entry is gone + _, _, err = gitCache.Open(ctx, key) + assert.IsError(t, err, os.ErrNotExist) + + // Verify gomod entry still exists + r, _, err := gomodCache.Open(ctx, key) + assert.NoError(t, err) + assert.NoError(t, r.Close()) +} diff --git a/internal/cache/disk.go b/internal/cache/disk.go index b668af7..79e7fca 100644 --- a/internal/cache/disk.go +++ b/internal/cache/disk.go @@ -414,15 +414,7 @@ func (d *Disk) evictBySize(remainingFiles []evictFileInfo) error { d.size.Add(-f.size) } - if len(sizeEvictedEntries) == 0 { - return nil - } - - if err := d.db.deleteAll(sizeEvictedEntries); err != nil { - return errors.Errorf("failed to delete 
size-evicted metadata: %w", err) - } - - return nil + return d.deleteExpiredEntries(sizeEvictedEntries) } type diskWriter struct { diff --git a/internal/cache/disk_metadb.go b/internal/cache/disk_metadb.go index d27a73b..a7fbc13 100644 --- a/internal/cache/disk_metadb.go +++ b/internal/cache/disk_metadb.go @@ -5,6 +5,8 @@ import ( "encoding/json" "io/fs" "net/http" + "sort" + "sync" "time" "github.com/alecthomas/errors" @@ -13,14 +15,14 @@ import ( //nolint:gochecknoglobals var ( - ttlBucketName = []byte("ttl") - headersBucketName = []byte("headers") - namespaceBucketName = []byte("namespace") + ttlBucketName = []byte("ttl") + headersBucketName = []byte("headers") ) // diskMetaDB manages expiration times and headers for cache entries using bbolt. type diskMetaDB struct { - db *bbolt.DB + db *bbolt.DB + namespacesCache sync.Map // map[string]bool - concurrent-safe } // compositeKey creates a unique database key from namespace and cache key. @@ -49,15 +51,31 @@ func newDiskMetaDB(dbPath string) (*diskMetaDB, error) { if _, err := tx.CreateBucketIfNotExists(headersBucketName); err != nil { return errors.WithStack(err) } - if _, err := tx.CreateBucketIfNotExists(namespaceBucketName); err != nil { - return errors.WithStack(err) - } return nil }); err != nil { return nil, errors.Join(errors.Errorf("failed to create buckets: %w", err), db.Close()) } - return &diskMetaDB{db: db}, nil + // Initialize in-memory namespace cache by scanning existing entries + metaDB := &diskMetaDB{db: db} + err = db.View(func(tx *bbolt.Tx) error { + ttlBucket := tx.Bucket(ttlBucketName) + if ttlBucket == nil { + return nil + } + return ttlBucket.ForEach(func(k, _ []byte) error { + namespace, _, found := bytes.Cut(k, []byte("/")) + if found && len(namespace) > 0 { + metaDB.namespacesCache.Store(string(namespace), true) + } + return nil + }) + }) + if err != nil { + return nil, errors.Join(errors.Errorf("failed to initialize namespace cache: %w", err), db.Close()) + } + + return metaDB, nil } 
func (s *diskMetaDB) setTTL(namespace string, key Key, expiresAt time.Time) error { @@ -67,10 +85,20 @@ func (s *diskMetaDB) setTTL(namespace string, key Key, expiresAt time.Time) erro } dbKey := compositeKey(namespace, key) - return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { + err = s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) return errors.WithStack(ttlBucket.Put(dbKey, ttlBytes)) - })) + }) + if err != nil { + return errors.WithStack(err) + } + + // Add namespace to in-memory cache + if namespace != "" { + s.namespacesCache.Store(namespace, true) + } + + return nil } func (s *diskMetaDB) set(key Key, namespace string, expiresAt time.Time, headers http.Header) error { @@ -85,20 +113,25 @@ func (s *diskMetaDB) set(key Key, namespace string, expiresAt time.Time, headers } dbKey := compositeKey(namespace, key) - return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { + err = s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) if err := ttlBucket.Put(dbKey, ttlBytes); err != nil { return errors.WithStack(err) } headersBucket := tx.Bucket(headersBucketName) - if err := headersBucket.Put(dbKey, headersBytes); err != nil { - return errors.WithStack(err) - } + return errors.WithStack(headersBucket.Put(dbKey, headersBytes)) + }) + if err != nil { + return errors.WithStack(err) + } - namespaceBucket := tx.Bucket(namespaceBucketName) - return errors.WithStack(namespaceBucket.Put(dbKey, []byte(namespace))) - })) + // Add namespace to in-memory cache + if namespace != "" { + s.namespacesCache.Store(namespace, true) + } + + return nil } func (s *diskMetaDB) getTTL(namespace string, key Key) (time.Time, error) { @@ -138,12 +171,7 @@ func (s *diskMetaDB) delete(namespace string, key Key) error { } headersBucket := tx.Bucket(headersBucketName) - if err := headersBucket.Delete(dbKey); err != nil { - return errors.WithStack(err) - } - - namespaceBucket := tx.Bucket(namespaceBucketName) - return 
errors.WithStack(namespaceBucket.Delete(dbKey)) + return errors.WithStack(headersBucket.Delete(dbKey)) })) } @@ -154,7 +182,6 @@ func (s *diskMetaDB) deleteAll(entries []evictEntryKey) error { return errors.WithStack(s.db.Update(func(tx *bbolt.Tx) error { ttlBucket := tx.Bucket(ttlBucketName) headersBucket := tx.Bucket(headersBucketName) - namespaceBucket := tx.Bucket(namespaceBucketName) for _, entry := range entries { dbKey := compositeKey(entry.namespace, entry.key) @@ -164,9 +191,6 @@ func (s *diskMetaDB) deleteAll(entries []evictEntryKey) error { if err := headersBucket.Delete(dbKey); err != nil { return errors.Errorf("failed to delete headers: %w", err) } - if err := namespaceBucket.Delete(dbKey); err != nil { - return errors.Errorf("failed to delete namespace: %w", err) - } } return nil })) @@ -183,16 +207,15 @@ func (s *diskMetaDB) walk(fn func(key Key, namespace string, expiresAt time.Time var key Key // Check format: composite "namespace/hexkey" or raw 32-byte key - slashIdx := bytes.IndexByte(k, '/') + before, after, found := bytes.Cut(k, []byte("/")) switch { - case slashIdx >= 0: + case found: // Composite key: "namespace/hexkey" - namespace = string(k[:slashIdx]) - hexKey := string(k[slashIdx+1:]) - if len(hexKey) != 64 { + namespace = string(before) + if len(after) != 64 { return nil } - if err := key.UnmarshalText([]byte(hexKey)); err != nil { + if err := key.UnmarshalText(after); err != nil { return nil //nolint:nilerr } case len(k) == 32: @@ -233,26 +256,13 @@ func (s *diskMetaDB) close() error { } func (s *diskMetaDB) listNamespaces() ([]string, error) { - namespaceSet := make(map[string]bool) - err := s.db.View(func(tx *bbolt.Tx) error { - namespaceBucket := tx.Bucket(namespaceBucketName) - if namespaceBucket == nil { - return nil + var namespaces []string + s.namespacesCache.Range(func(key, _ any) bool { + if ns, ok := key.(string); ok { + namespaces = append(namespaces, ns) } - return namespaceBucket.ForEach(func(_, v []byte) error { - if 
len(v) > 0 { - namespaceSet[string(v)] = true - } - return nil - }) + return true }) - if err != nil { - return nil, errors.WithStack(err) - } - - namespaces := make([]string, 0, len(namespaceSet)) - for ns := range namespaceSet { - namespaces = append(namespaces, ns) - } + sort.Strings(namespaces) return namespaces, nil } diff --git a/internal/cache/disk_namespace_test.go b/internal/cache/disk_namespace_test.go deleted file mode 100644 index 597922a..0000000 --- a/internal/cache/disk_namespace_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package cache_test - -import ( - "log/slog" - "testing" - "time" - - "github.com/alecthomas/assert/v2" - - "github.com/block/cachew/internal/cache" - "github.com/block/cachew/internal/logging" -) - -func TestDiskNamespaceIsolation(t *testing.T) { - dir := t.TempDir() - _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) - - // Create base cache - baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ - Root: dir, - MaxTTL: time.Hour, - }) - assert.NoError(t, err) - defer baseCache.Close() - - // Create namespace views - gitCache := baseCache.Namespace("git") - gomodCache := baseCache.Namespace("gomod") - - // Create entries in different namespaces with same key - key := cache.NewKey("same-key") - - // Write to git namespace - w, err := gitCache.Create(ctx, key, nil, time.Hour) - assert.NoError(t, err) - _, err = w.Write([]byte("git data")) - assert.NoError(t, err) - assert.NoError(t, w.Close()) - - // Write to gomod namespace - w, err = gomodCache.Create(ctx, key, nil, time.Hour) - assert.NoError(t, err) - _, err = w.Write([]byte("gomod data")) - assert.NoError(t, err) - assert.NoError(t, w.Close()) - - // Verify isolation - each namespace returns its own data - r, _, err := gitCache.Open(ctx, key) - assert.NoError(t, err) - gitData := make([]byte, 8) - n, _ := r.Read(gitData) - assert.Equal(t, "git data", string(gitData[:n])) - assert.NoError(t, r.Close()) - - r, _, err = gomodCache.Open(ctx, key) - 
assert.NoError(t, err) - gomodData := make([]byte, 10) - n, _ = r.Read(gomodData) - assert.Equal(t, "gomod data", string(gomodData[:n])) - assert.NoError(t, r.Close()) -} - -func TestDiskListNamespaces(t *testing.T) { - dir := t.TempDir() - _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) - - baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ - Root: dir, - MaxTTL: time.Hour, - }) - assert.NoError(t, err) - defer baseCache.Close() - - // Initially no namespaces - namespaces, err := baseCache.ListNamespaces(ctx) - assert.NoError(t, err) - assert.Equal(t, 0, len(namespaces)) - - // Create entries in different namespaces - gitCache := baseCache.Namespace("git") - gomodCache := baseCache.Namespace("gomod") - hermitCache := baseCache.Namespace("hermit") - - for i, c := range []cache.Cache{gitCache, gomodCache, hermitCache} { - w, err := c.Create(ctx, cache.NewKey(string(rune(i))), nil, time.Hour) - assert.NoError(t, err) - _, err = w.Write([]byte("data")) - assert.NoError(t, err) - assert.NoError(t, w.Close()) - } - - // Verify all namespaces are listed - namespaces, err = baseCache.ListNamespaces(ctx) - assert.NoError(t, err) - assert.Equal(t, 3, len(namespaces)) - - nsMap := make(map[string]bool) - for _, ns := range namespaces { - nsMap[ns] = true - } - assert.True(t, nsMap["git"]) - assert.True(t, nsMap["gomod"]) - assert.True(t, nsMap["hermit"]) -} - -func TestDiskNamespaceDelete(t *testing.T) { - dir := t.TempDir() - _, ctx := logging.Configure(t.Context(), logging.Config{Level: slog.LevelDebug}) - - baseCache, err := cache.NewDisk(ctx, cache.DiskConfig{ - Root: dir, - MaxTTL: time.Hour, - }) - assert.NoError(t, err) - defer baseCache.Close() - - gitCache := baseCache.Namespace("git") - gomodCache := baseCache.Namespace("gomod") - - key := cache.NewKey("test-key") - - // Create entry in git namespace - w, err := gitCache.Create(ctx, key, nil, time.Hour) - assert.NoError(t, err) - _, err = w.Write([]byte("git data")) - 
assert.NoError(t, err) - assert.NoError(t, w.Close()) - - // Create entry in gomod namespace - w, err = gomodCache.Create(ctx, key, nil, time.Hour) - assert.NoError(t, err) - _, err = w.Write([]byte("gomod data")) - assert.NoError(t, err) - assert.NoError(t, w.Close()) - - // Delete from git namespace - err = gitCache.Delete(ctx, key) - assert.NoError(t, err) - - // Verify git entry is gone - _, _, err = gitCache.Open(ctx, key) - assert.Error(t, err) - - // Verify gomod entry still exists - r, _, err := gomodCache.Open(ctx, key) - assert.NoError(t, err) - assert.NoError(t, r.Close()) -} diff --git a/internal/cache/remote.go b/internal/cache/remote.go index 7b0cc2d..e265dd7 100644 --- a/internal/cache/remote.go +++ b/internal/cache/remote.go @@ -13,10 +13,13 @@ import ( "github.com/alecthomas/errors" ) +const defaultNamespace = "default" + // Remote implements Cache as a client for the remote cache server. type Remote struct { - baseURL string - client *http.Client + baseURL string + client *http.Client + namespace string } var _ Cache = (*Remote)(nil) @@ -37,7 +40,11 @@ func (c *Remote) String() string { return "remote:" + c.baseURL } // Open retrieves an object from the remote. func (c *Remote) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, error) { - url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) + namespace := c.namespace + if namespace == "" { + namespace = defaultNamespace + } + url := fmt.Sprintf("%s/object/%s/%s", c.baseURL, namespace, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, nil, errors.Wrap(err, "failed to create request") @@ -66,7 +73,11 @@ func (c *Remote) Open(ctx context.Context, key Key) (io.ReadCloser, http.Header, // Stat retrieves headers for an object from the remote. 
func (c *Remote) Stat(ctx context.Context, key Key) (http.Header, error) { - url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) + namespace := c.namespace + if namespace == "" { + namespace = defaultNamespace + } + url := fmt.Sprintf("%s/object/%s/%s", c.baseURL, namespace, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil) if err != nil { return nil, errors.Wrap(err, "failed to create request") @@ -96,7 +107,11 @@ func (c *Remote) Stat(ctx context.Context, key Key) (http.Header, error) { func (c *Remote) Create(ctx context.Context, key Key, headers http.Header, ttl time.Duration) (io.WriteCloser, error) { pr, pw := io.Pipe() - url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) + namespace := c.namespace + if namespace == "" { + namespace = defaultNamespace + } + url := fmt.Sprintf("%s/object/%s/%s", c.baseURL, namespace, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, pr) if err != nil { return nil, errors.Join(errors.Wrap(err, "failed to create request"), pr.Close(), pw.Close()) @@ -136,7 +151,11 @@ func (c *Remote) Create(ctx context.Context, key Key, headers http.Header, ttl t // Delete removes an object from the remote. func (c *Remote) Delete(ctx context.Context, key Key) error { - url := fmt.Sprintf("%s/object/%s", c.baseURL, key.String()) + namespace := c.namespace + if namespace == "" { + namespace = defaultNamespace + } + url := fmt.Sprintf("%s/object/%s/%s", c.baseURL, namespace, key.String()) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { return errors.Wrap(err, "failed to create request") @@ -224,11 +243,13 @@ func (wc *writeCloser) Close() error { return nil } -// Namespace creates a namespaced view (which is ignored by remote cache). -// Remote cache ignores namespaces since the server-side API v1 handles namespacing. 
-func (c *Remote) Namespace(_ string) Cache { - // Remote cache doesn't use namespacing on client side - return c +// Namespace creates a namespaced view of the remote cache. +func (c *Remote) Namespace(namespace string) Cache { + return &Remote{ + baseURL: c.baseURL, + client: c.client, + namespace: namespace, + } } // ListNamespaces requests namespace list from the remote server. diff --git a/internal/cache/s3.go b/internal/cache/s3.go index 96ed174..9b96118 100644 --- a/internal/cache/s3.go +++ b/internal/cache/s3.go @@ -161,7 +161,6 @@ func (s *S3) keyToPath(namespace string, key Key) string { hexKey := key.String() prefix := "" - // Add strategy name as prefix if available if namespace != "" { prefix = namespace + "/" } diff --git a/internal/cache/tiered.go b/internal/cache/tiered.go index d248457..8f48a43 100644 --- a/internal/cache/tiered.go +++ b/internal/cache/tiered.go @@ -5,6 +5,7 @@ import ( "io" "net/http" "os" + "sort" "strings" "sync" "time" @@ -207,5 +208,6 @@ func (t Tiered) ListNamespaces(ctx context.Context) ([]string, error) { for ns := range namespaceSet { namespaces = append(namespaces, ns) } + sort.Strings(namespaces) return namespaces, nil } diff --git a/internal/strategy/apiv1.go b/internal/strategy/apiv1.go index de7d994..1a55dda 100644 --- a/internal/strategy/apiv1.go +++ b/internal/strategy/apiv1.go @@ -32,10 +32,10 @@ func NewAPIV1(ctx context.Context, _ struct{}, cache cache.Cache, mux Mux) (*API logger: logging.FromContext(ctx), cache: cache, } - mux.Handle("GET /api/v1/object/{key}", http.HandlerFunc(s.getObject)) - mux.Handle("HEAD /api/v1/object/{key}", http.HandlerFunc(s.statObject)) - mux.Handle("POST /api/v1/object/{key}", http.HandlerFunc(s.putObject)) - mux.Handle("DELETE /api/v1/object/{key}", http.HandlerFunc(s.deleteObject)) + mux.Handle("GET /api/v1/object/{namespace}/{key}", http.HandlerFunc(s.getObject)) + mux.Handle("HEAD /api/v1/object/{namespace}/{key}", http.HandlerFunc(s.statObject)) + mux.Handle("POST 
/api/v1/object/{namespace}/{key}", http.HandlerFunc(s.putObject)) + mux.Handle("DELETE /api/v1/object/{namespace}/{key}", http.HandlerFunc(s.deleteObject)) mux.Handle("GET /api/v1/stats", http.HandlerFunc(s.getStats)) mux.Handle("GET /api/v1/namespaces", http.HandlerFunc(s.getNamespaces)) return s, nil @@ -44,13 +44,15 @@ func NewAPIV1(ctx context.Context, _ struct{}, cache cache.Cache, mux Mux) (*API func (d *APIV1) String() string { return "default" } func (d *APIV1) statObject(w http.ResponseWriter, r *http.Request) { + namespace := r.PathValue("namespace") key, err := cache.ParseKey(r.PathValue("key")) if err != nil { d.httpError(w, http.StatusBadRequest, err, "Invalid key") return } - headers, err := d.cache.Stat(r.Context(), key) + namespacedCache := d.cache.Namespace(namespace) + headers, err := namespacedCache.Stat(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -65,13 +67,15 @@ func (d *APIV1) statObject(w http.ResponseWriter, r *http.Request) { } func (d *APIV1) getObject(w http.ResponseWriter, r *http.Request) { + namespace := r.PathValue("namespace") key, err := cache.ParseKey(r.PathValue("key")) if err != nil { d.httpError(w, http.StatusBadRequest, err, "Invalid key") return } - cr, headers, err := d.cache.Open(r.Context(), key) + namespacedCache := d.cache.Namespace(namespace) + cr, headers, err := namespacedCache.Open(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) @@ -93,6 +97,7 @@ func (d *APIV1) getObject(w http.ResponseWriter, r *http.Request) { } func (d *APIV1) putObject(w http.ResponseWriter, r *http.Request) { + namespace := r.PathValue("namespace") key, err := cache.ParseKey(r.PathValue("key")) if err != nil { d.httpError(w, http.StatusBadRequest, err, "Invalid key") @@ -112,7 +117,8 @@ func (d *APIV1) putObject(w http.ResponseWriter, r *http.Request) { // Extract and filter 
headers from request headers := cache.FilterTransportHeaders(r.Header) - cw, err := d.cache.Create(r.Context(), key, headers, ttl) + namespacedCache := d.cache.Namespace(namespace) + cw, err := namespacedCache.Create(r.Context(), key, headers, ttl) if err != nil { d.httpError(w, http.StatusInternalServerError, err, "Failed to create cache writer", slog.String("key", key.String())) return @@ -130,13 +136,15 @@ func (d *APIV1) putObject(w http.ResponseWriter, r *http.Request) { } func (d *APIV1) deleteObject(w http.ResponseWriter, r *http.Request) { + namespace := r.PathValue("namespace") key, err := cache.ParseKey(r.PathValue("key")) if err != nil { d.httpError(w, http.StatusBadRequest, err, "Invalid key") return } - err = d.cache.Delete(r.Context(), key) + namespacedCache := d.cache.Namespace(namespace) + err = namespacedCache.Delete(r.Context(), key) if err != nil { if errors.Is(err, os.ErrNotExist) { http.Error(w, "Cache object not found", http.StatusNotFound) From 3943b148664cc42bc2ea5749b5e4eb487e0a872e Mon Sep 17 00:00:00 2001 From: Neha Sherpa Date: Mon, 23 Feb 2026 16:08:50 -0800 Subject: [PATCH 5/5] fix: Address comments --- internal/cache/api.go | 2 +- internal/cache/disk_metadb.go | 31 ++++++++++++------------------- internal/cache/remote.go | 2 +- 3 files changed, 14 insertions(+), 21 deletions(-) diff --git a/internal/cache/api.go b/internal/cache/api.go index 48c533b..c5c2d72 100644 --- a/internal/cache/api.go +++ b/internal/cache/api.go @@ -176,7 +176,7 @@ type Cache interface { Delete(ctx context.Context, key Key) error // Stats returns health and usage statistics for the cache. Stats(ctx context.Context) (Stats, error) - // ListNamespaces returns all unique namespaces in the cache. + // ListNamespaces returns all unique namespaces in the cache in order. ListNamespaces(ctx context.Context) ([]string, error) // Close the Cache. 
Close() error diff --git a/internal/cache/disk_metadb.go b/internal/cache/disk_metadb.go index a7fbc13..47b28aa 100644 --- a/internal/cache/disk_metadb.go +++ b/internal/cache/disk_metadb.go @@ -26,13 +26,12 @@ type diskMetaDB struct { } // compositeKey creates a unique database key from namespace and cache key. +// Format: "namespace/hexkey" when namespace is set, or just "hexkey" when empty. func compositeKey(namespace string, key Key) []byte { if namespace == "" { - return key[:] + return []byte(key.String()) } - // Format: "namespace/hexkey" - hexKey := key.String() - return []byte(namespace + "/" + hexKey) + return []byte(namespace + "/" + key.String()) } // newDiskMetaDB creates a new bbolt-backed metadata storage for the disk cache. @@ -206,24 +205,18 @@ func (s *diskMetaDB) walk(fn func(key Key, namespace string, expiresAt time.Time var namespace string var key Key - // Check format: composite "namespace/hexkey" or raw 32-byte key - before, after, found := bytes.Cut(k, []byte("/")) - switch { - case found: - // Composite key: "namespace/hexkey" + before, hexKey, found := bytes.Cut(k, []byte("/")) + if found { namespace = string(before) - if len(after) != 64 { - return nil - } - if err := key.UnmarshalText(after); err != nil { - return nil //nolint:nilerr - } - case len(k) == 32: - // Raw key (empty namespace) - copy(key[:], k) - default: + } else { + hexKey = k + } + if len(hexKey) != 64 { return nil } + if err := key.UnmarshalText(hexKey); err != nil { + return nil //nolint:nilerr + } var expiresAt time.Time if err := expiresAt.UnmarshalBinary(v); err != nil { diff --git a/internal/cache/remote.go b/internal/cache/remote.go index e265dd7..bcd5650 100644 --- a/internal/cache/remote.go +++ b/internal/cache/remote.go @@ -13,7 +13,7 @@ import ( "github.com/alecthomas/errors" ) -const defaultNamespace = "default" +const defaultNamespace = "-" // Remote implements Cache as a client for the remote cache server. type Remote struct {