From a0ba5be19f619ac3ac5e599bd4df555c9186b91f Mon Sep 17 00:00:00 2001 From: Claude Code Date: Wed, 11 Feb 2026 20:47:54 -0800 Subject: [PATCH] feat: add compressed_size column to chunks table for CDC compression tracking The `chunks` table now includes a `compressed_size` column to track the actual size of chunks after compression. This is necessary for accurate space management and decompression planning when using Content-Addressable Storage (CAS) with Content-Defined Chunking (CDC). - Modified existing migrations for MySQL, PostgreSQL, and SQLite to include the `compressed_size` column in the `chunks` table. - Updated database schemas for all backends to reflect the new column. - Regenerated Go database code using `gen-db-wrappers`, which introduced specific row structs for the `GetChunkByNarFileIDAndIndex`, `GetChunksByNarFileID`, and `GetOrphanedChunks` queries, since their SELECT lists do not include the new column and therefore no longer match the full `chunks` table model. --- .../mysql/20260131021850_add_chunks.sql | 1 + .../postgres/20260131021850_add_chunks.sql | 1 + .../sqlite/20260131021850_add_chunks.sql | 1 + db/schema/mysql.sql | 1 + db/schema/postgres.sql | 2 + db/schema/sqlite.sql | 1 + pkg/database/generated_models.go | 35 ++++++++++-- pkg/database/generated_querier.go | 12 ++-- pkg/database/generated_wrapper_mysql.go | 24 ++++---- pkg/database/generated_wrapper_postgres.go | 26 +++++---- pkg/database/generated_wrapper_sqlite.go | 26 +++++---- pkg/database/mysqldb/models.go | 11 ++-- pkg/database/mysqldb/querier.go | 10 ++-- pkg/database/mysqldb/query.mysql.sql.go | 50 +++++++++++++---- pkg/database/postgresdb/models.go | 11 ++-- pkg/database/postgresdb/querier.go | 12 ++-- pkg/database/postgresdb/query.postgres.sql.go | 55 ++++++++++++++----- pkg/database/sqlitedb/models.go | 11 ++-- pkg/database/sqlitedb/querier.go | 12 ++-- pkg/database/sqlitedb/query.sqlite.sql.go | 55 ++++++++++++++----- sqlc.yml | 6 ++ 21 files changed, 250 insertions(+), 113 deletions(-) diff --git a/db/migrations/mysql/20260131021850_add_chunks.sql 
b/db/migrations/mysql/20260131021850_add_chunks.sql index 2dccb4e6..fcd067c0 100644 --- a/db/migrations/mysql/20260131021850_add_chunks.sql +++ b/db/migrations/mysql/20260131021850_add_chunks.sql @@ -3,6 +3,7 @@ CREATE TABLE chunks ( id BIGINT AUTO_INCREMENT PRIMARY KEY, hash VARCHAR(64) NOT NULL UNIQUE, size INT UNSIGNED NOT NULL, + compressed_size INT UNSIGNED NOT NULL DEFAULT 0, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at TIMESTAMP NULL DEFAULT NULL ); diff --git a/db/migrations/postgres/20260131021850_add_chunks.sql b/db/migrations/postgres/20260131021850_add_chunks.sql index 44de231c..f770aa2d 100644 --- a/db/migrations/postgres/20260131021850_add_chunks.sql +++ b/db/migrations/postgres/20260131021850_add_chunks.sql @@ -3,6 +3,7 @@ CREATE TABLE chunks ( id BIGSERIAL PRIMARY KEY, hash TEXT NOT NULL UNIQUE, size INTEGER NOT NULL CHECK (size >= 0), + compressed_size INTEGER NOT NULL DEFAULT 0 CHECK (compressed_size >= 0), created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at TIMESTAMPTZ ); diff --git a/db/migrations/sqlite/20260131021850_add_chunks.sql b/db/migrations/sqlite/20260131021850_add_chunks.sql index 135ab7ef..c432f18a 100644 --- a/db/migrations/sqlite/20260131021850_add_chunks.sql +++ b/db/migrations/sqlite/20260131021850_add_chunks.sql @@ -3,6 +3,7 @@ CREATE TABLE chunks ( id INTEGER PRIMARY KEY AUTOINCREMENT, hash TEXT NOT NULL UNIQUE, size INTEGER NOT NULL CHECK (size >= 0), + compressed_size INTEGER NOT NULL DEFAULT 0 CHECK (compressed_size >= 0), created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at TIMESTAMP ); diff --git a/db/schema/mysql.sql b/db/schema/mysql.sql index 00447e19..6abd2e4c 100644 --- a/db/schema/mysql.sql +++ b/db/schema/mysql.sql @@ -26,6 +26,7 @@ CREATE TABLE `chunks` ( `id` bigint(20) NOT NULL AUTO_INCREMENT, `hash` varchar(64) NOT NULL, `size` int(10) unsigned NOT NULL, + `compressed_size` int(10) unsigned NOT NULL DEFAULT 0, `created_at` timestamp NOT NULL DEFAULT 
current_timestamp(), `updated_at` timestamp NULL DEFAULT NULL, PRIMARY KEY (`id`), diff --git a/db/schema/postgres.sql b/db/schema/postgres.sql index 0aeef4b7..9c33d3e4 100644 --- a/db/schema/postgres.sql +++ b/db/schema/postgres.sql @@ -22,8 +22,10 @@ CREATE TABLE public.chunks ( id bigint NOT NULL, hash text NOT NULL, size integer NOT NULL, + compressed_size integer DEFAULT 0 NOT NULL, created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at timestamp with time zone, + CONSTRAINT chunks_compressed_size_check CHECK ((compressed_size >= 0)), CONSTRAINT chunks_size_check CHECK ((size >= 0)) ); diff --git a/db/schema/sqlite.sql b/db/schema/sqlite.sql index ed0860db..f55259dd 100644 --- a/db/schema/sqlite.sql +++ b/db/schema/sqlite.sql @@ -51,6 +51,7 @@ CREATE TABLE chunks ( id INTEGER PRIMARY KEY AUTOINCREMENT, hash TEXT NOT NULL UNIQUE, size INTEGER NOT NULL CHECK (size >= 0), + compressed_size INTEGER NOT NULL DEFAULT 0 CHECK (compressed_size >= 0), created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at TIMESTAMP ); diff --git a/pkg/database/generated_models.go b/pkg/database/generated_models.go index 6f5eb11e..9db36584 100644 --- a/pkg/database/generated_models.go +++ b/pkg/database/generated_models.go @@ -27,11 +27,12 @@ type AddNarInfoSignaturesParams struct { } type Chunk struct { - ID int64 - Hash string - Size uint32 - CreatedAt time.Time - UpdatedAt sql.NullTime + ID int64 + Hash string + Size uint32 + CompressedSize uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime } type Config struct { @@ -85,6 +86,22 @@ type GetChunkByNarFileIDAndIndexParams struct { ChunkIndex int64 } +type GetChunkByNarFileIDAndIndexRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + +type GetChunksByNarFileIDRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + type GetCompressedNarInfosParams struct { Limit int32 Offset int32 @@ -142,6 
+159,14 @@ type GetOldCompressedNarFilesParams struct { Offset int32 } +type GetOrphanedChunksRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + type GetOrphanedNarFilesRow struct { ID int64 Hash string diff --git a/pkg/database/generated_querier.go b/pkg/database/generated_querier.go index 898f2290..5571551a 100644 --- a/pkg/database/generated_querier.go +++ b/pkg/database/generated_querier.go @@ -48,7 +48,7 @@ type Querier interface { // ) // ON CONFLICT(hash) DO UPDATE SET // updated_at = CURRENT_TIMESTAMP - // RETURNING id, hash, size, created_at, updated_at + // RETURNING id, hash, size, compressed_size, created_at, updated_at CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk, error) //CreateConfig // @@ -135,13 +135,13 @@ type Querier interface { DeleteOrphanedNarInfos(ctx context.Context) (int64, error) //GetChunkByHash // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = $1 GetChunkByHash(ctx context.Context, hash string) (Chunk, error) //GetChunkByID // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = $1 GetChunkByID(ctx context.Context, id int64) (Chunk, error) @@ -151,7 +151,7 @@ type Querier interface { // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 AND nfc.chunk_index = $2 - GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) + GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) //GetChunkCount // // SELECT CAST(COUNT(*) AS BIGINT) AS count @@ -164,7 +164,7 @@ type Querier interface { // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 // ORDER BY nfc.chunk_index - 
GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) + GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) //GetCompressedNarInfos // // SELECT id, hash, created_at, updated_at, last_accessed_at, store_path, url, compression, file_hash, file_size, nar_hash, nar_size, deriver, system, ca @@ -340,7 +340,7 @@ type Querier interface { // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL - GetOrphanedChunks(ctx context.Context) ([]Chunk, error) + GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) // Find files that have no relationship to any narinfo // // SELECT nf.id, nf.hash, nf.compression, nf.file_size, nf.query, nf.created_at, nf.updated_at, nf.last_accessed_at diff --git a/pkg/database/generated_wrapper_mysql.go b/pkg/database/generated_wrapper_mysql.go index 11b2cd64..22583bf7 100644 --- a/pkg/database/generated_wrapper_mysql.go +++ b/pkg/database/generated_wrapper_mysql.go @@ -274,6 +274,8 @@ func (w *mysqlWrapper) GetChunkByHash(ctx context.Context, hash string) (Chunk, Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, @@ -302,13 +304,15 @@ func (w *mysqlWrapper) GetChunkByID(ctx context.Context, id int64) (Chunk, error Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, }, nil } -func (w *mysqlWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (w *mysqlWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetChunkByNarFileIDAndIndex(ctx, mysqldb.GetChunkByNarFileIDAndIndexParams{ @@ -318,15 +322,15 @@ func (w *mysqlWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetC if err 
!= nil { if errors.Is(err, sql.ErrNoRows) { - return Chunk{}, ErrNotFound + return GetChunkByNarFileIDAndIndexRow{}, ErrNotFound } - return Chunk{}, err + return GetChunkByNarFileIDAndIndexRow{}, err } // Convert Single Domain Struct - return Chunk{ + return GetChunkByNarFileIDAndIndexRow{ ID: res.ID, Hash: res.Hash, @@ -352,7 +356,7 @@ func (w *mysqlWrapper) GetChunkCount(ctx context.Context) (int64, error) { return res, nil } -func (w *mysqlWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (w *mysqlWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetChunksByNarFileID(ctx, narFileID) @@ -361,9 +365,9 @@ func (w *mysqlWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64 } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetChunksByNarFileIDRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetChunksByNarFileIDRow{ ID: v.ID, Hash: v.Hash, @@ -978,7 +982,7 @@ func (w *mysqlWrapper) GetOldCompressedNarFiles(ctx context.Context, arg GetOldC return items, nil } -func (w *mysqlWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (w *mysqlWrapper) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetOrphanedChunks(ctx) @@ -987,9 +991,9 @@ func (w *mysqlWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetOrphanedChunksRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetOrphanedChunksRow{ ID: v.ID, Hash: v.Hash, diff --git a/pkg/database/generated_wrapper_postgres.go b/pkg/database/generated_wrapper_postgres.go index 3a9be770..307e2b3f 100644 --- 
a/pkg/database/generated_wrapper_postgres.go +++ b/pkg/database/generated_wrapper_postgres.go @@ -75,6 +75,8 @@ func (w *postgresWrapper) CreateChunk(ctx context.Context, arg CreateChunkParams Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, @@ -324,6 +326,8 @@ func (w *postgresWrapper) GetChunkByHash(ctx context.Context, hash string) (Chun Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, @@ -352,13 +356,15 @@ func (w *postgresWrapper) GetChunkByID(ctx context.Context, id int64) (Chunk, er Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, }, nil } -func (w *postgresWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (w *postgresWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetChunkByNarFileIDAndIndex(ctx, postgresdb.GetChunkByNarFileIDAndIndexParams{ @@ -368,15 +374,15 @@ func (w *postgresWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg G if err != nil { if errors.Is(err, sql.ErrNoRows) { - return Chunk{}, ErrNotFound + return GetChunkByNarFileIDAndIndexRow{}, ErrNotFound } - return Chunk{}, err + return GetChunkByNarFileIDAndIndexRow{}, err } // Convert Single Domain Struct - return Chunk{ + return GetChunkByNarFileIDAndIndexRow{ ID: res.ID, Hash: res.Hash, @@ -402,7 +408,7 @@ func (w *postgresWrapper) GetChunkCount(ctx context.Context) (int64, error) { return res, nil } -func (w *postgresWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (w *postgresWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres 
--- */ res, err := w.adapter.GetChunksByNarFileID(ctx, narFileID) @@ -411,9 +417,9 @@ func (w *postgresWrapper) GetChunksByNarFileID(ctx context.Context, narFileID in } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetChunksByNarFileIDRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetChunksByNarFileIDRow{ ID: v.ID, Hash: v.Hash, @@ -1028,7 +1034,7 @@ func (w *postgresWrapper) GetOldCompressedNarFiles(ctx context.Context, arg GetO return items, nil } -func (w *postgresWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (w *postgresWrapper) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetOrphanedChunks(ctx) @@ -1037,9 +1043,9 @@ func (w *postgresWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetOrphanedChunksRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetOrphanedChunksRow{ ID: v.ID, Hash: v.Hash, diff --git a/pkg/database/generated_wrapper_sqlite.go b/pkg/database/generated_wrapper_sqlite.go index 087465f4..8762a4cb 100644 --- a/pkg/database/generated_wrapper_sqlite.go +++ b/pkg/database/generated_wrapper_sqlite.go @@ -93,6 +93,8 @@ func (w *sqliteWrapper) CreateChunk(ctx context.Context, arg CreateChunkParams) Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, @@ -342,6 +344,8 @@ func (w *sqliteWrapper) GetChunkByHash(ctx context.Context, hash string) (Chunk, Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, @@ -370,13 +374,15 @@ func (w *sqliteWrapper) GetChunkByID(ctx context.Context, id int64) (Chunk, erro Size: res.Size, + CompressedSize: res.CompressedSize, + CreatedAt: res.CreatedAt, UpdatedAt: res.UpdatedAt, }, nil } -func 
(w *sqliteWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (w *sqliteWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetChunkByNarFileIDAndIndex(ctx, sqlitedb.GetChunkByNarFileIDAndIndexParams{ @@ -386,15 +392,15 @@ func (w *sqliteWrapper) GetChunkByNarFileIDAndIndex(ctx context.Context, arg Get if err != nil { if errors.Is(err, sql.ErrNoRows) { - return Chunk{}, ErrNotFound + return GetChunkByNarFileIDAndIndexRow{}, ErrNotFound } - return Chunk{}, err + return GetChunkByNarFileIDAndIndexRow{}, err } // Convert Single Domain Struct - return Chunk{ + return GetChunkByNarFileIDAndIndexRow{ ID: res.ID, Hash: res.Hash, @@ -420,7 +426,7 @@ func (w *sqliteWrapper) GetChunkCount(ctx context.Context) (int64, error) { return res, nil } -func (w *sqliteWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (w *sqliteWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { /* --- Auto-Loop for Bulk Insert on Non-Postgres --- */ res, err := w.adapter.GetChunksByNarFileID(ctx, narFileID) @@ -429,9 +435,9 @@ func (w *sqliteWrapper) GetChunksByNarFileID(ctx context.Context, narFileID int6 } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetChunksByNarFileIDRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetChunksByNarFileIDRow{ ID: v.ID, Hash: v.Hash, @@ -1046,7 +1052,7 @@ func (w *sqliteWrapper) GetOldCompressedNarFiles(ctx context.Context, arg GetOld return items, nil } -func (w *sqliteWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (w *sqliteWrapper) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { /* --- Auto-Loop for Bulk Insert on 
Non-Postgres --- */ res, err := w.adapter.GetOrphanedChunks(ctx) @@ -1055,9 +1061,9 @@ func (w *sqliteWrapper) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) } // Convert Slice of Domain Structs - items := make([]Chunk, len(res)) + items := make([]GetOrphanedChunksRow, len(res)) for i, v := range res { - items[i] = Chunk{ + items[i] = GetOrphanedChunksRow{ ID: v.ID, Hash: v.Hash, diff --git a/pkg/database/mysqldb/models.go b/pkg/database/mysqldb/models.go index 789eb7bd..07b3d3ec 100644 --- a/pkg/database/mysqldb/models.go +++ b/pkg/database/mysqldb/models.go @@ -10,11 +10,12 @@ import ( ) type Chunk struct { - ID int64 - Hash string - Size uint32 - CreatedAt time.Time - UpdatedAt sql.NullTime + ID int64 + Hash string + Size uint32 + CompressedSize uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime } type Config struct { diff --git a/pkg/database/mysqldb/querier.go b/pkg/database/mysqldb/querier.go index 4c412de5..311013b4 100644 --- a/pkg/database/mysqldb/querier.go +++ b/pkg/database/mysqldb/querier.go @@ -120,13 +120,13 @@ type Querier interface { DeleteOrphanedNarInfos(ctx context.Context) (int64, error) //GetChunkByHash // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = ? GetChunkByHash(ctx context.Context, hash string) (Chunk, error) //GetChunkByID // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = ? GetChunkByID(ctx context.Context, id int64) (Chunk, error) @@ -136,7 +136,7 @@ type Querier interface { // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? AND nfc.chunk_index = ? 
- GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) + GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) //GetChunkCount // // SELECT CAST(COUNT(*) AS SIGNED) AS count @@ -149,7 +149,7 @@ type Querier interface { // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? // ORDER BY nfc.chunk_index - GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) + GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) //GetCompressedNarInfos // // SELECT id, hash, created_at, updated_at, last_accessed_at, store_path, url, compression, file_hash, file_size, nar_hash, nar_size, deriver, `system`, ca @@ -325,7 +325,7 @@ type Querier interface { // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL - GetOrphanedChunks(ctx context.Context) ([]Chunk, error) + GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) // Find files that have no relationship to any narinfo // // SELECT nf.id, nf.hash, nf.compression, nf.file_size, nf.query, nf.created_at, nf.updated_at, nf.last_accessed_at diff --git a/pkg/database/mysqldb/query.mysql.sql.go b/pkg/database/mysqldb/query.mysql.sql.go index 57754dc4..fda3d70b 100644 --- a/pkg/database/mysqldb/query.mysql.sql.go +++ b/pkg/database/mysqldb/query.mysql.sql.go @@ -360,14 +360,14 @@ func (q *Queries) DeleteOrphanedNarInfos(ctx context.Context) (int64, error) { } const getChunkByHash = `-- name: GetChunkByHash :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE hash = ? ` // GetChunkByHash // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = ? 
func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error) { @@ -377,6 +377,7 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -384,14 +385,14 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error } const getChunkByID = `-- name: GetChunkByID :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE id = ? ` // GetChunkByID // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = ? func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { @@ -401,6 +402,7 @@ func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -419,15 +421,23 @@ type GetChunkByNarFileIDAndIndexParams struct { ChunkIndex int64 } +type GetChunkByNarFileIDAndIndexRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunkByNarFileIDAndIndex // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? AND nfc.chunk_index = ? -func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) { row := q.db.QueryRowContext(ctx, getChunkByNarFileIDAndIndex, arg.NarFileID, arg.ChunkIndex) - var i Chunk + var i GetChunkByNarFileIDAndIndexRow err := row.Scan( &i.ID, &i.Hash, @@ -462,6 +472,14 @@ WHERE nfc.nar_file_id = ? 
ORDER BY nfc.chunk_index ` +type GetChunksByNarFileIDRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunksByNarFileID // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at @@ -469,15 +487,15 @@ ORDER BY nfc.chunk_index // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? // ORDER BY nfc.chunk_index -func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { rows, err := q.db.QueryContext(ctx, getChunksByNarFileID, narFileID) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetChunksByNarFileIDRow for rows.Next() { - var i Chunk + var i GetChunksByNarFileIDRow if err := rows.Scan( &i.ID, &i.Hash, @@ -1391,21 +1409,29 @@ LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id WHERE nfc.chunk_id IS NULL ` +type GetOrphanedChunksRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetOrphanedChunks // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL -func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { rows, err := q.db.QueryContext(ctx, getOrphanedChunks) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetOrphanedChunksRow for rows.Next() { - var i Chunk + var i GetOrphanedChunksRow if err := rows.Scan( &i.ID, &i.Hash, diff --git a/pkg/database/postgresdb/models.go b/pkg/database/postgresdb/models.go index 165eb8b6..14c70c4d 100644 --- a/pkg/database/postgresdb/models.go +++ b/pkg/database/postgresdb/models.go @@ -10,11 +10,12 @@ import ( ) type Chunk struct { - ID 
int64 - Hash string - Size uint32 - CreatedAt time.Time - UpdatedAt sql.NullTime + ID int64 + Hash string + Size uint32 + CompressedSize uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime } type Config struct { diff --git a/pkg/database/postgresdb/querier.go b/pkg/database/postgresdb/querier.go index 0a1e529c..931a2875 100644 --- a/pkg/database/postgresdb/querier.go +++ b/pkg/database/postgresdb/querier.go @@ -51,7 +51,7 @@ type Querier interface { // ) // ON CONFLICT(hash) DO UPDATE SET // updated_at = CURRENT_TIMESTAMP - // RETURNING id, hash, size, created_at, updated_at + // RETURNING id, hash, size, compressed_size, created_at, updated_at CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk, error) //CreateConfig // @@ -138,13 +138,13 @@ type Querier interface { DeleteOrphanedNarInfos(ctx context.Context) (int64, error) //GetChunkByHash // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = $1 GetChunkByHash(ctx context.Context, hash string) (Chunk, error) //GetChunkByID // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = $1 GetChunkByID(ctx context.Context, id int64) (Chunk, error) @@ -154,7 +154,7 @@ type Querier interface { // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 AND nfc.chunk_index = $2 - GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) + GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) //GetChunkCount // // SELECT CAST(COUNT(*) AS BIGINT) AS count @@ -167,7 +167,7 @@ type Querier interface { // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 // ORDER BY nfc.chunk_index - GetChunksByNarFileID(ctx context.Context, 
narFileID int64) ([]Chunk, error) + GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) //GetCompressedNarInfos // // SELECT id, hash, created_at, updated_at, last_accessed_at, store_path, url, compression, file_hash, file_size, nar_hash, nar_size, deriver, system, ca @@ -343,7 +343,7 @@ type Querier interface { // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL - GetOrphanedChunks(ctx context.Context) ([]Chunk, error) + GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) // Find files that have no relationship to any narinfo // // SELECT nf.id, nf.hash, nf.compression, nf.file_size, nf.query, nf.created_at, nf.updated_at, nf.last_accessed_at diff --git a/pkg/database/postgresdb/query.postgres.sql.go b/pkg/database/postgresdb/query.postgres.sql.go index fcc1c58e..72f2a81a 100644 --- a/pkg/database/postgresdb/query.postgres.sql.go +++ b/pkg/database/postgresdb/query.postgres.sql.go @@ -121,7 +121,7 @@ INSERT INTO chunks ( ) ON CONFLICT(hash) DO UPDATE SET updated_at = CURRENT_TIMESTAMP -RETURNING id, hash, size, created_at, updated_at +RETURNING id, hash, size, compressed_size, created_at, updated_at ` type CreateChunkParams struct { @@ -138,7 +138,7 @@ type CreateChunkParams struct { // ) // ON CONFLICT(hash) DO UPDATE SET // updated_at = CURRENT_TIMESTAMP -// RETURNING id, hash, size, created_at, updated_at +// RETURNING id, hash, size, compressed_size, created_at, updated_at func (q *Queries) CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk, error) { row := q.db.QueryRowContext(ctx, createChunk, arg.Hash, arg.Size) var i Chunk @@ -146,6 +146,7 @@ func (q *Queries) CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -466,14 +467,14 @@ func (q *Queries) DeleteOrphanedNarInfos(ctx context.Context) (int64, error) { } const getChunkByHash = `-- name: 
GetChunkByHash :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE hash = $1 ` // GetChunkByHash // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = $1 func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error) { @@ -483,6 +484,7 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -490,14 +492,14 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error } const getChunkByID = `-- name: GetChunkByID :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE id = $1 ` // GetChunkByID // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = $1 func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { @@ -507,6 +509,7 @@ func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -525,15 +528,23 @@ type GetChunkByNarFileIDAndIndexParams struct { ChunkIndex int64 } +type GetChunkByNarFileIDAndIndexRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunkByNarFileIDAndIndex // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 AND nfc.chunk_index = $2 -func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) 
(GetChunkByNarFileIDAndIndexRow, error) { row := q.db.QueryRowContext(ctx, getChunkByNarFileIDAndIndex, arg.NarFileID, arg.ChunkIndex) - var i Chunk + var i GetChunkByNarFileIDAndIndexRow err := row.Scan( &i.ID, &i.Hash, @@ -568,6 +579,14 @@ WHERE nfc.nar_file_id = $1 ORDER BY nfc.chunk_index ` +type GetChunksByNarFileIDRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunksByNarFileID // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at @@ -575,15 +594,15 @@ ORDER BY nfc.chunk_index // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = $1 // ORDER BY nfc.chunk_index -func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { rows, err := q.db.QueryContext(ctx, getChunksByNarFileID, narFileID) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetChunksByNarFileIDRow for rows.Next() { - var i Chunk + var i GetChunksByNarFileIDRow if err := rows.Scan( &i.ID, &i.Hash, @@ -1461,21 +1480,29 @@ LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id WHERE nfc.chunk_id IS NULL ` +type GetOrphanedChunksRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetOrphanedChunks // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL -func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { rows, err := q.db.QueryContext(ctx, getOrphanedChunks) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetOrphanedChunksRow for rows.Next() { - var i Chunk + var i GetOrphanedChunksRow if err := rows.Scan( 
&i.ID, &i.Hash, diff --git a/pkg/database/sqlitedb/models.go b/pkg/database/sqlitedb/models.go index c199b19c..d3e6529e 100644 --- a/pkg/database/sqlitedb/models.go +++ b/pkg/database/sqlitedb/models.go @@ -10,11 +10,12 @@ import ( ) type Chunk struct { - ID int64 - Hash string - Size uint32 - CreatedAt time.Time - UpdatedAt sql.NullTime + ID int64 + Hash string + Size uint32 + CompressedSize uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime } type Config struct { diff --git a/pkg/database/sqlitedb/querier.go b/pkg/database/sqlitedb/querier.go index 2426f80e..d34d590b 100644 --- a/pkg/database/sqlitedb/querier.go +++ b/pkg/database/sqlitedb/querier.go @@ -37,7 +37,7 @@ type Querier interface { // ) // ON CONFLICT(hash) DO UPDATE SET // updated_at = CURRENT_TIMESTAMP - // RETURNING id, hash, size, created_at, updated_at + // RETURNING id, hash, size, compressed_size, created_at, updated_at CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk, error) //CreateConfig // @@ -124,13 +124,13 @@ type Querier interface { DeleteOrphanedNarInfos(ctx context.Context) (int64, error) //GetChunkByHash // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = ? GetChunkByHash(ctx context.Context, hash string) (Chunk, error) //GetChunkByID // - // SELECT id, hash, size, created_at, updated_at + // SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = ? GetChunkByID(ctx context.Context, id int64) (Chunk, error) @@ -140,7 +140,7 @@ type Querier interface { // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? AND nfc.chunk_index = ? 
- GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) + GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) //GetChunkCount // // SELECT CAST(COUNT(*) AS INTEGER) AS count @@ -153,7 +153,7 @@ type Querier interface { // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? // ORDER BY nfc.chunk_index - GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) + GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) //GetCompressedNarInfos // // SELECT id, hash, created_at, updated_at, last_accessed_at, store_path, url, compression, file_hash, file_size, nar_hash, nar_size, deriver, system, ca @@ -329,7 +329,7 @@ type Querier interface { // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL - GetOrphanedChunks(ctx context.Context) ([]Chunk, error) + GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) // Find files that have no relationship to any narinfo // // SELECT nf.id, nf.hash, nf.compression, nf.file_size, nf."query", nf.created_at, nf.updated_at, nf.last_accessed_at diff --git a/pkg/database/sqlitedb/query.sqlite.sql.go b/pkg/database/sqlitedb/query.sqlite.sql.go index c9b94381..0f5b89d2 100644 --- a/pkg/database/sqlitedb/query.sqlite.sql.go +++ b/pkg/database/sqlitedb/query.sqlite.sql.go @@ -73,7 +73,7 @@ INSERT INTO chunks ( ) ON CONFLICT(hash) DO UPDATE SET updated_at = CURRENT_TIMESTAMP -RETURNING id, hash, size, created_at, updated_at +RETURNING id, hash, size, compressed_size, created_at, updated_at ` type CreateChunkParams struct { @@ -90,7 +90,7 @@ type CreateChunkParams struct { // ) // ON CONFLICT(hash) DO UPDATE SET // updated_at = CURRENT_TIMESTAMP -// RETURNING id, hash, size, created_at, updated_at +// RETURNING id, hash, size, compressed_size, created_at, updated_at 
func (q *Queries) CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk, error) { row := q.db.QueryRowContext(ctx, createChunk, arg.Hash, arg.Size) var i Chunk @@ -98,6 +98,7 @@ func (q *Queries) CreateChunk(ctx context.Context, arg CreateChunkParams) (Chunk &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -418,14 +419,14 @@ func (q *Queries) DeleteOrphanedNarInfos(ctx context.Context) (int64, error) { } const getChunkByHash = `-- name: GetChunkByHash :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE hash = ? ` // GetChunkByHash // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE hash = ? func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error) { @@ -435,6 +436,7 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -442,14 +444,14 @@ func (q *Queries) GetChunkByHash(ctx context.Context, hash string) (Chunk, error } const getChunkByID = `-- name: GetChunkByID :one -SELECT id, hash, size, created_at, updated_at +SELECT id, hash, size, compressed_size, created_at, updated_at FROM chunks WHERE id = ? ` // GetChunkByID // -// SELECT id, hash, size, created_at, updated_at +// SELECT id, hash, size, compressed_size, created_at, updated_at // FROM chunks // WHERE id = ? 
func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { @@ -459,6 +461,7 @@ func (q *Queries) GetChunkByID(ctx context.Context, id int64) (Chunk, error) { &i.ID, &i.Hash, &i.Size, + &i.CompressedSize, &i.CreatedAt, &i.UpdatedAt, ) @@ -477,15 +480,23 @@ type GetChunkByNarFileIDAndIndexParams struct { ChunkIndex int64 } +type GetChunkByNarFileIDAndIndexRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunkByNarFileIDAndIndex // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? AND nfc.chunk_index = ? -func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (Chunk, error) { +func (q *Queries) GetChunkByNarFileIDAndIndex(ctx context.Context, arg GetChunkByNarFileIDAndIndexParams) (GetChunkByNarFileIDAndIndexRow, error) { row := q.db.QueryRowContext(ctx, getChunkByNarFileIDAndIndex, arg.NarFileID, arg.ChunkIndex) - var i Chunk + var i GetChunkByNarFileIDAndIndexRow err := row.Scan( &i.ID, &i.Hash, @@ -520,6 +531,14 @@ WHERE nfc.nar_file_id = ? ORDER BY nfc.chunk_index ` +type GetChunksByNarFileIDRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetChunksByNarFileID // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at @@ -527,15 +546,15 @@ ORDER BY nfc.chunk_index // INNER JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.nar_file_id = ? 
// ORDER BY nfc.chunk_index -func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]Chunk, error) { +func (q *Queries) GetChunksByNarFileID(ctx context.Context, narFileID int64) ([]GetChunksByNarFileIDRow, error) { rows, err := q.db.QueryContext(ctx, getChunksByNarFileID, narFileID) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetChunksByNarFileIDRow for rows.Next() { - var i Chunk + var i GetChunksByNarFileIDRow if err := rows.Scan( &i.ID, &i.Hash, @@ -1413,21 +1432,29 @@ LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id WHERE nfc.chunk_id IS NULL ` +type GetOrphanedChunksRow struct { + ID int64 + Hash string + Size uint32 + CreatedAt time.Time + UpdatedAt sql.NullTime +} + // GetOrphanedChunks // // SELECT c.id, c.hash, c.size, c.created_at, c.updated_at // FROM chunks c // LEFT JOIN nar_file_chunks nfc ON c.id = nfc.chunk_id // WHERE nfc.chunk_id IS NULL -func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]Chunk, error) { +func (q *Queries) GetOrphanedChunks(ctx context.Context) ([]GetOrphanedChunksRow, error) { rows, err := q.db.QueryContext(ctx, getOrphanedChunks) if err != nil { return nil, err } defer rows.Close() - var items []Chunk + var items []GetOrphanedChunksRow for rows.Next() { - var i Chunk + var i GetOrphanedChunksRow if err := rows.Scan( &i.ID, &i.Hash, diff --git a/sqlc.yml b/sqlc.yml index 6549e301..07853242 100644 --- a/sqlc.yml +++ b/sqlc.yml @@ -15,6 +15,8 @@ sql: go_type: uint64 - column: chunks.size go_type: uint32 + - column: chunks.compressed_size + go_type: uint32 rename: narinfo_id: NarInfoID url: URL @@ -33,6 +35,8 @@ sql: go_type: uint64 - column: chunks.size go_type: uint32 + - column: chunks.compressed_size + go_type: uint32 rename: narinfo_id: NarInfoID url: URL @@ -51,6 +55,8 @@ sql: go_type: uint64 - column: chunks.size go_type: uint32 + - column: chunks.compressed_size + go_type: uint32 rename: narinfo_id: NarInfoID url: URL