From 714cdef1afa85c0f8e0f023313d7fd2745731c04 Mon Sep 17 00:00:00 2001 From: javi11 Date: Mon, 20 Apr 2026 10:25:57 +0200 Subject: [PATCH 1/6] feat(database): add import_migrations table and repository Adds a generic import_migrations table to track two-phase migration state (Phase 1 = import NZBs into AltMount; Phase 2 = rewrite arr library symlinks). Includes goose migrations for SQLite and PostgreSQL, ImportMigration model + status consts, a full ImportMigrationRepository with Upsert/MarkImported/MarkFailed/MarkSymlinksMigrated/LookupByExternalID/ ListByStatus/Stats/ExistsForSource/BackfillFromImportQueue, and wires MigrationRepo into the DB struct. --- internal/database/db.go | 5 +- .../database/import_migration_repository.go | 321 ++++++++++++++++++ .../postgres/024_import_migrations.sql | 25 ++ .../sqlite/024_import_migrations.sql | 25 ++ internal/database/models.go | 33 ++ 5 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 internal/database/import_migration_repository.go create mode 100644 internal/database/migrations/postgres/024_import_migrations.sql create mode 100644 internal/database/migrations/sqlite/024_import_migrations.sql diff --git a/internal/database/db.go b/internal/database/db.go index b62ddbf22..69b804da5 100644 --- a/internal/database/db.go +++ b/internal/database/db.go @@ -19,7 +19,8 @@ type DB struct { conn *sql.DB dialect dialectHelper // Repository is kept for backwards-compat; prefer using Connection() directly. - Repository *QueueRepository + Repository *QueueRepository + MigrationRepo *ImportMigrationRepository } // Config holds database configuration. 
@@ -86,6 +87,7 @@ func newSQLiteDB(config Config) (*DB, error) { dh := dialectHelper{d: DialectSQLite} db := &DB{conn: conn, dialect: dh} db.Repository = NewQueueRepository(conn, DialectSQLite) + db.MigrationRepo = NewImportMigrationRepository(conn, DialectSQLite) return db, nil } @@ -114,6 +116,7 @@ func newPostgresDB(config Config) (*DB, error) { dh := dialectHelper{d: DialectPostgres} db := &DB{conn: conn, dialect: dh} db.Repository = NewQueueRepository(conn, DialectPostgres) + db.MigrationRepo = NewImportMigrationRepository(conn, DialectPostgres) return db, nil } diff --git a/internal/database/import_migration_repository.go b/internal/database/import_migration_repository.go new file mode 100644 index 000000000..63cf03902 --- /dev/null +++ b/internal/database/import_migration_repository.go @@ -0,0 +1,321 @@ +package database + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" +) + +// ImportMigrationRepository handles database operations for import_migrations. +type ImportMigrationRepository struct { + db *dialectAwareDB + dialect dialectHelper +} + +// NewImportMigrationRepository creates a new ImportMigrationRepository. +func NewImportMigrationRepository(db *sql.DB, d Dialect) *ImportMigrationRepository { + return &ImportMigrationRepository{ + db: newDialectAwareDB(db, d), + dialect: dialectHelper{d: d}, + } +} + +// Upsert inserts or updates a migration row keyed by (source, external_id). +// Returns the row ID. 
+func (r *ImportMigrationRepository) Upsert(ctx context.Context, row *ImportMigration) (int64, error) { + query := ` + INSERT INTO import_migrations + (source, external_id, queue_item_id, relative_path, final_path, status, error, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, datetime('now'), datetime('now')) + ON CONFLICT(source, external_id) DO UPDATE SET + queue_item_id = COALESCE(excluded.queue_item_id, import_migrations.queue_item_id), + relative_path = excluded.relative_path, + final_path = COALESCE(excluded.final_path, import_migrations.final_path), + status = excluded.status, + error = excluded.error, + updated_at = datetime('now') + ` + args := []any{ + row.Source, row.ExternalID, row.QueueItemID, + row.RelativePath, row.FinalPath, string(row.Status), row.Error, + } + + if r.dialect.IsPostgres() { + var id int64 + err := r.db.QueryRowContext(ctx, query+" RETURNING id", args...).Scan(&id) + if err != nil && err != sql.ErrNoRows { + return 0, fmt.Errorf("upsert import_migration: %w", err) + } + return id, nil + } + + res, err := r.db.ExecContext(ctx, query, args...) + if err != nil { + return 0, fmt.Errorf("upsert import_migration: %w", err) + } + id, err := res.LastInsertId() + if err != nil { + return 0, fmt.Errorf("upsert import_migration last insert id: %w", err) + } + return id, nil +} + +// MarkImported sets status=imported and final_path for all rows matching queue_item_id. +func (r *ImportMigrationRepository) MarkImported(ctx context.Context, queueItemID int64, finalPath string) error { + query := ` + UPDATE import_migrations + SET status = 'imported', final_path = ?, updated_at = datetime('now') + WHERE queue_item_id = ? + ` + _, err := r.db.ExecContext(ctx, query, finalPath, queueItemID) + if err != nil { + return fmt.Errorf("mark import_migration imported (queue_item_id=%d): %w", queueItemID, err) + } + return nil +} + +// MarkFailed sets status=failed and error for all rows matching queue_item_id. 
+func (r *ImportMigrationRepository) MarkFailed(ctx context.Context, queueItemID int64, errMsg string) error { + query := ` + UPDATE import_migrations + SET status = 'failed', error = ?, updated_at = datetime('now') + WHERE queue_item_id = ? + ` + _, err := r.db.ExecContext(ctx, query, errMsg, queueItemID) + if err != nil { + return fmt.Errorf("mark import_migration failed (queue_item_id=%d): %w", queueItemID, err) + } + return nil +} + +// MarkSymlinksMigrated sets status=symlinks_migrated for the given row IDs. +func (r *ImportMigrationRepository) MarkSymlinksMigrated(ctx context.Context, ids []int64) error { + if len(ids) == 0 { + return nil + } + + placeholders := make([]string, len(ids)) + args := make([]any, len(ids)+1) + args[0] = time.Now() + for i, id := range ids { + placeholders[i] = "?" + args[i+1] = id + } + + query := fmt.Sprintf(` + UPDATE import_migrations + SET status = 'symlinks_migrated', updated_at = datetime('now') + WHERE id IN (%s) + `, strings.Join(placeholders, ", ")) + + _, err := r.db.ExecContext(ctx, query, args[1:]...) + if err != nil { + return fmt.Errorf("mark import_migrations symlinks_migrated: %w", err) + } + return nil +} + +// LookupByExternalID returns the migration row for (source, external_id), or nil if not found. +func (r *ImportMigrationRepository) LookupByExternalID(ctx context.Context, source, externalID string) (*ImportMigration, error) { + query := ` + SELECT id, source, external_id, queue_item_id, relative_path, final_path, status, error, created_at, updated_at + FROM import_migrations + WHERE source = ? AND external_id = ? + ` + row := r.db.QueryRowContext(ctx, query, source, externalID) + m, err := scanImportMigration(row) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("lookup import_migration (source=%s, external_id=%s): %w", source, externalID, err) + } + return m, nil +} + +// ListByStatus returns paginated rows for source with the given status. 
+func (r *ImportMigrationRepository) ListByStatus(ctx context.Context, source string, status ImportMigrationStatus, limit, offset int) ([]*ImportMigration, error) { + query := ` + SELECT id, source, external_id, queue_item_id, relative_path, final_path, status, error, created_at, updated_at + FROM import_migrations + WHERE source = ? AND status = ? + ORDER BY id ASC + LIMIT ? OFFSET ? + ` + rows, err := r.db.QueryContext(ctx, query, source, string(status), limit, offset) + if err != nil { + return nil, fmt.Errorf("list import_migrations by status: %w", err) + } + defer rows.Close() + + var result []*ImportMigration + for rows.Next() { + m, err := scanImportMigrationRow(rows) + if err != nil { + return nil, fmt.Errorf("scan import_migration: %w", err) + } + result = append(result, m) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterate import_migrations: %w", err) + } + return result, nil +} + +// Stats returns aggregate counts for a source. +func (r *ImportMigrationRepository) Stats(ctx context.Context, source string) (*ImportMigrationStats, error) { + query := ` + SELECT + COUNT(*) AS total, + SUM(CASE WHEN status = 'pending' THEN 1 ELSE 0 END) AS pending, + SUM(CASE WHEN status = 'imported' THEN 1 ELSE 0 END) AS imported, + SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) AS failed, + SUM(CASE WHEN status = 'symlinks_migrated' THEN 1 ELSE 0 END) AS symlinks_migrated + FROM import_migrations + WHERE source = ? + ` + var stats ImportMigrationStats + err := r.db.QueryRowContext(ctx, query, source).Scan( + &stats.Total, + &stats.Pending, + &stats.Imported, + &stats.Failed, + &stats.SymlinksMigrated, + ) + if err != nil { + return nil, fmt.Errorf("stats import_migrations (source=%s): %w", source, err) + } + return &stats, nil +} + +// ExistsForSource returns true if any rows exist for the given source. 
+func (r *ImportMigrationRepository) ExistsForSource(ctx context.Context, source string) (bool, error) { + query := `SELECT COUNT(*) FROM import_migrations WHERE source = ? LIMIT 1` + var count int + if err := r.db.QueryRowContext(ctx, query, source).Scan(&count); err != nil { + return false, fmt.Errorf("exists import_migrations (source=%s): %w", source, err) + } + return count > 0, nil +} + +// BackfillFromImportQueue reads completed import_queue rows that contain a nzbdav_id +// in their metadata JSON and inserts them as status=imported rows into import_migrations +// (idempotent via ON CONFLICT IGNORE / INSERT OR IGNORE). +// Returns the number of rows inserted. +func (r *ImportMigrationRepository) BackfillFromImportQueue(ctx context.Context) (int, error) { + selectQuery := ` + SELECT id, relative_path, storage_path, metadata + FROM import_queue + WHERE status = 'completed' + AND metadata IS NOT NULL + AND storage_path IS NOT NULL + ` + rows, err := r.db.QueryContext(ctx, selectQuery) + if err != nil { + return 0, fmt.Errorf("backfill: query import_queue: %w", err) + } + defer rows.Close() + + type row struct { + id int64 + relativePath *string + storagePath *string + metadata string + } + + var candidates []row + for rows.Next() { + var candidate row + if err := rows.Scan(&candidate.id, &candidate.relativePath, &candidate.storagePath, &candidate.metadata); err != nil { + return 0, fmt.Errorf("backfill: scan import_queue row: %w", err) + } + candidates = append(candidates, candidate) + } + if err := rows.Err(); err != nil { + return 0, fmt.Errorf("backfill: iterate import_queue: %w", err) + } + + var nzbdavIDStruct struct { + NzbdavID string `json:"nzbdav_id"` + } + + inserted := 0 + for _, c := range candidates { + if err := json.Unmarshal([]byte(c.metadata), &nzbdavIDStruct); err != nil { + // Row has metadata but no parseable nzbdav_id — skip silently. 
+ continue + } + if nzbdavIDStruct.NzbdavID == "" { + continue + } + + relativePath := "" + if c.relativePath != nil { + relativePath = *c.relativePath + } + + insertQuery := ` + INSERT OR IGNORE INTO import_migrations + (source, external_id, queue_item_id, relative_path, final_path, status, created_at, updated_at) + VALUES ('nzbdav', ?, ?, ?, ?, 'imported', datetime('now'), datetime('now')) + ` + if r.dialect.IsPostgres() { + insertQuery = ` + INSERT INTO import_migrations + (source, external_id, queue_item_id, relative_path, final_path, status, created_at, updated_at) + VALUES ('nzbdav', $1, $2, $3, $4, 'imported', NOW(), NOW()) + ON CONFLICT (source, external_id) DO NOTHING + ` + } + + var res sql.Result + if r.dialect.IsPostgres() { + res, err = r.db.ExecContext(ctx, insertQuery, nzbdavIDStruct.NzbdavID, c.id, relativePath, c.storagePath) + } else { + res, err = r.db.ExecContext(ctx, insertQuery, nzbdavIDStruct.NzbdavID, c.id, relativePath, c.storagePath) + } + if err != nil { + return inserted, fmt.Errorf("backfill: insert import_migration (external_id=%s): %w", nzbdavIDStruct.NzbdavID, err) + } + if n, _ := res.RowsAffected(); n > 0 { + inserted++ + } + } + + return inserted, nil +} + +// ─── helpers ───────────────────────────────────────────────────────────────── + +func scanImportMigration(row *sql.Row) (*ImportMigration, error) { + var m ImportMigration + var status string + err := row.Scan( + &m.ID, &m.Source, &m.ExternalID, &m.QueueItemID, + &m.RelativePath, &m.FinalPath, &status, &m.Error, + &m.CreatedAt, &m.UpdatedAt, + ) + if err != nil { + return nil, err + } + m.Status = ImportMigrationStatus(status) + return &m, nil +} + +func scanImportMigrationRow(rows *sql.Rows) (*ImportMigration, error) { + var m ImportMigration + var status string + err := rows.Scan( + &m.ID, &m.Source, &m.ExternalID, &m.QueueItemID, + &m.RelativePath, &m.FinalPath, &status, &m.Error, + &m.CreatedAt, &m.UpdatedAt, + ) + if err != nil { + return nil, err + } + m.Status = 
ImportMigrationStatus(status) + return &m, nil +} diff --git a/internal/database/migrations/postgres/024_import_migrations.sql b/internal/database/migrations/postgres/024_import_migrations.sql new file mode 100644 index 000000000..9df0925c0 --- /dev/null +++ b/internal/database/migrations/postgres/024_import_migrations.sql @@ -0,0 +1,25 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE import_migrations ( + id BIGSERIAL PRIMARY KEY, + source TEXT NOT NULL, + external_id TEXT NOT NULL, + queue_item_id BIGINT, + relative_path TEXT NOT NULL DEFAULT '', + final_path TEXT, + status TEXT NOT NULL DEFAULT 'pending', + error TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE(source, external_id) +); +CREATE INDEX idx_import_migrations_status ON import_migrations(source, status); +CREATE INDEX idx_import_migrations_queue ON import_migrations(queue_item_id); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_import_migrations_queue; +DROP INDEX IF EXISTS idx_import_migrations_status; +DROP TABLE IF EXISTS import_migrations; +-- +goose StatementEnd diff --git a/internal/database/migrations/sqlite/024_import_migrations.sql b/internal/database/migrations/sqlite/024_import_migrations.sql new file mode 100644 index 000000000..2ffadb78a --- /dev/null +++ b/internal/database/migrations/sqlite/024_import_migrations.sql @@ -0,0 +1,25 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE import_migrations ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + source TEXT NOT NULL, + external_id TEXT NOT NULL, + queue_item_id INTEGER, + relative_path TEXT NOT NULL DEFAULT '', + final_path TEXT, + status TEXT NOT NULL DEFAULT 'pending', + error TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + UNIQUE(source, external_id) +); +CREATE INDEX idx_import_migrations_status ON 
import_migrations(source, status); +CREATE INDEX idx_import_migrations_queue ON import_migrations(queue_item_id); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS idx_import_migrations_queue; +DROP INDEX IF EXISTS idx_import_migrations_status; +DROP TABLE IF EXISTS import_migrations; +-- +goose StatementEnd diff --git a/internal/database/models.go b/internal/database/models.go index 3171fc44e..de1bc9222 100644 --- a/internal/database/models.go +++ b/internal/database/models.go @@ -169,3 +169,36 @@ type ImportHistory struct { CompletedAt time.Time `db:"completed_at"` } +// ImportMigrationStatus represents the status of a migration item +type ImportMigrationStatus string + +const ( + ImportMigrationStatusPending ImportMigrationStatus = "pending" + ImportMigrationStatusImported ImportMigrationStatus = "imported" + ImportMigrationStatusFailed ImportMigrationStatus = "failed" + ImportMigrationStatusSymlinksMigrated ImportMigrationStatus = "symlinks_migrated" +) + +// ImportMigration tracks progress of two-phase migrations (e.g. nzbdav → altmount) +type ImportMigration struct { + ID int64 `db:"id"` + Source string `db:"source"` // e.g. 
"nzbdav" + ExternalID string `db:"external_id"` // source-specific ID (nzbdav GUID) + QueueItemID *int64 `db:"queue_item_id"` // FK → import_queue.id (nullable) + RelativePath string `db:"relative_path"` // virtual path when enqueued + FinalPath *string `db:"final_path"` // storage_path after import + Status ImportMigrationStatus `db:"status"` + Error *string `db:"error"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` +} + +// ImportMigrationStats holds aggregate counts for a source +type ImportMigrationStats struct { + Pending int + Imported int + Failed int + SymlinksMigrated int + Total int +} + From 2a160fc82c8b9e6a5b860b9fc78368daa1128d86 Mon Sep 17 00:00:00 2001 From: javi11 Date: Mon, 20 Apr 2026 10:33:51 +0200 Subject: [PATCH 2/6] refactor(nzbdav): remove ID-symlink write code and FilterExistingNzbdavIds Delete HandleIDMetadataLinks and id_linker.go; remove UpdateIDSymlink and RemoveIDSymlink methods from MetadataService; remove the .id sidecar write block from WriteFileMetadata (read path preserved for Phase 2 compatibility); remove FilterExistingNzbdavIds from QueueRepository, Repository, and the BatchQueueAdder interface; remove related call sites in nzbfilesystem MOVE handler and nzbdav scanner processBatch. 
--- internal/database/queue_repository.go | 53 ----------- internal/database/repository.go | 55 +---------- .../importer/postprocessor/coordinator.go | 9 +- internal/importer/postprocessor/id_linker.go | 34 ------- internal/importer/scanner/nzbdav.go | 95 ++----------------- internal/importer/service.go | 4 - internal/metadata/service.go | 73 -------------- .../nzbfilesystem/metadata_remote_file.go | 23 ----- 8 files changed, 10 insertions(+), 336 deletions(-) delete mode 100644 internal/importer/postprocessor/id_linker.go diff --git a/internal/database/queue_repository.go b/internal/database/queue_repository.go index f827ce581..6ff9d4d45 100644 --- a/internal/database/queue_repository.go +++ b/internal/database/queue_repository.go @@ -5,7 +5,6 @@ import ( "database/sql" "errors" "fmt" - "strings" "time" ) @@ -470,58 +469,6 @@ func (r *QueueRepository) IncrementRetryCountAndResetStatus(ctx context.Context, return rowsAffected > 0, nil } -// FilterExistingNzbdavIds checks a list of nzbdav IDs and returns those that already exist in the queue -func (r *QueueRepository) FilterExistingNzbdavIds(ctx context.Context, ids []string) ([]string, error) { - if len(ids) == 0 { - return nil, nil - } - - // We can't pass too many parameters at once, so we batch the query - batchSize := 500 - existingIds := make([]string, 0) - - for i := 0; i < len(ids); i += batchSize { - end := min(i+batchSize, len(ids)) - - batchIds := ids[i:end] - - // Build placeholders for the IN clause - placeholders := make([]string, len(batchIds)) - args := make([]any, len(batchIds)) - for j, id := range batchIds { - placeholders[j] = "?" - args[j] = id - } - - // Query using json_extract (SQLite) or ->>'key' (PostgreSQL) to find matching IDs - jsonExpr := r.dialect.JSONExtract("metadata", "nzbdav_id") - query := fmt.Sprintf(` - SELECT DISTINCT %s - FROM import_queue - WHERE %s IN (%s) - `, jsonExpr, jsonExpr, strings.Join(placeholders, ",")) - - rows, err := r.db.QueryContext(ctx, query, args...) 
- if err != nil { - return nil, fmt.Errorf("failed to check existing nzbdav IDs: %w", err) - } - - for rows.Next() { - var id sql.NullString - if err := rows.Scan(&id); err != nil { - rows.Close() - return nil, fmt.Errorf("failed to scan matching id: %w", err) - } - if id.Valid { - existingIds = append(existingIds, id.String) - } - } - rows.Close() - } - - return existingIds, nil -} - // UpdateQueueItemPriority updates the priority of a queue item func (r *QueueRepository) UpdateQueueItemPriority(ctx context.Context, id int64, priority QueuePriority) error { query := `UPDATE import_queue SET priority = ?, updated_at = datetime('now') WHERE id = ?` diff --git a/internal/database/repository.go b/internal/database/repository.go index 4fa78808b..7193b40e5 100644 --- a/internal/database/repository.go +++ b/internal/database/repository.go @@ -69,7 +69,7 @@ func (r *Repository) WithImmediateTransaction(ctx context.Context, fn func(*Repo } // withTransactionMode executes a function within a database transaction with specified mode -func (r *Repository) withTransactionMode(ctx context.Context, mode string, fn func(*Repository) error) error { +func (r *Repository) withTransactionMode(ctx context.Context, _ string, fn func(*Repository) error) error { ddb, ok := r.db.(*dialectAwareDB) if !ok { return fmt.Errorf("repository not connected to dialectAwareDB") @@ -942,59 +942,6 @@ func (r *Repository) IsFileInQueue(ctx context.Context, filePath string) (bool, return true, nil } -// FilterExistingNzbdavIds checks a list of nzbdav IDs and returns those that already exist in the queue -// This is used for deduplication during import -func (r *Repository) FilterExistingNzbdavIds(ctx context.Context, ids []string) ([]string, error) { - if len(ids) == 0 { - return nil, nil - } - - // We can't pass too many parameters at once, so we batch the query - batchSize := 500 - existingIds := make([]string, 0) - - for i := 0; i < len(ids); i += batchSize { - end := min(i+batchSize, len(ids)) - - 
batchIds := ids[i:end] - - // Build placeholders for the IN clause - placeholders := make([]string, len(batchIds)) - args := make([]any, len(batchIds)) - for j, id := range batchIds { - placeholders[j] = "?" - args[j] = id - } - - // Query using json_extract (SQLite) or ->>'key' (PostgreSQL) to find matching IDs - jsonExpr := r.dialect.JSONExtract("metadata", "nzbdav_id") - query := fmt.Sprintf(` - SELECT DISTINCT %s - FROM import_queue - WHERE %s IN (%s) - `, jsonExpr, jsonExpr, strings.Join(placeholders, ",")) - - rows, err := r.db.QueryContext(ctx, query, args...) - if err != nil { - return nil, fmt.Errorf("failed to check existing nzbdav IDs: %w", err) - } - - for rows.Next() { - var id sql.NullString - if err := rows.Scan(&id); err != nil { - rows.Close() - return nil, fmt.Errorf("failed to scan matching id: %w", err) - } - if id.Valid { - existingIds = append(existingIds, id.String) - } - } - rows.Close() - } - - return existingIds, nil -} - // UpdateQueueItemPriority updates the priority of a queue item func (r *Repository) UpdateQueueItemPriority(ctx context.Context, id int64, priority QueuePriority) error { query := `UPDATE import_queue SET priority = ?, updated_at = datetime('now') WHERE id = ?` diff --git a/internal/importer/postprocessor/coordinator.go b/internal/importer/postprocessor/coordinator.go index fb01afe30..c4982eebd 100644 --- a/internal/importer/postprocessor/coordinator.go +++ b/internal/importer/postprocessor/coordinator.go @@ -109,10 +109,7 @@ func (c *Coordinator) HandleSuccess(ctx context.Context, item *database.ImportQu result.SymlinksCreated = true } - // 3. Create ID metadata links - c.HandleIDMetadataLinks(ctx, item, resultingPath) - - // 4. Create STRM files if configured + // 3. 
Create STRM files if configured if err := c.CreateStrmFiles(ctx, item, resultingPath); err != nil { c.log.WarnContext(ctx, "Failed to create STRM files", "queue_id", item.ID, @@ -123,7 +120,7 @@ func (c *Coordinator) HandleSuccess(ctx context.Context, item *database.ImportQu result.StrmCreated = true } - // 5. Schedule health check + // 4. Schedule health check if err := c.ScheduleHealthCheck(ctx, resultingPath); err != nil { c.log.WarnContext(ctx, "Failed to schedule health check", "path", resultingPath, @@ -133,7 +130,7 @@ func (c *Coordinator) HandleSuccess(ctx context.Context, item *database.ImportQu result.HealthScheduled = true } - // 6. Notify ARR applications + // 5. Notify ARR applications if shouldSkipARRNotification(item) { c.log.DebugContext(ctx, "ARR notification skipped (requested by caller)", "queue_id", item.ID, diff --git a/internal/importer/postprocessor/id_linker.go b/internal/importer/postprocessor/id_linker.go deleted file mode 100644 index fe9456d26..000000000 --- a/internal/importer/postprocessor/id_linker.go +++ /dev/null @@ -1,34 +0,0 @@ -package postprocessor - -import ( - "context" - "encoding/json" - - "github.com/javi11/altmount/internal/database" - metapb "github.com/javi11/altmount/internal/metadata/proto" -) - -// HandleIDMetadataLinks creates ID-based metadata links for nzbdav compatibility -func (c *Coordinator) HandleIDMetadataLinks(ctx context.Context, item *database.ImportQueueItem, resultingPath string) { - // 1. Check if the queue item itself has a release-level ID in its metadata - if item.Metadata != nil && *item.Metadata != "" { - var meta struct { - NzbdavID string `json:"nzbdav_id"` - } - if err := json.Unmarshal([]byte(*item.Metadata), &meta); err == nil && meta.NzbdavID != "" { - if err := c.metadataService.UpdateIDSymlink(meta.NzbdavID, resultingPath); err != nil { - c.log.Warn("Failed to create release ID metadata link", "id", meta.NzbdavID, "error", err) - } - } - } - - // 2. 
Check individual files for IDs using MetadataService walker - _ = c.metadataService.WalkDirectoryFiles(resultingPath, func(fileVirtualPath string, meta *metapb.FileMetadata) error { - if meta.NzbdavId != "" { - if err := c.metadataService.UpdateIDSymlink(meta.NzbdavId, fileVirtualPath); err != nil { - c.log.Warn("Failed to create file ID metadata link", "id", meta.NzbdavId, "error", err) - } - } - return nil - }) -} diff --git a/internal/importer/scanner/nzbdav.go b/internal/importer/scanner/nzbdav.go index 8b2005073..4b2a7030b 100644 --- a/internal/importer/scanner/nzbdav.go +++ b/internal/importer/scanner/nzbdav.go @@ -19,7 +19,6 @@ import ( // BatchQueueAdder defines the interface for batch queue operations type BatchQueueAdder interface { AddBatchToQueue(ctx context.Context, items []*database.ImportQueueItem) error - FilterExistingNzbdavIds(ctx context.Context, ids []string) ([]string, error) } // NzbDavImporter handles importing from NZBDav databases @@ -231,98 +230,16 @@ func (n *NzbDavImporter) processBatch(ctx context.Context, batchChan <-chan *dat insertBatch := func() { if len(batch) > 0 { - // 1. Extract IDs from batch to check for duplicates - idMap := make(map[string]*database.ImportQueueItem) - idsToCheck := make([]string, 0, len(batch)) - - type metaStruct struct { - NzbdavID string `json:"nzbdav_id"` - } - - for _, item := range batch { - if item.Metadata != nil { - var meta metaStruct - if err := json.Unmarshal([]byte(*item.Metadata), &meta); err == nil && meta.NzbdavID != "" { - idMap[meta.NzbdavID] = item - idsToCheck = append(idsToCheck, meta.NzbdavID) - } - } - } - - // 2. 
Check for existing IDs in DB - var existingIds []string - var err error - if len(idsToCheck) > 0 { - existingIds, err = n.batchAdder.FilterExistingNzbdavIds(ctx, idsToCheck) - if err != nil { - n.log.ErrorContext(ctx, "Failed to check for existing IDs", "error", err) - // On error, we proceed with all items - the DB unique constraint on nzb_path - // will catch duplicates, though less efficiently, or we might add duplicates - // if paths differ. Better to fail safe and try to add. - } - } - - // 3. Filter out duplicates - itemsToAdd := make([]*database.ImportQueueItem, 0, len(batch)) - duplicates := 0 - - if len(existingIds) > 0 { - existingMap := make(map[string]bool) - for _, id := range existingIds { - existingMap[id] = true - } - - for _, item := range batch { - isDuplicate := false - if item.Metadata != nil { - var meta metaStruct - if err := json.Unmarshal([]byte(*item.Metadata), &meta); err == nil && meta.NzbdavID != "" { - if existingMap[meta.NzbdavID] { - isDuplicate = true - } - } - } - - if isDuplicate { - duplicates++ - // Cleanup temp NZB file for duplicate - if err := os.Remove(item.NzbPath); err != nil { - n.log.DebugContext(ctx, "Failed to remove duplicate temp NZB", "path", item.NzbPath, "error", err) - } - // Also try to remove the parent temp dir if empty (it was created just for this file) - go func(path string) { - dir := filepath.Dir(path) - _ = os.Remove(dir) - }(item.NzbPath) - } else { - itemsToAdd = append(itemsToAdd, item) - } - } + if err := n.batchAdder.AddBatchToQueue(ctx, batch); err != nil { + n.log.ErrorContext(ctx, "Failed to add batch to queue", "count", len(batch), "error", err) + n.mu.Lock() + n.info.Failed += len(batch) + n.mu.Unlock() } else { - itemsToAdd = batch - } - - if duplicates > 0 { - n.log.InfoContext(ctx, "Skipped duplicate items", "count", duplicates) n.mu.Lock() - n.info.Skipped += duplicates + n.info.Added += len(batch) n.mu.Unlock() } - - // 4. 
Add unique items to queue - if len(itemsToAdd) > 0 { - if err := n.batchAdder.AddBatchToQueue(ctx, itemsToAdd); err != nil { - n.log.ErrorContext(ctx, "Failed to add batch to queue", "count", len(itemsToAdd), "error", err) - n.mu.Lock() - n.info.Failed += len(itemsToAdd) - n.mu.Unlock() - } else { - n.mu.Lock() - n.info.Added += len(itemsToAdd) - n.mu.Unlock() - } - } - batch = nil // Reset batch } } diff --git a/internal/importer/service.go b/internal/importer/service.go index a6847dc10..e4e88fb69 100644 --- a/internal/importer/service.go +++ b/internal/importer/service.go @@ -102,10 +102,6 @@ func (a *batchQueueAdapterForImporter) AddBatchToQueue(ctx context.Context, item return a.repo.AddBatchToQueue(ctx, items) } -func (a *batchQueueAdapterForImporter) FilterExistingNzbdavIds(ctx context.Context, ids []string) ([]string, error) { - return a.repo.FilterExistingNzbdavIds(ctx, ids) -} - // isFileAlreadyProcessed checks if a file has already been processed by checking metadata func isFileAlreadyProcessed(metadataService *metadata.MetadataService, filePath string, scanRoot string) bool { // Calculate virtual path diff --git a/internal/metadata/service.go b/internal/metadata/service.go index 474335544..4060d6de8 100644 --- a/internal/metadata/service.go +++ b/internal/metadata/service.go @@ -8,7 +8,6 @@ import ( "log/slog" "os" "path/filepath" - "runtime" "strings" "time" @@ -113,18 +112,6 @@ func (ms *MetadataService) WriteFileMetadata(virtualPath string, metadata *metap return fmt.Errorf("failed to rename metadata file: %w", err) } - // Handle ID sidecar file - idPath := metadataPath + ".id" - if nzbdavId != "" { - if err := os.WriteFile(idPath, []byte(nzbdavId), 0644); err != nil { - // Log error but don't fail the operation - slog.WarnContext(context.Background(), "Failed to write ID sidecar file", "path", idPath, "error", err) - } - } else { - // Clean up existing ID file if present - _ = os.Remove(idPath) - } - metadata.NzbdavId = nzbdavId // Restore for 
in-memory use // Update only the lightweight cache; the full proto (with SegmentData) is @@ -395,11 +382,6 @@ func (ms *MetadataService) DeleteFileMetadataWithSourceNzb(ctx context.Context, } } - // Remove .ids/ symlink before deletion - if idData, readErr := os.ReadFile(metadataPath + ".id"); readErr == nil && len(idData) > 0 { - _ = ms.RemoveIDSymlink(string(idData)) - } - // Delete the metadata file err := os.Remove(metadataPath) if err != nil && !os.IsNotExist(err) { @@ -501,56 +483,6 @@ func (ms *MetadataService) RenameFileMetadata(oldVirtualPath, newVirtualPath str return nil } -// UpdateIDSymlink creates or updates an ID-based symlink in the .ids/ sharded directory. -// On Windows, symlinks are not supported and this operation is skipped gracefully. -func (ms *MetadataService) UpdateIDSymlink(nzbdavID, virtualPath string) error { - if runtime.GOOS == "windows" { - return nil // Symlinks not supported on Windows; ID-based lookup via symlinks is skipped - } - - id := strings.ToLower(nzbdavID) - if len(id) < 5 { - return nil // Invalid ID for sharding - } - - shardPath := filepath.Join(".ids", string(id[0]), string(id[1]), string(id[2]), string(id[3]), string(id[4])) - fullShardDir := filepath.Join(ms.rootPath, shardPath) - - if err := os.MkdirAll(fullShardDir, 0755); err != nil { - return err - } - - targetMetaPath := ms.GetMetadataFilePath(virtualPath) - linkPath := filepath.Join(fullShardDir, id+".meta") - - // Remove existing symlink if present - os.Remove(linkPath) - - // Create relative symlink - relTarget, err := filepath.Rel(fullShardDir, targetMetaPath) - if err != nil { - return os.Symlink(targetMetaPath, linkPath) - } - - return os.Symlink(relTarget, linkPath) -} - -// RemoveIDSymlink removes an ID-based symlink from the .ids/ sharded directory. 
-func (ms *MetadataService) RemoveIDSymlink(nzbdavID string) error { - id := strings.ToLower(nzbdavID) - if len(id) < 5 { - return nil - } - - shardPath := filepath.Join(".ids", string(id[0]), string(id[1]), string(id[2]), string(id[3]), string(id[4])) - linkPath := filepath.Join(ms.rootPath, shardPath, id+".meta") - - if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - // WalkDirectoryFiles walks a metadata directory and calls fn for each file's virtual path and metadata. func (ms *MetadataService) WalkDirectoryFiles(virtualPath string, fn func(fileVirtualPath string, meta *metapb.FileMetadata) error) error { metadataDir := filepath.Join(ms.rootPath, virtualPath) @@ -753,11 +685,6 @@ func (ms *MetadataService) MoveToCorrupted(ctx context.Context, virtualPath stri targetPath := filepath.Join(targetDir, truncatedFilename+".meta") - // Remove .ids/ symlink before moving to corrupted - if idData, readErr := os.ReadFile(metadataPath + ".id"); readErr == nil && len(idData) > 0 { - _ = ms.RemoveIDSymlink(string(idData)) - } - // Move the .meta file if err := os.Rename(metadataPath, targetPath); err != nil { slog.WarnContext(ctx, "Failed to move corrupted metadata, trying copy fallback", "error", err) diff --git a/internal/nzbfilesystem/metadata_remote_file.go b/internal/nzbfilesystem/metadata_remote_file.go index 7be658955..c3cc508c0 100644 --- a/internal/nzbfilesystem/metadata_remote_file.go +++ b/internal/nzbfilesystem/metadata_remote_file.go @@ -384,16 +384,6 @@ func (mrf *MetadataRemoteFile) RenameFile(ctx context.Context, oldName, newName } } - // Update ID symlinks for all files with NzbdavId under the renamed directory - _ = mrf.metadataService.WalkDirectoryFiles(normalizedNew, func(fileVirtualPath string, meta *metapb.FileMetadata) error { - if meta.NzbdavId != "" { - if err := mrf.metadataService.UpdateIDSymlink(meta.NzbdavId, fileVirtualPath); err != nil { - slog.WarnContext(ctx, "Failed to update ID symlink 
after directory rename", "id", meta.NzbdavId, "path", fileVirtualPath, "error", err) - } - } - return nil - }) - return true, nil } @@ -404,24 +394,11 @@ func (mrf *MetadataRemoteFile) RenameFile(ctx context.Context, oldName, newName return false, nil } - // Read metadata first to get NzbdavId before rename - fileMeta, err := mrf.metadataService.ReadFileMetadata(normalizedOld) - if err != nil { - return false, fmt.Errorf("failed to read old metadata: %w", err) - } - // Use atomic rename instead of read-write-delete if err := mrf.metadataService.RenameFileMetadata(normalizedOld, normalizedNew); err != nil { return false, fmt.Errorf("failed to rename metadata: %w", err) } - // Update ID symlink if file has a NzbdavId - if fileMeta != nil && fileMeta.NzbdavId != "" { - if err := mrf.metadataService.UpdateIDSymlink(fileMeta.NzbdavId, normalizedNew); err != nil { - slog.WarnContext(ctx, "Failed to update ID symlink during MOVE", "id", fileMeta.NzbdavId, "error", err) - } - } - // Update health records and resolve pending repairs if mrf.healthRepository != nil { if err := mrf.healthRepository.RenameHealthRecord(ctx, normalizedOld, normalizedNew); err != nil { From dff49f564a7460c4693091b697a14072f5f299db Mon Sep 17 00:00:00 2001 From: javi11 Date: Mon, 20 Apr 2026 10:39:07 +0200 Subject: [PATCH 3/6] feat(scanner): use import_migrations for dedup, set SkipArrNotification - Add MigrationRecorder interface to scanner package with UpsertMigration and IsMigrationCompleted methods - Update NzbDavImporter to accept MigrationRecorder as second constructor parameter alongside BatchQueueAdder - processBatch now checks IsMigrationCompleted before enqueueing (skip already-imported/symlinks_migrated items) and calls UpsertMigration for new items, then strips nzbdav_id from the queue item metadata leaving only extracted_files if present - createNzbFileAndPrepareItem sets SkipArrNotification=true on all items - batchQueueAdapterForImporter gains migrationRepo field and implements 
MigrationRecorder via ImportMigrationRepository.Upsert/LookupByExternalID --- internal/importer/scanner/nzbdav.go | 109 ++++++++++++++++++++++++---- internal/importer/service.go | 31 +++++++- 2 files changed, 121 insertions(+), 19 deletions(-) diff --git a/internal/importer/scanner/nzbdav.go b/internal/importer/scanner/nzbdav.go index 4b2a7030b..e43e8aa82 100644 --- a/internal/importer/scanner/nzbdav.go +++ b/internal/importer/scanner/nzbdav.go @@ -21,10 +21,17 @@ type BatchQueueAdder interface { AddBatchToQueue(ctx context.Context, items []*database.ImportQueueItem) error } +// MigrationRecorder defines the interface for recording import migrations +type MigrationRecorder interface { + UpsertMigration(ctx context.Context, source, externalID, relativePath string) (int64, error) + IsMigrationCompleted(ctx context.Context, source, externalID string) (bool, error) +} + // NzbDavImporter handles importing from NZBDav databases type NzbDavImporter struct { - batchAdder BatchQueueAdder - log *slog.Logger + batchAdder BatchQueueAdder + migrationRecorder MigrationRecorder + log *slog.Logger // State management mu sync.RWMutex @@ -33,11 +40,12 @@ type NzbDavImporter struct { } // NewNzbDavImporter creates a new NZBDav importer -func NewNzbDavImporter(batchAdder BatchQueueAdder) *NzbDavImporter { +func NewNzbDavImporter(batchAdder BatchQueueAdder, migrationRecorder MigrationRecorder) *NzbDavImporter { return &NzbDavImporter{ - batchAdder: batchAdder, - log: slog.Default().With("component", "nzbdav-importer"), - info: ImportInfo{Status: ImportStatusIdle}, + batchAdder: batchAdder, + migrationRecorder: migrationRecorder, + log: slog.Default().With("component", "nzbdav-importer"), + info: ImportInfo{Status: ImportStatusIdle}, } } @@ -222,7 +230,9 @@ func (n *NzbDavImporter) performImport(ctx context.Context, dbPath string, blobs } } -// processBatch batches queue items and adds them to the queue +// processBatch batches queue items and adds them to the queue. 
+// It uses migrationRecorder to deduplicate already-completed items and to +// record new items before enqueueing them. func (n *NzbDavImporter) processBatch(ctx context.Context, batchChan <-chan *database.ImportQueueItem) { var batch []*database.ImportQueueItem ticker := time.NewTicker(500 * time.Millisecond) @@ -244,6 +254,43 @@ func (n *NzbDavImporter) processBatch(ctx context.Context, batchChan <-chan *dat } } + // extractNzbdavID reads nzbdav_id from the item metadata JSON. + extractNzbdavID := func(item *database.ImportQueueItem) string { + if item.Metadata == nil { + return "" + } + var meta struct { + NzbdavID string `json:"nzbdav_id"` + } + if err := json.Unmarshal([]byte(*item.Metadata), &meta); err != nil { + return "" + } + return meta.NzbdavID + } + + // stripNzbdavIDFromMetadata rewrites the metadata JSON removing the nzbdav_id key, + // retaining only other keys (e.g. extracted_files). + stripNzbdavIDFromMetadata := func(item *database.ImportQueueItem) { + if item.Metadata == nil { + return + } + var metaMap map[string]any + if err := json.Unmarshal([]byte(*item.Metadata), &metaMap); err != nil { + return + } + delete(metaMap, "nzbdav_id") + if len(metaMap) == 0 { + item.Metadata = nil + return + } + b, err := json.Marshal(metaMap) + if err != nil { + return + } + s := string(b) + item.Metadata = &s + } + for { select { case item, ok := <-batchChan: @@ -252,6 +299,35 @@ func (n *NzbDavImporter) processBatch(ctx context.Context, batchChan <-chan *dat insertBatch() return } + + nzbdavID := extractNzbdavID(item) + + // Dedup: skip items already successfully imported. + if nzbdavID != "" { + completed, err := n.migrationRecorder.IsMigrationCompleted(ctx, "nzbdav", nzbdavID) + if err != nil { + n.log.ErrorContext(ctx, "Failed to check migration status", "nzbdav_id", nzbdavID, "error", err) + } else if completed { + n.mu.Lock() + n.info.Skipped++ + n.mu.Unlock() + continue + } + + // Record migration row before enqueueing. 
+ relativePath := "" + if item.Category != nil { + relativePath = *item.Category + } + if _, err := n.migrationRecorder.UpsertMigration(ctx, "nzbdav", nzbdavID, relativePath); err != nil { + n.log.ErrorContext(ctx, "Failed to upsert migration", "nzbdav_id", nzbdavID, "error", err) + } + } + + // Strip nzbdav_id from the queue item metadata — it lives in + // import_migrations now. Keep extracted_files if present. + stripNzbdavIDFromMetadata(item) + batch = append(batch, item) if len(batch) >= 100 { // Batch size insertBatch() @@ -324,15 +400,18 @@ func (n *NzbDavImporter) createNzbFileAndPrepareItem(ctx context.Context, res *n // Prepare item struct. RelativePath is left nil so the import mirrors the // nzbdav folder structure under Category without an extra user-supplied prefix. + // SkipArrNotification is true because nzbdav imports are migration jobs — ARR + // scans should not be triggered for each individual item. item := &database.ImportQueueItem{ - NzbPath: nzbPath, - Category: &targetCategory, - Priority: priority, - Status: database.QueueStatusPending, - RetryCount: 0, - MaxRetries: 3, - CreatedAt: time.Now(), - Metadata: &metaJSON, + NzbPath: nzbPath, + Category: &targetCategory, + Priority: priority, + Status: database.QueueStatusPending, + RetryCount: 0, + MaxRetries: 3, + CreatedAt: time.Now(), + Metadata: &metaJSON, + SkipArrNotification: true, } return item, nil diff --git a/internal/importer/service.go b/internal/importer/service.go index e4e88fb69..9c218b6bc 100644 --- a/internal/importer/service.go +++ b/internal/importer/service.go @@ -93,15 +93,37 @@ func (a *queueAdapterForScanner) IsFileProcessed(filePath string, scanRoot strin return isFileAlreadyProcessed(a.metadataService, filePath, scanRoot) } -// batchQueueAdapterForImporter adapts database repository for scanner.BatchQueueAdder interface +// batchQueueAdapterForImporter adapts database repository for scanner.BatchQueueAdder and +// scanner.MigrationRecorder interfaces. 
type batchQueueAdapterForImporter struct { - repo *database.QueueRepository + repo *database.QueueRepository + migrationRepo *database.ImportMigrationRepository } func (a *batchQueueAdapterForImporter) AddBatchToQueue(ctx context.Context, items []*database.ImportQueueItem) error { return a.repo.AddBatchToQueue(ctx, items) } +func (a *batchQueueAdapterForImporter) UpsertMigration(ctx context.Context, source, externalID, relativePath string) (int64, error) { + return a.migrationRepo.Upsert(ctx, &database.ImportMigration{ + Source: source, + ExternalID: externalID, + RelativePath: relativePath, + Status: database.ImportMigrationStatusPending, + }) +} + +func (a *batchQueueAdapterForImporter) IsMigrationCompleted(ctx context.Context, source, externalID string) (bool, error) { + row, err := a.migrationRepo.LookupByExternalID(ctx, source, externalID) + if err != nil { + return false, err + } + if row == nil { + return false, nil + } + return row.Status == database.ImportMigrationStatusImported || row.Status == database.ImportMigrationStatusSymlinksMigrated, nil +} + // isFileAlreadyProcessed checks if a file has already been processed by checking metadata func isFileAlreadyProcessed(metadataService *metadata.MetadataService, filePath string, scanRoot string) bool { // Calculate virtual path @@ -233,9 +255,10 @@ func NewService(config ServiceConfig, metadataService *metadata.MetadataService, // Create adapter for NZBDav imports importerAdapter := &batchQueueAdapterForImporter{ - repo: database.Repository, + repo: database.Repository, + migrationRepo: database.MigrationRepo, } - service.nzbdavImporter = scanner.NewNzbDavImporter(importerAdapter) + service.nzbdavImporter = scanner.NewNzbDavImporter(importerAdapter, importerAdapter) // Create directory watcher (Service implements WatchQueueAdder) service.watcher = scanner.NewWatcher(service, configGetter) From aa32a1eba364ba8905ff00a57a127d7630e71a57 Mon Sep 17 00:00:00 2001 From: javi11 Date: Mon, 20 Apr 2026 10:40:55 +0200 
Subject: [PATCH 4/6] feat(importer): update import_migrations on queue item completion/failure Wire s.database.MigrationRepo into handleProcessingSuccess and handleProcessingFailure so that import_migrations rows are marked imported/failed when the corresponding queue item finishes. Both calls are non-fatal: failures are logged as warnings and do not affect the main processing outcome. If no matching migration row exists (non-nzbdav import), the UPDATE simply affects 0 rows. --- internal/importer/service.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/internal/importer/service.go b/internal/importer/service.go index 9c218b6bc..00c1e13a3 100644 --- a/internal/importer/service.go +++ b/internal/importer/service.go @@ -1003,6 +1003,15 @@ func (s *Service) handleProcessingSuccess(ctx context.Context, item *database.Im return err } + // Update import_migrations row if this was a nzbdav migration import + if s.database.MigrationRepo != nil { + if err := s.database.MigrationRepo.MarkImported(ctx, item.ID, resultingPath); err != nil { + // Non-fatal: log but don't fail + s.log.WarnContext(ctx, "Failed to mark import_migration as imported", + "queue_id", item.ID, "error", err) + } + } + // Notify completion and clear progress tracking if s.broadcaster != nil { s.broadcaster.NotifyComplete(int(item.ID), "completed") @@ -1088,6 +1097,14 @@ func (s *Service) handleProcessingFailure(ctx context.Context, item *database.Im "file", item.NzbPath) } + // Update import_migrations row if this was a nzbdav migration import + if s.database.MigrationRepo != nil { + if err := s.database.MigrationRepo.MarkFailed(ctx, item.ID, errorMessage); err != nil { + s.log.WarnContext(ctx, "Failed to mark import_migration as failed", + "queue_id", item.ID, "error", err) + } + } + // Notify failure and clear progress tracking if s.broadcaster != nil { s.broadcaster.NotifyComplete(int(item.ID), "failed") From 8aecb554fec8ba64b380b48a14164224035990f1 Mon Sep 17 00:00:00 2001 From: 
javi11 Date: Mon, 20 Apr 2026 10:46:23 +0200 Subject: [PATCH 5/6] feat(api): add migrate-symlinks endpoint and Phase 2 symlink rewriter Adds RewriteLibrarySymlinks in internal/importer/migration to walk a library directory and atomically rewrite arr symlinks that point at /.ids/ to the final altmount path. Introduces DBSymlinkLookup adapter in internal/database, a new POST /import/nzbdav/migrate-symlinks handler, and enriches the existing status endpoint with migration_stats when available. --- cmd/altmount/cmd/serve.go | 1 + internal/api/nzbdav_handlers.go | 122 +++++++++- internal/api/server.go | 7 + internal/database/symlink_lookup.go | 34 +++ internal/importer/migration/symlinks.go | 119 ++++++++++ internal/importer/migration/symlinks_test.go | 228 +++++++++++++++++++ 6 files changed, 510 insertions(+), 1 deletion(-) create mode 100644 internal/database/symlink_lookup.go create mode 100644 internal/importer/migration/symlinks.go create mode 100644 internal/importer/migration/symlinks_test.go diff --git a/cmd/altmount/cmd/serve.go b/cmd/altmount/cmd/serve.go index 64177fc1d..441dbef2f 100644 --- a/cmd/altmount/cmd/serve.go +++ b/cmd/altmount/cmd/serve.go @@ -150,6 +150,7 @@ func runServe(cmd *cobra.Command, args []string) error { apiServer := setupAPIServer(app, repos, authService, configManager, metadataReader, metadataService, fs, poolManager, importerService, arrsService, mountService, progressBroadcaster, streamTracker, cacheSource) apiServer.SetLogFilePath(slogutil.GetLogFilePath(cfg.Log)) + apiServer.SetMigrationRepo(db.MigrationRepo) webdavHandler, err := setupWebDAV(cfg, fs, authService, repos.UserRepo, configManager, streamTracker) if err != nil { diff --git a/internal/api/nzbdav_handlers.go b/internal/api/nzbdav_handlers.go index 9550a0204..9c1fdf596 100644 --- a/internal/api/nzbdav_handlers.go +++ b/internal/api/nzbdav_handlers.go @@ -7,7 +7,9 @@ import ( "time" "github.com/gofiber/fiber/v2" + "github.com/javi11/altmount/internal/database" 
"github.com/javi11/altmount/internal/importer" + "github.com/javi11/altmount/internal/importer/migration" ) // handleImportNzbdav handles POST /import/nzbdav @@ -114,9 +116,127 @@ func (s *Server) handleGetNzbdavImportStatus(c *fiber.Ctx) error { } status := s.importerService.GetImportStatus() + resp := toImportStatusResponse(status) + + // Attach migration stats when available; omit on error to preserve existing behaviour. + if s.migrationRepo != nil { + if stats, err := s.migrationRepo.Stats(c.Context(), "nzbdav"); err == nil { + resp["migration_stats"] = map[string]any{ + "pending": stats.Pending, + "imported": stats.Imported, + "failed": stats.Failed, + "symlinks_migrated": stats.SymlinksMigrated, + "total": stats.Total, + } + } + } + + return c.Status(200).JSON(fiber.Map{ + "success": true, + "data": resp, + }) +} + +// handleMigrateNzbdavSymlinks handles POST /import/nzbdav/migrate-symlinks +// +// @Summary Migrate NZBDav library symlinks +// @Description Walks a library directory and rewrites symlinks that target the nzbdav mount to point at the altmount path instead. 
+// @Tags Import +// @Accept json +// @Produce json +// @Param body body object{} true "library_path, source_mount_path, dry_run" +// @Success 200 {object} APIResponse +// @Failure 400 {object} APIResponse +// @Failure 500 {object} APIResponse +// @Security BearerAuth +// @Security ApiKeyAuth +// @Router /import/nzbdav/migrate-symlinks [post] +func (s *Server) handleMigrateNzbdavSymlinks(c *fiber.Ctx) error { + var req struct { + LibraryPath string `json:"library_path"` + SourceMountPath string `json:"source_mount_path"` + DryRun bool `json:"dry_run"` + } + if err := c.BodyParser(&req); err != nil { + return c.Status(400).JSON(fiber.Map{ + "success": false, + "message": "Invalid request body", + "details": err.Error(), + }) + } + + if req.LibraryPath == "" || !filepath.IsAbs(req.LibraryPath) { + return c.Status(400).JSON(fiber.Map{ + "success": false, + "message": "library_path must be a non-empty absolute path", + }) + } + if req.SourceMountPath == "" || !filepath.IsAbs(req.SourceMountPath) { + return c.Status(400).JSON(fiber.Map{ + "success": false, + "message": "source_mount_path must be a non-empty absolute path", + }) + } + + cfg := s.configManager.GetConfig() + if cfg == nil { + return c.Status(500).JSON(fiber.Map{ + "success": false, + "message": "Configuration not available", + }) + } + + // Determine which migration repo to use — prefer the dedicated field, fall back + // to nil-safe check so existing deployments without a migration repo still fail + // gracefully rather than panic. + migRepo := s.migrationRepo + if migRepo == nil { + return c.Status(500).JSON(fiber.Map{ + "success": false, + "message": "Migration repository not available", + }) + } + + ctx := c.Context() + + // Backfill idempotently before walking. 
+ if _, err := migRepo.BackfillFromImportQueue(ctx); err != nil { + return c.Status(500).JSON(fiber.Map{ + "success": false, + "message": "Failed to backfill migration data", + "details": err.Error(), + }) + } + + lookup := database.NewDBSymlinkLookup(migRepo) + + report, err := migration.RewriteLibrarySymlinks( + ctx, + req.LibraryPath, + req.SourceMountPath, + cfg.MountPath, + "nzbdav", + lookup, + req.DryRun, + ) + if err != nil { + return c.Status(500).JSON(fiber.Map{ + "success": false, + "message": "Symlink migration failed", + "details": err.Error(), + }) + } + return c.Status(200).JSON(fiber.Map{ "success": true, - "data": toImportStatusResponse(status), + "data": fiber.Map{ + "scanned": report.Scanned, + "matched": report.Matched, + "rewritten": report.Rewritten, + "unmatched": report.Unmatched, + "errors": report.Errors, + "dry_run": req.DryRun, + }, }) } diff --git a/internal/api/server.go b/internal/api/server.go index ae7494fa3..df011dd4b 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -63,6 +63,7 @@ type Server struct { fuseManager *FuseManager cacheSource *segcache.Source logFilePath string + migrationRepo *database.ImportMigrationRepository ready atomic.Bool } @@ -128,6 +129,11 @@ func (s *Server) SetLogFilePath(path string) { s.logFilePath = path } +// SetMigrationRepo sets the migration repository used by the migrate-symlinks endpoint. 
+func (s *Server) SetMigrationRepo(repo *database.ImportMigrationRepository) { + s.migrationRepo = repo +} + // SetReady sets the server as ready to accept requests func (s *Server) SetReady(ready bool) { s.ready.Store(ready) @@ -222,6 +228,7 @@ func (s *Server) SetupRoutes(app *fiber.App) { api.Post("/import/nzbdav/reset", s.handleResetNzbdavImportStatus) api.Get("/import/nzbdav/status", s.handleGetNzbdavImportStatus) api.Delete("/import/nzbdav", s.handleCancelNzbdavImport) + api.Post("/import/nzbdav/migrate-symlinks", s.handleMigrateNzbdavSymlinks) // Queue endpoints api.Get("/queue", s.handleListQueue) diff --git a/internal/database/symlink_lookup.go b/internal/database/symlink_lookup.go new file mode 100644 index 000000000..1eabe39f0 --- /dev/null +++ b/internal/database/symlink_lookup.go @@ -0,0 +1,34 @@ +package database + +import ( + "context" + "fmt" +) + +// DBSymlinkLookup adapts ImportMigrationRepository to migration.SymlinkLookup. +type DBSymlinkLookup struct { + repo *ImportMigrationRepository +} + +// NewDBSymlinkLookup creates a new DBSymlinkLookup wrapping the given repository. +func NewDBSymlinkLookup(repo *ImportMigrationRepository) *DBSymlinkLookup { + return &DBSymlinkLookup{repo: repo} +} + +// LookupFinalPath returns the final AltMount path for the given source and externalID. +// Returns ("", false, nil) when no matching row exists or the row has no final_path. +func (l *DBSymlinkLookup) LookupFinalPath(ctx context.Context, source, externalID string) (string, bool, error) { + row, err := l.repo.LookupByExternalID(ctx, source, externalID) + if err != nil { + return "", false, fmt.Errorf("lookup final path (source=%s, id=%s): %w", source, externalID, err) + } + if row == nil || row.FinalPath == nil { + return "", false, nil + } + return *row.FinalPath, true, nil +} + +// MarkSymlinksMigrated sets status=symlinks_migrated for the given row IDs. 
+func (l *DBSymlinkLookup) MarkSymlinksMigrated(ctx context.Context, ids []int64) error { + return l.repo.MarkSymlinksMigrated(ctx, ids) +} diff --git a/internal/importer/migration/symlinks.go b/internal/importer/migration/symlinks.go new file mode 100644 index 000000000..0e229fdc9 --- /dev/null +++ b/internal/importer/migration/symlinks.go @@ -0,0 +1,119 @@ +package migration + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" +) + +// SymlinkLookup looks up the final AltMount path for a given source and external ID. +type SymlinkLookup interface { + LookupFinalPath(ctx context.Context, source, externalID string) (finalPath string, found bool, err error) + MarkSymlinksMigrated(ctx context.Context, ids []int64) error +} + +// RewriteReport summarizes results of a symlink rewrite operation. +type RewriteReport struct { + Scanned int + Matched int + Rewritten int + Unmatched []string // symlink paths that had no matching migration row + Errors []string // errors encountered (non-fatal) +} + +// RewriteLibrarySymlinks walks libraryPath, finds symlinks whose target starts +// with sourceMountPath+"/.ids/", looks up the GUID in the lookup, and rewrites +// the symlink target to filepath.Join(altmountPath, finalPath). +// +// If dryRun is true, no filesystem changes are made but the report is populated. +func RewriteLibrarySymlinks( + ctx context.Context, + libraryPath string, + sourceMountPath string, + altmountPath string, + source string, + lookup SymlinkLookup, + dryRun bool, +) (*RewriteReport, error) { + report := &RewriteReport{} + + // Normalise source mount prefix used for matching. + prefix := filepath.Clean(sourceMountPath) + "/.ids/" + + err := filepath.WalkDir(libraryPath, func(path string, d fs.DirEntry, walkErr error) error { + // Propagate hard walk errors. 
+ if walkErr != nil { + report.Errors = append(report.Errors, fmt.Sprintf("walk error at %s: %v", path, walkErr)) + return nil + } + + // Check context cancellation at every entry. + if err := ctx.Err(); err != nil { + return err + } + + // Only process symlinks. + if d.Type()&fs.ModeSymlink == 0 { + return nil + } + + report.Scanned++ + + target, err := os.Readlink(path) + if err != nil { + report.Errors = append(report.Errors, fmt.Sprintf("readlink %s: %v", path, err)) + return nil + } + + // Must target our source mount's .ids directory. + if !strings.HasPrefix(target, prefix) { + return nil + } + + // Extract GUID: last path component of the target. + guid := filepath.Base(target) + + finalPath, found, err := lookup.LookupFinalPath(ctx, source, guid) + if err != nil { + report.Errors = append(report.Errors, fmt.Sprintf("lookup %s (guid=%s): %v", path, guid, err)) + return nil + } + if !found { + report.Unmatched = append(report.Unmatched, path) + return nil + } + + report.Matched++ + + if dryRun { + return nil + } + + // Build the new target path. + newTarget := filepath.Join(altmountPath, strings.TrimPrefix(finalPath, "/")) + + // Atomic rewrite: write to a temp name then rename. + tmpPath := path + ".new" + if err := os.Symlink(newTarget, tmpPath); err != nil { + report.Errors = append(report.Errors, fmt.Sprintf("create temp symlink %s -> %s: %v", tmpPath, newTarget, err)) + return nil + } + if err := os.Rename(tmpPath, path); err != nil { + // Clean up the temp file on rename failure. 
+ _ = os.Remove(tmpPath) + report.Errors = append(report.Errors, fmt.Sprintf("rename %s -> %s: %v", tmpPath, path, err)) + return nil + } + + report.Rewritten++ + return nil + }) + if err != nil { + return report, fmt.Errorf("walk library path %s: %w", libraryPath, err) + } + + return report, nil +} diff --git a/internal/importer/migration/symlinks_test.go b/internal/importer/migration/symlinks_test.go new file mode 100644 index 000000000..eb1d7882f --- /dev/null +++ b/internal/importer/migration/symlinks_test.go @@ -0,0 +1,228 @@ +package migration_test + +import ( + "context" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/javi11/altmount/internal/importer/migration" +) + +// mockLookup implements SymlinkLookup for testing. +type mockLookup struct { + paths map[string]string // guid → finalPath; absent means not found + err error // if non-nil, always return this error +} + +func (m *mockLookup) LookupFinalPath(_ context.Context, _, externalID string) (string, bool, error) { + if m.err != nil { + return "", false, m.err + } + fp, ok := m.paths[externalID] + return fp, ok, nil +} + +func (m *mockLookup) MarkSymlinksMigrated(_ context.Context, _ []int64) error { + return nil +} + +func TestRewriteLibrarySymlinks(t *testing.T) { + t.Parallel() + + const ( + sourceMountPath = "/mnt/nzbdav" + altmountPath = "/mnt/altmount" + source = "nzbdav" + ) + + tests := []struct { + name string + setup func(t *testing.T, dir string) // create symlinks inside dir + lookup *mockLookup + dryRun bool + wantScanned int + wantMatched int + wantRewritten int + wantUnmatched int + wantErrors int + // optional post-check on filesystem state + postCheck func(t *testing.T, dir string) + }{ + { + name: "match and rewrite", + setup: func(t *testing.T, dir string) { + t.Helper() + // Create a symlink pointing to sourceMountPath/.ids/abc123 + target := sourceMountPath + "/.ids/abc123" + link := filepath.Join(dir, "movie.mkv") + if err := os.Symlink(target, link); err != nil { + 
t.Fatalf("setup: %v", err) + } + }, + lookup: &mockLookup{ + paths: map[string]string{ + "abc123": "/movies/Movie (2020)/Movie (2020).mkv", + }, + }, + dryRun: false, + wantScanned: 1, + wantMatched: 1, + wantRewritten: 1, + postCheck: func(t *testing.T, dir string) { + t.Helper() + link := filepath.Join(dir, "movie.mkv") + got, err := os.Readlink(link) + if err != nil { + t.Fatalf("readlink after rewrite: %v", err) + } + want := filepath.Join(altmountPath, "movies/Movie (2020)/Movie (2020).mkv") + if got != want { + t.Errorf("symlink target: got %q, want %q", got, want) + } + }, + }, + { + name: "unmatched guid", + setup: func(t *testing.T, dir string) { + t.Helper() + target := sourceMountPath + "/.ids/unknown-guid" + if err := os.Symlink(target, filepath.Join(dir, "unknown.mkv")); err != nil { + t.Fatalf("setup: %v", err) + } + }, + lookup: &mockLookup{paths: map[string]string{}}, + wantScanned: 1, + wantMatched: 0, + wantRewritten: 0, + wantUnmatched: 1, + }, + { + name: "non-nzbdav symlink skipped", + setup: func(t *testing.T, dir string) { + t.Helper() + // Target doesn't contain sourceMountPath/.ids/ + if err := os.Symlink("/some/other/path/file.mkv", filepath.Join(dir, "other.mkv")); err != nil { + t.Fatalf("setup: %v", err) + } + }, + lookup: &mockLookup{paths: map[string]string{}}, + wantScanned: 1, + wantMatched: 0, + wantRewritten: 0, + wantUnmatched: 0, + }, + { + name: "dry run - no filesystem change", + setup: func(t *testing.T, dir string) { + t.Helper() + target := sourceMountPath + "/.ids/dryrun-guid" + link := filepath.Join(dir, "dry.mkv") + if err := os.Symlink(target, link); err != nil { + t.Fatalf("setup: %v", err) + } + }, + lookup: &mockLookup{ + paths: map[string]string{ + "dryrun-guid": "/movies/Dry/Dry.mkv", + }, + }, + dryRun: true, + wantScanned: 1, + wantMatched: 1, + wantRewritten: 0, + postCheck: func(t *testing.T, dir string) { + t.Helper() + link := filepath.Join(dir, "dry.mkv") + got, err := os.Readlink(link) + if err != nil { + 
t.Fatalf("readlink after dry run: %v", err) + } + // Target must remain unchanged. + want := sourceMountPath + "/.ids/dryrun-guid" + if got != want { + t.Errorf("dry run: symlink target changed: got %q, want %q", got, want) + } + }, + }, + { + name: "context cancellation stops walk", + setup: func(t *testing.T, dir string) { + t.Helper() + // Create several symlinks so the walk has entries to process. + for i := 0; i < 5; i++ { + target := sourceMountPath + "/.ids/guid-cancel" + link := filepath.Join(dir, "file") + // Only create the first one to keep setup simple. + if i == 0 { + if err := os.Symlink(target, link); err != nil { + t.Fatalf("setup: %v", err) + } + } + } + }, + lookup: &mockLookup{paths: map[string]string{"guid-cancel": "/movies/x.mkv"}}, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + tc.setup(t, dir) + + ctx := context.Background() + + if tc.name == "context cancellation stops walk" { + // Cancel context immediately to verify walk stops. + cancelled, cancel := context.WithCancel(ctx) + cancel() + ctx = cancelled + } + + report, err := migration.RewriteLibrarySymlinks( + ctx, + dir, + sourceMountPath, + altmountPath, + source, + tc.lookup, + tc.dryRun, + ) + + if tc.name == "context cancellation stops walk" { + // We just verify it returns a context error and doesn't panic. 
+ if err == nil || !errors.Is(err, context.Canceled) { + t.Errorf("expected context.Canceled error, got: %v", err) + } + return + } + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if report.Scanned != tc.wantScanned { + t.Errorf("Scanned: got %d, want %d", report.Scanned, tc.wantScanned) + } + if report.Matched != tc.wantMatched { + t.Errorf("Matched: got %d, want %d", report.Matched, tc.wantMatched) + } + if report.Rewritten != tc.wantRewritten { + t.Errorf("Rewritten: got %d, want %d", report.Rewritten, tc.wantRewritten) + } + if len(report.Unmatched) != tc.wantUnmatched { + t.Errorf("Unmatched: got %d, want %d (entries: %v)", len(report.Unmatched), tc.wantUnmatched, report.Unmatched) + } + if len(report.Errors) != tc.wantErrors { + t.Errorf("Errors: got %d, want %d (entries: %v)", len(report.Errors), tc.wantErrors, report.Errors) + } + + if tc.postCheck != nil { + tc.postCheck(t, dir) + } + }) + } +} From 0b2a63adab089966e801a450354a3cc8bbecef75 Mon Sep 17 00:00:00 2001 From: javi11 Date: Wed, 22 Apr 2026 16:16:50 +0200 Subject: [PATCH 6/6] feat(updater): auto-update for non-Docker standalone installs Add binary self-update path alongside the existing Docker update flow and publish a rolling `dev` prerelease of CLI binaries so non-Docker users can track the dev channel. - internal/updater: fetch GitHub release, verify SHA-512, extract from tar.gz/zip, swap binary via minio/selfupdate. Skip when running inside a container (/.dockerenv or KUBERNETES_SERVICE_HOST). - internal/api: route POST /system/update/apply to docker or binary path based on environment; expose binary_update_available in status. - frontend: enable the Apply button whenever either path is available and tailor the copy to the active mode. - .github/workflows/build-cli.yml: reusable workflow_call that builds the full OS/arch matrix and (optionally) attaches assets to a release. 
- release.yml + dev-image.yml: consume the shared workflow; dev-image publishes to a rolling `dev` prerelease. Closes #497 --- .github/workflows/build-cli.yml | 222 ++++++++++ .github/workflows/dev-image.yml | 61 +++ .github/workflows/release.yml | 242 ++-------- .../src/components/config/UpdateSection.tsx | 35 +- frontend/src/types/update.ts | 1 + go.mod | 2 + go.sum | 9 + internal/api/server.go | 9 + internal/api/types.go | 15 +- internal/api/update_handlers.go | 115 +++-- internal/api/update_handlers_test.go | 178 +++++++- internal/updater/binary.go | 416 ++++++++++++++++++ internal/updater/binary_test.go | 318 +++++++++++++ 13 files changed, 1343 insertions(+), 280 deletions(-) create mode 100644 .github/workflows/build-cli.yml create mode 100644 internal/updater/binary.go create mode 100644 internal/updater/binary_test.go diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml new file mode 100644 index 000000000..d7197cb13 --- /dev/null +++ b/.github/workflows/build-cli.yml @@ -0,0 +1,222 @@ +name: Build CLI Binaries + +on: + workflow_call: + inputs: + version: + description: 'Version string (without leading v), e.g. 
0.3.0 or dev-' + type: string + required: true + commit: + description: 'Short commit SHA' + type: string + required: true + upload-to-release: + description: 'Whether to attach archives to a GitHub release' + type: boolean + required: false + default: false + release-tag: + description: 'Release tag to attach archives to (when upload-to-release is true)' + type: string + required: false + default: '' + prerelease: + description: 'Mark the release as a prerelease' + type: boolean + required: false + default: false + +jobs: + build-cli: + runs-on: ${{ matrix.runner }} + permissions: + contents: write + strategy: + matrix: + include: + # Linux builds + - goos: linux + goarch: amd64 + runner: ubuntu-latest + cc: zig cc -target x86_64-linux-musl + - goos: linux + goarch: arm64 + runner: ubuntu-latest + cc: zig cc -target aarch64-linux-musl + # macOS builds + - goos: darwin + goarch: amd64 + runner: macos-latest + cc: '' + - goos: darwin + goarch: arm64 + runner: macos-latest + cc: '' + # Windows builds + - goos: windows + goarch: amd64 + runner: ubuntu-latest + cc: zig cc -target x86_64-windows-gnu + steps: + - uses: mlugg/setup-zig@v2 + if: matrix.cc != '' + + - uses: actions/setup-go@v6 + with: + go-version: 1.26.0 + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download frontend build + uses: actions/download-artifact@v8 + with: + name: frontend-build + path: frontend/dist/ + + - name: Install macFUSE + if: matrix.goos == 'darwin' + run: brew install --cask macfuse + + - name: Compute timestamp + id: ts + run: echo "timestamp=$(date -u '+%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT + + - name: Build ${{ matrix.goos }}-${{ matrix.goarch }} binary + env: + GOOS: ${{ matrix.goos }} + GOARCH: ${{ matrix.goarch }} + CGO_ENABLED: 1 + CC: ${{ matrix.cc }} + run: | + EXT="" + if [ "${{ matrix.goos }}" = "windows" ]; then + EXT=".exe" + fi + + go build \ + -trimpath \ + -tags=cli \ + -ldflags="-s -w -X 
'github.com/javi11/altmount/internal/version.Version=${{ inputs.version }}' -X 'github.com/javi11/altmount/internal/version.GitCommit=${{ inputs.commit }}' -X 'github.com/javi11/altmount/internal/version.Timestamp=${{ steps.ts.outputs.timestamp }}'" \ + -o "altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" \ + ./cmd/altmount/main.go + + - name: Create archive + run: | + EXT="" + if [ "${{ matrix.goos }}" = "windows" ]; then + EXT=".exe" + fi + + BINARY_NAME="altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" + ARCHIVE_NAME="altmount-cli_v${{ inputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}" + + if [ "${{ matrix.goos }}" = "windows" ]; then + zip "${ARCHIVE_NAME}.zip" "$BINARY_NAME" + else + tar -czf "${ARCHIVE_NAME}.tar.gz" "$BINARY_NAME" + fi + + - name: Upload ${{ matrix.goos }}-${{ matrix.goarch }} artifacts + uses: actions/upload-artifact@v6 + with: + name: cli-${{ matrix.goos }}-${{ matrix.goarch }} + path: | + altmount-cli_v${{ inputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}.* + retention-days: 1 + + create-universal-darwin: + runs-on: macos-latest + needs: build-cli + permissions: + contents: write + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download darwin-amd64 binary + uses: actions/download-artifact@v8 + with: + name: cli-darwin-amd64 + path: ./artifacts/ + + - name: Download darwin-arm64 binary + uses: actions/download-artifact@v8 + with: + name: cli-darwin-arm64 + path: ./artifacts/ + + - name: Extract binaries + run: | + cd artifacts + tar -xzf altmount-cli_v${{ inputs.version }}_darwin_amd64.tar.gz + tar -xzf altmount-cli_v${{ inputs.version }}_darwin_arm64.tar.gz + + - name: Create universal binary + run: | + lipo -create \ + artifacts/altmount-cli-darwin-amd64 \ + artifacts/altmount-cli-darwin-arm64 \ + -output altmount-cli-darwin-universal + + tar -czf "altmount-cli_v${{ inputs.version }}_darwin_universal.tar.gz" altmount-cli-darwin-universal + + - name: Upload 
universal darwin artifact + uses: actions/upload-artifact@v6 + with: + name: cli-darwin-universal + path: altmount-cli_v${{ inputs.version }}_darwin_universal.tar.gz + retention-days: 1 + + publish: + runs-on: ubuntu-latest + needs: [build-cli, create-universal-darwin] + permissions: + contents: write + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download all CLI artifacts + uses: actions/download-artifact@v8 + with: + pattern: cli-* + path: ./artifacts/ + merge-multiple: true + + - name: Create checksums + run: | + cd artifacts + sha512sum altmount-cli_v${{ inputs.version }}_*.* > checksums-cli.txt + cat checksums-cli.txt + + - name: Upload combined artifacts (when not publishing to release) + if: ${{ !inputs.upload-to-release }} + uses: actions/upload-artifact@v6 + with: + name: cli-release-bundle + path: | + artifacts/altmount-cli_v${{ inputs.version }}_*.* + artifacts/checksums-cli.txt + retention-days: 7 + + - name: Attach assets to GitHub Release + if: ${{ inputs.upload-to-release }} + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ inputs.release-tag }} + files: | + artifacts/altmount-cli_v${{ inputs.version }}_*.* + artifacts/checksums-cli.txt + draft: false + prerelease: ${{ inputs.prerelease }} + make_latest: ${{ !inputs.prerelease }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/dev-image.yml b/.github/workflows/dev-image.yml index abbd25d5c..b11777fc7 100644 --- a/.github/workflows/dev-image.yml +++ b/.github/workflows/dev-image.yml @@ -59,6 +59,67 @@ jobs: path: frontend/dist retention-days: 1 + prepare-dev-release: + name: Ensure Dev Prerelease Exists + runs-on: ubuntu-latest + needs: build-frontend + permissions: + contents: write + outputs: + short_sha: ${{ steps.sha.outputs.short_sha }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Compute short SHA + id: sha + run: echo "short_sha=$(git rev-parse --short 
HEAD)" >> $GITHUB_OUTPUT + + - name: Ensure rolling dev prerelease + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release create dev \ + --prerelease \ + --title "Dev Build" \ + --notes "Rolling development build from main branch" \ + --target main 2>/dev/null \ + || gh release edit dev \ + --prerelease \ + --target main \ + --title "Dev Build" \ + --notes "Rolling development build from main branch (commit ${{ github.sha }})" + + - name: Delete previous dev assets + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + assets=$(gh release view dev --json assets -q '.assets[].name' || true) + if [ -n "$assets" ]; then + echo "$assets" | while read -r asset; do + [ -z "$asset" ] && continue + echo "Deleting asset: $asset" + gh release delete-asset dev "$asset" --yes || true + done + else + echo "No existing assets on dev release" + fi + + publish-dev-binaries: + name: Publish Dev CLI Binaries + needs: prepare-dev-release + permissions: + contents: write + uses: ./.github/workflows/build-cli.yml + with: + version: dev-${{ github.sha }} + commit: ${{ needs.prepare-dev-release.outputs.short_sha }} + upload-to-release: true + release-tag: dev + prerelease: true + build-dev-amd64: name: Build Dev Image (AMD64) runs-on: ubuntu-latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 444e53cc3..879e482c4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -90,66 +90,21 @@ jobs: path: frontend/dist/ retention-days: 1 - build-cli: - runs-on: ${{ matrix.runner }} + prepare-release: + runs-on: ubuntu-latest needs: [test, build-frontend] permissions: contents: write - strategy: - matrix: - include: - # Linux builds - - goos: linux - goarch: amd64 - runner: ubuntu-latest - cc: zig cc -target x86_64-linux-musl - - goos: linux - goarch: arm64 - runner: ubuntu-latest - cc: zig cc -target aarch64-linux-musl - # macOS builds - - goos: darwin - goarch: amd64 - runner: macos-latest - cc: '' - - goos: darwin - 
goarch: arm64 - runner: macos-latest - cc: '' - # Windows builds - - goos: windows - goarch: amd64 - runner: ubuntu-latest - cc: zig cc -target x86_64-windows-gnu + outputs: + version: ${{ steps.version.outputs.version }} + commit: ${{ steps.version.outputs.commit }} + release_tag: ${{ steps.version.outputs.release_tag }} steps: - # dependencies - - uses: mlugg/setup-zig@v2 - if: matrix.cc != '' - - - uses: actions/setup-go@v6 - with: - go-version: 1.26.0 - - # checkout - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - # Download frontend build - - name: Download frontend build - uses: actions/download-artifact@v8 - with: - name: frontend-build - path: frontend/dist/ - - # Install macFUSE headers (required by cgofuse CGO dependency) - - name: Install macFUSE - if: matrix.goos == 'darwin' - run: brew install --cask macfuse - - - # Extract version info - name: Extract version info id: version run: | @@ -157,180 +112,41 @@ jobs: TAG="${{ inputs.tag }}" VERSION="${TAG#v}" else - VERSION="${GITHUB_REF#refs/tags/v}" + TAG="${GITHUB_REF#refs/tags/}" + VERSION="${TAG#v}" fi COMMIT=$(git rev-parse --short HEAD) - TIMESTAMP=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "version=$VERSION" >> $GITHUB_OUTPUT echo "commit=$COMMIT" >> $GITHUB_OUTPUT - echo "timestamp=$TIMESTAMP" >> $GITHUB_OUTPUT + echo "release_tag=$TAG" >> $GITHUB_OUTPUT - # Build binary - - name: Build ${{ matrix.goos }}-${{ matrix.goarch }} binary + - name: Ensure GitHub Release exists with generated notes env: - GOOS: ${{ matrix.goos }} - GOARCH: ${{ matrix.goarch }} - CGO_ENABLED: 1 - CC: ${{ matrix.cc }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG: ${{ steps.version.outputs.release_tag }} + GENERATE_NOTES: ${{ github.event_name != 'workflow_dispatch' }} run: | - # Set binary extension for Windows - EXT="" - if [ "${{ matrix.goos }}" = "windows" ]; then - EXT=".exe" - fi - - # Build the binary - go build \ - -trimpath \ - -tags=cli \ - -ldflags="-s -w -X 
'github.com/javi11/altmount/internal/version.Version=${{ steps.version.outputs.version }}' -X 'github.com/javi11/altmount/internal/version.GitCommit=${{ steps.version.outputs.commit }}' -X 'github.com/javi11/altmount/internal/version.Timestamp=${{ steps.version.outputs.timestamp }}'" \ - -o "altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" \ - ./cmd/altmount/main.go - - # Create archive - - name: Create archive - run: | - # Set binary extension and archive format - EXT="" - ARCHIVE_EXT="tar.gz" - if [ "${{ matrix.goos }}" = "windows" ]; then - EXT=".exe" - ARCHIVE_EXT="zip" - fi - - BINARY_NAME="altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" - ARCHIVE_NAME="altmount-cli_v${{ steps.version.outputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}" - - if [ "${{ matrix.goos }}" = "windows" ]; then - zip "${ARCHIVE_NAME}.zip" "$BINARY_NAME" - else - tar -czf "${ARCHIVE_NAME}.tar.gz" "$BINARY_NAME" - fi - - # Upload individual build artifacts - - name: Upload ${{ matrix.goos }}-${{ matrix.goarch }} artifacts - uses: actions/upload-artifact@v6 - with: - name: cli-${{ matrix.goos }}-${{ matrix.goarch }} - path: | - altmount-cli_v${{ steps.version.outputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}.* - retention-days: 1 - - create-universal-darwin: - runs-on: macos-latest - needs: build-cli - permissions: - contents: write - steps: - # checkout - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Extract version info - - name: Extract version info - id: version - run: | - if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then - TAG="${{ inputs.tag }}" - VERSION="${TAG#v}" + if gh release view "$TAG" >/dev/null 2>&1; then + echo "Release $TAG already exists" else - VERSION="${GITHUB_REF#refs/tags/v}" + if [ "$GENERATE_NOTES" = "true" ]; then + gh release create "$TAG" --title "$TAG" --generate-notes + else + gh release create "$TAG" --title "$TAG" + fi fi - echo "version=$VERSION" >> $GITHUB_OUTPUT - - # 
Download darwin binaries - - name: Download darwin-amd64 binary - uses: actions/download-artifact@v8 - with: - name: cli-darwin-amd64 - path: ./artifacts/ - - name: Download darwin-arm64 binary - uses: actions/download-artifact@v8 - with: - name: cli-darwin-arm64 - path: ./artifacts/ - - # Extract binaries from archives - - name: Extract binaries - run: | - cd artifacts - tar -xzf altmount-cli_v${{ steps.version.outputs.version }}_darwin_amd64.tar.gz - tar -xzf altmount-cli_v${{ steps.version.outputs.version }}_darwin_arm64.tar.gz - - # Create universal binary - - name: Create universal binary - run: | - lipo -create \ - artifacts/altmount-cli-darwin-amd64 \ - artifacts/altmount-cli-darwin-arm64 \ - -output altmount-cli-darwin-universal - - # Create universal archive - tar -czf "altmount-cli_v${{ steps.version.outputs.version }}_darwin_universal.tar.gz" altmount-cli-darwin-universal - - # Upload universal binary - - name: Upload universal darwin artifact - uses: actions/upload-artifact@v6 - with: - name: cli-darwin-universal - path: altmount-cli_v${{ steps.version.outputs.version }}_darwin_universal.tar.gz - retention-days: 1 - - create-release: - runs-on: ubuntu-latest - needs: [build-cli, create-universal-darwin] + build-cli: + needs: [prepare-release, build-frontend] permissions: contents: write - steps: - # checkout - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Extract version info - - name: Extract version info - id: version - run: | - if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then - TAG="${{ inputs.tag }}" - VERSION="${TAG#v}" - else - VERSION="${GITHUB_REF#refs/tags/v}" - fi - echo "version=$VERSION" >> $GITHUB_OUTPUT - - # Download all artifacts - - name: Download all CLI artifacts - uses: actions/download-artifact@v8 - with: - pattern: cli-* - path: ./artifacts/ - merge-multiple: true - - # Create checksums - - name: Create checksums - run: | - cd artifacts - sha512sum altmount-cli_v${{ 
steps.version.outputs.version }}_*.* > checksums-cli.txt - cat checksums-cli.txt - - # Create GitHub release - - name: Create GitHub Release - uses: softprops/action-gh-release@v2 - with: - tag_name: ${{ github.event_name == 'workflow_dispatch' && inputs.tag || github.ref_name }} - files: | - artifacts/altmount-cli_v${{ steps.version.outputs.version }}_*.* - artifacts/checksums-cli.txt - draft: false - prerelease: false - generate_release_notes: ${{ github.event_name != 'workflow_dispatch' }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: ./.github/workflows/build-cli.yml + with: + version: ${{ needs.prepare-release.outputs.version }} + commit: ${{ needs.prepare-release.outputs.commit }} + upload-to-release: true + release-tag: ${{ needs.prepare-release.outputs.release_tag }} + prerelease: false build-image-amd64: runs-on: ubuntu-latest diff --git a/frontend/src/components/config/UpdateSection.tsx b/frontend/src/components/config/UpdateSection.tsx index 16e290bb9..e0e8a87d1 100644 --- a/frontend/src/components/config/UpdateSection.tsx +++ b/frontend/src/components/config/UpdateSection.tsx @@ -33,11 +33,18 @@ export function UpdateSection() { refetch(); }; + const dockerMode = updateStatus?.docker_available ?? false; + const binaryMode = !dockerMode && (updateStatus?.binary_update_available ?? false); + const updateUnavailable = updateStatus !== undefined && !dockerMode && !binaryMode; + const handleApplyUpdate = async (force = false) => { const actionTitle = force ? "Force Reinstall" : "Apply Update"; + const baseAction = dockerMode + ? `pull the ${channel} image and restart the container` + : `download the ${channel} binary and restart`; const actionMessage = force - ? `This will force-pull the ${channel} image and restart the container, even if the version hasn't changed. Continue?` - : `This will pull the latest ${channel} image and restart the container. The service will be briefly unavailable. Continue?`; + ? 
`This will force-${baseAction}, even if the version hasn't changed. Continue?` + : `This will ${baseAction}. The service will be briefly unavailable. Continue?`; const confirmed = await confirmAction(actionTitle, actionMessage, { type: force ? "error" : "warning", @@ -51,7 +58,9 @@ export function UpdateSection() { showToast({ type: "success", title: force ? "Reinstall started" : "Update started", - message: "Pulling image. The container will restart automatically.", + message: dockerMode + ? "Pulling image. The container will restart automatically." + : "Downloading binary. The service will restart automatically.", }); } catch (err) { showToast({ @@ -62,7 +71,6 @@ export function UpdateSection() { } }; - const dockerUnavailable = updateStatus && !updateStatus.docker_available; const updateAvailable = updateStatus?.update_available ?? false; /** Taller tap targets below md (touch-friendly ~48px min height) */ @@ -157,7 +165,7 @@ export function UpdateSection() { type="button" className={`btn btn-sm btn-warning min-w-0 ${updateActionBtnLayout}`} onClick={() => handleApplyUpdate(false)} - disabled={applyUpdate.isPending || dockerUnavailable} + disabled={applyUpdate.isPending || updateUnavailable} > {applyUpdate.isPending ? ( @@ -171,7 +179,7 @@ export function UpdateSection() { type="button" className={`btn btn-sm btn-ghost min-w-0 border-base-300 bg-base-100 hover:bg-base-200 ${updateActionBtnLayout}`} onClick={() => handleApplyUpdate(true)} - disabled={applyUpdate.isPending || dockerUnavailable || isChecking} + disabled={applyUpdate.isPending || updateUnavailable || isChecking} > {applyUpdate.isPending ? ( @@ -214,18 +222,27 @@ export function UpdateSection() { ) : null} - {dockerUnavailable && ( + {updateUnavailable && (
Auto-update unavailable
- Mount /var/run/docker.sock into the container - to enable one-click updates. + For Docker installs, mount /var/run/docker.sock{" "} + into the container to enable one-click updates. For standalone binaries, ensure + the executable file is writable by this process.
)} + {binaryMode && ( +
+ +
+ Running as standalone binary — updates download the new binary and restart. +
+
+ )} )} diff --git a/frontend/src/types/update.ts b/frontend/src/types/update.ts index b665a4ace..20ce52269 100644 --- a/frontend/src/types/update.ts +++ b/frontend/src/types/update.ts @@ -8,4 +8,5 @@ export interface UpdateStatusResponse { update_available: boolean; release_url?: string; docker_available?: boolean; + binary_update_available?: boolean; } diff --git a/go.mod b/go.mod index bc2624d70..ee2ed2e89 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/klauspost/compress v1.18.0 github.com/mattn/go-sqlite3 v1.14.22 github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4 + github.com/minio/selfupdate v0.6.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/pressly/goose/v3 v3.24.3 github.com/rfjakob/eme v1.1.2 @@ -63,6 +64,7 @@ require ( require ( 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 4d63.com/gochecknoglobals v0.2.2 // indirect + aead.dev/minisign v0.2.0 // indirect cloud.google.com/go/compute/metadata v0.8.0 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect diff --git a/go.sum b/go.sum index 1ee2a53ee..9ecce4483 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ 4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= 4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +aead.dev/minisign v0.2.0 h1:kAWrq/hBRu4AARY6AlciO83xhNnW9UaC8YipS2uhLPk= +aead.dev/minisign v0.2.0/go.mod h1:zdq6LdSd9TbuSxchxwhpA9zEb9YXcVGoE8JakuiGaIQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -476,6 +478,8 @@ github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM 
github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4 h1:C/VViMMbR/4Ti2aXrWpKy34S05cRaVd6EvV9BFR3qJ8= github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4/go.mod h1:H66QhXPJpUSdWschhL6u//v3ge96/qMnQ9mWp3efbxA= +github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU= +github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mnightingale/rapidyenc v0.0.0-20251128204712-7aafef1eaf1c h1:UFEKx2AsNb8Tx80rlOwUCCz4lDxSsZ1tjq2+QDBNOUA= @@ -789,7 +793,9 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= @@ -905,6 +911,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -916,6 +923,7 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -937,6 +945,7 @@ golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= 
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/internal/api/server.go b/internal/api/server.go index df011dd4b..97c56fed1 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -23,6 +23,7 @@ import ( "github.com/javi11/altmount/internal/pool" "github.com/javi11/altmount/internal/progress" "github.com/javi11/altmount/internal/rclone" + "github.com/javi11/altmount/internal/updater" "github.com/javi11/altmount/internal/version" "github.com/javi11/altmount/pkg/rclonecli" ) @@ -64,6 +65,7 @@ type Server struct { cacheSource *segcache.Source logFilePath string migrationRepo *database.ImportMigrationRepository + updater updater.Updater ready atomic.Bool } @@ -109,6 +111,7 @@ func NewServer( streamTracker: streamTracker, cacheSource: cacheSource, fuseManager: NewFuseManager(newMountFactory(nzbFilesystem, configManager, streamTracker)), + updater: updater.Default(), } return server @@ -119,6 +122,12 @@ func (s *Server) SetHealthWorker(healthWorker *health.HealthWorker) { s.healthWorker = healthWorker } +// SetUpdater overrides the binary updater used for self-update operations. +// Primarily intended for tests that need to substitute a fake implementation. 
+func (s *Server) SetUpdater(u updater.Updater) { + s.updater = u +} + // SetLibrarySyncWorker sets the library sync worker reference for the server func (s *Server) SetLibrarySyncWorker(librarySyncWorker *health.LibrarySyncWorker) { s.librarySyncWorker = librarySyncWorker diff --git a/internal/api/types.go b/internal/api/types.go index 3aace5c07..759aae724 100644 --- a/internal/api/types.go +++ b/internal/api/types.go @@ -542,13 +542,14 @@ const ( // UpdateStatusResponse represents the current update status. type UpdateStatusResponse struct { - CurrentVersion string `json:"current_version"` - GitCommit string `json:"git_commit,omitempty"` - Channel UpdateChannel `json:"channel"` - LatestVersion string `json:"latest_version,omitempty"` - UpdateAvailable bool `json:"update_available"` - ReleaseURL string `json:"release_url,omitempty"` - DockerAvailable bool `json:"docker_available"` + CurrentVersion string `json:"current_version"` + GitCommit string `json:"git_commit,omitempty"` + Channel UpdateChannel `json:"channel"` + LatestVersion string `json:"latest_version,omitempty"` + UpdateAvailable bool `json:"update_available"` + ReleaseURL string `json:"release_url,omitempty"` + DockerAvailable bool `json:"docker_available"` + BinaryUpdateAvailable bool `json:"binary_update_available"` } // SystemHealthResponse represents system health check result diff --git a/internal/api/update_handlers.go b/internal/api/update_handlers.go index 108b57cad..a4110fa34 100644 --- a/internal/api/update_handlers.go +++ b/internal/api/update_handlers.go @@ -16,6 +16,19 @@ import ( "github.com/javi11/altmount/internal/version" ) +// insideContainer reports whether the current process is running inside a +// Docker or Kubernetes container. When true, the Docker-based update path is +// preferred over the binary self-update path. 
+func insideContainer() bool { + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return true + } + return false +} + const ( ghAPIBase = "https://api.github.com" ghRepoOwner = "javi11" @@ -115,10 +128,11 @@ func (s *Server) handleGetUpdateStatus(c *fiber.Ctx) error { } resp := UpdateStatusResponse{ - CurrentVersion: version.Version, - GitCommit: version.GitCommit, - Channel: channel, - DockerAvailable: isDockerAvailable(), + CurrentVersion: version.Version, + GitCommit: version.GitCommit, + Channel: channel, + DockerAvailable: isDockerAvailable(), + BinaryUpdateAvailable: s.updater != nil && s.updater.CanSelfUpdate(), } ctx, cancel := context.WithTimeout(c.Context(), 10*time.Second) @@ -175,10 +189,6 @@ func (s *Server) handleApplyUpdate(c *fiber.Ctx) error { return RespondForbidden(c, "Admin privileges required", "Only administrators can perform system updates.") } - if !isDockerAvailable() { - return RespondBadRequest(c, "Auto-update is not available. Mount docker.sock into the container and ensure docker CLI is installed.", "") - } - var req struct { Channel UpdateChannel `json:"channel"` Force bool `json:"force"` @@ -196,33 +206,68 @@ func (s *Server) handleApplyUpdate(c *fiber.Ctx) error { return RespondBadRequest(c, "Invalid channel. Use 'latest' or 'dev'", "") } - // Use goroutine to avoid blocking the API response - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() + // Prefer the Docker-based update path when running inside a container + // with docker.sock mounted. Fall back to in-place binary self-update for + // standalone installs. 
+ dockerPath := insideContainer() && isDockerAvailable() + binaryPath := !dockerPath && s.updater != nil && s.updater.CanSelfUpdate() - image := fmt.Sprintf("ghcr.io/%s/%s:%s", ghRepoOwner, ghRepoName, channel) - slog.InfoContext(ctx, "Starting auto-update", "channel", channel, "image", image, "force", req.Force) + switch { + case dockerPath: + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() - // 1. Pull the new image - cmd := exec.CommandContext(ctx, "docker", "pull", image) - cmd.Env = append(os.Environ(), "HOME=/config") - output, err := cmd.CombinedOutput() - if err != nil { - slog.ErrorContext(ctx, "Failed to pull latest image", "error", err, "output", string(output)) - return - } - slog.InfoContext(ctx, "Successfully pulled latest image", "output", string(output)) - - // 2. Trigger restart - // Note: performRestart only restarts the process. To pick up the new image, - // the container needs to be recreated. However, if the user has a setup - // that handles image updates on restart (like Watchtower or similar), this will work. - // For many users, a simple process restart is the first step. - s.performRestart(ctx) - }() - - return RespondSuccess(c, fiber.Map{ - "message": "Update initiated. 
The image is being pulled and the server will restart automatically.", - }) + image := fmt.Sprintf("ghcr.io/%s/%s:%s", ghRepoOwner, ghRepoName, channel) + slog.InfoContext(ctx, "Starting docker auto-update", + "channel", channel, + "image", image, + "force", req.Force) + + cmd := exec.CommandContext(ctx, "docker", "pull", image) + cmd.Env = append(os.Environ(), "HOME=/config") + output, err := cmd.CombinedOutput() + if err != nil { + slog.ErrorContext(ctx, "Failed to pull latest image", + "error", err, + "output", string(output)) + return + } + slog.InfoContext(ctx, "Successfully pulled latest image", + "output", string(output)) + + s.performRestart(ctx) + }() + + return RespondSuccess(c, fiber.Map{ + "message": "Update initiated. The image is being pulled and the server will restart automatically.", + }) + + case binaryPath: + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + slog.InfoContext(ctx, "Starting binary auto-update", + "channel", channel, + "force", req.Force) + + if err := s.updater.ApplyBinaryUpdate(ctx, string(channel)); err != nil { + slog.ErrorContext(ctx, "Failed to apply binary update", "error", err) + return + } + slog.InfoContext(ctx, "Binary update applied, restarting") + s.performRestart(ctx) + }() + + return RespondSuccess(c, fiber.Map{ + "message": "Update initiated. Downloading the new binary and restarting automatically.", + }) + + default: + return RespondBadRequest(c, + "Auto-update is not available. For Docker installs, mount /var/run/docker.sock and install the docker CLI. 
For standalone binaries, ensure the executable file is writable by this process.", + "") + } } + diff --git a/internal/api/update_handlers_test.go b/internal/api/update_handlers_test.go index 0ea3bfde0..1c0a921a0 100644 --- a/internal/api/update_handlers_test.go +++ b/internal/api/update_handlers_test.go @@ -1,33 +1,179 @@ package api import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http/httptest" "os" + "sync/atomic" "testing" + "time" + "github.com/gofiber/fiber/v2" + "github.com/javi11/altmount/internal/config" + "github.com/javi11/altmount/internal/updater" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIsDockerAvailable(t *testing.T) { - // By default, it should be false in a normal environment unless /var/run/docker.sock exists - // and docker CLI is in PATH. - - // We can't easily mock /var/run/docker.sock without root, but we can check the logic. + // Ensure isDockerAvailable does not panic and returns a bool. available := isDockerAvailable() - - // In most CI environments this will be false. - // If it's true, it means the environment has docker. t.Logf("Docker available in this environment: %v", available) - - // Ensure it doesn't panic assert.NotPanics(t, func() { isDockerAvailable() }) } -func TestIsDockerAvailable_Mock(t *testing.T) { - // Create a dummy file for docker.sock in a temp dir - tmpDir, err := os.MkdirTemp("", "docker-test") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) +// fakeUpdater is a test double implementing updater.Updater. 
// fakeUpdater is a test double implementing updater.Updater. It lets tests
// control the results of CanSelfUpdate / ApplyBinaryUpdate and records how
// the handler under test invoked them.
type fakeUpdater struct {
	canSelfUpdate bool         // returned verbatim by CanSelfUpdate
	applyErr      error        // returned verbatim by ApplyBinaryUpdate
	applyCalls    atomic.Int32 // number of ApplyBinaryUpdate invocations
	lastChannel   atomic.Value // string: channel argument of the last call
}

func (f *fakeUpdater) CanSelfUpdate() bool { return f.canSelfUpdate }

// ApplyBinaryUpdate records the call (count + channel) and returns the
// configured error.
func (f *fakeUpdater) ApplyBinaryUpdate(_ context.Context, channel string) error {
	f.applyCalls.Add(1)
	f.lastChannel.Store(channel)
	return f.applyErr
}

// TestHandleGetUpdateStatus_PopulatesBinaryField verifies that the status
// endpoint surfaces updater.CanSelfUpdate through the
// binary_update_available field of the response envelope.
func TestHandleGetUpdateStatus_PopulatesBinaryField(t *testing.T) {
	app := fiber.New()
	loginRequired := false
	s := &Server{
		configManager: &mockConfigManager{cfg: &config.Config{
			Auth: config.AuthConfig{LoginRequired: &loginRequired},
		}},
		updater: &fakeUpdater{canSelfUpdate: true},
	}
	app.Get("/status", s.handleGetUpdateStatus)

	req := httptest.NewRequest("GET", "/status?channel=latest", nil)
	resp, err := app.Test(req, -1)
	require.NoError(t, err)
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	var parsed struct {
		Data UpdateStatusResponse `json:"data"`
	}
	require.NoError(t, json.Unmarshal(body, &parsed))
	assert.True(t, parsed.Data.BinaryUpdateAvailable, "binary_update_available should reflect updater.CanSelfUpdate")
}

// postApplyUpdate posts a JSON body to the apply handler and returns the
// HTTP status code plus the decoded JSON response (nil map on decode error).
func postApplyUpdate(t *testing.T, s *Server, body any) (status int, decoded map[string]any) {
	t.Helper()
	app := fiber.New()
	app.Post("/apply", s.handleApplyUpdate)

	var buf bytes.Buffer
	require.NoError(t, json.NewEncoder(&buf).Encode(body))
	req := httptest.NewRequest("POST", "/apply", &buf)
	req.Header.Set("Content-Type", "application/json")

	resp, err := app.Test(req, -1)
	require.NoError(t, err)
	defer resp.Body.Close()

	raw, _ := io.ReadAll(resp.Body)
	_ = json.Unmarshal(raw, &decoded)
	return resp.StatusCode, decoded
}

// TestHandleApplyUpdate_NoPathAvailable asserts a 400 when neither the
// Docker path nor the binary path is available.
func TestHandleApplyUpdate_NoPathAvailable(t *testing.T) {
	// Skip if running inside a container (the Docker branch would be
	// evaluated instead of the "no path" branch).
	if _, err := os.Stat("/.dockerenv"); err == nil {
		t.Skip("running inside docker; /.dockerenv present")
	}

	loginRequired := false
	s := &Server{
		configManager: &mockConfigManager{cfg: &config.Config{
			Auth: config.AuthConfig{LoginRequired: &loginRequired},
		}},
		updater: &fakeUpdater{canSelfUpdate: false},
	}

	status, decoded := postApplyUpdate(t, s, map[string]string{"channel": "latest"})
	assert.Equal(t, 400, status)
	assert.Equal(t, false, decoded["success"])
}

// TestHandleApplyUpdate_BinaryBranch drives the binary self-update branch
// and confirms the background goroutine calls ApplyBinaryUpdate with the
// requested channel.
func TestHandleApplyUpdate_BinaryBranch(t *testing.T) {
	if _, err := os.Stat("/.dockerenv"); err == nil {
		t.Skip("running inside docker; /.dockerenv present")
	}

	loginRequired := false
	// Make ApplyBinaryUpdate return an error so performRestart is not
	// invoked in the background goroutine — performRestart would syscall.Exec
	// the test binary and kill the entire test run.
	fake := &fakeUpdater{canSelfUpdate: true, applyErr: errTestApply}
	s := &Server{
		configManager: &mockConfigManager{cfg: &config.Config{
			Auth: config.AuthConfig{LoginRequired: &loginRequired},
		}},
		updater: fake,
	}

	status, decoded := postApplyUpdate(t, s, map[string]string{"channel": "dev"})
	assert.Equal(t, 200, status)
	assert.Equal(t, true, decoded["success"])

	// Wait for the background goroutine to observe the fake updater call.
	deadline := time.Now().Add(2 * time.Second)
	for time.Now().Before(deadline) {
		if fake.applyCalls.Load() >= 1 {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	assert.GreaterOrEqual(t, int(fake.applyCalls.Load()), 1, "ApplyBinaryUpdate should have been invoked")
	if v := fake.lastChannel.Load(); v != nil {
		assert.Equal(t, "dev", v.(string))
	}
}

// errTestApply is the sentinel returned by the fake updater in
// TestHandleApplyUpdate_BinaryBranch to short-circuit the restart.
var errTestApply = errors.New("test apply failure")

// TestHandleApplyUpdate_InvalidChannel asserts a 400 for an unrecognized
// channel value.
func TestHandleApplyUpdate_InvalidChannel(t *testing.T) {
	loginRequired := false
	s := &Server{
		configManager: &mockConfigManager{cfg: &config.Config{
			Auth: config.AuthConfig{LoginRequired: &loginRequired},
		}},
		updater: &fakeUpdater{canSelfUpdate: true},
	}

	status, _ := postApplyUpdate(t, s, map[string]string{"channel": "banana"})
	assert.Equal(t, 400, status)
}

// TestHandleApplyUpdate_DockerBranchRespected verifies the decision logic:
// with no updater configured at all (so no binary path is possible) AND
// outside a container (so no Docker path), the handler must return 400.
// It is a sibling to TestHandleApplyUpdate_NoPathAvailable making explicit
// that s.updater is consulted.
func TestHandleApplyUpdate_DockerBranchRespected(t *testing.T) {
	if _, err := os.Stat("/.dockerenv"); err == nil {
		t.Skip("running inside docker; /.dockerenv present")
	}
	loginRequired := false
	s := &Server{
		configManager: &mockConfigManager{cfg: &config.Config{
			Auth: config.AuthConfig{LoginRequired: &loginRequired},
		}},
		updater: nil, // no updater configured at all
	}
	status, _ := postApplyUpdate(t, s, map[string]string{"channel": "latest"})
	assert.Equal(t, 400, status)
}

// Compile-time check that the fake satisfies the Updater interface.
var _ updater.Updater = (*fakeUpdater)(nil)
// Package updater provides binary self-update capabilities for standalone
// (non-Docker) installs of altmount. It fetches release assets from GitHub,
// verifies their SHA-512 checksum, extracts the binary, and applies the
// update in-place using github.com/minio/selfupdate.
package updater

import (
	"archive/tar"
	"archive/zip"
	"bytes"
	"compress/gzip"
	"context"
	"crypto/sha512"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"os"
	"path"
	"runtime"
	"slices"
	"strings"
	"time"

	"github.com/minio/selfupdate"
)

const (
	// defaultGitHubAPIBase is the production GitHub API endpoint; tests
	// substitute an httptest server URL via binaryUpdater.apiBase.
	defaultGitHubAPIBase = "https://api.github.com"
	repoOwner            = "javi11"
	repoName             = "altmount"

	// Channel identifiers accepted by ApplyBinaryUpdate.
	ChannelLatest = "latest"
	ChannelDev    = "dev"

	// downloadTimeout bounds the HTTP client used for both the release
	// metadata call and the (potentially large) asset downloads.
	downloadTimeout = 10 * time.Minute
)

// githubAsset mirrors the subset of the GitHub release asset schema used here.
type githubAsset struct {
	Name               string `json:"name"`
	BrowserDownloadURL string `json:"browser_download_url"`
}

// githubRelease mirrors the subset of the GitHub release schema used here.
type githubRelease struct {
	TagName string        `json:"tag_name"`
	Assets  []githubAsset `json:"assets"`
}

// Updater applies binary updates in-place using assets from GitHub releases.
// It exists as an interface so callers (and tests) can swap in a fake
// implementation.
type Updater interface {
	// CanSelfUpdate reports whether a binary self-update is feasible in the
	// current runtime environment.
	CanSelfUpdate() bool
	// ApplyBinaryUpdate downloads, verifies and applies the binary for the
	// given channel ("latest" or "dev").
	ApplyBinaryUpdate(ctx context.Context, channel string) error
}

// Default returns a Updater backed by the real GitHub API and minio/selfupdate.
func Default() Updater {
	return &binaryUpdater{
		apiBase:    defaultGitHubAPIBase,
		httpClient: &http.Client{Timeout: downloadTimeout},
	}
}

// binaryUpdater is the production implementation of Updater.
type binaryUpdater struct {
	apiBase    string       // GitHub API base URL; overridden in tests
	httpClient *http.Client // client for both API and asset downloads
}

// CanSelfUpdate reports whether a binary self-update is feasible in the
// current runtime. It returns false when running inside a Docker container
// (the Docker path is preferred in that case), when os.Executable cannot be
// resolved, or when the current executable path is not writable.
func (u *binaryUpdater) CanSelfUpdate() bool {
	if insideContainer() {
		return false
	}
	exe, err := os.Executable()
	if err != nil {
		return false
	}
	return isWritable(exe)
}

// ApplyBinaryUpdate downloads the release asset for the current platform,
// verifies its checksum, extracts the binary and applies the update. The
// channel must be either "latest" or "dev". On a failed apply, a rollback is
// attempted and both errors are logged.
func (u *binaryUpdater) ApplyBinaryUpdate(ctx context.Context, channel string) error {
	slog.InfoContext(ctx, "Starting binary self-update",
		"channel", channel,
		"goos", runtime.GOOS,
		"goarch", runtime.GOARCH)

	reader, cleanup, err := u.downloadAndExtract(ctx, channel)
	if err != nil {
		return fmt.Errorf("prepare binary update: %w", err)
	}
	defer cleanup()

	if err := selfupdate.Apply(reader, selfupdate.Options{}); err != nil {
		slog.ErrorContext(ctx, "selfupdate.Apply failed", "error", err)
		// Best-effort rollback; a non-nil rollback error means the old
		// binary may be in an inconsistent state.
		if rerr := selfupdate.RollbackError(err); rerr != nil {
			slog.ErrorContext(ctx, "selfupdate rollback failed", "error", rerr)
		}
		return fmt.Errorf("apply binary update: %w", err)
	}

	slog.InfoContext(ctx, "Binary self-update applied successfully")
	return nil
}
+func (u *binaryUpdater) downloadAndExtract(ctx context.Context, channel string) (io.Reader, func(), error) { + release, err := u.fetchRelease(ctx, channel) + if err != nil { + return nil, nil, fmt.Errorf("fetch release: %w", err) + } + + archiveAsset, checksumAsset, err := pickAssets(release.Assets, runtime.GOOS, runtime.GOARCH) + if err != nil { + return nil, nil, err + } + + slog.InfoContext(ctx, "Selected release assets", + "archive", archiveAsset.Name, + "checksum", checksumAsset.Name, + "tag", release.TagName) + + archiveBytes, err := u.downloadBytes(ctx, archiveAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download archive: %w", err) + } + + checksumBytes, err := u.downloadBytes(ctx, checksumAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download checksum file: %w", err) + } + + if err := verifyChecksum(archiveAsset.Name, archiveBytes, checksumBytes); err != nil { + return nil, nil, fmt.Errorf("verify checksum: %w", err) + } + + binaryName := expectedBinaryName(runtime.GOOS, runtime.GOARCH) + reader, err := extractBinary(archiveAsset.Name, archiveBytes, binaryName) + if err != nil { + return nil, nil, fmt.Errorf("extract binary %q: %w", binaryName, err) + } + + return reader, func() {}, nil +} + +// fetchRelease retrieves the release metadata for the requested channel. 
+func (u *binaryUpdater) fetchRelease(ctx context.Context, channel string) (*githubRelease, error) { + var url string + switch channel { + case ChannelLatest: + url = fmt.Sprintf("%s/repos/%s/%s/releases/latest", u.apiBase, repoOwner, repoName) + case ChannelDev: + url = fmt.Sprintf("%s/repos/%s/%s/releases/tags/dev", u.apiBase, repoOwner, repoName) + default: + return nil, fmt.Errorf("unknown channel %q", channel) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + req.Header.Set("Accept", "application/vnd.github+json") + + resp, err := u.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("perform request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("github api returned status %d", resp.StatusCode) + } + + var rel githubRelease + if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil { + return nil, fmt.Errorf("decode release: %w", err) + } + return &rel, nil +} + +// downloadBytes fetches a URL and returns the full response body. +func (u *binaryUpdater) downloadBytes(ctx context.Context, url string) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + resp, err := u.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("perform request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("download %q returned status %d", url, resp.StatusCode) + } + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read body: %w", err) + } + return data, nil +} + +// pickAssets selects the archive + checksum assets that match the given +// GOOS/GOARCH. For darwin, a universal binary asset is preferred when +// available. 
+func pickAssets(assets []githubAsset, goos, goarch string) (archive githubAsset, checksum githubAsset, err error) { + var checksumAsset *githubAsset + for i, a := range assets { + if a.Name == "checksums-cli.txt" { + checksumAsset = &assets[i] + break + } + } + if checksumAsset == nil { + return githubAsset{}, githubAsset{}, errors.New("release does not include checksums-cli.txt") + } + + candidateNames := candidateArchiveNames(goos, goarch) + for _, want := range candidateNames { + for _, a := range assets { + if strings.HasSuffix(a.Name, want) { + return a, *checksumAsset, nil + } + } + } + return githubAsset{}, githubAsset{}, fmt.Errorf("no release asset matches %s/%s", goos, goarch) +} + +// candidateArchiveNames returns the suffixes we accept for a given +// GOOS/GOARCH, in priority order. Asset names follow the release.yml pattern +// `altmount-cli_v__.`. +func candidateArchiveNames(goos, goarch string) []string { + ext := ".tar.gz" + if goos == "windows" { + ext = ".zip" + } + if goos == "darwin" { + // Prefer the universal binary, fall back to arch-specific archives. + return []string{ + fmt.Sprintf("_darwin_universal%s", ext), + fmt.Sprintf("_%s_%s%s", goos, goarch, ext), + } + } + return []string{fmt.Sprintf("_%s_%s%s", goos, goarch, ext)} +} + +// expectedBinaryName returns the binary file name that is embedded in the +// archive for the given GOOS/GOARCH. +func expectedBinaryName(goos, goarch string) string { + if goos == "darwin" { + // If the universal binary is present, it uses this name; otherwise the + // arch-specific binary name is the fallback. extractBinary tries both. + return "altmount-cli-darwin-universal" + } + name := fmt.Sprintf("altmount-cli-%s-%s", goos, goarch) + if goos == "windows" { + name += ".exe" + } + return name +} + +// verifyChecksum validates archiveBytes against the sha512 digest listed for +// archiveName in the provided checksum file contents. 
// verifyChecksum validates archiveBytes against the sha512 digest listed for
// archiveName in the provided checksum file contents.
func verifyChecksum(archiveName string, archiveBytes, checksumFile []byte) error {
	want, err := findChecksum(archiveName, checksumFile)
	if err != nil {
		return err
	}

	digest := sha512.Sum512(archiveBytes)
	if got := hex.EncodeToString(digest[:]); !strings.EqualFold(got, want) {
		return fmt.Errorf("checksum mismatch for %s: got %s want %s", archiveName, got, want)
	}
	return nil
}

// findChecksum parses a `sha512sum` style checksum file and returns the digest
// for the named file. Both bare names and path-qualified names are matched,
// and a leading `*` (binary-mode marker) is stripped.
func findChecksum(name string, checksumFile []byte) (string, error) {
	for _, raw := range strings.Split(string(checksumFile), "\n") {
		line := strings.TrimSpace(raw)
		if line == "" {
			continue
		}
		// Format: "<digest>  <file>" (two spaces) or "<digest> *<file>".
		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		candidate := strings.TrimPrefix(fields[len(fields)-1], "*")
		if candidate == name || path.Base(candidate) == name {
			return fields[0], nil
		}
	}
	return "", fmt.Errorf("checksum for %q not found", name)
}
+func extractBinary(archiveName string, archiveBytes []byte, expectedName string) (io.Reader, error) { + alt := fmt.Sprintf("altmount-cli-%s-%s", runtime.GOOS, runtime.GOARCH) + if runtime.GOOS == "windows" { + alt += ".exe" + } + candidates := []string{expectedName, alt} + + switch { + case strings.HasSuffix(archiveName, ".tar.gz"): + return extractFromTarGz(archiveBytes, candidates) + case strings.HasSuffix(archiveName, ".zip"): + return extractFromZip(archiveBytes, candidates) + default: + return nil, fmt.Errorf("unsupported archive format: %s", archiveName) + } +} + +func extractFromTarGz(data []byte, candidates []string) (io.Reader, error) { + gz, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("open gzip: %w", err) + } + defer gz.Close() + + tr := tar.NewReader(gz) + for { + hdr, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("read tar: %w", err) + } + if hdr.Typeflag != tar.TypeReg { + continue + } + base := path.Base(hdr.Name) + if matchesAny(base, candidates) { + buf, err := io.ReadAll(tr) + if err != nil { + return nil, fmt.Errorf("read entry %q: %w", hdr.Name, err) + } + return bytes.NewReader(buf), nil + } + } + return nil, fmt.Errorf("binary not found in tar.gz (looked for %v)", candidates) +} + +func extractFromZip(data []byte, candidates []string) (io.Reader, error) { + zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + return nil, fmt.Errorf("open zip: %w", err) + } + for _, f := range zr.File { + base := path.Base(f.Name) + if !matchesAny(base, candidates) { + continue + } + rc, err := f.Open() + if err != nil { + return nil, fmt.Errorf("open zip entry %q: %w", f.Name, err) + } + buf, err := io.ReadAll(rc) + _ = rc.Close() + if err != nil { + return nil, fmt.Errorf("read zip entry %q: %w", f.Name, err) + } + return bytes.NewReader(buf), nil + } + return nil, fmt.Errorf("binary not found in zip (looked for %v)", 
candidates) +} + +func matchesAny(name string, candidates []string) bool { + return slices.Contains(candidates, name) +} + +// insideContainer reports whether the current process appears to be running +// inside a Docker/Kubernetes container. It checks for the /.dockerenv marker +// file and the KUBERNETES_SERVICE_HOST env var. +func insideContainer() bool { + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return true + } + return false +} + +// isWritable returns true if the current process can open the given file for +// writing without truncating it. +func isWritable(path string) bool { + f, err := os.OpenFile(path, os.O_WRONLY, 0) + if err != nil { + return false + } + _ = f.Close() + return true +} diff --git a/internal/updater/binary_test.go b/internal/updater/binary_test.go new file mode 100644 index 000000000..d102c6d35 --- /dev/null +++ b/internal/updater/binary_test.go @@ -0,0 +1,318 @@ +package updater + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "context" + "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPickAssets(t *testing.T) { + t.Parallel() + + assets := []githubAsset{ + {Name: "altmount-cli_v1.2.3_linux_amd64.tar.gz", BrowserDownloadURL: "https://e/linux-amd64"}, + {Name: "altmount-cli_v1.2.3_linux_arm64.tar.gz", BrowserDownloadURL: "https://e/linux-arm64"}, + {Name: "altmount-cli_v1.2.3_windows_amd64.zip", BrowserDownloadURL: "https://e/win"}, + {Name: "altmount-cli_v1.2.3_darwin_amd64.tar.gz", BrowserDownloadURL: "https://e/darwin-amd64"}, + {Name: "altmount-cli_v1.2.3_darwin_universal.tar.gz", BrowserDownloadURL: "https://e/darwin-universal"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: "https://e/checksums"}, + } + + tests := []struct { + name 
string + goos string + goarch string + wantArchive string + wantErr bool + }{ + {name: "linux amd64", goos: "linux", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_linux_amd64.tar.gz"}, + {name: "linux arm64", goos: "linux", goarch: "arm64", wantArchive: "altmount-cli_v1.2.3_linux_arm64.tar.gz"}, + {name: "windows amd64", goos: "windows", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_windows_amd64.zip"}, + {name: "darwin prefers universal", goos: "darwin", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_darwin_universal.tar.gz"}, + {name: "unsupported os", goos: "plan9", goarch: "amd64", wantErr: true}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + archive, checksum, err := pickAssets(assets, tc.goos, tc.goarch) + if tc.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tc.wantArchive, archive.Name) + assert.Equal(t, "checksums-cli.txt", checksum.Name) + }) + } +} + +func TestPickAssets_MissingChecksum(t *testing.T) { + t.Parallel() + assets := []githubAsset{ + {Name: "altmount-cli_v1.2.3_linux_amd64.tar.gz"}, + } + _, _, err := pickAssets(assets, "linux", "amd64") + require.Error(t, err) + assert.Contains(t, err.Error(), "checksums-cli.txt") +} + +func TestVerifyChecksum(t *testing.T) { + t.Parallel() + + data := []byte("hello world") + sum := sha512.Sum512(data) + digest := hex.EncodeToString(sum[:]) + checksums := fmt.Sprintf("%s altmount-cli_v1_linux_amd64.tar.gz\nbaddigest other.tar.gz\n", digest) + + t.Run("happy path", func(t *testing.T) { + t.Parallel() + err := verifyChecksum("altmount-cli_v1_linux_amd64.tar.gz", data, []byte(checksums)) + require.NoError(t, err) + }) + + t.Run("mismatch", func(t *testing.T) { + t.Parallel() + err := verifyChecksum("altmount-cli_v1_linux_amd64.tar.gz", []byte("tampered"), []byte(checksums)) + require.Error(t, err) + assert.Contains(t, err.Error(), "mismatch") + }) + + t.Run("unknown file", func(t *testing.T) { + t.Parallel() + 
err := verifyChecksum("other.tar.gz.missing", data, []byte(checksums)) + require.Error(t, err) + }) +} + +func TestExtractBinary_TarGz(t *testing.T) { + t.Parallel() + + payload := []byte("fake-binary-contents") + archive := buildTarGz(t, "altmount-cli-linux-amd64", payload) + + r, err := extractBinary("altmount-cli_v1_linux_amd64.tar.gz", archive, "altmount-cli-linux-amd64") + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestExtractBinary_Zip(t *testing.T) { + t.Parallel() + + payload := []byte("fake-windows-binary") + archive := buildZip(t, "altmount-cli-windows-amd64.exe", payload) + + r, err := extractBinary("altmount-cli_v1_windows_amd64.zip", archive, "altmount-cli-windows-amd64.exe") + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestExtractBinary_BinaryMissing(t *testing.T) { + t.Parallel() + archive := buildTarGz(t, "unrelated-file", []byte("x")) + _, err := extractBinary("something_linux_amd64.tar.gz", archive, "altmount-cli-linux-amd64") + require.Error(t, err) +} + +func TestDownloadAndExtract_HappyPath(t *testing.T) { + t.Parallel() + + payload := []byte("fake-binary-contents-for-download") + archive := buildTarGz(t, "altmount-cli-linux-amd64", payload) + sum := sha512.Sum512(archive) + digestLine := fmt.Sprintf("%s altmount-cli_v1.0.0_linux_amd64.tar.gz\n", hex.EncodeToString(sum[:])) + + var baseURL string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "/releases/latest"): + rel := githubRelease{ + TagName: "v1.0.0", + Assets: []githubAsset{ + {Name: "altmount-cli_v1.0.0_linux_amd64.tar.gz", BrowserDownloadURL: baseURL + "/archive"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: baseURL + "/checksums"}, + }, + } + _ = json.NewEncoder(w).Encode(rel) + case r.URL.Path == "/archive": + _, _ = w.Write(archive) 
+ case r.URL.Path == "/checksums": + _, _ = w.Write([]byte(digestLine)) + default: + http.NotFound(w, r) + } + })) + defer srv.Close() + baseURL = srv.URL + + u := &binaryUpdater{apiBase: srv.URL, httpClient: srv.Client()} + reader, cleanup, err := u.downloadAndExtractWith(context.Background(), ChannelLatest, "linux", "amd64") + require.NoError(t, err) + defer cleanup() + + got, err := io.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestDownloadAndExtract_ChecksumMismatch(t *testing.T) { + t.Parallel() + + archive := buildTarGz(t, "altmount-cli-linux-amd64", []byte("real contents")) + // Intentionally wrong checksum. + digestLine := fmt.Sprintf("%s altmount-cli_v1.0.0_linux_amd64.tar.gz\n", strings.Repeat("0", 128)) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "/releases/latest"): + rel := githubRelease{ + TagName: "v1.0.0", + Assets: []githubAsset{ + {Name: "altmount-cli_v1.0.0_linux_amd64.tar.gz", BrowserDownloadURL: srv.URL + "/archive"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: srv.URL + "/checksums"}, + }, + } + _ = json.NewEncoder(w).Encode(rel) + case r.URL.Path == "/archive": + _, _ = w.Write(archive) + case r.URL.Path == "/checksums": + _, _ = w.Write([]byte(digestLine)) + } + }) + defer srv.Close() + + u := &binaryUpdater{apiBase: srv.URL, httpClient: srv.Client()} + _, _, err := u.downloadAndExtractWith(context.Background(), ChannelLatest, "linux", "amd64") + require.Error(t, err) + assert.Contains(t, err.Error(), "checksum") +} + +func TestFetchRelease_UnknownChannel(t *testing.T) { + t.Parallel() + u := &binaryUpdater{apiBase: "http://127.0.0.1:1", httpClient: http.DefaultClient} + _, err := u.fetchRelease(context.Background(), "banana") + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown channel") +} + 
// TestIsWritable exercises isWritable against a writable file, a read-only
// file, and a missing path.
func TestIsWritable(t *testing.T) {
	t.Parallel()
	dir := t.TempDir()

	writable := filepath.Join(dir, "writable")
	require.NoError(t, os.WriteFile(writable, []byte("x"), 0o600))
	assert.True(t, isWritable(writable))

	readOnly := filepath.Join(dir, "readonly")
	require.NoError(t, os.WriteFile(readOnly, []byte("x"), 0o400))
	// On most filesystems 0o400 forbids O_WRONLY for the owner.
	// NOTE(review): mode bits do not restrict root — this assertion would
	// fail if the suite ever runs as uid 0; confirm the CI user.
	assert.False(t, isWritable(readOnly))

	assert.False(t, isWritable(filepath.Join(dir, "does-not-exist")))
}

// TestCanSelfUpdate_RefusesWhenExecutableNotWritable is a smoke test only:
// the result depends on the test binary's own location and environment, so
// no value is asserted.
func TestCanSelfUpdate_RefusesWhenExecutableNotWritable(t *testing.T) {
	// We can't portably make os.Executable point at a read-only file, but we
	// can at least assert the function does not panic and returns a boolean.
	u := &binaryUpdater{}
	_ = u.CanSelfUpdate()
}

// --- helpers -----------------------------------------------------------

// buildTarGz returns a gzip-compressed tar archive containing a single
// regular-file entry `name` holding payload.
func buildTarGz(t *testing.T, name string, payload []byte) []byte {
	t.Helper()
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gz)
	require.NoError(t, tw.WriteHeader(&tar.Header{
		Name:     name,
		Mode:     0o755,
		Size:     int64(len(payload)),
		Typeflag: tar.TypeReg,
	}))
	_, err := tw.Write(payload)
	require.NoError(t, err)
	require.NoError(t, tw.Close())
	require.NoError(t, gz.Close())
	return buf.Bytes()
}

// buildZip returns a zip archive containing a single file `name` holding
// payload.
func buildZip(t *testing.T, name string, payload []byte) []byte {
	t.Helper()
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	w, err := zw.Create(name)
	require.NoError(t, err)
	_, err = w.Write(payload)
	require.NoError(t, err)
	require.NoError(t, zw.Close())
	return buf.Bytes()
}

// downloadAndExtractWith exposes downloadAndExtract with explicit goos/goarch
// for tests. It mirrors the logic of downloadAndExtract so callers do not need
// to mutate runtime.GOOS/GOARCH.
func (u *binaryUpdater) downloadAndExtractWith(ctx context.Context, channel, goos, goarch string) (io.Reader, func(), error) {
	release, err := u.fetchRelease(ctx, channel)
	if err != nil {
		return nil, nil, fmt.Errorf("fetch release: %w", err)
	}
	archiveAsset, checksumAsset, err := pickAssets(release.Assets, goos, goarch)
	if err != nil {
		return nil, nil, err
	}
	archiveBytes, err := u.downloadBytes(ctx, archiveAsset.BrowserDownloadURL)
	if err != nil {
		return nil, nil, fmt.Errorf("download archive: %w", err)
	}
	checksumBytes, err := u.downloadBytes(ctx, checksumAsset.BrowserDownloadURL)
	if err != nil {
		return nil, nil, fmt.Errorf("download checksum file: %w", err)
	}
	if err := verifyChecksum(archiveAsset.Name, archiveBytes, checksumBytes); err != nil {
		return nil, nil, fmt.Errorf("verify checksum: %w", err)
	}
	binaryName := expectedBinaryName(goos, goarch)
	// For non-darwin the alt fallback equals the expected name; for darwin we
	// want to try the arch-specific binary name too.
	alt := fmt.Sprintf("altmount-cli-%s-%s", goos, goarch)
	if goos == "windows" {
		alt += ".exe"
	}
	candidates := []string{binaryName, alt}
	var reader io.Reader
	switch {
	case strings.HasSuffix(archiveAsset.Name, ".tar.gz"):
		reader, err = extractFromTarGz(archiveBytes, candidates)
	case strings.HasSuffix(archiveAsset.Name, ".zip"):
		reader, err = extractFromZip(archiveBytes, candidates)
	default:
		return nil, nil, fmt.Errorf("unsupported archive: %s", archiveAsset.Name)
	}
	if err != nil {
		return nil, nil, err
	}
	return reader, func() {}, nil
}