26 changes: 26 additions & 0 deletions build-test-image.sh
@@ -0,0 +1,26 @@
#!/bin/bash

# Script to build a local test image for AltMount
# This uses the multi-stage Dockerfile which builds both frontend and backend

echo "🚀 Building AltMount test image..."

# Check if docker is installed
if ! command -v docker &> /dev/null; then
echo "❌ Error: docker is not installed."
exit 1
fi

# Build the image
# Use the repository root as the build context and the multi-stage Dockerfile in docker/
docker build -t altmount:test -f docker/Dockerfile .

if [ $? -eq 0 ]; then
echo "✅ Success! Test image 'altmount:test' created."
echo ""
echo "To run it:"
echo "docker run -p 8080:8080 -v ./config:/config -v ./metadata:/metadata altmount:test"
else
echo "❌ Error: Docker build failed."
exit 1
fi
30 changes: 30 additions & 0 deletions config/altmount.log
@@ -0,0 +1,30 @@
{"time":"2026-03-22T02:57:58.91794191Z","level":"INFO","msg":"OK 018_add_target_path.sql (27.11ms)"}
{"time":"2026-03-22T02:57:58.919872796Z","level":"INFO","msg":"goose: successfully migrated database to version: 18"}
{"time":"2026-03-22T02:57:58.919937059Z","level":"INFO","msg":"Starting server without NNTP providers - configure via API to enable downloads"}
{"time":"2026-03-22T02:57:58.920043125Z","level":"INFO","msg":"RClone RC notifications disabled"}
{"time":"2026-03-22T02:57:58.926747028Z","level":"INFO","msg":"Updated database connection pool","component":"importer-service","workers":2,"max_connections":6}
{"time":"2026-03-22T02:57:58.927155504Z","level":"INFO","msg":"Queue manager started","component":"queue-manager","workers":2}
{"time":"2026-03-22T02:57:58.927194381Z","level":"INFO","msg":"NZB import service started successfully with 2 workers","component":"importer-service"}
{"time":"2026-03-22T02:57:58.927222953Z","level":"INFO","msg":"Segment cache disabled"}
{"time":"2026-03-22T02:57:58.929924652Z","level":"INFO","msg":"Authentication service initialized"}
{"time":"2026-03-22T02:57:58.933070585Z","level":"INFO","msg":"Health system disabled - no health monitoring or repairs will occur"}
{"time":"2026-03-22T02:57:58.933746428Z","level":"INFO","msg":"ARR queue cleanup disabled (ARRs disabled)"}
{"time":"2026-03-22T02:57:58.933803834Z","level":"INFO","msg":"Arrs service is disabled in configuration"}
{"time":"2026-03-22T02:57:58.933834247Z","level":"INFO","msg":"AltMount server started","port":8080,"webdav_path":"/webdav","api_path":"/api","providers":0,"processor_workers":2}
{"time":"2026-03-22T02:58:00.934356216Z","level":"INFO","msg":"RClone mount service is disabled in configuration"}
{"time":"2026-03-22T02:58:03.93454291Z","level":"WARN","msg":"No admin API key found, skipping automatic webhook registration"}
{"time":"2026-03-22T02:58:13.228068067Z","level":"INFO","msg":"Received shutdown signal","signal":"terminated"}
{"time":"2026-03-22T02:58:13.228135982Z","level":"INFO","msg":"Starting graceful shutdown sequence"}
{"time":"2026-03-22T02:58:13.228145004Z","level":"ERROR","msg":"Failed to stop health worker","error":"health worker not running"}
{"time":"2026-03-22T02:58:13.228169162Z","level":"INFO","msg":"Shutting down server..."}
{"time":"2026-03-22T02:58:13.228214053Z","level":"INFO","msg":"Server shutdown completed"}
{"time":"2026-03-22T02:58:13.228219667Z","level":"INFO","msg":"AltMount server shutdown completed successfully"}
{"time":"2026-03-22T02:58:13.22822436Z","level":"INFO","msg":"Closing importer service"}
{"time":"2026-03-22T02:58:13.228237368Z","level":"INFO","msg":"Stopping NZB import service","component":"importer-service"}
{"time":"2026-03-22T02:58:13.228245373Z","level":"INFO","msg":"Stopping queue manager","component":"queue-manager"}
{"time":"2026-03-22T02:58:13.228257391Z","level":"INFO","msg":"Queue worker stopped","component":"queue-manager","worker_id":0}
{"time":"2026-03-22T02:58:13.228267634Z","level":"INFO","msg":"Queue worker stopped","component":"queue-manager","worker_id":1}
{"time":"2026-03-22T02:58:13.228305031Z","level":"INFO","msg":"Queue manager stopped","component":"queue-manager"}
{"time":"2026-03-22T02:58:13.228313284Z","level":"INFO","msg":"NZB import service stopped","component":"importer-service"}
{"time":"2026-03-22T02:58:13.228323531Z","level":"INFO","msg":"Clearing NNTP pool"}
{"time":"2026-03-22T02:58:13.228329396Z","level":"INFO","msg":"Closing database"}
12 changes: 12 additions & 0 deletions internal/api/arrs_handlers.go
@@ -307,6 +307,18 @@ func (s *Server) handleArrsWebhook(c *fiber.Ctx) error {
}
}

// Redundant Deletion Guard: ensure the file is gone from the local mount
if s.configManager != nil {
cfg := s.configManager.GetConfig()
if cfg.MountPath != "" {
localPath := filepath.Join(cfg.MountPath, metadataPath)
if _, err := os.Stat(localPath); err == nil {
slog.InfoContext(c.Context(), "Redundant Deletion Guard: Manual removal of ghost file from mount", "path", localPath)
_ = os.Remove(localPath)
}
}
}

}

// Process Directory Deletions
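A note on the deletion guard added above: the result of filepath.Join(cfg.MountPath, metadataPath) is removed as-is. A hardened variant (a sketch only, assuming metadataPath could carry relative components; not part of this diff) would confirm the resolved path stays inside the mount root before removing it:

	// Sketch: only remove the ghost file if it resolves inside cfg.MountPath.
	localPath := filepath.Join(cfg.MountPath, metadataPath)
	if rel, err := filepath.Rel(cfg.MountPath, localPath); err != nil || strings.HasPrefix(rel, "..") {
		slog.WarnContext(c.Context(), "Skipping removal outside mount root", "path", localPath)
	} else if _, err := os.Stat(localPath); err == nil {
		_ = os.Remove(localPath)
	}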
2 changes: 1 addition & 1 deletion internal/api/nzb_stremio_handlers.go
Expand Up @@ -181,7 +181,7 @@ func (s *Server) handleNzbStreams(c *fiber.Ctx) error {
}

priority := database.QueuePriorityHigh
item, err := s.importerService.AddToQueue(ctx, tempPath, basePath, &category, &priority, nil)
item, err := s.importerService.AddToQueue(ctx, tempPath, basePath, &category, &priority, nil, nil)
if err != nil {
os.Remove(tempPath)
return RespondInternalError(c, "Failed to add NZB to queue", err.Error())
8 changes: 4 additions & 4 deletions internal/api/queue_handlers.go
Expand Up @@ -647,7 +647,7 @@ func (s *Server) handleUploadToQueue(c *fiber.Ctx) error {

// For manually uploaded files, pass CompleteDir as the base path (not the temp upload directory)
// The category will be appended to this by processNzbItem in the service
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil)
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil, nil)
if err != nil {
// Clean up temp file on error
os.Remove(tempFile)
@@ -782,7 +782,7 @@ func (s *Server) handleUploadNZBLnk(c *fiber.Ctx) error {
}

priority := database.QueuePriority(req.Priority)
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil)
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil, nil)
if err != nil {
os.Remove(tempFile)
result.ErrorMessage = "Failed to add to queue: " + err.Error()
@@ -923,7 +923,7 @@ func (s *Server) handleSearchNZBByName(c *fiber.Ctx) error {
}

priority := database.QueuePriority(req.Priority)
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil)
item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil, nil)
if err != nil {
os.Remove(tempFile)
return RespondInternalError(c, "Failed to add to queue", err.Error())
@@ -1170,7 +1170,7 @@ func (s *Server) handleAddTestQueueItem(c *fiber.Ctx) error {
}
}

item, err := s.importerService.AddToQueue(c.Context(), tempPath, basePath, &category, &priority, nil)
item, err := s.importerService.AddToQueue(c.Context(), tempPath, basePath, &category, &priority, nil, nil)
if err != nil {
os.Remove(tempPath)
return RespondInternalError(c, "Failed to add test file to queue", err.Error())
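All four call sites above pass nil for the new trailing argument of AddToQueue. Judging from the SABnzbd handlers later in this diff, that argument appears to be an optional download ID (a *string GUID) used for Sonarr/Radarr tracking. A caller that wants stable tracking could supply one (sketch only; the parameter's meaning is assumed from those handlers):

	// Sketch: pass a stable download ID instead of nil for the final argument.
	downloadID := uuid.New().String()
	item, err := s.importerService.AddToQueue(c.Context(), tempFile, basePath, categoryPtr, &priority, nil, &downloadID)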
135 changes: 100 additions & 35 deletions internal/api/sabnzbd_handlers.go
@@ -20,6 +20,7 @@ import (

"github.com/gofiber/fiber/v2"
"github.com/javi11/altmount/internal/arrs"
"github.com/google/uuid"
"github.com/javi11/altmount/internal/config"
"github.com/javi11/altmount/internal/database"
"github.com/javi11/altmount/internal/httpclient"
@@ -365,18 +366,25 @@ func (s *Server) handleSABnzbdAddFile(c *fiber.Ctx) error {
}
}

// Generate a stable download ID (GUID) for Sonarr/Radarr tracking
// Some indexers provide a GUID in the 'nzbname' or 'name' parameter
downloadID := c.FormValue("nzbname")
if downloadID == "" {
downloadID = uuid.New().String()
}

// Add the file to the processing queue using centralized method
completeDir := s.configManager.GetConfig().SABnzbd.CompleteDir
priority := s.parseSABnzbdPriority(c.FormValue("priority"))
item, err := s.importerService.AddToQueue(c.Context(), tempFile, &completeDir, &validatedCategory, &priority, metadataJSON)
_, err = s.importerService.AddToQueue(c.Context(), tempFile, &completeDir, &validatedCategory, &priority, metadataJSON, &downloadID)
if err != nil {
return s.writeSABnzbdErrorFiber(c, "Failed to add to queue")
}

// Return success response
response := SABnzbdAddResponse{
Status: true,
NzoIds: []string{fmt.Sprintf("%d", item.ID)},
NzoIds: []string{downloadID},
}

return s.writeSABnzbdResponseFiber(c, response)
@@ -507,15 +515,27 @@ func (s *Server) handleSABnzbdAddUrl(c *fiber.Ctx) error {
// Add the file to the processing queue using centralized method
completeDir := s.configManager.GetConfig().SABnzbd.CompleteDir
priority := s.parseSABnzbdPriority(c.Query("priority"))
item, err := s.importerService.AddToQueue(c.Context(), tempFile, &completeDir, &validatedCategory, &priority, metadataJSON)

// Generate or extract stable download ID for tracking
// Some indexers provide a GUID in the 'nzbname' or 'name' parameter
downloadID := c.Query("nzbname")
if downloadID == "" {
// Use filename (without extension) as a fallback ID if it looks like a GUID
downloadID = strings.TrimSuffix(filename, filepath.Ext(filename))
if len(downloadID) < 20 { // Simple heuristic: GUIDs are usually long
downloadID = uuid.New().String()
}
}

_, err = s.importerService.AddToQueue(c.Context(), tempFile, &completeDir, &validatedCategory, &priority, metadataJSON, &downloadID)
if err != nil {
return s.writeSABnzbdErrorFiber(c, "Failed to add to queue")
}

// Return success response
response := SABnzbdAddResponse{
Status: true,
NzoIds: []string{fmt.Sprintf("%d", item.ID)},
NzoIds: []string{downloadID},
}

return s.writeSABnzbdResponseFiber(c, response)
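
The len(downloadID) < 20 heuristic above treats any sufficiently long filename stem as a GUID. A stricter fallback (a sketch, not part of this change) could parse the stem with the google/uuid package this file now imports, and only mint a fresh ID when parsing fails:

	// Sketch: accept the filename stem only if it parses as a canonical UUID.
	candidate := strings.TrimSuffix(filename, filepath.Ext(filename))
	if _, err := uuid.Parse(candidate); err == nil {
		downloadID = candidate
	} else {
		downloadID = uuid.New().String()
	}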
@@ -679,23 +699,32 @@ func (s *Server) handleSABnzbdHistory(c *fiber.Ctx) error {
// Get category filter from query parameter
categoryFilter := s.normalizeCategoryFilter(c)

// Get specific job IDs if requested
nzoIDs := make(map[string]bool)
if ids := c.Query("nzo_ids"); ids != "" {
for _, id := range strings.Split(ids, ",") {
nzoIDs[strings.TrimSpace(id)] = true
}
}

// Get pagination parameters
start := 0
if s := c.Query("start"); s != "" {
if val, err := strconv.Atoi(s); err == nil {
start = val
}
}
limit := 50
limit := 0 // 0 means all items in SABnzbd
if l := c.Query("limit"); l != "" {
if val, err := strconv.Atoi(l); err == nil {
limit = val
}
}

// Get completed items from active queue (not yet deleted)
// Fetch items from active queue
// We use a larger set here to ensure we get everything for deduplication and combined history
completedStatus := database.QueueStatusCompleted
completedQueueItems, err := s.queueRepo.ListQueueItems(c.Context(), &completedStatus, "", categoryFilter, limit, start, "updated_at", "desc")
completedQueueItems, err := s.queueRepo.ListQueueItems(c.Context(), &completedStatus, "", categoryFilter, 10000, 0, "updated_at", "desc")
if err != nil {
return s.writeSABnzbdErrorFiber(c, "Failed to get completed items from queue")
}
@@ -714,6 +743,16 @@ func (s *Server) handleSABnzbdHistory(c *fiber.Ctx) error {

for _, item := range completedQueueItems {
name := filepath.Base(item.NzbPath)
// Filter by nzo_ids if requested (check both integer ID and DownloadID)
if len(nzoIDs) > 0 {
match := nzoIDs[fmt.Sprintf("%d", item.ID)]
if !match && item.DownloadID != nil {
match = nzoIDs[*item.DownloadID]
}
if !match {
continue
}
}
if !seenNames[name] {
finalItems = append(finalItems, item)
seenNames[name] = true
Expand All @@ -727,13 +766,26 @@ func (s *Server) handleSABnzbdHistory(c *fiber.Ctx) error {
continue
}

if !seenNames[item.NzbName] {
id := item.ID
if item.NzbID != nil {
id = *item.NzbID
id := item.ID
if item.NzbID != nil {
id = *item.NzbID
}

// Filter by nzo_ids if requested
if len(nzoIDs) > 0 {
match := nzoIDs[fmt.Sprintf("%d", id)]
if !match && item.DownloadID != nil {
match = nzoIDs[*item.DownloadID]
}
if !match {
continue
}
}

if !seenNames[item.NzbName] {
qItem := &database.ImportQueueItem{
ID: id,
DownloadID: item.DownloadID,
NzbPath: item.NzbName,
Status: database.QueueStatusCompleted,
FileSize: &item.FileSize,
Expand All @@ -748,41 +800,54 @@ func (s *Server) handleSABnzbdHistory(c *fiber.Ctx) error {

// Get failed items from active queue
failedStatus := database.QueueStatusFailed
failed, err := s.queueRepo.ListQueueItems(c.Context(), &failedStatus, "", categoryFilter, limit, start, "updated_at", "desc")
failed, err := s.queueRepo.ListQueueItems(c.Context(), &failedStatus, "", categoryFilter, 1000, 0, "updated_at", "desc")
if err != nil {
return s.writeSABnzbdErrorFiber(c, "Failed to get failed items")
}

// Get total failed count
totalFailed, err := s.queueRepo.CountQueueItems(c.Context(), &failedStatus, "", categoryFilter)
if err != nil {
totalFailed = len(failed)
// Combine failed items for noofslots calculation
for _, item := range failed {
name := filepath.Base(item.NzbPath)
// Filter by nzo_ids if requested
if len(nzoIDs) > 0 {
match := nzoIDs[fmt.Sprintf("%d", item.ID)]
if !match && item.DownloadID != nil {
match = nzoIDs[*item.DownloadID]
}
if !match {
continue
}
}
if !seenNames[name] {
finalItems = append(finalItems, item)
seenNames[name] = true
}
}

// Combine and convert to SABnzbd format
slots := make([]SABnzbdHistorySlot, 0, len(finalItems)+len(failed))
index := 0
var totalBytes int64
// Total available items before pagination
totalAvailableCount := len(finalItems)

for _, item := range finalItems {
// Calculate category-specific base path
itemBasePath := s.calculateItemBasePath()
finalPath := s.calculateHistoryStoragePath(item, itemBasePath)
// Apply pagination (start and limit)
if start < len(finalItems) {
finalItems = finalItems[start:]
} else {
finalItems = []*database.ImportQueueItem{}
}

slot := ToSABnzbdHistorySlot(item, start+index, finalPath)
slots = append(slots, slot)
totalBytes += slot.Bytes
index++
if limit > 0 && len(finalItems) > limit {
finalItems = finalItems[:limit]
}
for _, item := range failed {
// Calculate category-specific base path for this item
itemBasePath := s.calculateItemBasePath()
finalPath := s.calculateHistoryStoragePath(item, itemBasePath)

slot := ToSABnzbdHistorySlot(item, start+index, finalPath)
// Combine and convert to SABnzbd format
slots := make([]SABnzbdHistorySlot, 0, len(finalItems))
var totalBytes int64
itemBasePath := s.calculateItemBasePath()

for i, item := range finalItems {
finalPath := s.calculateHistoryStoragePath(item, itemBasePath)
slot := ToSABnzbdHistorySlot(item, start+i, finalPath)
slots = append(slots, slot)
totalBytes += slot.Bytes
index++
}

// Create the proper history response structure using the new struct
Expand All @@ -794,7 +859,7 @@ func (s *Server) handleSABnzbdHistory(c *fiber.Ctx) error {
WeekSize: "0 B",
Version: "4.5.0",
DaySize: "0 B",
Noofslots: len(finalItems) + totalFailed,
Noofslots: totalAvailableCount,
},
}

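The nzo_ids membership check above is written out three times: for completed queue items, for imported history items, and for failed items. A small helper (sketch only, assuming the item ID is an int64 as suggested by the %d formatting above) would keep the three filters identical:

	// Sketch: shared nzo_ids filter for the three history loops above.
	func matchesNzoIDs(nzoIDs map[string]bool, id int64, downloadID *string) bool {
		if len(nzoIDs) == 0 {
			return true // no filter requested
		}
		if nzoIDs[fmt.Sprintf("%d", id)] {
			return true
		}
		return downloadID != nil && nzoIDs[*downloadID]
	}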
16 changes: 14 additions & 2 deletions internal/api/sabnzbd_types.go
@@ -379,9 +379,15 @@ func ToSABnzbdQueueSlot(item *database.ImportQueueItem, index int, progressBroad

sizeLeftBytes := int64((100 - progressPercentage) * int(totalSizeBytes) / 100)

// Use DownloadID (GUID) as NzoID for stable tracking
nzoID := fmt.Sprintf("%d", item.ID)
if item.DownloadID != nil && *item.DownloadID != "" {
nzoID = *item.DownloadID
}

return SABnzbdQueueSlot{
Index: index,
NzoID: fmt.Sprintf("%d", item.ID),
NzoID: nzoID,
Priority: priority,
Filename: jobName,
Cat: category,
@@ -503,10 +509,16 @@ func ToSABnzbdHistorySlot(item *database.ImportQueueItem, index int, finalPath s
}
}

// Use DownloadID (GUID) as NzoID for stable tracking
nzoID := fmt.Sprintf("%d", item.ID)
if item.DownloadID != nil && *item.DownloadID != "" {
nzoID = *item.DownloadID
}

return SABnzbdHistorySlot{
Index: index,

NzoID: fmt.Sprintf("%d", item.ID),
NzoID: nzoID,

Name: jobName,

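Both converters above repeat the DownloadID-or-numeric-ID fallback. A shared helper (sketch only; field types assumed from the code above) could express the rule once:

	// Sketch: stable nzo_id selection shared by queue and history slots.
	func nzoIDFor(item *database.ImportQueueItem) string {
		if item.DownloadID != nil && *item.DownloadID != "" {
			return *item.DownloadID
		}
		return fmt.Sprintf("%d", item.ID)
	}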