diff --git a/pkg/postie/postie.go b/pkg/postie/postie.go index 5be20f9..5f48069 100644 --- a/pkg/postie/postie.go +++ b/pkg/postie/postie.go @@ -255,8 +255,7 @@ func (p *Postie) postInParallel( var par2OutputDir string if p.par2Cfg.MaintainPar2Files != nil && *p.par2Cfg.MaintainPar2Files { // Generate PAR2 files directly in output directory - dirPath := filepath.Dir(f.Path) - relativePath := strings.TrimPrefix(dirPath, rootDir) + relativePath := relativePathFrom(rootDir, f.Path) par2OutputDir = filepath.Join(outputDir, relativePath) slog.DebugContext(ctx, "Generating PAR2 files directly in output directory", @@ -309,11 +308,10 @@ func (p *Postie) postInParallel( } // Generate single NZB file for all files - dirPath := filepath.Dir(f.Path) - dirPath = strings.TrimPrefix(dirPath, rootDir) + relativePath := relativePathFrom(rootDir, f.Path) // Use the original filename as input for NZB generation - nzbPath := filepath.Join(outputDir, dirPath, filepath.Base(f.Path)) + nzbPath := filepath.Join(outputDir, relativePath, filepath.Base(f.Path)) finalPath, err := nzbGen.Generate(nzbPath) if err != nil { return "", fmt.Errorf("error generating NZB file: %w", err) @@ -366,8 +364,7 @@ func (p *Postie) post( var par2OutputDir string if p.par2Cfg.MaintainPar2Files != nil && *p.par2Cfg.MaintainPar2Files { // Generate PAR2 files directly in output directory - dirPath := filepath.Dir(f.Path) - relativePath := strings.TrimPrefix(dirPath, rootDir) + relativePath := relativePathFrom(rootDir, f.Path) par2OutputDir = filepath.Join(outputDir, relativePath) slog.DebugContext(ctx, "Generating PAR2 files directly in output directory", @@ -401,11 +398,10 @@ func (p *Postie) post( } // Generate single NZB file for all files - dirPath := filepath.Dir(f.Path) - dirPath = strings.TrimPrefix(dirPath, rootDir) + relativePath := relativePathFrom(rootDir, f.Path) // Use the original filename as input for NZB generation - nzbPath := filepath.Join(outputDir, dirPath, filepath.Base(f.Path)) + nzbPath := 
filepath.Join(outputDir, relativePath, filepath.Base(f.Path)) finalPath, err := nzbGen.Generate(nzbPath) if err != nil { return "", fmt.Errorf("error generating NZB file: %w", err) @@ -747,3 +743,15 @@ func deriveFolderName(rootDir string, files []fileinfo.FileInfo) string { } return name } + +// relativePathFrom computes the relative path of filePath's directory from rootDir. +// Falls back to empty string (placing output directly in outputDir) if paths +// cannot be made relative (e.g. cross-volume on Windows). +func relativePathFrom(rootDir, filePath string) string { + dirPath := filepath.Dir(filePath) + rel, err := filepath.Rel(rootDir, dirPath) + if err != nil || rel == "." { + return "" + } + return rel +} diff --git a/pkg/postie/postie_crossvolume_test.go b/pkg/postie/postie_crossvolume_test.go new file mode 100644 index 0000000..9c01a3d --- /dev/null +++ b/pkg/postie/postie_crossvolume_test.go @@ -0,0 +1,127 @@ +package postie + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/javi11/postie/pkg/fileinfo" +) + +// withinDir checks whether path is inside dir. +func withinDir(path, dir string) bool { + cleanPath := filepath.Clean(path) + cleanDir := filepath.Clean(dir) + string(filepath.Separator) + return strings.HasPrefix(cleanPath, cleanDir) +} + +// TestPostCrossVolumeNZBInOutputDir verifies that non-folder mode (post/postInParallel) +// places the NZB in the output directory, not the watch (source) directory, even when +// the two are on different volumes (no shared path prefix). 
+func TestPostCrossVolumeNZBInOutputDir(t *testing.T) { + watchDir := t.TempDir() + outputDir := t.TempDir() + + // Create a source file in the watch directory + srcFile := filepath.Join(watchDir, "movie.mkv") + if err := os.WriteFile(srcFile, []byte("content"), 0644); err != nil { + t.Fatalf("write source file: %v", err) + } + + par2mock := &mockPar2Executor{} + p := newTestPostie(par2mock, true, false) + + // post() is the sequential (waitForPar2=true) non-folder path + nzbPath, err := p.post(context.Background(), fileinfo.FileInfo{ + Path: srcFile, + Size: 7, + }, watchDir, outputDir) + if err != nil { + t.Fatalf("post() returned error: %v", err) + } + + // NZB must be inside the output directory + if !withinDir(nzbPath, outputDir) { + t.Errorf("NZB placed outside output dir:\n nzbPath: %s\n outputDir: %s", nzbPath, outputDir) + } + + // NZB must NOT be in the watch directory + if withinDir(nzbPath, watchDir) { + t.Errorf("NZB leaked into watch dir:\n nzbPath: %s\n watchDir: %s", nzbPath, watchDir) + } + + // Verify the file actually exists + if _, err := os.Stat(nzbPath); os.IsNotExist(err) { + t.Errorf("NZB file does not exist at %q", nzbPath) + } +} + +// TestPostInParallelCrossVolumeNZBInOutputDir does the same check for the parallel path. 
+func TestPostInParallelCrossVolumeNZBInOutputDir(t *testing.T) { + watchDir := t.TempDir() + outputDir := t.TempDir() + + srcFile := filepath.Join(watchDir, "movie.mkv") + if err := os.WriteFile(srcFile, []byte("content"), 0644); err != nil { + t.Fatalf("write source file: %v", err) + } + + par2mock := &mockPar2Executor{} + p := newTestPostie(par2mock, false, false) + + nzbPath, err := p.postInParallel(context.Background(), fileinfo.FileInfo{ + Path: srcFile, + Size: 7, + }, watchDir, outputDir) + if err != nil { + t.Fatalf("postInParallel() returned error: %v", err) + } + + if !withinDir(nzbPath, outputDir) { + t.Errorf("NZB placed outside output dir:\n nzbPath: %s\n outputDir: %s", nzbPath, outputDir) + } + + if withinDir(nzbPath, watchDir) { + t.Errorf("NZB leaked into watch dir:\n nzbPath: %s\n watchDir: %s", nzbPath, watchDir) + } + + if _, err := os.Stat(nzbPath); os.IsNotExist(err) { + t.Errorf("NZB file does not exist at %q", nzbPath) + } +} + +// TestPostCrossVolumeWithSubdirectory verifies that files in a subdirectory of the +// watch folder maintain their relative path structure in the output directory. 
+func TestPostCrossVolumeWithSubdirectory(t *testing.T) { + watchDir := t.TempDir() + outputDir := t.TempDir() + + // Create a source file in a subdirectory of the watch folder + subDir := filepath.Join(watchDir, "subfolder") + if err := os.MkdirAll(subDir, 0755); err != nil { + t.Fatalf("mkdir subfolder: %v", err) + } + srcFile := filepath.Join(subDir, "movie.mkv") + if err := os.WriteFile(srcFile, []byte("content"), 0644); err != nil { + t.Fatalf("write source file: %v", err) + } + + par2mock := &mockPar2Executor{} + p := newTestPostie(par2mock, true, false) + + nzbPath, err := p.post(context.Background(), fileinfo.FileInfo{ + Path: srcFile, + Size: 7, + }, watchDir, outputDir) + if err != nil { + t.Fatalf("post() returned error: %v", err) + } + + // NZB should be in outputDir/subfolder/ + expectedDir := filepath.Join(outputDir, "subfolder") + if filepath.Dir(nzbPath) != expectedDir { + t.Errorf("NZB not in expected subdirectory:\n got: %s\n want: %s/", filepath.Dir(nzbPath), expectedDir) + } +} diff --git a/tests/e2e/helpers_test.go b/tests/e2e/helpers_test.go index e629730..7c4ad1b 100644 --- a/tests/e2e/helpers_test.go +++ b/tests/e2e/helpers_test.go @@ -111,6 +111,9 @@ func newChromedpCtx(t *testing.T) (context.Context, context.CancelFunc) { chromedp.Flag("headless", true), chromedp.NoSandbox, chromedp.Flag("disable-gpu", true), + // Workaround for Chromium ThreadCache crash on newer Linux kernels + // (FATAL:scheduler_loop_quarantine_support.h Check failed: ThreadCache::IsValid) + chromedp.Flag("disable-features", "PartitionAlloc"), ) allocCtx, allocCancel := chromedp.NewExecAllocator(context.Background(), opts...) 
ctx, cancel := chromedp.NewContext(allocCtx) diff --git a/tests/e2e/nntp_test.go b/tests/e2e/nntp_test.go index caee4a0..3153ac8 100644 --- a/tests/e2e/nntp_test.go +++ b/tests/e2e/nntp_test.go @@ -7,12 +7,15 @@ import ( "fmt" "net" "strings" + "sync" "time" ) type fakeNntpServer struct { - listener net.Listener - port int + listener net.Listener + port int + mu sync.Mutex + articleCount int } func startFakeNntpServer() (*fakeNntpServer, error) { @@ -43,23 +46,49 @@ func (s *fakeNntpServer) handleConn(conn net.Conn) { // RFC 3977 §5.1 greeting fmt.Fprintf(conn, "200 Postie-test NNTP server ready\r\n") - scanner := bufio.NewScanner(conn) - for scanner.Scan() { - line := strings.ToUpper(strings.TrimSpace(scanner.Text())) - if line == "" { + reader := bufio.NewReader(conn) + for { + line, err := reader.ReadString('\n') + if err != nil { + return // connection closed + } + cmd := strings.ToUpper(strings.TrimSpace(line)) + if cmd == "" { continue } switch { - case strings.HasPrefix(line, "AUTHINFO USER"): + case strings.HasPrefix(cmd, "AUTHINFO USER"): fmt.Fprintf(conn, "381 Enter password\r\n") - case strings.HasPrefix(line, "AUTHINFO PASS"): + case strings.HasPrefix(cmd, "AUTHINFO PASS"): fmt.Fprintf(conn, "281 Authentication accepted\r\n") - case line == "CAPABILITIES": + case cmd == "CAPABILITIES": fmt.Fprintf(conn, "101 Capability list:\r\nVERSION 2\r\nREADER\r\nPOST\r\nDATE\r\n.\r\n") - case line == "DATE": + case cmd == "DATE": // nntppool sends DATE as its connectivity ping (RFC 3977 §7.1) fmt.Fprintf(conn, "111 %s\r\n", time.Now().UTC().Format("20060102150405")) - case line == "QUIT": + case cmd == "POST": + // Phase 1: accept article + fmt.Fprintf(conn, "340 Send article\r\n") + // Phase 2: read article body until lone ".\r\n" + for { + bodyLine, err := reader.ReadString('\n') + if err != nil { + return + } + if strings.TrimRight(bodyLine, "\r\n") == "." 
{ + break + } + } + // Count the accepted article; no Message-ID parsing is done (STAT below unconditionally reports success) + s.mu.Lock() + s.articleCount++ + s.mu.Unlock() + fmt.Fprintf(conn, "240 Article posted\r\n") + case strings.HasPrefix(cmd, "STAT "): + // Post-check: always report article exists + msgID := strings.TrimSpace(line[5:]) + fmt.Fprintf(conn, "223 0 %s\r\n", msgID) + case cmd == "QUIT": fmt.Fprintf(conn, "205 closing connection\r\n") return default: diff --git a/tests/e2e/output_dir_test.go b/tests/e2e/output_dir_test.go new file mode 100644 index 0000000..8515a6c --- /dev/null +++ b/tests/e2e/output_dir_test.go @@ -0,0 +1,268 @@ +//go:build e2e + +// Tests MUST NOT call t.Parallel() — they share one server instance. +package e2e_test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +// ── helpers ───────────────────────────────────────────────────────────────────── + +// uploadFile sends a single file to POST /api/upload via multipart form. +func uploadFile(t *testing.T, filePath string) { + t.Helper() + + file, err := os.Open(filePath) + if err != nil { + t.Fatalf("open file: %v", err) + } + defer file.Close() + + var body bytes.Buffer + writer := multipart.NewWriter(&body) + part, err := writer.CreateFormFile("files", filepath.Base(filePath)) + if err != nil { + t.Fatalf("create form file: %v", err) + } + if _, err := io.Copy(part, file); err != nil { + t.Fatalf("copy file to form: %v", err) + } + writer.Close() + + resp, err := http.Post(baseURL+"/api/upload", writer.FormDataContentType(), &body) + if err != nil { + t.Fatalf("POST /api/upload: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + respBody, _ := io.ReadAll(resp.Body) + t.Fatalf("POST /api/upload returned %d: %s", resp.StatusCode, respBody) + } +} + +// queueResponse represents the paginated queue API response. 
+type queueResponse struct { + Items []struct { + ID string `json:"id"` + Status string `json:"status"` + NzbPath *string `json:"nzbPath"` + } `json:"items"` + TotalItems int `json:"totalItems"` +} + +// waitForQueueComplete polls the queue API until at least one item reaches +// "complete" status or the timeout elapses. Returns the final queue response. +func waitForQueueComplete(t *testing.T, timeout time.Duration) queueResponse { + t.Helper() + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + // Check for completed items + resp, err := http.Get(baseURL + "/api/queue?status=complete&limit=100") + if err != nil { + t.Logf("GET /api/queue: %v (retrying)", err) + time.Sleep(1 * time.Second) + continue + } + + var qr queueResponse + err = json.NewDecoder(resp.Body).Decode(&qr) + resp.Body.Close() + if err != nil { + t.Logf("decode queue response: %v (retrying)", err) + time.Sleep(1 * time.Second) + continue + } + + if qr.TotalItems > 0 { + return qr + } + + // Log current queue state for debugging + if statsResp, err := http.Get(baseURL + "/api/queue/stats"); err == nil { + var stats map[string]any + if json.NewDecoder(statsResp.Body).Decode(&stats) == nil { + t.Logf("queue stats: %v", stats) + } + statsResp.Body.Close() + } + + time.Sleep(2 * time.Second) + } + + t.Fatal("timed out waiting for queue items to complete") + return queueResponse{} // unreachable +} + +// clearQueue deletes all queue items so tests start clean. +func clearQueue(t *testing.T) { + t.Helper() + req, _ := http.NewRequest(http.MethodDelete, baseURL+"/api/queue", nil) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("DELETE /api/queue: %v", err) + } + resp.Body.Close() +} + +// ── tests ─────────────────────────────────────────────────────────────────────── + +// TestOutputDir_UploadPlacesNzbInOutputDir uploads a file and verifies the +// generated NZB lands inside the configured output directory, not in the +// source/temp directory. 
This is the end-to-end validation for the cross-volume +// fix (replacing strings.TrimPrefix with filepath.Rel). +func TestOutputDir_UploadPlacesNzbInOutputDir(t *testing.T) { + // 1. Configure a dedicated output directory + outputDir := t.TempDir() + cfg := getConfig(t) + cfg["output_dir"] = outputDir + // Disable par2 and post_check to speed up the test + if par2, ok := cfg["par2"].(map[string]any); ok { + par2["enabled"] = false + } + if postCheck, ok := cfg["post_check"].(map[string]any); ok { + postCheck["enabled"] = false + } + saveConfig(t, cfg) + + // 2. Clear any existing queue items + clearQueue(t) + + // 3. Create a source file in a separate temp dir (simulates different volume) + sourceDir := t.TempDir() + srcFile := filepath.Join(sourceDir, "test_upload.bin") + // Write enough data for a minimal article (at least a few bytes) + if err := os.WriteFile(srcFile, bytes.Repeat([]byte("X"), 1024), 0644); err != nil { + t.Fatalf("write source file: %v", err) + } + + // 4. Upload the file + uploadFile(t, srcFile) + + // 5. Wait for the job to complete + qr := waitForQueueComplete(t, 30*time.Second) + + // 6. 
Verify the NZB path is inside the output directory + if len(qr.Items) == 0 { + t.Fatal("no completed queue items found") + } + + for _, item := range qr.Items { + if item.NzbPath == nil { + t.Errorf("queue item %s completed but has nil nzbPath", item.ID) + continue + } + nzbPath := *item.NzbPath + + // NZB must be inside the output directory + absNzb, _ := filepath.Abs(nzbPath) + absOut, _ := filepath.Abs(outputDir) + if !strings.HasPrefix(absNzb, absOut+string(filepath.Separator)) && absNzb != absOut { + t.Errorf("NZB not in output dir:\n nzbPath: %s\n outputDir: %s", nzbPath, outputDir) + } + + // NZB must NOT be in the source directory + absSrc, _ := filepath.Abs(sourceDir) + if strings.HasPrefix(absNzb, absSrc+string(filepath.Separator)) { + t.Errorf("NZB leaked into source dir:\n nzbPath: %s\n sourceDir: %s", nzbPath, sourceDir) + } + + // Verify the NZB file actually exists on disk + if _, err := os.Stat(nzbPath); os.IsNotExist(err) { + t.Errorf("NZB file does not exist at %q", nzbPath) + } + + t.Logf("NZB correctly placed at: %s", nzbPath) + } +} + +// TestOutputDir_ConfigPersistsRoundTrip verifies that output_dir survives a +// config save/load cycle. +func TestOutputDir_ConfigPersistsRoundTrip(t *testing.T) { + tmpDir := t.TempDir() + cfg := getConfig(t) + cfg["output_dir"] = tmpDir + saveConfig(t, cfg) + + cfg = getConfig(t) + got, _ := cfg["output_dir"].(string) + if got != tmpDir { + t.Errorf("expected output_dir=%q, got %q", tmpDir, got) + } +} + +// TestOutputDir_ConfigCanBeCleared verifies output_dir can be reset to empty. 
+func TestOutputDir_ConfigCanBeCleared(t *testing.T) { + tmpDir := t.TempDir() + cfg := getConfig(t) + cfg["output_dir"] = tmpDir + saveConfig(t, cfg) + + cfg = getConfig(t) + cfg["output_dir"] = "" + saveConfig(t, cfg) + + cfg = getConfig(t) + got, _ := cfg["output_dir"].(string) + if got != "" { + t.Errorf("expected output_dir=\"\", got %q", got) + } +} + +// TestOutputDir_IndependentOfSingleNzbPerFolder verifies that output_dir +// persists independently of the watcher single_nzb_per_folder setting. +func TestOutputDir_IndependentOfSingleNzbPerFolder(t *testing.T) { + tmpDir := t.TempDir() + + cfg := getConfig(t) + cfg["output_dir"] = tmpDir + saveConfig(t, cfg) + + patchWatcherConfig(t, map[string]any{"single_nzb_per_folder": false}) + + cfg = getConfig(t) + got, _ := cfg["output_dir"].(string) + if got != tmpDir { + t.Errorf("output_dir lost after patching watcher config: want %q, got %q", tmpDir, got) + } + + watchers, _ := cfg["watchers"].([]any) + if len(watchers) == 0 { + t.Fatal("no watchers in config") + } + w, _ := watchers[0].(map[string]any) + if w["single_nzb_per_folder"] != false { + t.Errorf("expected single_nzb_per_folder=false, got %v", w["single_nzb_per_folder"]) + } +} + +// assertQueueEmpty is a best-effort helper that logs (but does not fail the test) +// when pending or running queue items remain. +func assertQueueEmpty(t *testing.T) { + t.Helper() + resp, err := http.Get(fmt.Sprintf("%s/api/queue/stats", baseURL)) + if err != nil { + t.Logf("GET /api/queue/stats: %v (skipping check)", err) + return + } + defer resp.Body.Close() + var stats struct { + Pending int `json:"pending"` + Running int `json:"running"` + } + if err := json.NewDecoder(resp.Body).Decode(&stats); err == nil { + if stats.Pending > 0 || stats.Running > 0 { + t.Logf("queue not empty: pending=%d running=%d", stats.Pending, stats.Running) + } + } +}