diff --git a/.github/config.yml b/.github/config.yml index 8afbfd7aaf6..b681372f727 100644 --- a/.github/config.yml +++ b/.github/config.yml @@ -1,2 +1,2 @@ -PR_TITLE_REGEX: /(feat|fix|breaking|build|chore|docs|style|refactor|test)\((app|cli|server|providers|deps|site|ci|infra|general)\): .*/ -COMMIT_MESSAGE_REGEX: /(feat|fix|breaking|build|chore|docs|style|refactor|test)\((app|cli|server|providers|deps|site|ci|infra|general)\): .*/ +PR_TITLE_REGEX: /(feat|fix|breaking|build|chore|docs|style|refactor|test)\((ci|cli|deps-dev|deps|general|infra|kopiaui|lint|notifications|providers|repository|server|site|snapshots|testing|ui)\): .*/ +COMMIT_MESSAGE_REGEX: /(feat|fix|breaking|build|chore|docs|style|refactor|test)\((ci|cli|deps-dev|deps|general|infra|kopiaui|lint|notifications|providers|repository|server|site|snapshots|testing|ui)\): .*/ diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000000..1e34eb6cc61 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,411 @@ +# Kopia Copilot Instructions + +## When reviewing code, focus on: + +### Security Critical Issues +- Check for hardcoded secrets, API keys, or credentials +- Verify proper input validation and sanitization +- Review authentication and authorization logic + +### Code Quality Essentials +- Functions should be focused and appropriately sized +- Use clear, descriptive naming conventions +- Ensure proper error handling throughout + +### Performance Issues +- Spot inefficient loops and algorithmic issues +- Check for memory leaks and resource cleanup +- Review caching opportunities for expensive operations + +## Review Style +- Be specific and actionable in feedback +- Explain the rationale behind recommendations +- Acknowledge good patterns when you see them +- Ask clarifying questions when code intent is unclear + +## Review Test Coverage +- Ensure there are tests that cover and exercise the new or changed functionality + +Always prioritize 
security vulnerabilities and performance issues that could impact users. + +Always suggest changes to improve readability. + +## Repository Overview + +Kopia is a fast and secure open-source backup/restore tool written in **Go** that creates encrypted snapshots and saves them to remote storage. The repository is approximately 15MB with ~1,000 Go files. + +**Key Technologies:** +- **Backend:** Go (primary language) +- **Build System:** GNU Make with cross-platform support (Windows/Linux/macOS/ARM) +- **UI:** React-based HTML UI (embedded via go:embed, source at github.com/kopia/htmlui) +- **Desktop App:** Electron-based KopiaUI wrapper +- **Website:** Hugo static site generator + +## Build Commands + +### Setup (Required Before Building) +```bash +make -j4 ci-setup +``` +**Time:** ~30-60 seconds +**What it does:** Downloads Go modules, installs build tools (gotestsum, golangci-lint, hugo, node), and installs npm dependencies for the app. **ALWAYS run this after cloning or when build tools are missing.** + +## Linting + +**Run linter:** +```bash +make lint +``` +**Time:** ~3-4 minutes +**Linter:** golangci-lint with timeout of 1200s +**Config:** `.golangci.yml` (extensive configuration with 40+ enabled linters) + +**Auto-fix linting issues:** +```bash +make lint-fix +``` + +**Check code locks:** +```bash +make check-locks +``` +**Note:** Not available on linux/arm64 or linux/arm. + +**Check JavaScript/TypeScript formatting (in app directory):** +```bash +make check-prettier +``` + +**Important:** Linting is **NOT** run on linux/arm64 or linux/arm platforms to avoid issues. + +### Building Kopia CLI + +**Build without UI (faster for testing):** +```bash +make install-noui +``` +**Output:** `~/go/bin/kopia` +**Time:** ~5-10 seconds +**Use this for:** Testing CLI changes that don't involve the UI. + +**Race detector build:** +```bash +make install-race +``` +**Use this for:** Debugging race conditions. 
+ +**Full build with embedded HTML UI:** +```bash +make install +``` +**Output:** `~/go/bin/kopia` +**Time:** ~10-20 seconds +**Note:** Embeds HTML UI from github.com/kopia/htmluibuild dependency. + +### Building KopiaUI Desktop App + +**Prerequisites:** Must build kopia CLI first (creates embedded binary). + +```bash +make kopia-ui +``` +**Output:** `dist/kopia-ui/` directory with platform-specific installers +**Time:** ~2-5 minutes +**Note:** Only works on amd64 architectures. On Linux, may require xvfb for headless testing. + +## Testing + +### Unit Tests (Standard) +```bash +make test +``` +**Time:** ~2-4 minutes +**Runs:** All unit tests with gotestsum, excludes TestIndexBlobManagerStress +**Timeout:** 1200s (20 minutes) per test +**Format:** pkgname-and-test-fails + +### Unit Tests with Coverage +```bash +make test-with-coverage +``` +**Output:** `coverage.txt` +**Time:** ~3-5 minutes +**Note:** Used by Code Coverage workflow. Sets KOPIA_COVERAGE_TEST=1 + +### Index Blob Tests (Separate) +```bash +make test-index-blob-v0 +``` +**Runs:** TestIndexBlobManagerStress (excluded from standard tests due to duration) + +### Integration Tests +```bash +make build-integration-test-binary # Build test binary first +make integration-tests +``` +**Time:** ~5-10 minutes +**Requires:** KOPIA_INTEGRATION_EXE environment variable + +### CI Test Suites +```bash +make ci-tests # Runs: vet + test +make ci-integration-tests # Runs: robustness-tool-tests + socket-activation-tests +``` + +### Provider Tests (Cloud Storage) +```bash +make provider-tests PROVIDER_TEST_TARGET=... +``` +**Time:** 15 minutes timeout +**Requires:** KOPIA_PROVIDER_TEST=true, credentials for storage backend, rclone binary +**Note:** Tests various cloud storage providers (S3, Azure, GCS, etc.) 
+ +### Other Test Types +- `make compat-tests` - Compatibility tests with older Kopia versions +- `make endurance-tests` - Long-running endurance tests (1 hour timeout) +- `make robustness-tests` - Robustness testing with FIO +- `make stress-test` - Stress tests (1 hour timeout) +- `make htmlui-e2e-test` - HTML UI end-to-end tests (10 minutes timeout) + +**Race Detector Tests:** +```bash +make test UNIT_TEST_RACE_FLAGS=-race UNIT_TESTS_TIMEOUT=1200s +``` + +## Common Issues & Workarounds + +### Build Issues + +1. **Missing build tools error:** Always run `make -j4 ci-setup` first after cloning. + +2. **Go version mismatch:** Kopia requires the Go toolchain with the version specified in go.mod. The `go-version-file` is used in GitHub Actions. + +3. **Platform-specific builds:** + - macOS: Creates universal binaries (AMD64 + ARM64) with `lipo` + - Windows: Requires chocolatey packages: make, zip, unzip, curl + - Linux ARM: Uses goreleaser for multi-arch builds on AMD64 host + +4. **KopiaUI build failures on ARM:** KopiaUI (Electron app) only builds on amd64. The build is skipped on ARM architectures. + +5. **Linting on ARM:** Linting and check-locks are skipped on linux/arm64 and linux/arm due to tool compatibility. + +### Test Issues + +1. **Test timeouts:** Default unit test timeout is 1200s (20 minutes). For race detector tests, explicitly set `UNIT_TESTS_TIMEOUT=1200s`. + +2. **Parallel execution:** Tests use `-parallel` flag (8 on amd64, 2 on ARM). Adjust with `PARALLEL` variable if needed. + +3. **Integration test binary:** Must build integration test binary explicitly with `make build-integration-test-binary` before running integration tests. + +4. **Provider tests require environment:** Provider tests need KOPIA_PROVIDER_TEST=true and rclone binary available. 
+ +### Environment Variables + +**Important variables for CI/tests:** +- `UNIX_SHELL_ON_WINDOWS=true` - Required for Windows builds +- `KOPIA_COVERAGE_TEST=1` - Enable coverage testing +- `KOPIA_INTEGRATION_EXE` - Path to integration test binary +- `TESTING_ACTION_EXE` - Path to testing action binary +- `KOPIA_PROVIDER_TEST=true` - Enable provider tests +- `RCLONE_EXE` - Path to rclone binary for provider tests + +## Project Structure + +### Root Directory Files +- `main.go` - Entry point, uses kingpin for CLI parsing +- `Makefile` - Primary build system (GNU Make) +- `go.mod` / `go.sum` - Go module dependencies +- `.golangci.yml` - Linter configuration (extensive rules) +- `.gitignore` - Excludes dist/, .tools/, node_modules/, coverage files +- `BUILD.md` - Detailed build architecture documentation +- `README.md` - General project information + +### Source Directories + +**`cli/`** - CLI command implementations (~200 command files) +- Each command is in a separate file (e.g., `command_snapshot_create.go`) +- Uses kingpin v2 for command-line parsing +- Main entry via `app.go` + +**`repo/`** - Repository management and storage backends +- `repo/blob/` - Storage provider implementations (s3, azure, gcs, filesystem, etc.) +- `repo/content/` - Content-addressable storage layer +- `repo/format/` - Repository format and versioning +- `repo/manifest/` - Manifest management +- `repo/object/` - Object storage layer + +**`fs/`** - Filesystem abstraction layer +- `fs/localfs/` - Local filesystem implementation +- Supports snapshots, restore, and filesystem walking + +**`snapshot/`** - Snapshot creation and management +- `snapshot/snapshotmaintenance/` - Snapshot GC and maintenance +- `snapshot/upload/` - Upload logic and parallelization + +**`internal/`** - Internal packages (74 subdirectories) +- Utilities and shared code not for external use +- Examples: cache, crypto, compression, auth, server, etc. 
+ +**`tests/`** - Integration and end-to-end tests +- `tests/end_to_end_test/` - E2E test scenarios +- `tests/robustness/` - Robustness testing framework +- `tests/tools/` - Test utilities and helpers + +**`tools/`** - Build and release tools +- `tools/gettool/` - Tool downloader (downloads versioned binaries) +- Various publishing scripts (apt, rpm, docker, homebrew) +- `tools/.tools/` - Downloaded build tools (gitignored) + +**`app/`** - Electron-based desktop application (KopiaUI) +- Node.js project with package.json +- Uses Electron Builder for packaging +- Resources in `app/resources/` and `app/public/` +- Embeds kopia server binary from `dist/kopia_*/kopia` + +**`site/`** - Hugo-based website (kopia.io) +- Build with `make -C site build` or `make website` +- Auto-generates CLI docs to `site/content/docs/Reference/Command-Line/` +- Development server: `make -C site server` (http://localhost:1313) + +### Configuration Files + +- `.golangci.yml` - Linter config with 40+ enabled linters, custom rules +- `.codecov.yml` - Code coverage reporting config +- `.goreleaser.yml` - Release automation config +- `.github/workflows/*.yml` - GitHub Actions workflows (19 workflow files) + +## GitHub Actions Workflows + +### Pull Request Checks (Always Run) + +1. **make.yml (Build)** - Builds on Windows/Linux/macOS/ARM + - Runs: `make ci-setup` → `make ci-build` + - Timeout: 40 minutes + - Creates artifacts: binaries, installers, packages + +2. **tests.yml** - Unit and integration tests on all platforms + - Runs: `make ci-setup` → `make test-index-blob-v0` → `make ci-tests` → `make ci-integration-tests` + - Uploads logs to artifacts + +3. **lint.yml** - Linting on ubuntu-latest and macos-latest + - Runs: `make lint` → `make check-locks` → `make check-prettier` + - Includes govulncheck for vulnerability scanning + +4. **code-coverage.yml** - Code coverage on ubuntu-latest + - Runs: `make test-with-coverage` + - Uploads to Codecov + +5. 
**race-detector.yml** - Race condition detection + - Runs: `make test UNIT_TEST_RACE_FLAGS=-race UNIT_TESTS_TIMEOUT=1200s` + +### Additional Workflows +- `providers-core.yml` / `providers-extra.yml` - Cloud storage provider tests +- `compat-test.yml` - Compatibility with older Kopia versions +- `stress-test.yml` - Stress testing +- `endurance-test.yml` - Long-running endurance tests +- `license-check.yml` - License compliance checking +- `dependency-review.yml` - Dependency security review +- `check-pr-title.yml` - PR title format validation + +### Workflow Tips +- **Build artifacts** are uploaded and can be downloaded from workflow runs +- **Logs** are uploaded to `.logs/**/*.log` on test failures +- **Concurrency:** All workflows use `cancel-in-progress: true` for the same ref +- **Scheduling:** Some workflows run weekly (Mondays at 8AM) + +## Development Workflow + +### Making Code Changes + +1. **Setup environment:** + ```bash + make -j4 ci-setup + ``` + +2. **Make your changes** to Go files + +3. **Build and test iteratively:** + ```bash + make install-noui # Fast build without UI + ~/go/bin/kopia --help # Test your changes + ``` + +4. **Lint your changes:** + ```bash + make lint + ``` + +5. **Run relevant tests:** + ```bash + make test # Unit tests + ``` + +6. 
**For HTML UI changes:** + - UI source is in separate repo: github.com/kopia/htmlui + - Pre-built UI imported from: github.com/kopia/htmluibuild + - To test local UI changes: `make install-with-local-htmlui-changes` (requires 3 repos checked out side-by-side) + +### Pre-Commit Checklist +- [ ] `make lint` passes (3-4 minutes) +- [ ] `make test` passes (2-4 minutes) +- [ ] Changes are formatted (gofumpt, gci enabled in linter) +- [ ] New packages: License check with `make license-check` + +### Code Style +- Uses golangci-lint with formatters: gci, gofumpt +- Imports organized: standard, default, localmodule +- No `time.Now()` outside clock/timetrack packages - use `clock.Now()` +- No `time.Since()` - use `timetrack.Timer.Elapsed()` +- No `filepath.IsAbs()` - use `ospath.IsAbs()` for Windows UNC support +- Tests use the `stretchr/testify` packages + +## Key Dependencies + +**Go modules (from go.mod):** +- Cloud providers: Azure SDK, AWS SDK (via minio), Google Cloud Storage +- CLI: alecthomas/kingpin/v2 +- Compression: klauspost/compress, klauspost/pgzip +- Testing: stretchr/testify, chromedp (for E2E) +- Observability: Prometheus client, OpenTelemetry +- UI: github.com/kopia/htmluibuild (pre-built React app) + +**Node.js dependencies (app/package.json):** +- electron-builder - Desktop app packaging +- electron-updater - Auto-updates +- React (via htmluibuild) - UI framework + +## Important Notes + +1. **Do not modify go.mod/go.sum manually** - Use `go get` to update dependencies. CI runs `git checkout go.mod go.sum` after ci-setup to revert local changes from tool downloads. + +2. **Build artifacts in dist/** - Gitignored. Contains platform-specific binaries and installers after `make ci-build` or `make goreleaser`. + +3. **Tools directory** - `tools/.tools/` is gitignored and populated by `make ci-setup`. Contains downloaded versions of gotestsum, linter, node, hugo, etc. + +4. 
**HTML UI is separate** - The HTML UI is maintained in github.com/kopia/htmlui and imported as a pre-built module. Don't try to find UI source in this repo. + +5. **Platform differences:** + - macOS: Creates universal binaries, requires Xcode command line tools + - Windows: Requires chocolatey tools, uses PowerShell for some commands + - Linux ARM: Builds via goreleaser on AMD64 host, creates ARM packages + +6. **Parallelism:** Makefiles use `-j4` for parallel execution. Tests use `-parallel 8` on amd64, `-parallel 2` on ARM. + +7. **Test binary paths:** + - Integration: `dist/testing_$(GOOS)_$(GOARCH)/kopia.exe` + - UI embedded: `dist/kopia_$(GOOS)_$(GOARCH)/kopia` (or universal binary on macOS) + +8. **Timeout configuration:** + - Linter: 1200s (20 minutes) + - Unit tests: 1200s (20 minutes) + - Integration tests: 300s (5 minutes) + - Provider tests: 15 minutes + - Stress/endurance: 3600s (1 hour) + +9. **Required tools installed by ci-setup:** + - gotestsum - Test runner with better output + - golangci-lint - Linter + - node - JavaScript runtime for app builds + - hugo - Static site generator for website + +10. **Trust these instructions** - These instructions have been validated by running all commands. Only search for additional information if something fails or if these instructions are incomplete or incorrect. diff --git a/.github/instructions/go.copilot-instructions.md b/.github/instructions/go.copilot-instructions.md new file mode 100644 index 00000000000..c945717c916 --- /dev/null +++ b/.github/instructions/go.copilot-instructions.md @@ -0,0 +1,352 @@ +--- +applyTo: '**/*.go,**/go.mod,**/go.sum' +description: 'Instructions for writing Go code following idiomatic Go practices and community standards' + +--- + +# Go Development Instructions + + +Follow idiomatic Go practices and community standards when writing Go code. 
These instructions are based on [Effective Go](https://go.dev/doc/effective_go), [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments), and [Google's Go Style Guide](https://google.github.io/styleguide/go/guide). + +Refer to the linter configuration in `.golangci.yml` for style checks and standards + +## General Instructions + +- Write simple, clear, and idiomatic Go code +- Favor clarity and simplicity over cleverness +- Follow the principle of least surprise +- Keep the non-error path left-aligned (minimize indentation) +- Return early to reduce nesting +- Prefer early return over if-else chains; use `if condition { return }` pattern to avoid else blocks +- Make the zero value useful +- Write self-documenting code with clear, descriptive names +- Document exported types, functions, methods, and packages +- Use Go modules for dependency management +- Leverage and prefer the Go standard library when functionality exists instead of writing custom implementations (e.g., use `strings.Builder` for string concatenation, `filepath.Join` for path construction) +- Write comments in English +- Use allowed ASCII for identifiers; avoid using non-ASCII characters in identifiers +- Avoid using emoji in code and comments + +## Naming Conventions + +### Packages + +- Use lowercase, single-word package names +- Avoid underscores, hyphens, or mixedCaps +- Choose names that describe what the package provides, not what it contains +- Avoid generic names like `util`, `common`, or `base` + +#### Package Declaration Rules (CRITICAL): +- **NEVER duplicate `package` declarations** - each Go file must have exactly ONE `package` line +- When editing an existing `.go` file: + - **PRESERVE** the existing `package` declaration - do not add another one + - If you need to replace the entire file content, start with the existing package name +- When creating a new `.go` file: + - **BEFORE writing any code**, check what package name other `.go` files in the same directory use + - Use 
the SAME package name as existing files in that directory + - If it's a new directory, use the directory name as the package name + - Write **exactly one** `package ` line at the very top of the file +- When using file creation or replacement tools: + - **ALWAYS verify** the target file doesn't already have a `package` declaration before adding one + - If replacing file content, include only ONE `package` declaration in the new content + - **NEVER** create files with multiple `package` lines or duplicate declarations + +### Variables and Functions + +- Use mixedCaps or MixedCaps (camelCase) rather than underscores +- Keep names short but descriptive +- Use single-letter variables only for very short scopes (like loop indices) +- Exported names start with a capital letter +- Unexported names start with a lowercase letter +- Avoid using the same name for the package and a type (e.g., avoid `http.HTTPServer`, prefer `http.Server`) + +### Interfaces + +- Name interfaces with -er suffix when possible (e.g., `Reader`, `Writer`, `Formatter`) +- Single-method interfaces should be named after the method (e.g., `Read` → `Reader`) +- Keep interfaces small and focused + +### Constants + +- Use MixedCaps for exported constants +- Use mixedCaps for unexported constants +- Group related constants using `const` blocks +- Consider using typed constants for better type safety + +## Code Style and Formatting + +### Formatting + +- Use `gofumpt` to format code +- Use `goimports` to manage ordering of `import` statements +- Keep line length reasonable (no hard limit, but consider readability) +- Add blank lines to separate logical groups of code, adhering to the linter constraints. 
+ +### Comments + +- Strive for self-documenting code; prefer clear variable names, function names, and code structure over comments +- Write comments only when necessary to explain complex implementation or non-obvious behavior +- Write comments in complete sentences in English +- Start sentences with the name of the item being described +- Package comments should start with "Package [name]" +- Use line comments (`//`) for most comments +- Document the meaning of structs, interfaces and fields and their use. +- Document the invariants expected when calling a function, the change of state + if any, and the expected state invariant when a function returns. +- Document the rationale (why) and not how it is done, unless the implementation is complex + +## Architecture and Project Structure + +### Package Organization + +- Follow standard Go project layout conventions +- Use `internal/` for packages that shouldn't be imported by external projects +- Group related functionality into packages +- Avoid circular dependencies +- Put reusable packages in `internal/` if possible + +### Dependency Management + +- Use Go modules (`go.mod` and `go.sum`) +- Keep dependencies minimal +- Regularly update dependencies for security patches +- Use `go mod tidy` to clean up unused dependencies + +## Type Safety and Language Features + +### Type Definitions + +- Define types to add meaning and type safety +- Use struct tags for JSON, XML, database mappings +- Use lower camelCase for field names in JSON tags +- Prefer explicit type conversions +- Use type assertions carefully and check whether the assertion succeeds using the second return value +- Prefer generics over unconstrained types; when an unconstrained type is truly needed, use the predeclared alias `any` instead of `interface{}` + +### Pointers vs Values Parameter + +- Use pointer receivers for large structs or when you need to modify the receiver +- Use value receivers for small structs and when immutability is desired +- Use 
pointer parameters when you need to modify the argument or for large structs +- Use value parameters for small structs and when you want to prevent modification +- Be consistent with the receiver type, either use pointer receivers or value receivers for a given receiver type +- Consider the zero value when choosing pointer vs value receivers + +### Interfaces and Composition + +- Accept interfaces, return concrete types +- Keep interfaces small (1-3 methods is ideal) +- Use embedding for composition +- Define interfaces close to where they're used, not where they're implemented +- Don't export interfaces unless necessary + +## Concurrency + +### Goroutines + +- Avoid creating goroutines in libraries; prefer letting the caller control concurrency +- If you must create goroutines in libraries, provide clear documentation and cleanup mechanisms +- Always know how a goroutine will exit +- Use `sync.WaitGroup` or channels to wait for goroutines +- Avoid goroutine leaks by ensuring cleanup + +### Channels + +- Use channels to communicate between goroutines +- Don't communicate by sharing memory; share memory by communicating +- Close channels from the sender side, not the receiver +- Use buffered channels when you know the capacity +- Use `select` for non-blocking operations + +### Synchronization + +- Use `sync.Mutex` for protecting shared state +- Keep critical sections small +- Use `sync.RWMutex` when you have many readers +- Choose between channels and mutexes based on the use case: use channels for communication, mutexes for protecting state +- Use `sync.Once` for one-time initialization + +## Error Handling Patterns + +### Creating Errors + +- Use `errors.New` for simple static errors (constant-like error values) +- Create custom error types for domain-specific errors +- Export error variables for sentinel errors +- Use `errors.Is` and `errors.As` for error checking + +### Error Propagation + +- Add context when propagating errors up the stack +- Use descriptive 
error messages with relevant context fields +- Don't log and return errors (choose one) +- Handle errors at the appropriate level +- Use structured errors with fields for better debugging and monitoring + +### Error Handling + +- Check errors immediately after the function call +- Don't ignore errors using `_` unless you have a valid reason (explain and document why) +- Preserve error chains to maintain full context, wrap errors with context using `errors.Wrap()` + +- Create custom error types when checking for specific errors is needed +- Place error returns as the last return value +- Name error variables `err` +- Keep error messages lowercase and don't end with punctuation + +### Error Lists and Multiple Errors + +- Use `errors.Join()` for collecting multiple errors (nil-safe) +- Handle validation scenarios with error accumulation + +## API Design + +### JSON APIs + +- Use struct tags to control JSON marshaling +- Validate input data +- Use pointers for optional fields +- Consider using `json.RawMessage` for delayed parsing +- Handle JSON errors appropriately + +## Performance Optimization + +### Memory Management + +- Minimize allocations in hot paths +- Reuse objects when a large number of those are allocated (consider `sync.Pool`) +- Use value receivers for small structs +- Preallocate slices when size is known +- Avoid unnecessary string-byte conversions + +### I/O: Readers and Buffers + +- Most `io.Reader` streams are consumable once; reading advances state. Do not assume a reader can be re-read without special handling +- If you must read data multiple times, buffer it once and recreate readers on demand: + - Use `io.ReadAll` (or a limited read) to obtain `[]byte`, then create fresh readers via `bytes.NewReader(buf)` or `bytes.NewBuffer(buf)` for each reuse + - For strings, use `strings.NewReader(s)`; you can `Seek(0, io.SeekStart)` on `*bytes.Reader` to rewind +- For HTTP requests, do not reuse a consumed `req.Body`. 
Instead: + - Keep the original payload as `[]byte` and set `req.Body = io.NopCloser(bytes.NewReader(buf))` before each send + - Prefer configuring `req.GetBody` so the transport can recreate the body for redirects/retries: `req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(buf)), nil }` +- To duplicate a stream while reading, use `io.TeeReader` (copy to a buffer while passing through) or write to multiple sinks with `io.MultiWriter` +- Reusing buffered readers: call `(*bufio.Reader).Reset(r)` to attach to a new underlying reader; do not expect it to “rewind” unless the source supports seeking +- For large payloads, avoid unbounded buffering; consider streaming, `io.LimitReader`, or on-disk temporary storage to control memory + +- Use `io.Pipe` to stream without buffering the whole payload: + - Write to `*io.PipeWriter` in a separate goroutine while the reader consumes + - Always close the writer; use `CloseWithError(err)` on failures + - `io.Pipe` is for streaming, not rewinding or making readers reusable + +- **Warning:** When using `io.Pipe` (especially with multipart writers), all writes must be performed in strict, sequential order. Do not write concurrently or out of order—multipart boundaries and chunk order must be preserved. Out-of-order or parallel writes can corrupt the stream and result in errors. 
+ +- Streaming multipart/form-data with `io.Pipe`: + - `pr, pw := io.Pipe()`; `mw := multipart.NewWriter(pw)`; use `pr` as the HTTP request body + - Set `Content-Type` to `mw.FormDataContentType()` + - In a goroutine: write all parts to `mw` in the correct order; on error `pw.CloseWithError(err)`; on success `mw.Close()` then `pw.Close()` + - Do not store request/in-flight form state on a long-lived client; build per call + - Streamed bodies are not rewindable; for retries/redirects, buffer small payloads or provide `GetBody` + + +### Profiling + +- Use built-in profiling tools (`pprof`) +- Benchmark critical code paths +- Profile before optimizing +- Focus on algorithmic improvements first +- Consider using `testing.B` for benchmarks + +## Testing + +### Test Organization + +- Keep tests in the same package (white-box testing) +- Use `_test` package suffix for black-box testing +- Name test files with `_test.go` suffix +- Place test files next to the code they test + +### Writing Tests + +- Use table-driven tests for multiple test cases +- Name tests descriptively using `Test_functionName_scenario` +- Use subtests with `t.Run` for better organization +- Test both success and error cases +- Have separate top-level tests for the success and error cases +- Use `stretchr/testify/require` package for checking expected results + +### Test Helpers + +- Mark helper functions with `t.Helper()` +- Create test fixtures for complex setup +- Use `testing.TB` interface for functions used in tests and benchmarks +- Clean up resources using `t.Cleanup()` + +## Security Best Practices + +### Input Validation + +- Validate all external input +- Use strong typing to prevent invalid states +- Sanitize data before using in SQL queries +- Be careful with file paths from user input +- Validate and escape data for different contexts (HTML, SQL, shell) + +### Cryptography + +- Use standard library crypto packages +- Use crypto/rand for random number generation +- Use TLS for network 
communication +- Never store plain-text passwords +- Store password hashes using functions designed for password hashing, such as PBKDF2 and scrypt + +## Documentation + +### Code Documentation + +- Prioritize self-documenting code through clear naming and structure +- Document all exported symbols with clear, concise explanations +- Start documentation with the symbol name +- Write documentation in English +- Use examples in documentation when helpful +- Keep documentation close to code +- Update documentation when code changes +- Avoid emoji in documentation and comments + +### README and Documentation Files + +- Include clear setup instructions +- Document dependencies and requirements +- Provide usage examples +- Document configuration options +- Include troubleshooting section + +## Tools and Development Workflow + +### Essential Tools + +- `golangci-lint`: Primary linter +- `go vet`: Find suspicious constructs +- `go test`: Run tests +- `go mod`: Manage dependencies +- `go generate`: Code generation + +### Development Practices + +- Run tests before committing +- Keep commits focused and atomic +- Write meaningful commit messages +- Review diffs before committing + +## Common Pitfalls to Avoid + +- Not checking errors +- Ignoring race conditions +- Creating goroutine leaks +- Not using defer for cleanup +- Modifying maps concurrently +- Not understanding nil interfaces vs nil pointers +- Forgetting to close or release resources (files, connections) +- Using global variables unnecessarily +- Over-using unconstrained types (e.g., `any`); prefer specific types or generic type parameters with constraints. 
If an unconstrained type is required, use `any` rather than `interface{}` +- Not considering the zero value of types +- **Creating duplicate `package` declarations** - this is a compile error; always check existing files before adding package declarations diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 1d5f47f4838..25e747244cd 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -7,7 +7,7 @@ jobs: auto-merge: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: ahmadnassri/action-dependabot-auto-merge@45fc124d949b19b6b8bf6645b6c9d55f4f9ac61a #v2.6.6 with: # auto-merge rules are in /.github/auto-merge.yml diff --git a/.github/workflows/check-pr-title.yml b/.github/workflows/check-pr-title.yml index 6009d345892..d0b25844ff7 100644 --- a/.github/workflows/check-pr-title.yml +++ b/.github/workflows/check-pr-title.yml @@ -9,4 +9,4 @@ jobs: steps: - uses: deepakputhraya/action-pr-title@077bddd7bdabd0d2b1b25ed0754c7e62e184d7ee with: - regex: '^(feat|fix|breaking|build|chore|docs|style|refactor|test)\((kopiaui|cli|ui|repository|snapshots|server|providers|deps|deps-dev|site|ci|infra|notifications|general)\)!{0,1}: .*$' + regex: '^(feat|fix|breaking|build|chore|docs|style|refactor|test)\((ci|cli|deps-dev|deps|general|infra|kopiaui|lint|notifications|providers|repository|server|site|snapshots|testing|ui)\)!{0,1}: .*$' diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml index 15398b0c69d..0b0e2a383c2 100644 --- a/.github/workflows/code-coverage.yml +++ b/.github/workflows/code-coverage.yml @@ -12,23 +12,21 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: 
fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Run Tests run: make test-with-coverage - name: Upload Coverage - uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 with: files: coverage.txt - name: Upload Logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs path: .logs/**/*.log diff --git a/.github/workflows/compat-test.yml b/.github/workflows/compat-test.yml index 3bcf5417f61..c3081f9937b 100644 --- a/.github/workflows/compat-test.yml +++ b/.github/workflows/compat-test.yml @@ -14,19 +14,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Compat Test run: make compat-tests - name: Upload Logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs path: .logs/**/*.log diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 666c4ef27b2..8ac26bdd90b 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -15,6 +15,6 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: 'Dependency Review' - uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 #v4.7.1 + uses: actions/dependency-review-action@3c4e3dcb1aa7874d2c16be7d79418e9b7efd6261 #v4.8.2 diff --git a/.github/workflows/endurance-test.yml b/.github/workflows/endurance-test.yml index 2ece8f13a35..2f62239c940 100644 --- a/.github/workflows/endurance-test.yml +++ b/.github/workflows/endurance-test.yml @@ -1,12 +1,19 @@ name: Endurance Test on: push: - branches: [ master ] + branches: [ master, test/endurance ] tags: - v* schedule: - # run every 2 hours - - cron: '12 */2 * * *' + # run every 6 hours + - cron: '12 */6 * * *' + workflow_dispatch: + inputs: + ref: + description: 'branch or git ref to use for the build' + required: true + default: 'test/endurance' + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -19,11 +26,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' check-latest: true @@ -31,7 +38,7 @@ jobs: - name: Endurance Tests run: make endurance-tests - name: Upload Logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs path: .logs/**/*.log diff --git a/.github/workflows/htmlui-tests.yml b/.github/workflows/htmlui-tests.yml index 1532b44192d..88960a6d3c1 100644 --- a/.github/workflows/htmlui-tests.yml +++ b/.github/workflows/htmlui-tests.yml @@ 
-27,11 +27,11 @@ jobs: runs-on: macos-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' check-latest: true @@ -39,7 +39,7 @@ jobs: - name: Run Tests run: make htmlui-e2e-test - name: Upload Screenshots - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: path: .screenshots/**/*.png if-no-files-found: ignore diff --git a/.github/workflows/license-check.yml b/.github/workflows/license-check.yml index 470e65579d7..282a4e5228b 100644 --- a/.github/workflows/license-check.yml +++ b/.github/workflows/license-check.yml @@ -12,15 +12,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Download dependencies run: go mod vendor - name: Run License Check diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fd9438ada6c..2bae7f495cb 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,21 +26,22 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: 
Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - id: govulncheck uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1.0.4 with: cache: false - go-version-input: - go-version-file: 'go.mod' + go-version-input: '1.25.4' + # An explicit Go version is needed for govulncheck-action since internally + # it uses an outdated setup-go@v5.0 action that does not respect the 'toolchain' + # directive in the 'go.mod' file. + #go-version-file: 'go.mod' repo-checkout: false - name: Lint run: make lint diff --git a/.github/workflows/make.yml b/.github/workflows/make.yml index 7d0b4d98f02..4e1bc0f8ac5 100644 --- a/.github/workflows/make.yml +++ b/.github/workflows/make.yml @@ -36,14 +36,13 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true id: go - name: Install Windows-specific packages run: "choco install --no-progress -y make zip unzip curl" @@ -93,7 +92,7 @@ jobs: # macOS signing certificate (base64-encoded), used by Electron Builder MACOS_SIGNING_IDENTITY: ${{ secrets.MACOS_SIGNING_IDENTITY }} - name: Upload Kopia Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: kopia-${{ matrix.os }} path: | @@ -114,7 +113,7 @@ jobs: dist/kopia-ui/*.yml if-no-files-found: ignore - name: Upload Kopia Binary - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: kopia_binaries-${{ matrix.os }} path: | @@ -129,21 +128,21 @@ jobs: needs: build if: github.event_name != 'pull_request' && github.repository == 'kopia/kopia' steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Set up QEMU - uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Install Linux-specific packages run: "sudo apt-get install -y createrepo-c" - name: Download Artifacts - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: pattern: kopia-* merge-multiple: true path: dist - name: Download Kopia Binaries - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 with: pattern: kopia_binaries-* merge-multiple: true diff --git a/.github/workflows/ossf-scorecard.yml b/.github/workflows/ossf-scorecard.yml index 520a2956626..afc4258f3f9 100644 --- a/.github/workflows/ossf-scorecard.yml +++ b/.github/workflows/ossf-scorecard.yml @@ -26,12 +26,12 @@ jobs: steps: - name: "Checkout repo" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2 + uses: 
ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif @@ -39,12 +39,13 @@ jobs: - # Upload the results to GitHub's code scanning dashboard. name: "Upload to results to dashboard" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 # v3.29.5 with: sarif_file: results.sarif + category: ossf - name: "Upload analysis results as 'Job Artifact'" - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: SARIF file path: results.sarif diff --git a/.github/workflows/providers-core.yml b/.github/workflows/providers-core.yml index 704c8304399..557c5cdaccd 100644 --- a/.github/workflows/providers-core.yml +++ b/.github/workflows/providers-core.yml @@ -24,16 +24,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 ref: ${{ github.event.inputs.ref_name || github.ref }} - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Install Dependencies run: make provider-tests-deps - name: Azure @@ -46,7 +44,6 @@ jobs: KOPIA_AZURE_TEST_IMMUTABLE_CONTAINER: ${{ secrets.KOPIA_AZURE_TEST_IMMUTABLE_CONTAINER }} KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_ACCOUNT: ${{ secrets.KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_ACCOUNT }} KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_KEY: ${{ secrets.KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_KEY }} - KOPIA_AZURE_TEST_IMMUTABLE_SAS_TOKEN: ${{ secrets.KOPIA_AZURE_TEST_IMMUTABLE_SAS_TOKEN }} - name: GCS run: make 
provider-tests PROVIDER_TEST_TARGET=gcs env: diff --git a/.github/workflows/providers-extra.yml b/.github/workflows/providers-extra.yml index e00f2d4d1ce..31268041c09 100644 --- a/.github/workflows/providers-extra.yml +++ b/.github/workflows/providers-extra.yml @@ -24,16 +24,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 ref: ${{ github.event.inputs.ref_name || github.ref }} - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Install Dependencies run: make provider-tests-deps - name: B2 diff --git a/.github/workflows/race-detector.yml b/.github/workflows/race-detector.yml index 15655a404bf..ab297fe6e09 100644 --- a/.github/workflows/race-detector.yml +++ b/.github/workflows/race-detector.yml @@ -12,14 +12,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Unit Tests run: make -j2 test UNIT_TEST_RACE_FLAGS=-race UNIT_TESTS_TIMEOUT=1200s diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b0a88ee4483..e5b3210e3b6 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -14,7 +14,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + - uses: 
actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 with: # process older PRs first ascending: true diff --git a/.github/workflows/stress-test.yml b/.github/workflows/stress-test.yml index 6e90b02f502..3305401fcc2 100644 --- a/.github/workflows/stress-test.yml +++ b/.github/workflows/stress-test.yml @@ -18,19 +18,17 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Stress Test run: make stress-test - name: Upload Logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs path: .logs/**/*.log diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 685a1bf7e5d..e7700a702b9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -34,15 +34,13 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Install Windows-specific packages run: "choco install --no-progress -y make zip unzip curl" if: ${{ contains(matrix.os, 'windows') }} @@ -58,7 +56,7 @@ jobs: - name: Integration Tests run: make -j2 ci-integration-tests - name: Upload Logs - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs-${{ matrix.os }} path: .logs/**/*.log diff --git a/.github/workflows/volume-shadow-copy-test.yml b/.github/workflows/volume-shadow-copy-test.yml index 03ac88c2792..f4de9626c00 100644 --- a/.github/workflows/volume-shadow-copy-test.yml +++ b/.github/workflows/volume-shadow-copy-test.yml @@ -15,15 +15,13 @@ jobs: runs-on: windows-latest steps: - name: Check out repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: 'go.mod' - check-latest: true - id: go - name: Install gsudo shell: bash run: | @@ -34,7 +32,7 @@ jobs: - name: Non-Admin Test run: gsudo -i Medium make os-snapshot-tests - name: Upload Logs - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 with: name: logs path: .logs/**/*.log diff --git a/.golangci.yml b/.golangci.yml index 6951c6be831..0f45788b484 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -80,9 +80,15 @@ linters: - (*go.uber.org/zap.SugaredLogger).With misspell: locale: US + modernize: + disable: + - omitzero + wsl_v5: + allow-whole-block: true default: all disable: + - embeddedstructfieldcheck - exhaustruct - funcorder - gochecknoglobals @@ -91,8 +97,10 @@ linters: - importas - ireturn # this one may be interesting to control allocations - musttag + - nakedret # already enforced by gofumpt in a stricter manner - nilnil - - nlreturn + - nlreturn # already enforced by wsl_v5 + - noinlineerr # inline error handling is a common Go idiom used in this 
codebase - nonamedreturns - paralleltest - prealloc @@ -106,14 +114,12 @@ linters: - usetesting - varnamelen # this one may be interesting, but too much churn - whitespace + - wsl - zerologlint # zerolog not currently used in the codebase exclusions: generated: lax rules: - - path: reporter.go - linters: - - musttag - path: _test\.go|testing|tests|test_env|fshasher|fault linters: - contextcheck @@ -131,12 +137,6 @@ linters: - perfsprint - revive - wrapcheck - - text: "log is a global variable" - linters: - - gochecknoglobals - - text: "tracer is a global variable" - linters: - - gochecknoglobals - text: "Magic number: 1e" linters: - mnd @@ -191,6 +191,8 @@ formatters: - standard - default - localmodule + gofumpt: + extra-rules: true output: show-stats: false diff --git a/.goreleaser.yml b/.goreleaser.yml index f89666ae653..c7667dcbae2 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -11,6 +11,8 @@ builds: - amd64 - arm - arm64 + flags: + - -trimpath ldflags: - -s -w -X "github.com/kopia/kopia/repo.BuildVersion={{.Version}}" -X "github.com/kopia/kopia/repo.BuildInfo={{.Commit}}" -X "github.com/kopia/kopia/repo.BuildGitHubRepo={{.Env.GITHUB_REPOSITORY}}" release: diff --git a/Makefile b/Makefile index 6bbe0eaa2e3..54b1a0fbb03 100644 --- a/Makefile +++ b/Makefile @@ -16,6 +16,9 @@ all: include tools/tools.mk +KOPIA_BUILD_TAGS= +KOPIA_BUILD_FLAGS=-trimpath -ldflags "-s -w -X github.com/kopia/kopia/repo.BuildVersion=$(KOPIA_VERSION_NO_PREFIX) -X github.com/kopia/kopia/repo.BuildInfo=$(shell git rev-parse HEAD) -X github.com/kopia/kopia/repo.BuildGitHubRepo=$(GITHUB_REPOSITORY)" + kopia_ui_embedded_exe=dist/kopia_$(GOOS)_$(GOARCH)/kopia$(exe_suffix) ifeq ($(GOOS),darwin) @@ -272,7 +275,6 @@ dev-deps: GO111MODULE=off go get -u github.com/sqs/goreturns test-with-coverage: export KOPIA_COVERAGE_TEST=1 -test-with-coverage: export GOEXPERIMENT=nocoverageredesign test-with-coverage: export TESTING_ACTION_EXE ?= $(TESTING_ACTION_EXE) test-with-coverage: $(gotestsum) 
$(TESTING_ACTION_EXE) $(GO_TEST) $(UNIT_TEST_RACE_FLAGS) -tags testing -count=$(REPEAT_TEST) -short -covermode=atomic -coverprofile=coverage.txt --coverpkg $(COVERAGE_PACKAGES) -timeout $(UNIT_TESTS_TIMEOUT) ./... diff --git a/README.md b/README.md index 6b8a24ff9f5..a4d09c63fe0 100644 --- a/README.md +++ b/README.md @@ -18,16 +18,16 @@ Kopia > 3. _[fast and secure backup tool](https://kopia.io)_ -Kopia is a fast and secure open-source backup/restore tool that allows you to create [encrypted](https://kopia.io/docs/features/#end-to-end-zero-knowledge-encryption) snapshots of your data and save the snapshots to [remote or cloud storage](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage) of your choice, [to network-attached storage or server](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage), or [locally on your machine](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage). Kopia does not 'image' your whole machine. Rather, Kopia allows you to backup/restore any and all files/directories that you deem are important or critical. +Kopia is a fast and secure open-source backup/restore tool that allows you to create [encrypted](https://kopia.io/docs/features/#user-controlled-end-to-end-encryption) snapshots of your data and save the snapshots to [remote or cloud storage](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage) of your choice, [to network-attached storage or server](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage), or [locally on your machine](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage). Kopia does not 'image' your whole machine. Rather, Kopia allows you to backup/restore any and all files/directories that you deem are important or critical. 
-Kopia has both [CLI (command-line interface)](https://kopia.io/docs/features/#both-command-line-and-graphical-user-interfaces) and [GUI (graphical user interface)](https://kopia.io/docs/features/#both-command-line-and-graphical-user-interfaces) versions, making it the perfect tool for both advanced and regular users. You can read more about Kopia's unique [features](https://kopia.io/docs/features/) -- which include [compression](https://kopia.io/docs/features/#compression), [deduplication](https://kopia.io/docs/features/#backup-files-and-directories-using-snapshots), [end-to-end 'zero knowledge' encryption](https://kopia.io/docs/features/#end-to-end-zero-knowledge-encryption), and [error correction](https://kopia.io/docs/features/#error-correction) -- to get a better understanding of how Kopia works. +Kopia has both [CLI (command-line interface)](https://kopia.io/docs/features/#both-command-line-and-graphical-user-interfaces) and [GUI (graphical user interface)](https://kopia.io/docs/features/#both-command-line-and-graphical-user-interfaces) versions, making it the perfect tool for both advanced and regular users. You can read more about Kopia's unique [features](https://kopia.io/docs/features/) -- which include [compression](https://kopia.io/docs/features/#compression), [deduplication](https://kopia.io/docs/features/#backup-files-and-directories-using-snapshots), [user-controlled end-to-end encryption](https://kopia.io/docs/features/#user-controlled-end-to-end-encryption), and [error correction](https://kopia.io/docs/features/#error-correction) -- to get a better understanding of how Kopia works. When ready, head to the [installation](https://kopia.io/docs/installation/) page to download and install Kopia, and make sure to read the [Getting Started Guide](https://kopia.io/docs/getting-started/) for a step-by-step walkthrough of how to use Kopia. 
Pick the Cloud Storage Provider You Want --- -Kopia supports saving your [encrypted](https://kopia.io/docs/features/#end-to-end-zero-knowledge-encryption) and [compressed](https://kopia.io/docs/features/#compression) snapshots to all of the following [storage locations](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage): +Kopia supports saving your [encrypted](https://kopia.io/docs/features/#user-controlled-end-to-end-encryption) and [compressed](https://kopia.io/docs/features/#compression) snapshots to all of the following [storage locations](https://kopia.io/docs/features/#save-snapshots-to-cloud-network-or-local-storage): * **Amazon S3** and any **cloud storage that is compatible with S3** * **Azure Blob Storage** diff --git a/app/package-lock.json b/app/package-lock.json index ed28fe504bf..7596d2db46d 100644 --- a/app/package-lock.json +++ b/app/package-lock.json @@ -23,7 +23,7 @@ "asar": "^3.2.0", "concurrently": "^9.1.2", "dotenv": "^16.5.0", - "electron": "^36.3.2", + "electron": "^36.8.1", "electron-builder": "^26.0.12", "playwright": "^1.37.1", "playwright-core": "^1.35.1", @@ -174,9 +174,9 @@ } }, "node_modules/@electron/node-gyp/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -345,9 +345,9 @@ } }, "node_modules/@electron/universal/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": 
"sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1047,9 +1047,9 @@ } }, "node_modules/app-builder-lib/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1252,9 +1252,9 @@ "optional": true }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "license": "MIT", "dependencies": { @@ -1389,9 +1389,9 @@ } }, "node_modules/cacache/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": 
"sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1808,9 +1808,9 @@ } }, "node_modules/config-file-ts/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2211,9 +2211,9 @@ } }, "node_modules/electron": { - "version": "36.3.2", - "resolved": "https://registry.npmjs.org/electron/-/electron-36.3.2.tgz", - "integrity": "sha512-v0/j7n22CL3OYv9BIhq6JJz2+e1HmY9H4bjTk8/WzVT9JwVX/T/21YNdR7xuQ6XDSEo9gP5JnqmjOamE+CUY8Q==", + "version": "36.8.1", + "resolved": "https://registry.npmjs.org/electron/-/electron-36.8.1.tgz", + "integrity": "sha512-honaH58/cyCb9QAzIvD+WXWuNIZ0tW9zfBqMz5wZld/rXB+LCTEDb2B3TAv8+pDmlzPlkPio95RkUe86l6MNjg==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -2645,9 +2645,9 @@ } }, "node_modules/filelist/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2698,15 +2698,16 @@ } }, "node_modules/form-data": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", - "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", + "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==", "dev": true, "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", "mime-types": "^2.1.12" }, "engines": { @@ -3302,9 +3303,9 @@ } }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "license": "MIT", "dependencies": { "argparse": "^2.0.1" @@ -4834,9 +4835,9 @@ "license": "MIT" }, "node_modules/tmp": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz", - "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.4.tgz", + "integrity": "sha512-UdiSoX6ypifLmrfQ/XfiawN6hkjSBpCjhKxxZcWlUUmoXLaCKQU0bx4HF/tdDK2uzRuchf1txGvrWBzYREssoQ==", "dev": true, "license": "MIT", "engines": { diff --git a/app/package.json b/app/package.json index 960978bdc84..bd3a699a5d0 100644 --- a/app/package.json +++ b/app/package.json @@ -127,7 +127,7 @@ "asar": "^3.2.0", "concurrently": "^9.1.2", "dotenv": "^16.5.0", - "electron": "^36.3.2", + "electron": "^36.8.1", "electron-builder": "^26.0.12", "playwright": "^1.37.1", "playwright-core": "^1.35.1", diff --git a/cli/app.go 
b/cli/app.go index e4eb3f65c99..3c6b5bf1b07 100644 --- a/cli/app.go +++ b/cli/app.go @@ -3,6 +3,7 @@ package cli import ( "context" + stderrors "errors" "fmt" "io" "os" @@ -13,11 +14,9 @@ import ( "github.com/mattn/go-colorable" "github.com/pkg/errors" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" "github.com/kopia/kopia/internal/apiclient" "github.com/kopia/kopia/internal/clock" - "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/internal/passwordpersist" "github.com/kopia/kopia/internal/releasable" "github.com/kopia/kopia/notification" @@ -85,10 +84,9 @@ type appServices interface { repositoryReaderAction(act func(ctx context.Context, rep repo.Repository) error) func(ctx *kingpin.ParseContext) error repositoryWriterAction(act func(ctx context.Context, rep repo.RepositoryWriter) error) func(ctx *kingpin.ParseContext) error repositoryHintAction(act func(ctx context.Context, rep repo.Repository) []string) func() []string - maybeRepositoryAction(act func(ctx context.Context, rep repo.Repository) error, mode repositoryAccessMode) func(ctx *kingpin.ParseContext) error baseActionWithContext(act func(ctx context.Context) error) func(ctx *kingpin.ParseContext) error openRepository(ctx context.Context, mustBeConnected bool) (repo.Repository, error) - advancedCommand() + dangerousCommand() repositoryConfigFileName() string getProgress() *cliProgress getRestoreProgress() RestoreProgress @@ -124,7 +122,6 @@ type advancedAppServices interface { type App struct { // global flags enableAutomaticMaintenance bool - pf profileFlags progress *cliProgress restoreProgress RestoreProgress initialUpdateCheckDelay time.Duration @@ -135,9 +132,8 @@ type App struct { traceStorage bool keyRingEnabled bool persistCredentials bool - disableInternalLog bool - dumpAllocatorStats bool - AdvancedCommands string + disableRepositoryLog bool + DangerousCommands string cliStorageProviders []StorageProvider trackReleasable []string @@ -175,15 +171,16 @@ type App struct { 
// testability hooks testonlyIgnoreMissingRequiredFeatures bool - isInProcessTest bool - exitWithError func(err error) // os.Exit() with 1 or 0 based on err - stdinReader io.Reader - stdoutWriter io.Writer - stderrWriter io.Writer - rootctx context.Context //nolint:containedctx - loggerFactory logging.LoggerFactory - simulatedCtrlC chan bool - envNamePrefix string + isInProcessTest bool + exitWithError func(err error) // os.Exit() with 1 or 0 based on err + stdinReader io.Reader + stdoutWriter io.Writer + stderrWriter io.Writer + rootctx context.Context //nolint:containedctx + loggerFactory logging.LoggerFactory + contentLogWriter io.Writer + simulatedCtrlC chan bool + envNamePrefix string } func (c *App) enableTestOnlyFlags() bool { @@ -217,8 +214,9 @@ func (c *App) Stderr() io.Writer { } // SetLoggerFactory sets the logger factory to be used throughout the app. -func (c *App) SetLoggerFactory(loggerForModule logging.LoggerFactory) { +func (c *App) SetLoggerFactory(loggerForModule logging.LoggerFactory, contentLogWriter io.Writer) { c.loggerFactory = loggerForModule + c.contentLogWriter = contentLogWriter } // RegisterOnExit registers the provided function to run before app exits. 
@@ -261,7 +259,9 @@ func (c *App) setup(app *kingpin.Application) { _ = app.Flag("help-full", "Show help for all commands, including hidden").Action(func(pc *kingpin.ParseContext) error { _ = app.UsageForContextWithTemplate(pc, 0, kingpin.DefaultUsageTemplate) + c.exitWithError(nil) + return nil }).Bool() @@ -276,10 +276,9 @@ func (c *App) setup(app *kingpin.Application) { app.Flag("timezone", "Format time according to specified time zone (local, utc, original or time zone name)").Hidden().StringVar(&timeZone) app.Flag("password", "Repository password.").Envar(c.EnvName("KOPIA_PASSWORD")).Short('p').StringVar(&c.password) app.Flag("persist-credentials", "Persist credentials").Default("true").Envar(c.EnvName("KOPIA_PERSIST_CREDENTIALS_ON_CONNECT")).BoolVar(&c.persistCredentials) - app.Flag("disable-internal-log", "Disable internal log").Hidden().Envar(c.EnvName("KOPIA_DISABLE_INTERNAL_LOG")).BoolVar(&c.disableInternalLog) - app.Flag("advanced-commands", "Enable advanced (and potentially dangerous) commands.").Hidden().Envar(c.EnvName("KOPIA_ADVANCED_COMMANDS")).StringVar(&c.AdvancedCommands) + app.Flag("disable-repository-log", "Disable repository log").Hidden().Envar(c.EnvName("KOPIA_DISABLE_REPOSITORY_LOG")).BoolVar(&c.disableRepositoryLog) + app.Flag("dangerous-commands", "Enable dangerous commands that could result in data loss and repository corruption.").Hidden().Envar(c.EnvName("KOPIA_DANGEROUS_COMMANDS")).StringVar(&c.DangerousCommands) app.Flag("track-releasable", "Enable tracking of releasable resources.").Hidden().Envar(c.EnvName("KOPIA_TRACK_RELEASABLE")).StringsVar(&c.trackReleasable) - app.Flag("dump-allocator-stats", "Dump allocator stats at the end of execution.").Hidden().Envar(c.EnvName("KOPIA_DUMP_ALLOCATOR_STATS")).BoolVar(&c.dumpAllocatorStats) app.Flag("upgrade-owner-id", "Repository format upgrade owner-id.").Hidden().Envar(c.EnvName("KOPIA_REPO_UPGRADE_OWNER_ID")).StringVar(&c.upgradeOwnerID) app.Flag("upgrade-no-block", "Do not block when 
repository format upgrade is in progress, instead exit with a message.").Hidden().Default("false").Envar(c.EnvName("KOPIA_REPO_UPGRADE_NO_BLOCK")).BoolVar(&c.doNotWaitForUpgrade) app.Flag("error-notifications", "Send notification on errors").Hidden(). @@ -295,15 +294,6 @@ func (c *App) setup(app *kingpin.Application) { c.setupOSSpecificKeychainFlags(c, app) - _ = app.Flag("caching", "Enables caching of objects (disable with --no-caching)").Default("true").Hidden().Action( - deprecatedFlag(c.stderrWriter, "The '--caching' flag is deprecated and has no effect, use 'kopia cache set' instead."), - ).Bool() - - _ = app.Flag("list-caching", "Enables caching of list results (disable with --no-list-caching)").Default("true").Hidden().Action( - deprecatedFlag(c.stderrWriter, "The '--list-caching' flag is deprecated and has no effect, use 'kopia cache set' instead."), - ).Bool() - - c.pf.setup(app) c.progress.setup(c, app) c.blob.setup(c, app) @@ -410,15 +400,7 @@ func (c *App) currentActionName() string { func (c *App) noRepositoryAction(act func(ctx context.Context) error) func(ctx *kingpin.ParseContext) error { return func(kpc *kingpin.ParseContext) error { - return c.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error { - return c.pf.withProfiling(func() error { - if c.dumpAllocatorStats { - defer gather.DumpStats(ctx) - } - - return act(ctx) - }) - }) + return c.runAppWithContext(kpc.SelectedCommand, act) } } @@ -458,7 +440,9 @@ func assertDirectRepository(act func(ctx context.Context, rep repo.DirectReposit } func (c *App) directRepositoryWriteAction(act func(ctx context.Context, rep repo.DirectRepositoryWriter) error) func(ctx *kingpin.ParseContext) error { - return c.maybeRepositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error { + return c.repositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error { + rep.LogManager().Enable() + return repo.DirectWriteSession(ctx, 
rep, repo.WriteSessionOptions{ Purpose: "cli:" + c.currentActionName(), OnUpload: c.progress.UploadedBytes, @@ -467,19 +451,19 @@ func (c *App) directRepositoryWriteAction(act func(ctx context.Context, rep repo } func (c *App) directRepositoryReadAction(act func(ctx context.Context, rep repo.DirectRepository) error) func(ctx *kingpin.ParseContext) error { - return c.maybeRepositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error { + return c.repositoryAction(assertDirectRepository(func(ctx context.Context, rep repo.DirectRepository) error { return act(ctx, rep) }), repositoryAccessMode{}) } func (c *App) repositoryReaderAction(act func(ctx context.Context, rep repo.Repository) error) func(ctx *kingpin.ParseContext) error { - return c.maybeRepositoryAction(func(ctx context.Context, rep repo.Repository) error { + return c.repositoryAction(func(ctx context.Context, rep repo.Repository) error { return act(ctx, rep) }, repositoryAccessMode{}) } func (c *App) repositoryWriterAction(act func(ctx context.Context, rep repo.RepositoryWriter) error) func(ctx *kingpin.ParseContext) error { - return c.maybeRepositoryAction(func(ctx context.Context, rep repo.Repository) error { + return c.repositoryAction(func(ctx context.Context, rep repo.Repository) error { return repo.WriteSession(ctx, rep, repo.WriteSessionOptions{ Purpose: "cli:" + c.currentActionName(), OnUpload: c.progress.UploadedBytes, @@ -502,29 +486,18 @@ func (c *App) runAppWithContext(command *kingpin.CmdClause, cb func(ctx context. 
releasable.EnableTracking(releasable.ItemKind(r)) } - if err := c.observability.startMetrics(ctx); err != nil { - return errors.Wrap(err, "unable to start metrics") - } - - err := func() error { - if command == nil { - defer c.runOnExit() + var spanName string - return cb(ctx) - } - - tctx, span := tracer.Start(ctx, command.FullCommand(), trace.WithSpanKind(trace.SpanKindClient)) - defer span.End() + if command != nil { + spanName = command.FullCommand() + } + err := c.observability.run(ctx, spanName, func(ctx context.Context) error { defer c.runOnExit() - return cb(tctx) - }() - - c.observability.stopMetrics(ctx) - + return cb(ctx) + }) if err != nil { - // print error in red log(ctx).Errorf("%v", err.Error()) c.exitWithError(err) } @@ -545,19 +518,11 @@ type repositoryAccessMode struct { func (c *App) baseActionWithContext(act func(ctx context.Context) error) func(ctx *kingpin.ParseContext) error { return func(kpc *kingpin.ParseContext) error { - return c.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error { - return c.pf.withProfiling(func() error { - if c.dumpAllocatorStats { - defer gather.DumpStats(ctx) - } - - return act(ctx) - }) - }) + return c.runAppWithContext(kpc.SelectedCommand, act) } } -func (c *App) maybeRepositoryAction(act func(ctx context.Context, rep repo.Repository) error, mode repositoryAccessMode) func(ctx *kingpin.ParseContext) error { +func (c *App) repositoryAction(act func(ctx context.Context, rep repo.Repository) error, mode repositoryAccessMode) func(ctx *kingpin.ParseContext) error { return c.baseActionWithContext(func(ctx context.Context) error { const requireConnected = true @@ -572,7 +537,7 @@ func (c *App) maybeRepositoryAction(act func(ctx context.Context, rep repo.Repos if rep != nil && err == nil && mode.allowMaintenance { if merr := c.maybeRunMaintenance(ctx, rep); merr != nil { - log(ctx).Errorf("error running maintenance: %v", merr) + err = errors.Wrap(merr, "running auto-maintenance") // surface 
auto-maintenance error } } @@ -589,7 +554,7 @@ func (c *App) maybeRepositoryAction(act func(ctx context.Context, rep repo.Repos if rep != nil { if cerr := rep.Close(ctx); cerr != nil { - return errors.Wrap(cerr, "unable to close repository") + return stderrors.Join(err, errors.Wrap(cerr, "unable to close repository")) } } @@ -650,17 +615,17 @@ func (c *App) maybeRunMaintenance(ctx context.Context, rep repo.Repository) erro return errors.Wrap(err, "error running maintenance") } -func (c *App) advancedCommand() { - if c.AdvancedCommands != "enabled" { +func (c *App) dangerousCommand() { + if c.DangerousCommands != "enabled" { _, _ = errorColor.Fprintf(c.stderrWriter, ` -This command could be dangerous or lead to repository corruption when used improperly. +This command is dangerous, it can corrupt the repository and result in data loss. -Running this command is not needed for using Kopia. Instead, most users should rely on periodic repository maintenance. See https://kopia.io/docs/advanced/maintenance/ for more information. -To run this command despite the warning, set --advanced-commands=enabled +Running this command is not needed for using Kopia. Instead, rely on periodic repository maintenance. See https://kopia.io/docs/advanced/maintenance/ for more information. 
+To run this command despite the warning, set --dangerous-commands=enabled `) - c.exitWithError(errors.New("advanced commands are disabled")) + c.exitWithError(errors.New("dangerous commands are disabled")) } } diff --git a/cli/auto_upgrade.go b/cli/auto_upgrade.go index 929f4217c11..79b5cf1fdb0 100644 --- a/cli/auto_upgrade.go +++ b/cli/auto_upgrade.go @@ -39,18 +39,6 @@ func setDefaultMaintenanceParameters(ctx context.Context, rep repo.RepositoryWri p := maintenance.DefaultParams() p.Owner = rep.ClientOptions().UsernameAtHost() - if dw, ok := rep.(repo.DirectRepositoryWriter); ok { - _, ok, err := dw.ContentReader().EpochManager(ctx) - if err != nil { - return errors.Wrap(err, "epoch manager") - } - - if ok { - // disable quick maintenance cycle - p.QuickCycle.Enabled = false - } - } - if err := maintenance.SetParams(ctx, rep, &p); err != nil { return errors.Wrap(err, "unable to set maintenance params") } diff --git a/cli/cli_progress.go b/cli/cli_progress.go index 0992e1cb9ee..ce7626ef4eb 100644 --- a/cli/cli_progress.go +++ b/cli/cli_progress.go @@ -29,7 +29,7 @@ type progressFlags struct { } func (p *progressFlags) setup(svc appServices, app *kingpin.Application) { - app.Flag("progress", "Enable progress bar").Hidden().Default("true").BoolVar(&p.enableProgress) + app.Flag("progress", "Enable progress output").Default("true").BoolVar(&p.enableProgress) app.Flag("progress-estimation-type", "Set type of estimation of the data to be snapshotted").Hidden().Default(upload.EstimationTypeClassic). EnumVar(&p.progressEstimationType, upload.EstimationTypeClassic, upload.EstimationTypeRough, upload.EstimationTypeAdaptive) app.Flag("progress-update-interval", "How often to update progress information").Hidden().Default("300ms").DurationVar(&p.progressUpdateInterval) @@ -206,7 +206,6 @@ func (p *cliProgress) spinnerCharacter() string { return s } -// +checklocksignore. 
func (p *cliProgress) StartShared() { *p = cliProgress{ uploadStartTime: timetrack.Start(), @@ -222,7 +221,6 @@ func (p *cliProgress) FinishShared() { p.output(defaultColor, "") } -// +checklocksignore. func (p *cliProgress) UploadStarted() { if p.shared { // do nothing diff --git a/cli/command_acl_add.go b/cli/command_acl_add.go index df34ed7b436..6c05dc5d257 100644 --- a/cli/command_acl_add.go +++ b/cli/command_acl_add.go @@ -29,7 +29,7 @@ func (c *commandACLAdd) setup(svc appServices, parent commandParent) { func (c *commandACLAdd) run(ctx context.Context, rep repo.RepositoryWriter) error { r := acl.TargetRule{} - for _, v := range strings.Split(c.target, ",") { + for v := range strings.SplitSeq(c.target, ",") { parts := strings.SplitN(v, "=", 2) //nolint:mnd if len(parts) != 2 { //nolint:mnd return errors.Errorf("invalid target labels %q, must be key=value", v) diff --git a/cli/command_benchmark.go b/cli/command_benchmark.go index 9706f8b59e2..447926b59cc 100644 --- a/cli/command_benchmark.go +++ b/cli/command_benchmark.go @@ -54,17 +54,13 @@ func runInParallelNoResult[A any](args []A, run func(arg A)) { }) } -func runInParallel[A any, T any](args []A, run func(arg A) T) T { +func runInParallel[A, T any](args []A, run func(arg A) T) T { var wg sync.WaitGroup for _, arg := range args[1:] { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { run(arg) - }() + }) } // run one on the main goroutine and N-1 in parallel. 
diff --git a/cli/command_benchmark_compression.go b/cli/command_benchmark_compression.go index ff37c14626f..93a72333da2 100644 --- a/cli/command_benchmark_compression.go +++ b/cli/command_benchmark_compression.go @@ -97,7 +97,7 @@ func (c *commandBenchmarkCompression) shouldIncludeAlgorithm(name compression.Na return true } - for _, a := range strings.Split(c.algorithms, ",") { + for a := range strings.SplitSeq(c.algorithms, ",") { if strings.HasPrefix(string(name), a) { return true } diff --git a/cli/command_blob_delete.go b/cli/command_blob_delete.go index 0dcadd0d6fd..ec0551221f1 100644 --- a/cli/command_blob_delete.go +++ b/cli/command_blob_delete.go @@ -16,7 +16,7 @@ type commandBlobDelete struct { } func (c *commandBlobDelete) setup(svc appServices, parent commandParent) { - cmd := parent.Command("delete", "Delete blobs by ID").Alias("remove").Alias("rm") + cmd := parent.Command("delete", "Delete blobs by ID").Alias("remove").Alias("rm").Hidden() cmd.Arg("blobIDs", "Blob IDs").Required().StringsVar(&c.blobIDs) cmd.Action(svc.directRepositoryWriteAction(c.run)) @@ -24,7 +24,7 @@ func (c *commandBlobDelete) setup(svc appServices, parent commandParent) { } func (c *commandBlobDelete) run(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() for _, b := range c.blobIDs { err := rep.BlobStorage().DeleteBlob(ctx, blob.ID(b)) diff --git a/cli/command_blob_gc.go b/cli/command_blob_gc.go index 2dd7db808d8..56865a005cf 100644 --- a/cli/command_blob_gc.go +++ b/cli/command_blob_gc.go @@ -20,7 +20,7 @@ type commandBlobGC struct { } func (c *commandBlobGC) setup(svc appServices, parent commandParent) { - cmd := parent.Command("gc", "Garbage-collect unused blobs") + cmd := parent.Command("gc", "Garbage-collect unused blobs").Hidden() cmd.Flag("delete", "Whether to delete unused blobs").StringVar(&c.delete) cmd.Flag("parallel", "Number of parallel blob scans").Default("16").IntVar(&c.parallel) cmd.Flag("prefix", 
"Only GC blobs with given prefix").StringVar(&c.prefix) @@ -31,20 +31,20 @@ func (c *commandBlobGC) setup(svc appServices, parent commandParent) { } func (c *commandBlobGC) run(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() - opts := maintenance.DeleteUnreferencedBlobsOptions{ + opts := maintenance.DeleteUnreferencedPacksOptions{ DryRun: c.delete != "yes", Parallel: c.parallel, Prefix: blob.ID(c.prefix), } - n, err := maintenance.DeleteUnreferencedBlobs(ctx, rep, opts, c.safety) + stats, err := maintenance.DeleteUnreferencedPacks(ctx, rep, opts, c.safety) if err != nil { return errors.Wrap(err, "error deleting unreferenced blobs") } - if opts.DryRun && n > 0 { + if opts.DryRun && stats.UnreferencedPackCount > 0 { log(ctx).Info("Pass --delete=yes to delete.") } diff --git a/cli/command_blob_shards_modify.go b/cli/command_blob_shards_modify.go index 970c58b9a32..20252098e76 100644 --- a/cli/command_blob_shards_modify.go +++ b/cli/command_blob_shards_modify.go @@ -64,9 +64,9 @@ func parseShardSpec(shards string) ([]int, error) { return result, nil } - parts := strings.Split(shards, ",") + parts := strings.SplitSeq(shards, ",") - for _, p := range parts { + for p := range parts { if p == "" { continue } @@ -236,8 +236,8 @@ func (c *commandBlobShardsModify) renameBlobs(ctx context.Context, dir, prefix s if err := c.renameBlobs(ctx, path.Join(dir, ent.Name()), prefix+ent.Name(), params, numMoved, numUnchanged); err != nil { return err } - } else if strings.HasSuffix(ent.Name(), sharded.CompleteBlobSuffix) { - blobID := prefix + strings.TrimSuffix(ent.Name(), sharded.CompleteBlobSuffix) + } else if name, ok := strings.CutSuffix(ent.Name(), sharded.CompleteBlobSuffix); ok { + blobID := prefix + name destDir, destBlobID := params.GetShardDirectoryAndBlob(c.rootPath, blob.ID(blobID)) srcFile := path.Join(dir, ent.Name()) diff --git a/cli/command_content_delete.go b/cli/command_content_delete.go index 
cf24fb90c0d..b3fb71a777b 100644 --- a/cli/command_content_delete.go +++ b/cli/command_content_delete.go @@ -15,7 +15,7 @@ type commandContentDelete struct { } func (c *commandContentDelete) setup(svc appServices, parent commandParent) { - cmd := parent.Command("delete", "Remove content").Alias("remove").Alias("rm") + cmd := parent.Command("delete", "Remove content").Alias("remove").Alias("rm").Hidden() cmd.Arg("id", "IDs of content to remove").Required().StringsVar(&c.ids) cmd.Action(svc.directRepositoryWriteAction(c.run)) @@ -23,7 +23,7 @@ func (c *commandContentDelete) setup(svc appServices, parent commandParent) { } func (c *commandContentDelete) run(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() contentIDs, err := toContentIDs(c.ids) if err != nil { diff --git a/cli/command_content_rewrite.go b/cli/command_content_rewrite.go index 15b9aad3781..38c5232d830 100644 --- a/cli/command_content_rewrite.go +++ b/cli/command_content_rewrite.go @@ -25,7 +25,7 @@ type commandContentRewrite struct { } func (c *commandContentRewrite) setup(svc appServices, parent commandParent) { - cmd := parent.Command("rewrite", "Rewrite content using most recent format") + cmd := parent.Command("rewrite", "Rewrite content using most recent format").Hidden() cmd.Arg("contentID", "Identifiers of contents to rewrite").StringsVar(&c.contentRewriteIDs) cmd.Flag("parallelism", "Number of parallel workers").Default("16").IntVar(&c.contentRewriteParallelism) @@ -41,15 +41,14 @@ func (c *commandContentRewrite) setup(svc appServices, parent commandParent) { } func (c *commandContentRewrite) runContentRewriteCommand(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() contentIDs, err := toContentIDs(c.contentRewriteIDs) if err != nil { return err } - //nolint:wrapcheck - return maintenance.RewriteContents(ctx, rep, &maintenance.RewriteContentsOptions{ + _, err = 
maintenance.RewriteContents(ctx, rep, &maintenance.RewriteContentsOptions{ ContentIDRange: c.contentRange.contentIDRange(), ContentIDs: contentIDs, FormatVersion: c.contentRewriteFormatVersion, @@ -58,6 +57,8 @@ func (c *commandContentRewrite) runContentRewriteCommand(ctx context.Context, re ShortPacks: c.contentRewriteShortPacks, DryRun: c.contentRewriteDryRun, }, c.contentRewriteSafety) + + return errors.Wrap(err, "error rewriting contents") } func toContentIDs(s []string) ([]content.ID, error) { diff --git a/cli/command_content_verify.go b/cli/command_content_verify.go index 024755270e3..f51af3df315 100644 --- a/cli/command_content_verify.go +++ b/cli/command_content_verify.go @@ -2,7 +2,6 @@ package cli import ( "context" - "math/rand" "sync" "sync/atomic" "time" @@ -11,7 +10,6 @@ import ( "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/repo" - "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content" ) @@ -38,29 +36,14 @@ func (c *commandContentVerify) setup(svc appServices, parent commandParent) { } func (c *commandContentVerify) run(ctx context.Context, rep repo.DirectRepository) error { - blobMap := map[blob.ID]blob.Metadata{} - downloadPercent := c.contentVerifyPercent - - if c.contentVerifyFull { - downloadPercent = 100.0 - } - - blobMap, err := blob.ReadBlobMap(ctx, rep.BlobReader()) - if err != nil { - return errors.Wrap(err, "unable to read blob map") - } - var ( - verifiedCount atomic.Int32 - successCount atomic.Int32 - errorCount atomic.Int32 - totalCount atomic.Int32 + totalCount atomic.Int32 + + wg sync.WaitGroup ) subctx, cancel := context.WithCancel(ctx) - var wg sync.WaitGroup - // ensure we cancel estimation goroutine and wait for it before returning defer func() { cancel() @@ -68,63 +51,56 @@ func (c *commandContentVerify) run(ctx context.Context, rep repo.DirectRepositor }() // start a goroutine that will populate totalCount - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { 
c.getTotalContentCount(subctx, rep, &totalCount) - }() - - log(ctx).Info("Verifying all contents...") + }) rep.DisableIndexRefresh() - throttle := new(timetrack.Throttle) + var throttle timetrack.Throttle + est := timetrack.Start() - if err := rep.ContentReader().IterateContents(ctx, content.IterateOptions{ - Range: c.contentRange.contentIDRange(), - Parallel: c.contentVerifyParallel, - IncludeDeleted: c.contentVerifyIncludeDeleted, - }, func(ci content.Info) error { - if err := c.contentVerify(ctx, rep.ContentReader(), ci, blobMap, downloadPercent); err != nil { - log(ctx).Errorf("error %v", err) - errorCount.Add(1) - } else { - successCount.Add(1) - } + if c.contentVerifyFull { + c.contentVerifyPercent = 100.0 + } + + opts := content.VerifyOptions{ + ContentIDRange: c.contentRange.contentIDRange(), + ContentReadPercentage: c.contentVerifyPercent, + IncludeDeletedContents: c.contentVerifyIncludeDeleted, + ContentIterateParallelism: c.contentVerifyParallel, + ProgressCallbackInterval: 1, + + ProgressCallback: func(vps content.VerifyProgressStats) { + if !throttle.ShouldOutput(c.progressInterval) { + return + } - verifiedCount.Add(1) + verifiedCount := vps.SuccessCount + vps.ErrorCount - if throttle.ShouldOutput(c.progressInterval) { - timings, ok := est.Estimate(float64(verifiedCount.Load()), float64(totalCount.Load())) + timings, ok := est.Estimate(float64(verifiedCount), float64(totalCount.Load())) if ok { log(ctx).Infof(" Verified %v of %v contents (%.1f%%), %v errors, remaining %v, ETA %v", - verifiedCount.Load(), + verifiedCount, totalCount.Load(), timings.PercentComplete, - errorCount.Load(), + vps.ErrorCount, timings.Remaining, formatTimestamp(timings.EstimatedEndTime), ) } else { - log(ctx).Infof(" Verified %v contents, %v errors, estimating...", verifiedCount.Load(), errorCount.Load()) + log(ctx).Infof(" Verified %v contents, %v errors, estimating...", verifiedCount, vps.ErrorCount) } - } - - return nil - }); err != nil { - return errors.Wrap(err, "iterate 
contents") + }, } - log(ctx).Infof("Finished verifying %v contents, found %v errors.", verifiedCount.Load(), errorCount.Load()) - - ec := errorCount.Load() - if ec == 0 { - return nil + if err := rep.ContentReader().VerifyContents(ctx, opts); err != nil { + return errors.Wrap(err, "verify contents") } - return errors.Errorf("encountered %v errors", ec) + return nil } func (c *commandContentVerify) getTotalContentCount(ctx context.Context, rep repo.DirectRepository, totalCount *atomic.Int32) { @@ -147,25 +123,3 @@ func (c *commandContentVerify) getTotalContentCount(ctx context.Context, rep rep totalCount.Store(tc) } - -func (c *commandContentVerify) contentVerify(ctx context.Context, r content.Reader, ci content.Info, blobMap map[blob.ID]blob.Metadata, downloadPercent float64) error { - bi, ok := blobMap[ci.PackBlobID] - if !ok { - return errors.Errorf("content %v depends on missing blob %v", ci.ContentID, ci.PackBlobID) - } - - if int64(ci.PackOffset+ci.PackedLength) > bi.Length { - return errors.Errorf("content %v out of bounds of its pack blob %v", ci.ContentID, ci.PackBlobID) - } - - //nolint:gosec - if 100*rand.Float64() < downloadPercent { - if _, err := r.GetContent(ctx, ci.ContentID); err != nil { - return errors.Wrapf(err, "content %v is invalid", ci.ContentID) - } - - return nil - } - - return nil -} diff --git a/cli/command_index_inspect.go b/cli/command_index_inspect.go index 7d7121d988b..48edd5550ec 100644 --- a/cli/command_index_inspect.go +++ b/cli/command_index_inspect.go @@ -2,6 +2,7 @@ package cli import ( "context" + "slices" "sync" "github.com/pkg/errors" @@ -42,13 +43,9 @@ func (c *commandIndexInspect) run(ctx context.Context, rep repo.DirectRepository var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { c.dumpIndexBlobEntries(output) - }() + }) err := c.runWithOutput(ctx, rep, output) close(output) @@ -109,6 +106,10 @@ func (c *commandIndexInspect) inspectAllBlobs(ctx context.Context, rep repo.Dire func (c 
*commandIndexInspect) dumpIndexBlobEntries(entries chan indexBlobPlusContentInfo) { for ent := range entries { + if !c.shouldInclude(ent.contentInfo) { + continue + } + ci := ent.contentInfo bm := ent.indexBlob @@ -117,10 +118,6 @@ func (c *commandIndexInspect) dumpIndexBlobEntries(entries chan indexBlobPlusCon state = "deleted" } - if !c.shouldInclude(ci) { - continue - } - c.out.printStdout("%v %v %v %v %v %v %v %v\n", formatTimestampPrecise(bm.Timestamp), bm.BlobID, ci.ContentID, state, formatTimestampPrecise(ci.Timestamp()), ci.PackBlobID, ci.PackOffset, ci.PackedLength) @@ -134,13 +131,7 @@ func (c *commandIndexInspect) shouldInclude(ci content.Info) bool { contentID := ci.ContentID.String() - for _, cid := range c.contentIDs { - if cid == contentID { - return true - } - } - - return false + return slices.Contains(c.contentIDs, contentID) } type indexBlobPlusContentInfo struct { diff --git a/cli/command_index_optimize.go b/cli/command_index_optimize.go index 850adfa0cda..feae50b7c76 100644 --- a/cli/command_index_optimize.go +++ b/cli/command_index_optimize.go @@ -4,6 +4,8 @@ import ( "context" "time" + "github.com/pkg/errors" + "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/content/indexblob" ) @@ -18,7 +20,7 @@ type commandIndexOptimize struct { } func (c *commandIndexOptimize) setup(svc appServices, parent commandParent) { - cmd := parent.Command("optimize", "Optimize indexes blobs.") + cmd := parent.Command("optimize", "Optimize indexes blobs.").Hidden() cmd.Flag("max-small-blobs", "Maximum number of small index blobs that can be left after compaction.").Default("1").IntVar(&c.optimizeMaxSmallBlobs) cmd.Flag("drop-deleted-older-than", "Drop deleted contents above given age").DurationVar(&c.optimizeDropDeletedOlderThan) cmd.Flag("drop-contents", "Drop contents with given IDs").StringsVar(&c.optimizeDropContents) @@ -29,7 +31,7 @@ func (c *commandIndexOptimize) setup(svc appServices, parent commandParent) { } func (c *commandIndexOptimize) 
runOptimizeCommand(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() contentIDs, err := toContentIDs(c.optimizeDropContents) if err != nil { @@ -46,6 +48,7 @@ func (c *commandIndexOptimize) runOptimizeCommand(ctx context.Context, rep repo. opt.DropDeletedBefore = rep.Time().Add(-age) } - //nolint:wrapcheck - return rep.ContentManager().CompactIndexes(ctx, opt) + _, err = rep.ContentManager().CompactIndexes(ctx, opt) + + return errors.Wrap(err, "error optimizing indexes") } diff --git a/cli/command_index_recover.go b/cli/command_index_recover.go index 1d3248ed169..63e8785d853 100644 --- a/cli/command_index_recover.go +++ b/cli/command_index_recover.go @@ -40,7 +40,7 @@ func (c *commandIndexRecover) setup(svc appServices, parent commandParent) { } func (c *commandIndexRecover) run(ctx context.Context, rep repo.DirectRepositoryWriter) error { - c.svc.advancedCommand() + c.svc.dangerousCommand() var ( processedBlobCount atomic.Int32 diff --git a/cli/command_logs_cleanup.go b/cli/command_logs_cleanup.go index dd7bd22fdf3..ee8d2f3a294 100644 --- a/cli/command_logs_cleanup.go +++ b/cli/command_logs_cleanup.go @@ -29,7 +29,9 @@ func (c *commandLogsCleanup) setup(svc appServices, parent commandParent) { } func (c *commandLogsCleanup) run(ctx context.Context, rep repo.DirectRepositoryWriter) error { - toDelete, err := maintenance.CleanupLogs(ctx, rep, maintenance.LogRetentionOptions{ + rep.LogManager().Disable() + + stats, err := maintenance.CleanupLogs(ctx, rep, maintenance.LogRetentionOptions{ MaxTotalSize: c.maxTotalSizeMB << 20, //nolint:mnd MaxCount: c.maxCount, MaxAge: c.maxAge, @@ -39,11 +41,11 @@ func (c *commandLogsCleanup) run(ctx context.Context, rep repo.DirectRepositoryW return errors.Wrap(err, "error expiring logs") } - if len(toDelete) > 0 { + if stats.ToDeleteBlobCount > 0 { if c.dryRun { - log(ctx).Infof("Would delete %v logs.", len(toDelete)) + log(ctx).Infof("Would delete %v logs.", 
stats.ToDeleteBlobCount) } else { - log(ctx).Infof("Deleted %v logs.", len(toDelete)) + log(ctx).Infof("Deleted %v logs.", stats.DeletedBlobCount) } } else { log(ctx).Info("No logs found to delete.") diff --git a/cli/command_logs_show.go b/cli/command_logs_show.go index f0726fd45f1..9c8efcd21b3 100644 --- a/cli/command_logs_show.go +++ b/cli/command_logs_show.go @@ -2,6 +2,7 @@ package cli import ( "context" + "slices" "github.com/pkg/errors" @@ -38,13 +39,7 @@ func (c *commandLogsShow) run(ctx context.Context, rep repo.DirectRepository) er if len(c.logSessionIDs) > 0 { sessions = filterLogSessions(sessions, func(l *logSessionInfo) bool { - for _, sid := range c.logSessionIDs { - if l.id == sid { - return true - } - } - - return false + return slices.Contains(c.logSessionIDs, l.id) }) } diff --git a/cli/command_logs_test.go b/cli/command_logs_test.go index 28b45d2fc3f..f8a0fecde6d 100644 --- a/cli/command_logs_test.go +++ b/cli/command_logs_test.go @@ -91,12 +91,18 @@ func TestLogsMaintenance(t *testing.T) { e.RunAndVerifyOutputLineCount(t, 5, "logs", "list") e.RunAndExpectSuccess(t, "maintenance", "run", "--full") + + // maintenance run will create a new log and keep previous 2 logs e.RunAndVerifyOutputLineCount(t, 3, "logs", "list") e.RunAndExpectSuccess(t, "maintenance", "set", "--max-retained-log-age=1ms") + + // maintenance does not run here e.RunAndVerifyOutputLineCount(t, 4, "logs", "list") e.RunAndExpectSuccess(t, "maintenance", "run", "--full") + + // maintenance run will create a new log and delete all previous logs e.RunAndVerifyOutputLineCount(t, 1, "logs", "list") } diff --git a/cli/command_maintenance_info.go b/cli/command_maintenance_info.go index a7e891cd9cf..4c24bf85471 100644 --- a/cli/command_maintenance_info.go +++ b/cli/command_maintenance_info.go @@ -2,6 +2,7 @@ package cli import ( "context" + "strings" "time" "github.com/pkg/errors" @@ -10,6 +11,7 @@ import ( "github.com/kopia/kopia/internal/units" "github.com/kopia/kopia/repo" 
"github.com/kopia/kopia/repo/maintenance" + "github.com/kopia/kopia/repo/maintenancestats" ) type commandMaintenanceInfo struct { @@ -82,18 +84,19 @@ func (c *commandMaintenanceInfo) run(ctx context.Context, rep repo.DirectReposit c.out.printStdout(" %v:\n", run) for _, t := range timings { - var errInfo string + var message string + if t.Success { - errInfo = "SUCCESS" + message = getMessageFromRun(t.Extra) } else { - errInfo = "ERROR: " + t.Error + message = "ERROR: " + t.Error } c.out.printStdout( " %v (%v) %v\n", formatTimestamp(t.Start), t.End.Sub(t.Start).Truncate(time.Second), - errInfo) + message) } } @@ -113,3 +116,25 @@ func (c *commandMaintenanceInfo) displayCycleInfo(cp *maintenance.CycleParams, t } } } + +func getMessageFromRun(extra []maintenancestats.Extra) string { + succeed := "SUCCESS" + + if len(extra) == 0 { + return succeed + } + + var extraStr strings.Builder + + for _, e := range extra { + if msg, err := maintenancestats.BuildFromExtra(e); err == nil { + extraStr.WriteString(msg.Summary()) + } + } + + if extraStr.Len() > 0 { + succeed += ": " + extraStr.String() + } + + return succeed +} diff --git a/cli/command_manifest_delete.go b/cli/command_manifest_delete.go index 9508ff08aaf..a2c9be1816a 100644 --- a/cli/command_manifest_delete.go +++ b/cli/command_manifest_delete.go @@ -15,7 +15,7 @@ type commandManifestDelete struct { } func (c *commandManifestDelete) setup(svc appServices, parent commandParent) { - cmd := parent.Command("delete", "Remove manifest items").Alias("remove").Alias("rm") + cmd := parent.Command("delete", "Remove manifest items").Alias("remove").Alias("rm").Hidden() cmd.Arg("item", "Items to remove").Required().StringsVar(&c.manifestRemoveItems) cmd.Action(svc.repositoryWriterAction(c.run)) @@ -23,7 +23,7 @@ func (c *commandManifestDelete) setup(svc appServices, parent commandParent) { } func (c *commandManifestDelete) run(ctx context.Context, rep repo.RepositoryWriter) error { - c.svc.advancedCommand() + 
c.svc.dangerousCommand() for _, it := range toManifestIDs(c.manifestRemoveItems) { if err := rep.DeleteManifest(ctx, it); err != nil { diff --git a/cli/command_mount.go b/cli/command_mount.go index a95972417a2..bdabae22d0a 100644 --- a/cli/command_mount.go +++ b/cli/command_mount.go @@ -82,7 +82,6 @@ func (c *commandMount) run(ctx context.Context, rep repo.Repository) error { FuseAllowNonEmptyMount: c.mountFuseAllowNonEmptyMount, PreferWebDAV: c.mountPreferWebDAV, }) - if mountErr != nil { return errors.Wrap(mountErr, "mount error") } diff --git a/cli/command_notification_template_test.go b/cli/command_notification_template_test.go index 9e628455ff6..061213203a5 100644 --- a/cli/command_notification_template_test.go +++ b/cli/command_notification_template_test.go @@ -3,6 +3,7 @@ package cli_test import ( "context" "os" + "slices" "strings" "testing" @@ -17,6 +18,7 @@ func TestNotificationTemplates(t *testing.T) { t.Parallel() runner := testenv.NewInProcRunner(t) + e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") @@ -123,10 +125,8 @@ func verifyTemplateContents(t *testing.T, e *testenv.CLITest, templateName strin func verifyHasLine(t *testing.T, lines []string, ok func(s string) bool) { t.Helper() - for _, l := range lines { - if ok(l) { - return - } + if slices.ContainsFunc(lines, ok) { + return } t.Errorf("output line meeting given condition was not found: %v", lines) diff --git a/cli/command_policy_edit.go b/cli/command_policy_edit.go index 61b04f1bd76..6d2bf344df8 100644 --- a/cli/command_policy_edit.go +++ b/cli/command_policy_edit.go @@ -119,6 +119,7 @@ func (c *commandPolicyEdit) run(ctx context.Context, rep repo.RepositoryWriter) func prettyJSON(v *policy.Policy) string { var b bytes.Buffer + e := json.NewEncoder(&b) e.SetIndent("", " ") e.Encode(v) //nolint:errcheck,errchkjson diff --git a/cli/command_policy_export.go b/cli/command_policy_export.go index cf243ddda57..6d0426174b2 100644 
--- a/cli/command_policy_export.go +++ b/cli/command_policy_export.go @@ -3,6 +3,7 @@ package cli import ( "context" "encoding/json" + stderrors "errors" "fmt" "io" "os" @@ -46,9 +47,10 @@ func (c *commandPolicyExport) run(ctx context.Context, rep repo.Repository) erro return err } - file, ok := output.(*os.File) - if ok { - defer file.Close() //nolint:errcheck + if file, ok := output.(*os.File); ok { + defer func() { + err = stderrors.Join(err, file.Close()) + }() } policies := make(map[string]*policy.Policy) diff --git a/cli/command_policy_set_scheduling.go b/cli/command_policy_set_scheduling.go index e63e26d47a2..03af70ba97a 100644 --- a/cli/command_policy_set_scheduling.go +++ b/cli/command_policy_set_scheduling.go @@ -50,7 +50,7 @@ func (c *policySchedulingFlags) setScheduleFromFlags(ctx context.Context, sp *po var timesOfDay []policy.TimeOfDay for _, tods := range c.policySetTimesOfDay { - for _, tod := range strings.Split(tods, ",") { + for tod := range strings.SplitSeq(tods, ",") { if tod == inheritPolicyString { timesOfDay = nil break @@ -127,8 +127,8 @@ func splitCronExpressions(expr string) []string { var result []string - parts := strings.Split(expr, ";") - for _, part := range parts { + parts := strings.SplitSeq(expr, ";") + for part := range parts { part = strings.TrimSpace(part) if part == "" { continue diff --git a/cli/command_repository_repair.go b/cli/command_repository_repair.go index 0c027b990b3..9d5153c832a 100644 --- a/cli/command_repository_repair.go +++ b/cli/command_repository_repair.go @@ -19,7 +19,7 @@ type commandRepositoryRepair struct { } func (c *commandRepositoryRepair) setup(svc advancedAppServices, parent commandParent) { - cmd := parent.Command("repair", "Repairs repository.") + cmd := parent.Command("repair", "DEPRECATED: Recover format blob from older-format packs.").Hidden() cmd.Flag("recover-format", "Recover format blob from a copy").Default("auto").EnumVar(&c.repairCommandRecoverFormatBlob, "auto", "yes", "no") 
cmd.Flag("recover-format-block-prefixes", "Prefixes of file names").StringsVar(&c.repairCommandRecoverFormatBlobPrefixes) @@ -27,9 +27,11 @@ func (c *commandRepositoryRepair) setup(svc advancedAppServices, parent commandP for _, prov := range svc.storageProviders() { f := prov.NewFlags() - cc := cmd.Command(prov.Name, "Repair repository in "+prov.Description) + cc := cmd.Command(prov.Name, "Repair repository in "+prov.Description).Hidden() f.Setup(svc, cc) cc.Action(func(kpc *kingpin.ParseContext) error { + svc.dangerousCommand() + return svc.runAppWithContext(kpc.SelectedCommand, func(ctx context.Context) error { st, err := f.Connect(ctx, false, 0) if err != nil { diff --git a/cli/command_repository_sync.go b/cli/command_repository_sync.go index 4aeeb9b2758..f0c2690fc45 100644 --- a/cli/command_repository_sync.go +++ b/cli/command_repository_sync.go @@ -34,7 +34,8 @@ type commandRepositorySyncTo struct { lastSyncProgress string syncProgressMutex sync.Mutex - out textOutput + out textOutput + progress *cliProgress } func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandParent) { @@ -47,6 +48,7 @@ func (c *commandRepositorySyncTo) setup(svc advancedAppServices, parent commandP cmd.Flag("times", "Synchronize blob times if supported.").BoolVar(&c.repositorySyncTimes) c.out.setup(svc) + c.progress = svc.getProgress() for _, prov := range svc.storageProviders() { // Set up 'sync-to' subcommand @@ -200,6 +202,10 @@ func (c *commandRepositorySyncTo) beginSyncProgress() { } func (c *commandRepositorySyncTo) outputSyncProgress(s string) { + if !c.progress.Enabled() { + return + } + c.syncProgressMutex.Lock() defer c.syncProgressMutex.Unlock() @@ -215,6 +221,10 @@ func (c *commandRepositorySyncTo) outputSyncProgress(s string) { } func (c *commandRepositorySyncTo) finishSyncProcess() { + if !c.progress.Enabled() { + return + } + c.out.printStderr("\r%v\n", c.lastSyncProgress) } diff --git a/cli/command_repository_upgrade.go 
b/cli/command_repository_upgrade.go index e5fd9f62a7c..8a6f3e5adfe 100644 --- a/cli/command_repository_upgrade.go +++ b/cli/command_repository_upgrade.go @@ -125,12 +125,12 @@ func (c *commandRepositoryUpgrade) validateAction(ctx context.Context, rep repo. sm := rep.ContentManager().SharedManager - indexBlobInfos0, _, err := sm.IndexReaderV0().ListIndexBlobInfos(ctx) + indexBlobInfos0, err := sm.IndexReaderV0().ListIndexBlobInfos(ctx) if err != nil { return errors.Wrapf(err, "failed to list index blobs for old index") } - indexBlobInfos1, _, err := sm.IndexReaderV1().ListIndexBlobInfos(ctx) + indexBlobInfos1, err := sm.IndexReaderV1().ListIndexBlobInfos(ctx) if err != nil { log(ctx).Errorf("failed to list index blobs for new index. upgrade may have failed.: %v", err) return nil diff --git a/cli/command_restore.go b/cli/command_restore.go index d11285ab6d8..dabaef30856 100644 --- a/cli/command_restore.go +++ b/cli/command_restore.go @@ -125,6 +125,7 @@ type commandRestore struct { restoreIncremental bool restoreDeleteExtra bool restoreIgnoreErrors bool + flushFiles bool restoreShallowAtDepth int32 minSizeForPlaceholder int32 snapshotTime string @@ -158,6 +159,7 @@ func (c *commandRestore) setup(svc appServices, parent commandParent) { cmd.Flag("shallow", "Shallow restore the directory hierarchy starting at this level (default is to deep restore the entire hierarchy.)").Int32Var(&c.restoreShallowAtDepth) cmd.Flag("shallow-minsize", "When doing a shallow restore, write actual files instead of placeholders smaller than this size.").Int32Var(&c.minSizeForPlaceholder) cmd.Flag("snapshot-time", "When using a path as the source, use the latest snapshot available before this date. 
Default is latest").Default("latest").StringVar(&c.snapshotTime) + cmd.Flag("flush-files", "Specifies whether or not to flush files after restore completes").Default("false").BoolVar(&c.flushFiles) cmd.Action(svc.repositoryReaderAction(c.run)) } @@ -269,6 +271,7 @@ func (c *commandRestore) restoreOutput(ctx context.Context, rep repo.Repository) SkipPermissions: c.restoreSkipPermissions, SkipTimes: c.restoreSkipTimes, WriteSparseFiles: c.restoreWriteSparseFiles, + FlushFiles: c.flushFiles, } if err := o.Init(ctx); err != nil { diff --git a/cli/command_server_control_test.go b/cli/command_server_control_test.go index 08927a74ce8..953a3de8a45 100644 --- a/cli/command_server_control_test.go +++ b/cli/command_server_control_test.go @@ -2,6 +2,7 @@ package cli_test import ( "runtime" + "slices" "testing" "time" @@ -221,11 +222,5 @@ func TestServerControlUDS(t *testing.T) { } func hasLine(lines []string, lookFor string) bool { - for _, l := range lines { - if l == lookFor { - return true - } - } - - return false + return slices.Contains(lines, lookFor) } diff --git a/cli/command_server_start.go b/cli/command_server_start.go index dad8348304b..5f00dac10b3 100644 --- a/cli/command_server_start.go +++ b/cli/command_server_start.go @@ -226,6 +226,11 @@ func (c *commandServerStart) run(ctx context.Context) (reterr error) { // wait for all connections to finish within a shutdown grace period log(ctx2).Debugf("attempting graceful shutdown for %v", c.shutdownGracePeriod) + // Gracefully shutdown GRPC server first to close GRPC connections + log(ctx2).Debug("shutting down GRPC server") + srv.ShutdownGRPCServer() + log(ctx2).Debug("GRPC server shutdown completed") + if serr := httpServer.Shutdown(ctx2); serr != nil { // graceful shutdown unsuccessful, force close log(ctx2).Debugf("unable to shut down gracefully - closing: %v", serr) diff --git a/cli/command_server_tls.go b/cli/command_server_tls.go index ab7fb078f5c..99cc06ba5c3 100644 --- a/cli/command_server_tls.go +++ 
b/cli/command_server_tls.go @@ -47,10 +47,10 @@ func (c *commandServerStart) startServerWithOptionalTLS(ctx context.Context, htt switch len(listeners) { case 0: - if strings.HasPrefix(httpServer.Addr, "unix:") { - l, err = net.Listen("unix", strings.TrimPrefix(httpServer.Addr, "unix:")) + if after, ok := strings.CutPrefix(httpServer.Addr, "unix:"); ok { + l, err = (&net.ListenConfig{}).Listen(ctx, "unix", after) } else { - l, err = net.Listen("tcp", httpServer.Addr) + l, err = (&net.ListenConfig{}).Listen(ctx, "tcp", httpServer.Addr) } if err != nil { diff --git a/cli/command_snapshot_create.go b/cli/command_snapshot_create.go index d0c90f72cb7..6a9a994fe7b 100644 --- a/cli/command_snapshot_create.go +++ b/cli/command_snapshot_create.go @@ -499,7 +499,6 @@ func (c *commandSnapshotCreate) getContentToSnapshot(ctx context.Context, dir st func parseFullSource(str, hostname, username string) (snapshot.SourceInfo, error) { sourceInfo, err := snapshot.ParseSourceInfo(str, hostname, username) - if err != nil { return snapshot.SourceInfo{}, errors.Wrapf(err, "not a valid source %v", str) } else if sourceInfo.Host == "" || sourceInfo.UserName == "" || sourceInfo.Path == "" { diff --git a/cli/command_snapshot_fix_remove_files.go b/cli/command_snapshot_fix_remove_files.go index 8d5f8323cdd..152c32b7bec 100644 --- a/cli/command_snapshot_fix_remove_files.go +++ b/cli/command_snapshot_fix_remove_files.go @@ -3,6 +3,7 @@ package cli import ( "context" "path" + "slices" "github.com/pkg/errors" @@ -28,12 +29,10 @@ func (c *commandSnapshotFixRemoveFiles) setup(svc appServices, parent commandPar } func (c *commandSnapshotFixRemoveFiles) rewriteEntry(ctx context.Context, pathFromRoot string, ent *snapshot.DirEntry) (*snapshot.DirEntry, error) { - for _, id := range c.removeObjectIDs { - if ent.ObjectID.String() == id { - log(ctx).Infof("will remove file %v", pathFromRoot) + if slices.Contains(c.removeObjectIDs, ent.ObjectID.String()) { + log(ctx).Infof("will remove file %v", 
pathFromRoot) - return nil, nil - } + return nil, nil } for _, n := range c.removeFilesByName { diff --git a/cli/command_snapshot_list_test.go b/cli/command_snapshot_list_test.go index 8d906064fa5..851834be70d 100644 --- a/cli/command_snapshot_list_test.go +++ b/cli/command_snapshot_list_test.go @@ -19,6 +19,7 @@ func TestSnapshotList(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) srcdir := testutil.TempDirectory(t) @@ -86,6 +87,7 @@ func TestSnapshotListWithSameFileInMultipleSnapshots(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) srcdir := testutil.TempDirectory(t) diff --git a/cli/command_snapshot_migrate.go b/cli/command_snapshot_migrate.go index 329d969c596..28c51c8f7f3 100644 --- a/cli/command_snapshot_migrate.go +++ b/cli/command_snapshot_migrate.go @@ -100,6 +100,7 @@ func (c *commandSnapshotMigrate) run(ctx context.Context, destRepo repo.Reposito for _, s := range sources { // start a new uploader unless already canceled mu.Lock() + if canceled { mu.Unlock() break @@ -108,9 +109,11 @@ func (c *commandSnapshotMigrate) run(ctx context.Context, destRepo repo.Reposito uploader := upload.NewUploader(destRepo) uploader.Progress = c.svc.getProgress() activeUploaders[s] = uploader + mu.Unlock() wg.Add(1) + semaphore <- struct{}{} go func(s snapshot.SourceInfo) { diff --git a/cli/command_snapshot_pin_test.go b/cli/command_snapshot_pin_test.go index 38bf778ff9c..d80b5782a4a 100644 --- a/cli/command_snapshot_pin_test.go +++ b/cli/command_snapshot_pin_test.go @@ -20,6 +20,7 @@ func TestSnapshotPin(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + 
e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) srcdir := testutil.TempDirectory(t) diff --git a/cli/command_user_add_set.go b/cli/command_user_add_set.go index 4131e755523..175de9a26bb 100644 --- a/cli/command_user_add_set.go +++ b/cli/command_user_add_set.go @@ -108,6 +108,8 @@ To refresh credentials in a running server use 'kopia server refresh' command. return nil } +var errPasswordsDoNotMatch = errors.New("passwords do not match") + func askConfirmPass(out io.Writer, initialPrompt string) (string, error) { pwd, err := askPass(out, initialPrompt) if err != nil { @@ -120,7 +122,7 @@ func askConfirmPass(out io.Writer, initialPrompt string) (string, error) { } if pwd != pwd2 { - return "", errors.Wrap(err, "passwords don't match") + return "", errPasswordsDoNotMatch } return pwd, nil diff --git a/cli/config.go b/cli/config.go index cab86e57f53..f3ca28b5aab 100644 --- a/cli/config.go +++ b/cli/config.go @@ -2,15 +2,12 @@ package cli import ( "context" - "fmt" - "io" "os" "os/signal" "path/filepath" "runtime" "syscall" - "github.com/alecthomas/kingpin/v2" "github.com/pkg/errors" "github.com/kopia/kopia/fs" @@ -19,13 +16,6 @@ import ( "github.com/kopia/kopia/repo" ) -func deprecatedFlag(w io.Writer, help string) func(_ *kingpin.ParseContext) error { - return func(_ *kingpin.ParseContext) error { - fmt.Fprintf(w, "DEPRECATED: %v\n", help) //nolint:errcheck - return nil - } -} - func (c *App) onRepositoryFatalError(f func(err error)) { c.onFatalErrorCallbacks = append(c.onFatalErrorCallbacks, f) } @@ -44,6 +34,7 @@ func (c *App) onTerminate(f func()) { case <-s: } + f() }() } @@ -74,10 +65,11 @@ func (c *App) openRepository(ctx context.Context, required bool) (repo.Repositor func (c *App) optionsFromFlags(ctx context.Context) *repo.Options { return &repo.Options{ - TraceStorage: c.traceStorage, - DisableInternalLog: c.disableInternalLog, - UpgradeOwnerID: c.upgradeOwnerID, - DoNotWaitForUpgrade: c.doNotWaitForUpgrade, + TraceStorage: 
c.traceStorage, + DisableRepositoryLog: c.disableRepositoryLog, + UpgradeOwnerID: c.upgradeOwnerID, + DoNotWaitForUpgrade: c.doNotWaitForUpgrade, + ContentLogWriter: c.contentLogWriter, // when a fatal error is encountered in the repository, run all registered callbacks // and exit the program. diff --git a/cli/observability_flags.go b/cli/observability_flags.go index 48b31b62e32..442d59acdde 100644 --- a/cli/observability_flags.go +++ b/cli/observability_flags.go @@ -22,8 +22,10 @@ import ( "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + oteltrace "go.opentelemetry.io/otel/trace" "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo" ) @@ -41,7 +43,10 @@ var metricsPushFormats = map[string]expfmt.Format{ } type observabilityFlags struct { - enablePProf bool + outputDirectory string + + dumpAllocatorStats bool + enablePProfEndpoint bool metricsListenAddr string metricsPushAddr string metricsJob string @@ -50,11 +55,11 @@ type observabilityFlags struct { metricsPushUsername string metricsPushPassword string metricsPushFormat string - metricsOutputDir string - outputFilePrefix string + otlpTrace bool + saveMetrics bool + pf profileFlags - enableJaeger bool - otlpTrace bool + outputSubdirectoryName string stopPusher chan struct{} pusherWG sync.WaitGroup @@ -63,8 +68,9 @@ type observabilityFlags struct { } func (c *observabilityFlags) setup(svc appServices, app *kingpin.Application) { + app.Flag("dump-allocator-stats", "Dump allocator stats at the end of execution.").Hidden().Envar(svc.EnvName("KOPIA_DUMP_ALLOCATOR_STATS")).BoolVar(&c.dumpAllocatorStats) app.Flag("metrics-listen-addr", "Expose Prometheus metrics on a given host:port").Hidden().StringVar(&c.metricsListenAddr) - app.Flag("enable-pprof", "Expose pprof handlers").Hidden().BoolVar(&c.enablePProf) + app.Flag("enable-pprof", "Expose pprof 
handlers").Hidden().BoolVar(&c.enablePProfEndpoint) // push gateway parameters app.Flag("metrics-push-addr", "Address of push gateway").Envar(svc.EnvName("KOPIA_METRICS_PUSH_ADDR")).Hidden().StringVar(&c.metricsPushAddr) @@ -75,7 +81,6 @@ func (c *observabilityFlags) setup(svc appServices, app *kingpin.Application) { app.Flag("metrics-push-password", "Password for push gateway").Envar(svc.EnvName("KOPIA_METRICS_PUSH_PASSWORD")).Hidden().StringVar(&c.metricsPushPassword) // tracing (OTLP) parameters - app.Flag("enable-jaeger-collector", "(DEPRECATED) Emit OpenTelemetry traces to Jaeger collector").Hidden().Envar(svc.EnvName("KOPIA_ENABLE_JAEGER_COLLECTOR")).BoolVar(&c.enableJaeger) app.Flag("otlp-trace", "Send OpenTelemetry traces to OTLP collector using gRPC").Hidden().Envar(svc.EnvName("KOPIA_ENABLE_OTLP_TRACE")).BoolVar(&c.otlpTrace) var formats []string @@ -88,16 +93,17 @@ func (c *observabilityFlags) setup(svc appServices, app *kingpin.Application) { app.Flag("metrics-push-format", "Format to use for push gateway").Envar(svc.EnvName("KOPIA_METRICS_FORMAT")).Hidden().EnumVar(&c.metricsPushFormat, formats...) - app.Flag("metrics-directory", "Directory where the metrics should be saved when kopia exits. A file per process execution will be created in this directory").Hidden().StringVar(&c.metricsOutputDir) + //nolint:lll + app.Flag("diagnostics-output-directory", "Directory where the diagnostics output should be stored saved when kopia exits. Diagnostics data includes among others: metrics, traces, profiles. 
The output files are stored in a sub-directory for each kopia (process) execution").Hidden().Default(filepath.Join(os.TempDir(), "kopia-diagnostics")).StringVar(&c.outputDirectory) + + app.Flag("metrics-store-on-exit", "Writes metrics to a file in a sub-directory of the directory specified with the --diagnostics-output-directory").Hidden().BoolVar(&c.saveMetrics) + + c.pf.setup(app) app.PreAction(c.initialize) } func (c *observabilityFlags) initialize(ctx *kingpin.ParseContext) error { - if c.metricsOutputDir == "" { - return nil - } - // write to a separate file per command and process execution to avoid // conflicts with previously created files command := "unknown" @@ -105,30 +111,67 @@ func (c *observabilityFlags) initialize(ctx *kingpin.ParseContext) error { command = strings.ReplaceAll(cmd.FullCommand(), " ", "-") } - c.outputFilePrefix = clock.Now().Format("20060102-150405-") + command + c.outputSubdirectoryName = clock.Now().Format("20060102-150405-") + command + + if (c.saveMetrics || c.pf.saveProfiles || c.pf.profileCPU) && c.outputDirectory == "" { + return errors.New("writing diagnostics output requires a non-empty directory name (specified with the '--diagnostics-output-directory' flag)") + } return nil } -func (c *observabilityFlags) startMetrics(ctx context.Context) error { +// spanName specifies the name of the span at the start of a trace. A tracer is +// started only when spanName is not empty. 
+func (c *observabilityFlags) run(ctx context.Context, spanName string, f func(context.Context) error) error { + if err := c.start(ctx); err != nil { + return errors.Wrap(err, "unable to start observability facilities") + } + + defer c.stop(ctx) + + if err := c.pf.start(ctx, filepath.Join(c.outputDirectory, c.outputSubdirectoryName)); err != nil { + return errors.Wrap(err, "failed to start profiling") + } + + defer c.pf.stop(ctx) + + if spanName != "" { + tctx, span := tracer.Start(ctx, spanName, oteltrace.WithSpanKind(oteltrace.SpanKindClient)) + ctx = tctx + + defer span.End() + } + + return f(ctx) +} + +func (c *observabilityFlags) start(ctx context.Context) error { c.maybeStartListener(ctx) if err := c.maybeStartMetricsPusher(ctx); err != nil { return err } - if c.metricsOutputDir != "" { - c.metricsOutputDir = filepath.Clean(c.metricsOutputDir) - + if c.saveMetrics { // ensure the metrics output dir can be created - if err := os.MkdirAll(c.metricsOutputDir, DirMode); err != nil { - return errors.Wrapf(err, "could not create metrics output directory: %s", c.metricsOutputDir) + if _, err := mkSubdirectories(c.outputDirectory, c.outputSubdirectoryName); err != nil { + return err } } return c.maybeStartTraceExporter(ctx) } +func mkSubdirectories(directoryNames ...string) (dirName string, err error) { + dirName = filepath.Join(directoryNames...) + + if err := os.MkdirAll(dirName, DirMode); err != nil { + return "", errors.Wrapf(err, "could not create '%q' subdirectory to save diagnostics output", dirName) + } + + return dirName, nil +} + // Starts observability listener when a listener address is specified. 
func (c *observabilityFlags) maybeStartListener(ctx context.Context) { if c.metricsListenAddr == "" { @@ -138,7 +181,7 @@ func (c *observabilityFlags) maybeStartListener(ctx context.Context) { m := mux.NewRouter() initPrometheus(m) - if c.enablePProf { + if c.enablePProfEndpoint { m.HandleFunc("/debug/pprof/", pprof.Index) m.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) m.HandleFunc("/debug/pprof/profile", pprof.Profile) @@ -195,10 +238,6 @@ func (c *observabilityFlags) maybeStartMetricsPusher(ctx context.Context) error } func (c *observabilityFlags) maybeStartTraceExporter(ctx context.Context) error { - if c.enableJaeger { - return errors.New("Flag '--enable-jaeger-collector' is no longer supported, use '--otlp' instead. See https://github.com/kopia/kopia/pull/3264 for more information") - } - if !c.otlpTrace { return nil } @@ -228,7 +267,11 @@ func (c *observabilityFlags) maybeStartTraceExporter(ctx context.Context) error return nil } -func (c *observabilityFlags) stopMetrics(ctx context.Context) { +func (c *observabilityFlags) stop(ctx context.Context) { + if c.dumpAllocatorStats { + gather.DumpStats(ctx) + } + if c.stopPusher != nil { close(c.stopPusher) @@ -241,11 +284,13 @@ func (c *observabilityFlags) stopMetrics(ctx context.Context) { } } - if c.metricsOutputDir != "" { - filename := filepath.Join(c.metricsOutputDir, c.outputFilePrefix+".prom") - - if err := prometheus.WriteToTextfile(filename, prometheus.DefaultGatherer); err != nil { - log(ctx).Warnf("unable to write metrics file '%s': %v", filename, err) + if c.saveMetrics { + if metricsDir, err := mkSubdirectories(c.outputDirectory, c.outputSubdirectoryName); err != nil { + log(ctx).Warnf("unable to create metrics output directory '%s': %v", metricsDir, err) + } else { + if err := prometheus.WriteToTextfile(filepath.Join(metricsDir, "kopia-metrics.prom"), prometheus.DefaultGatherer); err != nil { + log(ctx).Warnf("unable to write metrics to file: %v", err) + } } } } diff --git 
a/cli/observability_flags_test.go b/cli/observability_flags_test.go index 4e11977b6e5..e0ab61ce955 100644 --- a/cli/observability_flags_test.go +++ b/cli/observability_flags_test.go @@ -97,7 +97,7 @@ func TestMetricsSaveToOutputDirFlags(t *testing.T) { tmp2 := testutil.TempDirectory(t) - env.RunAndExpectSuccess(t, "repo", "status", "--metrics-directory", tmp2) + env.RunAndExpectSuccess(t, "repo", "status", "--diagnostics-output-directory", tmp2, "--metrics-store-on-exit") entries, err := os.ReadDir(tmp2) require.NoError(t, err) diff --git a/cli/password_other.go b/cli/password_other.go index 6e0d0b0e2ec..2d9a464c053 100644 --- a/cli/password_other.go +++ b/cli/password_other.go @@ -1,5 +1,4 @@ //go:build !windows && !linux && !darwin -// +build !windows,!linux,!darwin package cli diff --git a/cli/profile.go b/cli/profile.go index 289719c338f..525098955c3 100644 --- a/cli/profile.go +++ b/cli/profile.go @@ -1,46 +1,156 @@ package cli import ( + "context" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "github.com/alecthomas/kingpin/v2" - "github.com/pkg/profile" + "github.com/pkg/errors" ) +const profDirName = "profiles" + type profileFlags struct { - profileDir string - profileCPU bool - profileMemory int - profileBlocking bool - profileMutex bool + profileGCBeforeSaving bool + profileCPU bool + profileBlockingRate int + profileMemoryRate int + profileMutexFraction int + saveProfiles bool + + outputDirectory string + cpuProfileCloser func() } func (c *profileFlags) setup(app *kingpin.Application) { - app.Flag("profile-dir", "Write profile to the specified directory").Hidden().StringVar(&c.profileDir) + c.profileBlockingRate = -1 + c.profileMemoryRate = -1 + c.profileMutexFraction = -1 + + app.Flag("profile-store-on-exit", "Writes profiling data on exit. 
It writes a file per profile type (heap, goroutine, threadcreate, block, mutex) in a sub-directory in the directory specified with the --diagnostics-output-directory").Hidden().BoolVar(&c.saveProfiles) //nolint:lll + app.Flag("profile-go-gc-before-dump", "Perform a Go GC before writing out memory profiles").Hidden().BoolVar(&c.profileGCBeforeSaving) + app.Flag("profile-blocking-rate", "Blocking profiling rate, a value of 0 turns off block profiling").Hidden().IntVar(&c.profileBlockingRate) app.Flag("profile-cpu", "Enable CPU profiling").Hidden().BoolVar(&c.profileCPU) - app.Flag("profile-memory", "Enable memory profiling").Hidden().IntVar(&c.profileMemory) - app.Flag("profile-blocking", "Enable block profiling").Hidden().BoolVar(&c.profileBlocking) - app.Flag("profile-mutex", "Enable mutex profiling").Hidden().BoolVar(&c.profileMutex) + app.Flag("profile-memory-rate", "Memory profiling rate").Hidden().IntVar(&c.profileMemoryRate) + app.Flag("profile-mutex-fraction", "Mutex profiling, a value of 0 turns off mutex profiling").Hidden().IntVar(&c.profileMutexFraction) } -// withProfiling runs the given callback with profiling enabled, configured according to command line flags. -func (c *profileFlags) withProfiling(callback func() error) error { - if c.profileDir != "" { - pp := profile.ProfilePath(c.profileDir) - if c.profileMemory > 0 { - defer profile.Start(pp, profile.MemProfileRate(c.profileMemory)).Stop() - } +func (c *profileFlags) start(ctx context.Context, outputDirectory string) error { + pBlockingRate := c.profileBlockingRate + pMemoryRate := c.profileMemoryRate + pMutexFraction := c.profileMutexFraction - if c.profileCPU { - defer profile.Start(pp, profile.CPUProfile).Stop() + if c.saveProfiles { + // when saving profiles ensure profiling parameters have sensible values + // unless explicitly modified. + // runtime.MemProfileRate has a default value, no need to reset it. 
+ if pBlockingRate == -1 { + pBlockingRate = 1 } - if c.profileBlocking { - defer profile.Start(pp, profile.BlockProfile).Stop() + if pMutexFraction == -1 { + pMutexFraction = 1 } + } + + // set profiling parameters if they have been changed from defaults + if pBlockingRate != -1 { + runtime.SetBlockProfileRate(pBlockingRate) + } + + if pMemoryRate != -1 { + runtime.MemProfileRate = pMemoryRate + } + + if pMutexFraction != -1 { + runtime.SetMutexProfileFraction(pMutexFraction) + } + + if !c.profileCPU && !c.saveProfiles { + return nil + } + + c.outputDirectory = outputDirectory + + // ensure upfront that the pprof output dir can be created. + profDir, err := mkSubdirectories(c.outputDirectory, profDirName) + if err != nil { + return err + } + + if !c.profileCPU { + return nil + } + + // start CPU profile dumper + f, err := os.Create(filepath.Join(profDir, "cpu.pprof")) //nolint:gosec + if err != nil { + return errors.Wrap(err, "could not create CPU profile output file") + } - if c.profileMutex { - defer profile.Start(pp, profile.MutexProfile).Stop() + // CPU profile closer + closer := func() { + pprof.StopCPUProfile() + + if err := f.Close(); err != nil { + log(ctx).Warn("error closing CPU profile output file:", err) } } - return callback() + if err := pprof.StartCPUProfile(f); err != nil { + closer() + + return errors.Wrap(err, "could not start CPU profile") + } + + c.cpuProfileCloser = closer + + return nil +} + +func (c *profileFlags) stop(ctx context.Context) { + if c.cpuProfileCloser != nil { + c.cpuProfileCloser() + c.cpuProfileCloser = nil + } + + if !c.saveProfiles { + return + } + + if c.profileGCBeforeSaving { + // update profiles, otherwise they may not include activity after the last GC + runtime.GC() + } + + profDir, err := mkSubdirectories(c.outputDirectory, profDirName) + if err != nil { + log(ctx).Warn("cannot create directory to save profiles:", err) + } + + for _, p := range pprof.Profiles() { + func() { + fname := filepath.Join(profDir, 
p.Name()+".pprof") + + f, err := os.Create(fname) //nolint:gosec + if err != nil { + log(ctx).Warnf("unable to create profile output file '%s': %v", fname, err) + + return + } + + defer func() { + if err := f.Close(); err != nil { + log(ctx).Warnf("unable to close profile output file '%s': %v", fname, err) + } + }() + + if err := p.WriteTo(f, 0); err != nil { + log(ctx).Warnf("unable to write profile to file '%s': %v", fname, err) + } + }() + } } diff --git a/cli/sighup_unix.go b/cli/sighup_unix.go index 6284ad808ad..de662f04903 100644 --- a/cli/sighup_unix.go +++ b/cli/sighup_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package cli diff --git a/cli/throttle_set.go b/cli/throttle_set.go index 3f0d0b1646c..4bb2e7118d5 100644 --- a/cli/throttle_set.go +++ b/cli/throttle_set.go @@ -109,7 +109,7 @@ func (c *commonThrottleSet) setThrottleInt(ctx context.Context, desc string, val return nil } - v, err := strconv.ParseInt(str, 10, 64) + v, err := strconv.Atoi(str) if err != nil { return errors.Wrapf(err, "can't parse the %v %q", desc, str) } @@ -118,7 +118,7 @@ func (c *commonThrottleSet) setThrottleInt(ctx context.Context, desc string, val log(ctx).Infof("Setting %v to %v.", desc, v) - *val = int(v) + *val = v return nil } diff --git a/fs/cachefs/cache_test.go b/fs/cachefs/cache_test.go index cad706fe6f0..abe3f17fd7b 100644 --- a/fs/cachefs/cache_test.go +++ b/fs/cachefs/cache_test.go @@ -3,6 +3,7 @@ package cachefs import ( "context" "fmt" + "maps" "path/filepath" "reflect" "runtime" @@ -134,10 +135,7 @@ func errorPrefix() string { } func (cv *cacheVerifier) reset() { - cv.lastCallCounter = make(map[string]int) - for k, v := range cv.cacheSource.callCounter { - cv.lastCallCounter[k] = v - } + cv.lastCallCounter = maps.Clone(cv.cacheSource.callCounter) } type lockState struct { diff --git a/fs/localfs/local_fs.go b/fs/localfs/local_fs.go index ed89abdbe19..d0ce32376ee 100644 --- a/fs/localfs/local_fs.go +++ b/fs/localfs/local_fs.go @@ -13,6 +13,16 @@ 
import ( const numEntriesToRead = 100 // number of directory entries to read in one shot +// Options contains configuration options for localfs operations. +type Options struct { + // IgnoreUnreadableDirEntries, when true, causes unreadable directory entries + // to be silently skipped during directory iteration instead of causing errors. + IgnoreUnreadableDirEntries bool +} + +// DefaultOptions stores the default options used by localfs functions. +var DefaultOptions = &Options{} + type filesystemEntry struct { name string size int64 @@ -21,7 +31,8 @@ type filesystemEntry struct { owner fs.OwnerInfo device fs.DeviceInfo - prefix string + prefix string + options *Options } func (e *filesystemEntry) Name() string { @@ -92,6 +103,7 @@ func (fsd *filesystemDirectory) Size() int64 { type fileWithMetadata struct { *os.File + options *Options } func (f *fileWithMetadata) Entry() (fs.Entry, error) { @@ -102,7 +114,7 @@ func (f *fileWithMetadata) Entry() (fs.Entry, error) { basename, prefix := splitDirPrefix(f.Name()) - return newFilesystemFile(newEntry(basename, fi, prefix)), nil + return newFilesystemFile(newEntry(basename, fi, prefix, f.options)), nil } func (fsf *filesystemFile) Open(_ context.Context) (fs.Reader, error) { @@ -111,7 +123,7 @@ func (fsf *filesystemFile) Open(_ context.Context) (fs.Reader, error) { return nil, errors.Wrap(err, "unable to open local file") } - return &fileWithMetadata{f}, nil + return &fileWithMetadata{File: f, options: fsf.options}, nil } func (fsl *filesystemSymlink) Readlink(_ context.Context) (string, error) { @@ -125,7 +137,7 @@ func (fsl *filesystemSymlink) Resolve(_ context.Context) (fs.Entry, error) { return nil, errors.Wrapf(err, "cannot resolve symlink for '%q'", fsl.fullPath()) } - return NewEntry(target) + return NewEntryWithOptions(target, fsl.options) } func (e *filesystemErrorEntry) ErrorInfo() error { @@ -145,8 +157,15 @@ func splitDirPrefix(s string) (basename, prefix string) { } // Directory returns fs.Directory for the 
specified path. +// It uses DefaultOptions for configuration. func Directory(path string) (fs.Directory, error) { - e, err := NewEntry(path) + return DirectoryWithOptions(path, DefaultOptions) +} + +// DirectoryWithOptions returns fs.Directory for the specified path. +// It uses the provided Options for configuration. +func DirectoryWithOptions(path string, options *Options) (fs.Directory, error) { + e, err := NewEntryWithOptions(path, options) if err != nil { return nil, err } diff --git a/fs/localfs/local_fs_32bit.go b/fs/localfs/local_fs_32bit.go index 361b705c1b0..a7e73b0f746 100644 --- a/fs/localfs/local_fs_32bit.go +++ b/fs/localfs/local_fs_32bit.go @@ -1,6 +1,4 @@ //go:build !windows && ((!amd64 && !arm64 && !arm && !ppc64 && !ppc64le && !s390x && !386 && !riscv64) || darwin || openbsd) -// +build !windows -// +build !amd64,!arm64,!arm,!ppc64,!ppc64le,!s390x,!386,!riscv64 darwin openbsd package localfs diff --git a/fs/localfs/local_fs_64bit.go b/fs/localfs/local_fs_64bit.go index bb0ce6ee984..5c86dac9fc6 100644 --- a/fs/localfs/local_fs_64bit.go +++ b/fs/localfs/local_fs_64bit.go @@ -1,8 +1,4 @@ //go:build !windows && !openbsd && !darwin && (amd64 || arm64 || arm || ppc64 || ppc64le || s390x || 386 || riscv64) -// +build !windows -// +build !openbsd -// +build !darwin -// +build amd64 arm64 arm ppc64 ppc64le s390x 386 riscv64 package localfs diff --git a/fs/localfs/local_fs_nonwindows.go b/fs/localfs/local_fs_nonwindows.go index c5df2de7eb7..4007f81e750 100644 --- a/fs/localfs/local_fs_nonwindows.go +++ b/fs/localfs/local_fs_nonwindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package localfs @@ -10,6 +9,8 @@ import ( "github.com/kopia/kopia/fs" ) +const isWindows = false + func platformSpecificOwnerInfo(fi os.FileInfo) fs.OwnerInfo { var oi fs.OwnerInfo if stat, ok := fi.Sys().(*syscall.Stat_t); ok { @@ -30,3 +31,9 @@ func platformSpecificDeviceInfo(fi os.FileInfo) fs.DeviceInfo { return oi } + +// Direct Windows volume paths (e.g. 
Shadow Copy) require a trailing separator. +// The non-windows implementation can be optimized away by the compiler. +func trailingSeparator(_ *filesystemDirectory) string { + return "" +} diff --git a/fs/localfs/local_fs_os.go b/fs/localfs/local_fs_os.go index 9d57bd257f8..cfc410d1d7a 100644 --- a/fs/localfs/local_fs_os.go +++ b/fs/localfs/local_fs_os.go @@ -5,7 +5,6 @@ import ( "io" "os" "path/filepath" - "runtime" "strings" "syscall" @@ -14,9 +13,12 @@ import ( "github.com/kopia/kopia/fs" ) +const separatorStr = string(filepath.Separator) + type filesystemDirectoryIterator struct { dirHandle *os.File childPrefix string + options *Options currentIndex int currentBatch []os.DirEntry @@ -44,7 +46,7 @@ func (it *filesystemDirectoryIterator) Next(_ context.Context) (fs.Entry, error) n := it.currentIndex it.currentIndex++ - e, err := toDirEntryOrNil(it.currentBatch[n], it.childPrefix) + e, err := toDirEntryOrNil(it.currentBatch[n], it.childPrefix, it.options) if err != nil { // stop iteration return nil, err @@ -66,14 +68,14 @@ func (it *filesystemDirectoryIterator) Close() { func (fsd *filesystemDirectory) Iterate(_ context.Context) (fs.DirectoryIterator, error) { fullPath := fsd.fullPath() - f, direrr := os.Open(fullPath) //nolint:gosec - if direrr != nil { - return nil, errors.Wrap(direrr, "unable to read directory") + d, err := os.Open(fullPath + trailingSeparator(fsd)) //nolint:gosec + if err != nil { + return nil, errors.Wrap(err, "unable to read directory") } - childPrefix := fullPath + string(filepath.Separator) + childPrefix := fullPath + separatorStr - return &filesystemDirectoryIterator{dirHandle: f, childPrefix: childPrefix}, nil + return &filesystemDirectoryIterator{dirHandle: d, childPrefix: childPrefix, options: fsd.options}, nil } func (fsd *filesystemDirectory) Child(_ context.Context, name string) (fs.Entry, error) { @@ -88,10 +90,10 @@ func (fsd *filesystemDirectory) Child(_ context.Context, name string) (fs.Entry, return nil, errors.Wrap(err, 
"unable to get child") } - return entryFromDirEntry(name, st, fullPath+string(filepath.Separator)), nil + return entryFromDirEntry(name, st, fullPath+separatorStr, fsd.options), nil } -func toDirEntryOrNil(dirEntry os.DirEntry, prefix string) (fs.Entry, error) { +func toDirEntryOrNil(dirEntry os.DirEntry, prefix string, options *Options) (fs.Entry, error) { n := dirEntry.Name() fi, err := os.Lstat(prefix + n) @@ -100,19 +102,27 @@ func toDirEntryOrNil(dirEntry os.DirEntry, prefix string) (fs.Entry, error) { return nil, nil } + if options != nil && options.IgnoreUnreadableDirEntries { + return nil, nil + } + return nil, errors.Wrap(err, "error reading directory") } - return entryFromDirEntry(n, fi, prefix), nil -} - -func isWindows() bool { - return runtime.GOOS == "windows" + return entryFromDirEntry(n, fi, prefix, options), nil } // NewEntry returns fs.Entry for the specified path, the result will be one of supported entry types: fs.File, fs.Directory, fs.Symlink // or fs.UnsupportedEntry. +// It uses DefaultOptions for configuration. func NewEntry(path string) (fs.Entry, error) { + return NewEntryWithOptions(path, DefaultOptions) +} + +// NewEntryWithOptions returns fs.Entry for the specified path, the result will be one of supported entry types: fs.File, fs.Directory, fs.Symlink +// or fs.UnsupportedEntry. +// It uses the provided Options for configuration. +func NewEntryWithOptions(path string, options *Options) (fs.Entry, error) { path = filepath.Clean(path) fi, err := os.Lstat(path) @@ -121,10 +131,10 @@ func NewEntry(path string) (fs.Entry, error) { // cause os.Lstat to fail with "Incorrect function" error unless they // end with a separator. Retry the operation with the separator added. 
var e syscall.Errno - if isWindows() && - !strings.HasSuffix(path, string(filepath.Separator)) && + if isWindows && + !strings.HasSuffix(path, separatorStr) && errors.As(err, &e) && e == 1 { - fi, err = os.Lstat(path + string(filepath.Separator)) + fi, err = os.Lstat(path + separatorStr) } if err != nil { @@ -133,42 +143,42 @@ func NewEntry(path string) (fs.Entry, error) { } if path == "/" { - return entryFromDirEntry("/", fi, ""), nil + return entryFromDirEntry("/", fi, "", options), nil } basename, prefix := splitDirPrefix(path) - return entryFromDirEntry(basename, fi, prefix), nil + return entryFromDirEntry(basename, fi, prefix, options), nil } -func entryFromDirEntry(basename string, fi os.FileInfo, prefix string) fs.Entry { +func entryFromDirEntry(basename string, fi os.FileInfo, prefix string, options *Options) fs.Entry { isplaceholder := strings.HasSuffix(basename, ShallowEntrySuffix) maskedmode := fi.Mode() & os.ModeType switch { case maskedmode == os.ModeDir && !isplaceholder: - return newFilesystemDirectory(newEntry(basename, fi, prefix)) + return newFilesystemDirectory(newEntry(basename, fi, prefix, options)) case maskedmode == os.ModeDir && isplaceholder: - return newShallowFilesystemDirectory(newEntry(basename, fi, prefix)) + return newShallowFilesystemDirectory(newEntry(basename, fi, prefix, options)) case maskedmode == os.ModeSymlink && !isplaceholder: - return newFilesystemSymlink(newEntry(basename, fi, prefix)) + return newFilesystemSymlink(newEntry(basename, fi, prefix, options)) case maskedmode == 0 && !isplaceholder: - return newFilesystemFile(newEntry(basename, fi, prefix)) + return newFilesystemFile(newEntry(basename, fi, prefix, options)) case maskedmode == 0 && isplaceholder: - return newShallowFilesystemFile(newEntry(basename, fi, prefix)) + return newShallowFilesystemFile(newEntry(basename, fi, prefix, options)) default: - return newFilesystemErrorEntry(newEntry(basename, fi, prefix), fs.ErrUnknown) + return 
newFilesystemErrorEntry(newEntry(basename, fi, prefix, options), fs.ErrUnknown) } } var _ os.FileInfo = (*filesystemEntry)(nil) -func newEntry(basename string, fi os.FileInfo, prefix string) filesystemEntry { +func newEntry(basename string, fi os.FileInfo, prefix string, options *Options) filesystemEntry { return filesystemEntry{ TrimShallowSuffix(basename), fi.Size(), @@ -177,5 +187,6 @@ func newEntry(basename string, fi os.FileInfo, prefix string) filesystemEntry { platformSpecificOwnerInfo(fi), platformSpecificDeviceInfo(fi), prefix, + options, } } diff --git a/fs/localfs/local_fs_test.go b/fs/localfs/local_fs_test.go index c09a4643561..f44c340d57c 100644 --- a/fs/localfs/local_fs_test.go +++ b/fs/localfs/local_fs_test.go @@ -249,7 +249,7 @@ func verifyChild(t *testing.T, dir fs.Directory) { } func TestLocalFilesystemPath(t *testing.T) { - if isWindows() { + if isWindows { t.Skip() } @@ -286,7 +286,7 @@ func TestSplitDirPrefix(t *testing.T) { "/tmp/foo": {"/tmp/", "foo"}, } - if isWindows() { + if isWindows { cases[`c:/`] = pair{`c:/`, ``} cases[`c:\`] = pair{`c:\`, ``} cases[`c:/temp`] = pair{`c:/`, `temp`} @@ -306,3 +306,277 @@ func TestSplitDirPrefix(t *testing.T) { require.Equal(t, want.prefix, prefix, input) } } + +// getOptionsFromEntry extracts the options pointer from an fs.Entry by type assertion. +// Returns nil if the entry doesn't have options or if type assertion fails. 
+func getOptionsFromEntry(entry fs.Entry) *Options { + switch e := entry.(type) { + case *filesystemDirectory: + return e.options + case *filesystemFile: + return e.options + case *filesystemSymlink: + return e.options + case *filesystemErrorEntry: + return e.options + default: + return nil + } +} + +func TestOptionsPassedToChildEntries(t *testing.T) { + ctx := testlogging.Context(t) + tmp := testutil.TempDirectory(t) + + // Create a test directory structure + require.NoError(t, os.WriteFile(filepath.Join(tmp, "file1.txt"), []byte{1, 2, 3}, 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(tmp, "file2.txt"), []byte{4, 5, 6}, 0o777)) + subdir := filepath.Join(tmp, "subdir") + require.NoError(t, os.Mkdir(subdir, 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(subdir, "subfile.txt"), []byte{7, 8, 9}, 0o777)) + + // Create custom options + customOptions := &Options{ + IgnoreUnreadableDirEntries: true, + } + + // Create directory with custom options + dir, err := DirectoryWithOptions(tmp, customOptions) + require.NoError(t, err) + + // Verify the directory itself has the correct options + dirOptions := getOptionsFromEntry(dir) + require.NotNil(t, dirOptions, "directory should have options") + require.Equal(t, customOptions, dirOptions, "directory should have the same options pointer") + require.True(t, dirOptions.IgnoreUnreadableDirEntries, "directory options should match") + + // Test that options are passed to children via Child() + childFile, err := dir.Child(ctx, "file1.txt") + require.NoError(t, err) + + childOptions := getOptionsFromEntry(childFile) + require.NotNil(t, childOptions, "child file should have options") + require.Equal(t, customOptions, childOptions, "child file should have the same options pointer") + + // Test that options are passed to subdirectories + childDir, err := dir.Child(ctx, "subdir") + require.NoError(t, err) + + subdirOptions := getOptionsFromEntry(childDir) + require.NotNil(t, subdirOptions, "subdirectory should have 
options") + require.Equal(t, customOptions, subdirOptions, "subdirectory should have the same options pointer") + + // Test that options are passed to nested children + subdirEntry, ok := childDir.(fs.Directory) + require.True(t, ok, "child directory should be a directory") + + nestedFile, err := subdirEntry.Child(ctx, "subfile.txt") + require.NoError(t, err) + + nestedOptions := getOptionsFromEntry(nestedFile) + require.NotNil(t, nestedOptions, "nested file should have options") + require.Equal(t, customOptions, nestedOptions, "nested file should have the same options pointer") +} + +func TestOptionsPassedThroughIteration(t *testing.T) { + ctx := testlogging.Context(t) + tmp := testutil.TempDirectory(t) + + // Create a test directory structure + require.NoError(t, os.WriteFile(filepath.Join(tmp, "file1.txt"), []byte{1, 2, 3}, 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(tmp, "file2.txt"), []byte{4, 5, 6}, 0o777)) + require.NoError(t, os.Mkdir(filepath.Join(tmp, "subdir"), 0o777)) + + // Create custom options + customOptions := &Options{ + IgnoreUnreadableDirEntries: true, + } + + // Create directory with custom options + dir, err := DirectoryWithOptions(tmp, customOptions) + require.NoError(t, err) + + // Iterate through entries and verify all have the same options pointer + iter, err := dir.Iterate(ctx) + require.NoError(t, err) + + defer iter.Close() + + entryCount := 0 + for { + entry, err := iter.Next(ctx) + if err != nil { + t.Fatalf("iteration error: %v", err) + } + + if entry == nil { + break + } + + entryCount++ + entryOptions := getOptionsFromEntry(entry) + require.NotNil(t, entryOptions, "entry %s should have options", entry.Name()) + require.Equal(t, customOptions, entryOptions, "entry %s should have the same options pointer", entry.Name()) + } + + require.Equal(t, 3, entryCount, "should have found 3 entries") +} + +func TestOptionsPassedThroughSymlinkResolution(t *testing.T) { + ctx := testlogging.Context(t) + tmp := 
testutil.TempDirectory(t) + + // Create a target file + targetFile := filepath.Join(tmp, "target.txt") + require.NoError(t, os.WriteFile(targetFile, []byte{1, 2, 3}, 0o777)) + + // Create a symlink + symlinkPath := filepath.Join(tmp, "link") + require.NoError(t, os.Symlink(targetFile, symlinkPath)) + + // Create custom options + customOptions := &Options{ + IgnoreUnreadableDirEntries: true, + } + + // Create symlink entry with custom options + symlinkEntry, err := NewEntryWithOptions(symlinkPath, customOptions) + require.NoError(t, err) + + // Verify the symlink has the correct options + symlinkOptions := getOptionsFromEntry(symlinkEntry) + require.NotNil(t, symlinkOptions, "symlink should have options") + require.Equal(t, customOptions, symlinkOptions, "symlink should have the same options pointer") + + // Resolve the symlink and verify the resolved entry has the same options + symlink, ok := symlinkEntry.(fs.Symlink) + require.True(t, ok, "entry should be a symlink") + + resolved, err := symlink.Resolve(ctx) + require.NoError(t, err) + + resolvedOptions := getOptionsFromEntry(resolved) + require.NotNil(t, resolvedOptions, "resolved entry should have options") + require.Equal(t, customOptions, resolvedOptions, "resolved entry should have the same options pointer") +} + +func TestOptionsPassedToNewEntry(t *testing.T) { + tmp := testutil.TempDirectory(t) + + // Create a file + filePath := filepath.Join(tmp, "testfile.txt") + require.NoError(t, os.WriteFile(filePath, []byte{1, 2, 3}, 0o777)) + + // Create custom options + customOptions := &Options{ + IgnoreUnreadableDirEntries: true, + } + + // Create entry with custom options + entry, err := NewEntryWithOptions(filePath, customOptions) + require.NoError(t, err) + + // Verify the entry has the correct options + entryOptions := getOptionsFromEntry(entry) + require.NotNil(t, entryOptions, "entry should have options") + require.Equal(t, customOptions, entryOptions, "entry should have the same options pointer") +} + 
+func TestOptionsPassedToNestedDirectories(t *testing.T) { + ctx := testlogging.Context(t) + tmp := testutil.TempDirectory(t) + + // Create nested directory structure + level1 := filepath.Join(tmp, "level1") + level2 := filepath.Join(level1, "level2") + level3 := filepath.Join(level2, "level3") + + require.NoError(t, os.MkdirAll(level3, 0o777)) + require.NoError(t, os.WriteFile(filepath.Join(level3, "file.txt"), []byte{1, 2, 3}, 0o777)) + + // Create custom options + customOptions := &Options{ + IgnoreUnreadableDirEntries: true, + } + + // Create root directory with custom options + rootDir, err := DirectoryWithOptions(tmp, customOptions) + require.NoError(t, err) + + // Navigate through nested directories and verify options are passed + currentDir := rootDir + levels := []string{"level1", "level2", "level3"} + + for _, level := range levels { + child, err := currentDir.Child(ctx, level) + require.NoError(t, err) + + childOptions := getOptionsFromEntry(child) + require.NotNil(t, childOptions, "directory %s should have options", level) + require.Equal(t, customOptions, childOptions, "directory %s should have the same options pointer", level) + + var ok bool + + currentDir, ok = child.(fs.Directory) + require.True(t, ok, "child should be a directory") + } + + // Verify the file in the deepest directory has the same options + file, err := currentDir.Child(ctx, "file.txt") + require.NoError(t, err) + + fileOptions := getOptionsFromEntry(file) + require.NotNil(t, fileOptions, "file should have options") + require.Equal(t, customOptions, fileOptions, "file should have the same options pointer") +} + +func TestDefaultOptionsUsedByDefault(t *testing.T) { + tmp := testutil.TempDirectory(t) + + // Create a file + filePath := filepath.Join(tmp, "testfile.txt") + require.NoError(t, os.WriteFile(filePath, []byte{1, 2, 3}, 0o777)) + + // Use default NewEntry (should use DefaultOptions) + entry, err := NewEntry(filePath) + require.NoError(t, err) + + // Verify the entry has 
DefaultOptions + entryOptions := getOptionsFromEntry(entry) + require.NotNil(t, entryOptions, "entry should have options") + require.Equal(t, DefaultOptions, entryOptions, "entry should have DefaultOptions pointer") +} + +func TestDifferentOptionsInstances(t *testing.T) { + tmp := testutil.TempDirectory(t) + + // Create two different files + filePath1 := filepath.Join(tmp, "testfile1.txt") + filePath2 := filepath.Join(tmp, "testfile2.txt") + + require.NoError(t, os.WriteFile(filePath1, []byte{1, 2, 3}, 0o777)) + require.NoError(t, os.WriteFile(filePath2, []byte{4, 5, 6}, 0o777)) + + // Create two different options instances with same values + options1 := &Options{IgnoreUnreadableDirEntries: true} + options2 := &Options{IgnoreUnreadableDirEntries: false} + + // Create entries with different options instances + entry1, err := NewEntryWithOptions(filePath1, options1) + require.NoError(t, err) + + entry2, err := NewEntryWithOptions(filePath2, options2) + require.NoError(t, err) + + // Verify they have the correct options pointers + entry1Options := getOptionsFromEntry(entry1) + entry2Options := getOptionsFromEntry(entry2) + + require.NotNil(t, entry1Options) + require.NotNil(t, entry2Options) + require.Equal(t, options1, entry1Options, "entry1 should have options1 pointer") + require.Equal(t, options2, entry2Options, "entry2 should have options2 pointer") + require.NotEqual(t, entry1Options, entry2Options, "entries should have different options pointers") + require.True(t, entry1Options.IgnoreUnreadableDirEntries, "entry1 options should have IgnoreUnreadableDirEntries=true") + require.False(t, entry2Options.IgnoreUnreadableDirEntries, "entry2 options should have IgnoreUnreadableDirEntries=false") +} diff --git a/fs/localfs/local_fs_windows.go b/fs/localfs/local_fs_windows.go index 086482e953f..3553075dae7 100644 --- a/fs/localfs/local_fs_windows.go +++ b/fs/localfs/local_fs_windows.go @@ -2,16 +2,32 @@ package localfs import ( "os" + "runtime" + "strings" 
"github.com/kopia/kopia/fs" ) -//nolint:revive -func platformSpecificOwnerInfo(fi os.FileInfo) fs.OwnerInfo { +var isWindows = runtime.GOOS == "windows" + +func platformSpecificOwnerInfo(_ os.FileInfo) fs.OwnerInfo { return fs.OwnerInfo{} } -//nolint:revive -func platformSpecificDeviceInfo(fi os.FileInfo) fs.DeviceInfo { +func platformSpecificDeviceInfo(_ os.FileInfo) fs.DeviceInfo { return fs.DeviceInfo{} } + +// Direct Windows volume paths (e.g. Shadow Copy) require a trailing separator. +func trailingSeparator(fsd *filesystemDirectory) string { + // is fsd a Windows VSS Volume and has no trailing separator? + if isWindows && + fsd.prefix == `\\?\GLOBALROOT\Device\` && + strings.HasPrefix(fsd.Name(), "HarddiskVolumeShadowCopy") && + !strings.HasSuffix(fsd.Name(), separatorStr) { + + return separatorStr + } + + return "" +} diff --git a/fs/localfs/localfs_benchmark_test.go b/fs/localfs/localfs_benchmark_test.go index cdfaf241e5b..68255b5295d 100644 --- a/fs/localfs/localfs_benchmark_test.go +++ b/fs/localfs/localfs_benchmark_test.go @@ -43,19 +43,15 @@ func BenchmarkReadDir10000(b *testing.B) { func benchmarkReadDirWithCount(b *testing.B, fileCount int) { b.Helper() - b.StopTimer() - td := b.TempDir() for range fileCount { os.WriteFile(filepath.Join(td, uuid.NewString()), []byte{1, 2, 3, 4}, 0o644) } - b.StartTimer() - ctx := context.Background() - for range b.N { + for b.Loop() { dir, _ := localfs.Directory(td) fs.IterateEntries(ctx, dir, func(context.Context, fs.Entry) error { return nil diff --git a/go.mod b/go.mod index a17a25295d1..dc53cb8ecd0 100644 --- a/go.mod +++ b/go.mod @@ -1,40 +1,40 @@ module github.com/kopia/kopia -go 1.24 +go 1.25 -toolchain go1.24.4 +toolchain go1.25.4 require ( - cloud.google.com/go/storage v1.55.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 + cloud.google.com/go/storage v1.57.2 + 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/chmduquesne/rollinghash v4.0.0+incompatible - github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b - github.com/chromedp/chromedp v0.13.7 - github.com/coreos/go-systemd/v22 v22.5.0 + github.com/chromedp/cdproto v0.0.0-20250724212937-08a3db8b4327 + github.com/chromedp/chromedp v0.14.2 + github.com/coreos/go-systemd/v22 v22.6.0 github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 github.com/edsrzf/mmap-go v1.2.0 github.com/fatih/color v1.18.0 github.com/foomo/htpasswd v0.0.0-20200116085101-e3a90e78da9c - github.com/gofrs/flock v0.12.1 + github.com/gofrs/flock v0.13.0 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/google/fswalker v0.3.3 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 - github.com/hanwen/go-fuse/v2 v2.8.0 - github.com/hashicorp/cronexpr v1.1.2 - github.com/klauspost/compress v1.18.0 + github.com/hanwen/go-fuse/v2 v2.9.0 + github.com/hashicorp/cronexpr v1.1.3 + github.com/klauspost/compress v1.18.2 github.com/klauspost/pgzip v1.2.6 - github.com/klauspost/reedsolomon v1.12.4 - github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557 + github.com/klauspost/reedsolomon v1.12.6 + github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.14 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.94 + github.com/minio/minio-go/v7 v7.0.97 github.com/mocktools/go-smtp-mock/v2 v2.5.1 github.com/mxk/go-vss v1.2.0 github.com/natefinch/atomic v1.0.1 @@ -42,112 +42,112 @@ require ( github.com/pierrec/lz4 v2.6.1+incompatible 
github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 - github.com/pkg/sftp v1.13.9 - github.com/prometheus/client_golang v1.22.0 + github.com/pkg/sftp v1.13.10 + github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.65.0 + github.com/prometheus/common v0.67.4 github.com/sanity-io/litter v1.5.8 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 - github.com/stretchr/testify v1.10.0 - github.com/studio-b12/gowebdav v0.10.0 + github.com/stretchr/testify v1.11.1 + github.com/studio-b12/gowebdav v0.11.0 github.com/tg123/go-htpasswd v1.2.4 github.com/zalando/go-keyring v0.2.6 github.com/zeebo/blake3 v0.2.4 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.40.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.uber.org/zap v1.27.1 + golang.org/x/crypto v0.45.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 - golang.org/x/mod v0.26.0 - golang.org/x/net v0.42.0 - golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.16.0 - golang.org/x/sys v0.34.0 - golang.org/x/term v0.33.0 - golang.org/x/text v0.27.0 - google.golang.org/api v0.241.0 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + golang.org/x/mod v0.30.0 + golang.org/x/net v0.47.0 + golang.org/x/oauth2 v0.33.0 + golang.org/x/sync v0.18.0 + golang.org/x/sys v0.38.0 + golang.org/x/term v0.37.0 + golang.org/x/text v0.31.0 + google.golang.org/api v0.256.0 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216 ) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.23.0 // indirect 
- cloud.google.com/go v0.121.1 // indirect - cloud.google.com/go/auth v0.16.2 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.7.0 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v5 v5.0.2 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chromedp/sysutil v1.1.0 // indirect - github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect + github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect github.com/danieljoos/wincred v1.2.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
// indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/frankban/quicktest v1.13.1 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect - github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-json-experiment/json v0.0.0-20250725192818-e39067aee2d2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/ws v1.4.0 // indirect - github.com/goccy/go-json v0.10.5 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/golang-jwt/jwt/v5 v5.2.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect github.com/google/readahead v0.0.0-20161222183148-eaceba169032 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/klauspost/crc32 v1.3.0 // indirect github.com/kr/fs v0.1.0 // 
indirect - github.com/minio/crc64nvme v1.0.1 // indirect + github.com/minio/crc64nvme v1.1.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/philhofer/fwd v1.2.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/rs/xid v1.6.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/tinylib/msgp v1.3.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - github.com/zeebo/errs v1.4.0 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect - go.opentelemetry.io/proto/otlp v1.7.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/multierr v1.11.0 // 
indirect - golang.org/x/time v0.12.0 // indirect - google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 765266ddc21..59bcce9c2c7 100644 --- a/go.sum +++ b/go.sum @@ -1,98 +1,97 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss= -cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= -cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw= -cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw= -cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= -cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 
h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= -cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= +cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= -cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0= -cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY= +cloud.google.com/go/storage v1.57.2 h1:sVlym3cHGYhrp6XZKkKb+92I1V42ks2qKKpB0CF5Mb4= +cloud.google.com/go/storage v1.57.2/go.mod h1:n5ijg4yiRXXpCu0sJTD6k+eMf7GRrJmPyr9YxLXGHOk= cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 
h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/GehirnInc/crypt v0.0.0-20190301055215-6c0105aabd46/go.mod h1:kC29dT1vFpj7py2OvG1khBdQpo3kInWP+6QipLbdngo= github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5 h1:IEjq88XO4PuBDcvmjQJcQGg+w+UaafSy8G5Kcb5tBhI= github.com/GehirnInc/crypt v0.0.0-20230320061759-8cc1b52080c5/go.mod h1:exZ0C/1emQJAw5tHOaUDyY1ycttqBAPcxuzf7QbY6ec= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= 
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= 
-github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chmduquesne/rollinghash v4.0.0+incompatible h1:hnREQO+DXjqIw3rUTzWN7/+Dpw+N5Um8zpKV0JOEgbo= github.com/chmduquesne/rollinghash v4.0.0+incompatible/go.mod h1:Uc2I36RRfTAf7Dge82bi3RU0OQUmXT9iweIcPqvr8A0= -github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b h1:jJmiCljLNTaq/O1ju9Bzz2MPpFlmiTn0F7LwCoeDZVw= -github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b/go.mod h1:NItd7aLkcfOA/dcMXvl8p1u+lQqioRMq/SqDp71Pb/k= -github.com/chromedp/chromedp v0.13.7 h1:vt+mslxscyvUr58eC+6DLSeeo74jpV/HI2nWetjv/W4= -github.com/chromedp/chromedp v0.13.7/go.mod h1:h8GPP6ZtLMLsU8zFbTcb7ZDGCvCy8j/vRoFmRltQx9A= +github.com/chromedp/cdproto v0.0.0-20250724212937-08a3db8b4327 h1:UQ4AU+BGti3Sy/aLU8KVseYKNALcX9UXY6DfpwQ6J8E= +github.com/chromedp/cdproto v0.0.0-20250724212937-08a3db8b4327/go.mod 
h1:NItd7aLkcfOA/dcMXvl8p1u+lQqioRMq/SqDp71Pb/k= +github.com/chromedp/chromedp v0.14.2 h1:r3b/WtwM50RsBZHMUm9fsNhhzRStTHrKdr2zmwbZSzM= +github.com/chromedp/chromedp v0.14.2/go.mod h1:rHzAv60xDE7VNy/MYtTUrYreSc0ujt2O1/C3bzctYBo= github.com/chromedp/sysutil v1.1.0 h1:PUFNv5EcprjqXZD9nJb9b/c9ibAbxiYo4exNWZyipwM= github.com/chromedp/sysutil v1.1.0/go.mod h1:WiThHUdltqCNKGc4gaU50XgYjwjYIhKWoHGPTUfWTJ8= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= -github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 h1:90Ly+6UfUypEF6vvvW5rQIv9opIL8CbmW9FT20LDQoY= github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= -github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= -github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= -github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 
h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= @@ -109,10 +108,10 @@ github.com/frankban/quicktest v1.13.1 h1:xVm/f9seEhZFL9+n5kv5XLrGwy6elc4V9v/XFY2 github.com/frankban/quicktest v1.13.1/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= -github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535 h1:yE7argOs92u+sSCRgqqe6eF+cDaVhSPlioy1UkA0p/w= -github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-json-experiment/json v0.0.0-20250725192818-e39067aee2d2 h1:iizUGZ9pEquQS5jTGkh4AqeeHCMbfbjeb0zMt0aEFzs= +github.com/go-json-experiment/json v0.0.0-20250725192818-e39067aee2d2/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -126,25 +125,21 @@ github.com/gobwas/pool v0.2.1 
h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= -github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= -github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= -github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= +github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= +github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= -github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/fswalker v0.3.3 h1:K2+d6cb3vNFjquVPRObIY+QaXJ6cbleVV6yZWLzkkQ8= github.com/google/fswalker v0.3.3/go.mod h1:9upMSscEE8oRi0WJ0rXZZYya1DmgUtJFhXAw7KNS3c4= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= @@ -160,32 +155,34 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= -github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= -github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod 
h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= -github.com/hanwen/go-fuse/v2 v2.8.0 h1:wV8rG7rmCz8XHSOwBZhG5YcVqcYjkzivjmbaMafPlAs= -github.com/hanwen/go-fuse/v2 v2.8.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI= -github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= -github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/hanwen/go-fuse/v2 v2.9.0 h1:0AOGUkHtbOVeyGLr0tXupiid1Vg7QB7M6YUcdmVdC58= +github.com/hanwen/go-fuse/v2 v2.9.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI= +github.com/hashicorp/cronexpr v1.1.3 h1:rl5IkxXN2m681EfivTlccqIryzYJSXRGRNa0xeG7NA4= +github.com/hashicorp/cronexpr v1.1.3/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.10 
h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= +github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= -github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= -github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557 h1:je1C/xnmKxnaJsIgj45me5qA51TgtK9uMwTxgDw+9H0= -github.com/kopia/htmluibuild v0.0.1-0.20250607181534-77e0f3f9f557/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto= +github.com/klauspost/reedsolomon v1.12.6 h1:8pqE9aECQG/ZFitiUD1xK/E83zwosBAZtE3UbuZM8TQ= +github.com/klauspost/reedsolomon v1.12.6/go.mod h1:ggJT9lc71Vu+cSOPBlxGvBN6TfAS77qB4fp8vJ05NSA= +github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d h1:U3VB/cDMsPW4zB4JRFbVRDzIpPytt889rJUKAG40NPA= +github.com/kopia/htmluibuild v0.0.1-0.20251125011029-7f1c3f84f29d/go.mod h1:h53A5JM3t2qiwxqxusBe+PFgGcgZdS+DWCQvG5PTlto= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -204,12 +201,12 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= -github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= +github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= +github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= -github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= +github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= +github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/mocktools/go-smtp-mock/v2 v2.5.1 h1:QcMJMChSgG1olVj4o6xxQFdrWzRjYNrcq660HAjd0wA= @@ -224,8 +221,8 @@ github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhA github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= -github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/philhofer/fwd v1.2.0 
h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= +github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -234,200 +231,142 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= -github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= -github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= +github.com/pkg/sftp v1.13.10 h1:+5FbKNTe5Z9aspU88DPIKJ9z2KZoaGCu6Sr6kKR/5mU= +github.com/pkg/sftp v1.13.10/go.mod h1:bJ1a7uDhrX/4OII+agvy28lzRvQrmIQuaHrcI1HbeGA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7 h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20= 
github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= -github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/sanity-io/litter v1.5.8 h1:uM/2lKrWdGbRXDrIq08Lh9XtVYoeGtcQxk9rtQ7+rYg= github.com/sanity-io/litter v1.5.8/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= -github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify 
v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/studio-b12/gowebdav v0.10.0 h1:Yewz8FFiadcGEu4hxS/AAJQlHelndqln1bns3hcJIYc= -github.com/studio-b12/gowebdav v0.10.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/studio-b12/gowebdav v0.11.0 h1:qbQzq4USxY28ZYsGJUfO5jR+xkFtcnwWgitp4Zp1irU= +github.com/studio-b12/gowebdav v0.11.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/tg123/go-htpasswd v1.2.4 h1:HgH8KKCjdmo7jjXWN9k1nefPBd7Be3tFCTjc2jPraPU= github.com/tg123/go-htpasswd v1.2.4/go.mod h1:EKThQok9xHkun6NBMynNv6Jmu24A33XdZzzl4Q7H1+0= github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= 
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= -go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.opentelemetry.io/otel/metric v1.38.0 
h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= -golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod 
h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= -golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.241.0 h1:QKwqWQlkc6O895LchPEDUSYr22Xp3NCxpQRiWTB6avE= -google.golang.org/api v0.241.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= -google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= -google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod 
h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -435,7 +374,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216 h1:2TSTkQ8PMvGOD5eeqqRVv6Z9+BYI+bowK97RCr3W+9M= gopkg.in/kothar/go-backblaze.v0 v0.0.0-20210124194846-35409b867216/go.mod h1:zJ2QpyDCYo1KvLXlmdnFlQAyF/Qfth0fB8239Qg7BIE= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/acl/acl.go b/internal/acl/acl.go index 4b709cd22c3..727906e16fa 100644 --- a/internal/acl/acl.go +++ b/internal/acl/acl.go @@ -2,6 +2,7 @@ package acl import ( "fmt" + "slices" "sort" "strings" @@ -81,10 +82,8 @@ func nonEmptyString(v string) error { func oneOf(allowed ...string) valueValidatorFunc { return func(v string) error { - for _, a := range allowed { - if v == a { - return nil - } + if slices.Contains(allowed, v) { + return nil } return errors.Errorf("must be one of: %v", strings.Join(allowed, ", ")) diff --git a/internal/apiclient/apiclient.go b/internal/apiclient/apiclient.go index 510823b1110..2e133bd5533 100644 --- a/internal/apiclient/apiclient.go +++ b/internal/apiclient/apiclient.go @@ -224,8 +224,8 @@ func NewKopiaAPIClient(options Options) (*KopiaAPIClient, error) { tp, _ := transport.(*http.Transport) transport = tp.Clone() tp, _ = transport.(*http.Transport) - tp.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - dial, err := net.Dial("unix", u.Path) + tp.DialContext = func(ctx context.Context, _, _ string) 
(net.Conn, error) { + dial, err := (&net.Dialer{}).DialContext(ctx, "unix", u.Path) return dial, errors.Wrap(err, "Failed to connect to socket: "+options.BaseURL) } } diff --git a/internal/blobparam/blobid_params.go b/internal/blobparam/blobid_params.go new file mode 100644 index 00000000000..8ff383ed0af --- /dev/null +++ b/internal/blobparam/blobid_params.go @@ -0,0 +1,91 @@ +// Package blobparam provides parameters for logging blob-specific operations. +package blobparam + +import ( + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/repo/blob" +) + +type blobMetadataListParam struct { + key string + list []blob.Metadata +} + +func (v blobMetadataListParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginListField(v.key) + + for _, bm := range v.list { + jw.BeginObject() + jw.StringField("blobID", string(bm.BlobID)) + jw.Int64Field("l", bm.Length) + jw.TimeField("ts", bm.Timestamp) + jw.EndObject() + } + + jw.EndList() +} + +// BlobMetadataList creates a parameter for a list of blob metadata. +// +//nolint:revive +func BlobMetadataList(name string, list []blob.Metadata) blobMetadataListParam { + return blobMetadataListParam{key: name, list: list} +} + +type blobIDParam struct { + key string + val blob.ID +} + +func (v blobIDParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.StringField(v.key, string(v.val)) +} + +// BlobID creates a parameter for a blob ID. +// +//nolint:revive +func BlobID(name string, id blob.ID) blobIDParam { + return blobIDParam{key: name, val: id} +} + +type blobIDListParam struct { + key string + list []blob.ID +} + +func (v blobIDListParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginListField(v.key) + + for _, blobID := range v.list { + jw.StringElement(string(blobID)) + } + + jw.EndList() +} + +// BlobIDList creates a parameter for a list of blob IDs. 
+// +//nolint:revive +func BlobIDList(name string, list []blob.ID) blobIDListParam { + return blobIDListParam{key: name, list: list} +} + +type blobMetadataParam struct { + key string + val blob.Metadata +} + +func (v blobMetadataParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(v.key) + jw.StringField("blobID", string(v.val.BlobID)) + jw.Int64Field("l", v.val.Length) + jw.TimeField("ts", v.val.Timestamp) + jw.EndObject() +} + +// BlobMetadata creates a parameter for a blob metadata. +// +//nolint:revive +func BlobMetadata(name string, bm blob.Metadata) blobMetadataParam { + return blobMetadataParam{key: name, val: bm} +} diff --git a/internal/blobtesting/asserts.go b/internal/blobtesting/asserts.go index 45fe473b031..09c88d2b3e1 100644 --- a/internal/blobtesting/asserts.go +++ b/internal/blobtesting/asserts.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "reflect" - "sort" + "slices" "testing" "time" @@ -199,9 +199,7 @@ func AssertListResultsIDs(ctx context.Context, t *testing.T, s blob.Storage, pre func sorted(s []blob.ID) []blob.ID { x := append([]blob.ID(nil), s...) - sort.Slice(x, func(i, j int) bool { - return x[i] < x[j] - }) + slices.Sort(x) return x } diff --git a/internal/blobtesting/faulty.go b/internal/blobtesting/faulty.go index f36e7360b60..6a77b3e15d4 100644 --- a/internal/blobtesting/faulty.go +++ b/internal/blobtesting/faulty.go @@ -1,4 +1,3 @@ -// Package blobtesting implements storage with fault injection. 
package blobtesting import ( diff --git a/internal/blobtesting/map.go b/internal/blobtesting/map.go index db06376bd6a..cead45069e1 100644 --- a/internal/blobtesting/map.go +++ b/internal/blobtesting/map.go @@ -3,7 +3,7 @@ package blobtesting import ( "bytes" "context" - "sort" + "slices" "strings" "sync" "time" @@ -181,9 +181,7 @@ func (s *mapStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback fun s.mutex.RUnlock() - sort.Slice(keys, func(i, j int) bool { - return keys[i] < keys[j] - }) + slices.Sort(keys) for _, k := range keys { s.mutex.RLock() diff --git a/internal/blobtesting/object_locking_map.go b/internal/blobtesting/object_locking_map.go index 6fbb20c344c..4ac825349e5 100644 --- a/internal/blobtesting/object_locking_map.go +++ b/internal/blobtesting/object_locking_map.go @@ -3,7 +3,7 @@ package blobtesting import ( "bytes" "context" - "sort" + "slices" "strings" "sync" "time" @@ -237,9 +237,7 @@ func (s *objectLockingMap) ListBlobs(ctx context.Context, prefix blob.ID, callba s.mutex.RUnlock() - sort.Slice(keys, func(i, j int) bool { - return keys[i] < keys[j] - }) + slices.Sort(keys) for _, k := range keys { m, err := s.GetMetadata(ctx, k) diff --git a/internal/cache/content_cache_concurrency_test.go b/internal/cache/content_cache_concurrency_test.go index bdca2e48489..d144f5d9bf0 100644 --- a/internal/cache/content_cache_concurrency_test.go +++ b/internal/cache/content_cache_concurrency_test.go @@ -178,16 +178,12 @@ func testGetContentForDifferentContentIDsExecutesInParallel(t *testing.T, newCac var wg sync.WaitGroup for i := range 20 { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { var tmp gather.WriteBuffer defer tmp.Close() dataCache.GetContent(ctx, fmt.Sprintf("c%v", i), "blob1", int64(i), 1, &tmp) - }() + }) } wg.Wait() @@ -226,16 +222,12 @@ func testGetContentForDifferentBlobsExecutesInParallel(t *testing.T, newCache ne var wg sync.WaitGroup for i := range 20 { - wg.Add(1) - - go func() { - defer wg.Done() - + 
wg.Go(func() { var tmp gather.WriteBuffer defer tmp.Close() dataCache.GetContent(ctx, fmt.Sprintf("c%v", i), blob.ID(fmt.Sprintf("blob%v", i)), int64(i), 1, &tmp) - }() + }) } wg.Wait() @@ -273,16 +265,12 @@ func testGetContentRaceFetchesOnce(t *testing.T, newCache newContentCacheFunc) { var wg sync.WaitGroup for range 20 { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { var tmp gather.WriteBuffer defer tmp.Close() dataCache.GetContent(ctx, "c1", "blob1", 0, 1, &tmp) - }() + }) } wg.Wait() diff --git a/internal/cache/content_cache_test.go b/internal/cache/content_cache_test.go index 0bc44598368..f47ff2b93b2 100644 --- a/internal/cache/content_cache_test.go +++ b/internal/cache/content_cache_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "slices" - "sort" "sync" "testing" "time" @@ -345,9 +344,7 @@ func verifyStorageContentList(t *testing.T, st cache.Storage, expectedContents . return nil })) - sort.Slice(foundContents, func(i, j int) bool { - return foundContents[i] < foundContents[j] - }) + slices.Sort(foundContents) assert.Equal(t, expectedContents, foundContents, "unexpected content list") } diff --git a/internal/cache/persistent_lru_cache.go b/internal/cache/persistent_lru_cache.go index bebb35a46f6..489843ac311 100644 --- a/internal/cache/persistent_lru_cache.go +++ b/internal/cache/persistent_lru_cache.go @@ -100,6 +100,7 @@ func (c *PersistentCache) getPartialCacheHit(ctx context.Context, key string, le c.reportHitBytes(int64(output.Length())) mtime, err := c.cacheStorage.TouchBlob(ctx, blob.ID(key), c.sweep.TouchThreshold) + c.listCacheMutex.Lock() defer c.listCacheMutex.Unlock() diff --git a/internal/clock/now_prod.go b/internal/clock/now_prod.go index 01548c11b4b..c6f02e99c86 100644 --- a/internal/clock/now_prod.go +++ b/internal/clock/now_prod.go @@ -1,5 +1,4 @@ //go:build !testing -// +build !testing package clock diff --git a/internal/clock/now_testing.go b/internal/clock/now_testing.go index f0e6d25881d..a900b545849 100644 --- 
a/internal/clock/now_testing.go +++ b/internal/clock/now_testing.go @@ -1,5 +1,4 @@ //go:build testing -// +build testing package clock diff --git a/internal/contentlog/contentlog_benchmark_test.go b/internal/contentlog/contentlog_benchmark_test.go new file mode 100644 index 00000000000..a87e3b3cde7 --- /dev/null +++ b/internal/contentlog/contentlog_benchmark_test.go @@ -0,0 +1,42 @@ +package contentlog_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" + "github.com/kopia/kopia/repo/content/index" +) + +func BenchmarkLogger(b *testing.B) { + ctx := context.Background() + + cid, err := index.ParseID("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef") + require.NoError(b, err) + + // context params + ctx = contentlog.WithParams(ctx, + logparam.String("service", "test-service"), + logparam.Int("version", 2), + contentparam.ContentID("cid", cid), + ) + + // logger params + l := contentlog.NewLogger(func(data []byte) {}, + logparam.String("lservice", "test-service"), + ) + + for b.Loop() { + contentlog.Log(ctx, l, "baz") + contentlog.Log1(ctx, l, "baz", logparam.String("arg1", "123\x01foobar")) + contentlog.Log2(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456)) + contentlog.Log3(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789)) + contentlog.Log4(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112)) + contentlog.Log5(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112), logparam.Int("arg5", 123456)) + contentlog.Log6(ctx, l, "baz", logparam.Int("arg1", 123), logparam.Int("arg2", 456), logparam.Int("arg3", 789), logparam.Int("arg4", 101112), logparam.Int("arg5", 
123456), logparam.Int("arg6", 123456)) + } +} diff --git a/internal/contentlog/contentlog_json_writer.go b/internal/contentlog/contentlog_json_writer.go new file mode 100644 index 00000000000..54bf5cad4f2 --- /dev/null +++ b/internal/contentlog/contentlog_json_writer.go @@ -0,0 +1,376 @@ +// Package contentlog provides a JSON writer that can write JSON to a buffer +// without any memory allocations and a Logger that can write log entries +// with strongly-typed parameters. +package contentlog + +import ( + "strconv" + "time" + + "github.com/kopia/kopia/internal/freepool" +) + +var commaSeparator = []byte(",") + +const ( + decimal = 10 + hexadecimal = 16 +) + +// JSONWriter is a writer that can write JSON to a buffer +// without any memory allocations. +type JSONWriter struct { + buf []byte + separator []byte + + separatorStack [][]byte +} + +// ParamWriter must be implemented by all types that write a parameter ("key":value) to the JSON writer. +type ParamWriter interface { + WriteValueTo(jw *JSONWriter) +} + +func (jw *JSONWriter) beforeField(key string) { + jw.buf = append(jw.buf, jw.separator...) + jw.buf = append(jw.buf, '"') + jw.buf = append(jw.buf, key...) + jw.buf = append(jw.buf, '"', ':') + jw.separator = commaSeparator +} + +// RawJSONField writes a raw JSON field where the value is already in JSON format. +func (jw *JSONWriter) RawJSONField(key string, value []byte) { + jw.beforeField(key) + jw.buf = append(jw.buf, value...) +} + +func (jw *JSONWriter) beforeElement() { + jw.buf = append(jw.buf, jw.separator...) + jw.separator = commaSeparator +} + +// object + +// BeginObjectField starts an object field. +func (jw *JSONWriter) BeginObjectField(key string) { + jw.beforeField(key) + jw.BeginObject() +} + +// BeginObject starts an object. +func (jw *JSONWriter) BeginObject() { + jw.buf = append(jw.buf, '{') + jw.separatorStack = append(jw.separatorStack, commaSeparator) + jw.separator = nil +} + +// EndObject ends an object. 
+func (jw *JSONWriter) EndObject() { + jw.separator = jw.separatorStack[len(jw.separatorStack)-1] + jw.separatorStack = jw.separatorStack[:len(jw.separatorStack)-1] + jw.buf = append(jw.buf, '}') +} + +// list + +// BeginListField starts a list field. +func (jw *JSONWriter) BeginListField(key string) { + jw.beforeField(key) + jw.BeginList() +} + +// BeginList starts a list. +func (jw *JSONWriter) BeginList() { + jw.buf = append(jw.buf, '[') + jw.separatorStack = append(jw.separatorStack, commaSeparator) + jw.separator = nil +} + +// EndList ends a list. +func (jw *JSONWriter) EndList() { + jw.separator = jw.separatorStack[len(jw.separatorStack)-1] + jw.separatorStack = jw.separatorStack[:len(jw.separatorStack)-1] + jw.buf = append(jw.buf, ']') +} + +// string + +// StringField writes a string field. +func (jw *JSONWriter) StringField(key, value string) { + jw.beforeField(key) + jw.stringValue(value) +} + +// StringElement writes a string element. +func (jw *JSONWriter) StringElement(value string) { + jw.beforeElement() + jw.stringValue(value) +} + +func (jw *JSONWriter) stringValue(value string) { + jw.buf = append(jw.buf, '"') + + for i := range len(value) { + c := value[i] + + //nolint:gocritic + if c < ' ' { + switch c { + case '\b': + jw.buf = append(jw.buf, '\\', 'b') + case '\f': + jw.buf = append(jw.buf, '\\', 'f') + case '\n': + jw.buf = append(jw.buf, '\\', 'n') + case '\r': + jw.buf = append(jw.buf, '\\', 'r') + case '\t': + jw.buf = append(jw.buf, '\\', 't') + default: + // Escape as unicode \u00XX + jw.buf = append(jw.buf, '\\', 'u', '0', '0') + + var hexBuf [8]byte + + hex := strconv.AppendInt(hexBuf[:0], int64(c), hexadecimal) + if len(hex) < 2 { //nolint:mnd + jw.buf = append(jw.buf, '0') + } + + jw.buf = append(jw.buf, hex...) 
+ } + } else if c == '"' { + jw.buf = append(jw.buf, '\\', '"') + } else if c == '\\' { + jw.buf = append(jw.buf, '\\', '\\') + } else { + jw.buf = append(jw.buf, c) + } + } + + jw.buf = append(jw.buf, '"') +} + +// null + +// NullElement writes a null element. +func (jw *JSONWriter) NullElement() { + jw.beforeElement() + jw.nullValue() +} + +// NullField writes a null field. +func (jw *JSONWriter) NullField(key string) { + jw.beforeField(key) + jw.nullValue() +} + +func (jw *JSONWriter) nullValue() { + jw.buf = append(jw.buf, "null"...) +} + +// boolean + +// BoolField writes a boolean field. +func (jw *JSONWriter) BoolField(key string, value bool) { + jw.beforeField(key) + jw.boolValue(value) +} + +// BoolElement writes a boolean element. +func (jw *JSONWriter) BoolElement(value bool) { + jw.beforeElement() + jw.boolValue(value) +} + +func (jw *JSONWriter) boolValue(value bool) { + if value { + jw.buf = append(jw.buf, "true"...) + } else { + jw.buf = append(jw.buf, "false"...) + } +} + +// signed integers + +// IntField writes an int field. +func (jw *JSONWriter) IntField(key string, value int) { jw.Int64Field(key, int64(value)) } + +// IntElement writes an int element. +func (jw *JSONWriter) IntElement(value int) { jw.Int64Element(int64(value)) } + +// Int8Field writes an int8 field. +func (jw *JSONWriter) Int8Field(key string, value int8) { jw.Int64Field(key, int64(value)) } + +// Int8Element writes an int8 element. +func (jw *JSONWriter) Int8Element(value int8) { jw.Int64Element(int64(value)) } + +// Int16Field writes an int16 field. +func (jw *JSONWriter) Int16Field(key string, value int16) { jw.Int64Field(key, int64(value)) } + +// Int16Element writes an int16 element. +func (jw *JSONWriter) Int16Element(value int16) { jw.Int64Element(int64(value)) } + +// Int32Field writes an int32 field. +func (jw *JSONWriter) Int32Field(key string, value int32) { jw.Int64Field(key, int64(value)) } + +// Int32Element writes an int32 element. 
+func (jw *JSONWriter) Int32Element(value int32) { jw.Int64Element(int64(value)) } + +// Int64Field writes an int64 field. +func (jw *JSONWriter) Int64Field(key string, value int64) { + jw.beforeField(key) + jw.int64Value(value) +} + +// Int64Element writes an int64 element. +func (jw *JSONWriter) Int64Element(value int64) { + jw.beforeElement() + jw.int64Value(value) +} + +func (jw *JSONWriter) int64Value(value int64) { + var buf [64]byte + + jw.buf = append(jw.buf, strconv.AppendInt(buf[:0], value, decimal)...) +} + +// unsigned integers + +// UIntField writes a uint field. +func (jw *JSONWriter) UIntField(key string, value uint) { jw.UInt64Field(key, uint64(value)) } + +// UIntElement writes a uint element. +func (jw *JSONWriter) UIntElement(value uint) { jw.UInt64Element(uint64(value)) } + +// UInt8Field writes a uint8 field. +func (jw *JSONWriter) UInt8Field(key string, value uint8) { jw.UInt64Field(key, uint64(value)) } + +// UInt8Element writes a uint8 element. +func (jw *JSONWriter) UInt8Element(value uint8) { jw.UInt64Element(uint64(value)) } + +// UInt16Field writes a uint16 field. +func (jw *JSONWriter) UInt16Field(key string, value uint16) { jw.UInt64Field(key, uint64(value)) } + +// UInt16Element writes a uint16 element. +func (jw *JSONWriter) UInt16Element(value uint16) { jw.UInt64Element(uint64(value)) } + +// UInt32Field writes a uint32 field. +func (jw *JSONWriter) UInt32Field(key string, value uint32) { jw.UInt64Field(key, uint64(value)) } + +// UInt32Element writes a uint32 element. +func (jw *JSONWriter) UInt32Element(value uint32) { jw.UInt64Element(uint64(value)) } + +// UInt64Field writes a uint64 field. +func (jw *JSONWriter) UInt64Field(key string, value uint64) { + jw.beforeField(key) + jw.uint64Value(value) +} + +// UInt64Element writes a uint64 element. 
+func (jw *JSONWriter) UInt64Element(value uint64) { + jw.beforeElement() + jw.uint64Value(value) +} + +func (jw *JSONWriter) uint64Value(value uint64) { + var buf [64]byte + + jw.buf = append(jw.buf, strconv.AppendUint(buf[:0], value, decimal)...) +} + +// error + +// ErrorField writes an error field. +func (jw *JSONWriter) ErrorField(key string, value error) { + if value == nil { + jw.NullField(key) + } else { + jw.StringField(key, value.Error()) + } +} + +// time + +// TimeField writes a time field. +func (jw *JSONWriter) TimeField(key string, value time.Time) { + jw.beforeField(key) + jw.timeValue(value) +} + +// appendPaddedInt appends an integer with zero-padding to the buffer. +func (jw *JSONWriter) appendPaddedInt(value int64, width int) { + var numBuf [64]byte + + numStr := strconv.AppendInt(numBuf[:0], value, decimal) + numLen := len(numStr) + + // Add leading zeros + for i := numLen; i < width; i++ { + jw.buf = append(jw.buf, '0') + } + + jw.buf = append(jw.buf, numStr...) +} + +// TimeElement writes a time element. 
+func (jw *JSONWriter) TimeElement(value time.Time) { + jw.beforeElement() + jw.timeValue(value) +} + +func (jw *JSONWriter) timeValue(value time.Time) { + utc := value.UTC() + + jw.buf = append(jw.buf, '"') + jw.appendPaddedInt(int64(utc.Year()), 4) //nolint:mnd + jw.buf = append(jw.buf, '-') + jw.appendPaddedInt(int64(utc.Month()), 2) //nolint:mnd + jw.buf = append(jw.buf, '-') + jw.appendPaddedInt(int64(utc.Day()), 2) //nolint:mnd + jw.buf = append(jw.buf, 'T') + jw.appendPaddedInt(int64(utc.Hour()), 2) //nolint:mnd + jw.buf = append(jw.buf, ':') + jw.appendPaddedInt(int64(utc.Minute()), 2) //nolint:mnd + jw.buf = append(jw.buf, ':') + jw.appendPaddedInt(int64(utc.Second()), 2) //nolint:mnd + jw.buf = append(jw.buf, '.') + jw.appendPaddedInt(int64(utc.Nanosecond()/1000), 6) //nolint:mnd + jw.buf = append(jw.buf, 'Z', '"') +} + +var freeJSONWriterPool = freepool.New( + func() *JSONWriter { + return &JSONWriter{ + buf: make([]byte, 0, 1024), //nolint:mnd + separatorStack: make([][]byte, 0, 10), //nolint:mnd + separator: nil, + } + }, func(jw *JSONWriter) { + jw.buf = jw.buf[:0] + jw.separatorStack = jw.separatorStack[:0] + jw.separator = nil + }) + +// Release releases the JSON writer back to the pool. +func (jw *JSONWriter) Release() { + freeJSONWriterPool.Return(jw) +} + +// Result returns the internal buffer for testing purposes. +// This should only be used in tests. +func (jw *JSONWriter) Result() []byte { + return jw.buf +} + +// NewJSONWriter creates a new JSON writer. +func NewJSONWriter() *JSONWriter { + return freeJSONWriterPool.Take() +} + +// GetBufferForTesting returns the internal buffer for testing purposes. +// This should only be used in tests. 
+func (jw *JSONWriter) GetBufferForTesting() []byte { + return jw.buf +} diff --git a/internal/contentlog/contentlog_json_writer_test.go b/internal/contentlog/contentlog_json_writer_test.go new file mode 100644 index 00000000000..27f0835444b --- /dev/null +++ b/internal/contentlog/contentlog_json_writer_test.go @@ -0,0 +1,804 @@ +package contentlog + +import ( + "encoding/json" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestEntryWriter_EmptyObject(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.EndObject() + + require.Equal(t, "{}", string(jw.buf)) +} + +func TestEntryWriter_AllTypes(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.Int64Field("k1", 123) + jw.Int64Field("a1", 1234) + jw.Int64Field("b1", 12345) + jw.BoolField("b2", true) + jw.BoolField("b3", false) + + jw.BeginObjectField("o1") + jw.Int64Field("k2", 123456) + jw.EndObject() + + jw.BeginObjectField("o2") + jw.EndObject() + + jw.BeginListField("l1") + jw.StringElement("aaa") + jw.StringElement("bbb") + jw.StringElement("ccc") + jw.EndList() + + jw.BeginListField("mixedList") + jw.StringElement("aaa") + jw.Int64Element(123) + jw.NullElement() + jw.BoolElement(true) + jw.EndList() + + jw.BeginObjectField("o3") + jw.StringField("v", "xxx") + jw.StringField("someUnicode", "😄") + jw.EndObject() + + jw.UInt64Field("u1", 123456) + jw.StringField("s", "hello\nworld\r\t\b\f") + jw.EndObject() + + var v map[string]any + + json.NewEncoder(os.Stdout).Encode("😄") + + t.Logf("buf: %s", string(jw.buf)) + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "k1": 123.0, + "a1": 1234.0, + "b1": 12345.0, + "b2": true, + "b3": false, + "u1": 123456.0, + "mixedList": []any{"aaa", 123.0, nil, true}, + "l1": []any{"aaa", "bbb", "ccc"}, + "o2": map[string]any{}, + "o3": map[string]any{ + "v": "xxx", + "someUnicode": "😄", + }, + "o1": map[string]any{ + "k2": 123456.0, + }, + "s": 
"hello\nworld\r\t\b\f", + }, v) +} + +func TestJSONWriter_IntTypes(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.IntField("intField", 42) + jw.Int8Field("int8Field", 8) + jw.Int16Field("int16Field", 16) + jw.Int32Field("int32Field", 32) + jw.Int64Field("int64Field", 64) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "intField": 42.0, + "int8Field": 8.0, + "int16Field": 16.0, + "int32Field": 32.0, + "int64Field": 64.0, + }, v) +} + +func TestJSONWriter_IntElements(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginList() + jw.IntElement(1) + jw.Int8Element(2) + jw.Int16Element(3) + jw.Int32Element(4) + jw.Int64Element(5) + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, []any{1.0, 2.0, 3.0, 4.0, 5.0}, v) +} + +func TestJSONWriter_UIntTypes(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.UIntField("uintField", 100) + jw.UInt8Field("uint8Field", 200) + jw.UInt16Field("uint16Field", 300) + jw.UInt32Field("uint32Field", 400) + jw.UInt64Field("uint64Field", 500) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "uintField": 100.0, + "uint8Field": 200.0, + "uint16Field": 300.0, + "uint32Field": 400.0, + "uint64Field": 500.0, + }, v) +} + +func TestJSONWriter_UIntElements(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginList() + jw.UIntElement(10) + jw.UInt8Element(20) + jw.UInt16Element(30) + jw.UInt32Element(40) + jw.UInt64Element(50) + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, []any{10.0, 20.0, 30.0, 40.0, 50.0}, v) +} + +func TestJSONWriter_NullField(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.NullField("nullField") + jw.EndObject() + + 
var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "nullField": nil, + }, v) +} + +func TestJSONWriter_ErrorField(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.ErrorField("nilError", nil) + jw.ErrorField("realError", &testError{msg: "test error message"}) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "nilError": nil, + "realError": "test error message", + }, v) +} + +type testError struct { + msg string +} + +func (e *testError) Error() string { + return e.msg +} + +func TestJSONWriter_TimeField(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + testTime := time.Date(2023, 12, 25, 15, 30, 45, 123456789, time.UTC) + + jw.BeginObject() + jw.TimeField("timeField", testTime) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "timeField": "2023-12-25T15:30:45.123456Z", + }, v) +} + +func TestJSONWriter_TimeElement(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + testTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC) + + jw.BeginList() + jw.TimeElement(testTime) + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, []any{"2023-01-01T00:00:00.000000Z"}, v) +} + +func TestJSONWriter_BeginList(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginList() + jw.StringElement("first") + jw.StringElement("second") + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, []any{"first", "second"}, v) +} + +func TestJSONWriter_EdgeCases(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.StringField("emptyString", "") + jw.StringField("unicodeString", "Hello 世界 🌍") + jw.StringField("quotesAndSlashes", "quoted and backslash") + jw.Int64Field("zero", 0) 
+ jw.Int64Field("negative", -1) + jw.Int64Field("maxInt64", 9223372036854775807) + jw.Int64Field("minInt64", -9223372036854775808) + jw.UInt64Field("maxUInt64", 18446744073709551615) + jw.BoolField("trueValue", true) + jw.BoolField("falseValue", false) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, map[string]any{ + "emptyString": "", + "unicodeString": "Hello 世界 🌍", + "quotesAndSlashes": "quoted and backslash", + "zero": 0.0, + "negative": -1.0, + "maxInt64": 9223372036854775807.0, + "minInt64": -9223372036854775808.0, + "maxUInt64": 18446744073709551615.0, + "trueValue": true, + "falseValue": false, + }, v) +} + +func TestJSONWriter_StringEscaping(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.StringField("quotes", `"quoted string"`) + jw.StringField("backslashes", `path\to\file`) + jw.StringField("mixed", `"quoted" and \backslash\`) + jw.StringField("newline", "line1\nline2") + jw.StringField("carriageReturn", "line1\rline2") + jw.StringField("tab", "col1\tcol2") + jw.StringField("backspace", "text\btext") + jw.StringField("formFeed", "text") + jw.StringField("allControlChars", "a\bb\fc\nd\re\tf") + jw.StringField("unicode", "Hello 世界 🌍") + jw.StringField("empty", "") + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify the JSON can be parsed and contains expected values + require.Equal(t, `"quoted string"`, v["quotes"]) + require.Equal(t, `path\to\file`, v["backslashes"]) + require.Equal(t, `"quoted" and \backslash\`, v["mixed"]) + require.Equal(t, "line1\nline2", v["newline"]) + require.Equal(t, "line1\rline2", v["carriageReturn"]) + require.Equal(t, "col1\tcol2", v["tab"]) + require.Equal(t, "text\btext", v["backspace"]) + require.Equal(t, "text", v["formFeed"]) + require.Equal(t, "a\bb\fc\nd\re\tf", v["allControlChars"]) + require.Equal(t, "Hello 世界 🌍", v["unicode"]) + require.Empty(t, v["empty"]) +} + 
+func TestJSONWriter_StringEscapingElements(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginList() + jw.StringElement(`"quoted"`) + jw.StringElement(`\backslash\`) + jw.StringElement("mixed\n\t\r") + jw.StringElement("unicode: 世界") + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, []any{`"quoted"`, `\backslash\`, "mixed\n\t\r", "unicode: 世界"}, v) +} + +func TestJSONWriter_StringEscapingEdgeCases(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.StringField("onlyQuote", `"`) + jw.StringField("onlyBackslash", `\`) + jw.StringField("onlyNewline", "\n") + jw.StringField("onlyTab", "\t") + jw.StringField("multipleQuotes", `""""`) + jw.StringField("multipleBackslashes", `\\\\`) + jw.StringField("quoteBackslash", `"\`) + jw.StringField("backslashQuote", `\"`) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + require.Equal(t, `"`, v["onlyQuote"]) + require.Equal(t, `\`, v["onlyBackslash"]) + require.Equal(t, "\n", v["onlyNewline"]) + require.Equal(t, "\t", v["onlyTab"]) + require.Equal(t, `""""`, v["multipleQuotes"]) + require.Equal(t, `\\\\`, v["multipleBackslashes"]) + require.Equal(t, `"\`, v["quoteBackslash"]) + require.Equal(t, `\"`, v["backslashQuote"]) +} + +func TestJSONWriter_StringEscapingRawOutput(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.StringField("quotes", `"test"`) + jw.StringField("backslashes", `\test\`) + jw.StringField("mixed", `"quoted" and \backslash\`) + jw.EndObject() + + jsonOutput := string(jw.buf) + + // Verify that quotes are properly escaped in the raw JSON + require.Contains(t, jsonOutput, `\"test\"`) + require.Contains(t, jsonOutput, `\\test\\`) + require.Contains(t, jsonOutput, `\"quoted\" and \\backslash\\`) + + // Verify the JSON is valid + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + require.Equal(t, 
`"test"`, v["quotes"]) + require.Equal(t, `\test\`, v["backslashes"]) + require.Equal(t, `"quoted" and \backslash\`, v["mixed"]) +} + +func TestJSONWriter_StringEscapingControlChars(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + // Test various control characters + jw.StringField("nullChar", "\x00") + jw.StringField("bellChar", "\x07") + jw.StringField("verticalTab", "\x0b") + jw.StringField("escapeChar", "\x1b") + jw.StringField("delChar", "\x7f") + jw.EndObject() + + jsonOutput := string(jw.buf) + t.Logf("Control chars JSON output: %q", jsonOutput) + + // Control characters < ' ' without a dedicated short escape (\b, \f, \n, \r, \t) are emitted as \u00XX sequences, + // so the output should remain valid JSON even for characters like \x00, \x07, etc. + // This test documents that behavior for a sample of control characters. + var v map[string]any + + if err := json.Unmarshal(jw.buf, &v); err != nil { + t.Logf("Unexpected error from control characters: %v", err) + // NOTE(review): unmarshal is expected to succeed; an error here would indicate an escaping regression + require.Error(t, err) + } else { + // Verify the control characters round-trip through encode/decode + require.Equal(t, "\x00", v["nullChar"]) + require.Equal(t, "\x07", v["bellChar"]) + require.Equal(t, "\x0b", v["verticalTab"]) + require.Equal(t, "\x1b", v["escapeChar"]) + require.Equal(t, "\x7f", v["delChar"]) + } +} + +func TestJSONWriter_StringEscapingProperlyHandledControlChars(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + // Test control characters that ARE properly handled by JSONWriter + jw.StringField("backspace", "\b") + jw.StringField("formFeed", "\f") + jw.StringField("newline", "\n") + jw.StringField("carriageReturn", "\r") + jw.StringField("tab", "\t") + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // These should be properly escaped and work correctly + require.Equal(t, "\b", v["backspace"]) + 
require.Equal(t, "\f", v["formFeed"]) + require.Equal(t, "\n", v["newline"]) + require.Equal(t, "\r", v["carriageReturn"]) + require.Equal(t, "\t", v["tab"]) + + // Verify the raw JSON contains proper escape sequences + jsonOutput := string(jw.buf) + t.Logf("JSON output: %q", jsonOutput) + + // The JSON should be valid and contain the escaped control characters + // We can see from the output that it contains \"\\b\" etc. + require.Contains(t, jsonOutput, `backspace`) + require.Contains(t, jsonOutput, `formFeed`) + require.Contains(t, jsonOutput, `newline`) + require.Contains(t, jsonOutput, `carriageReturn`) + require.Contains(t, jsonOutput, `tab`) +} + +func TestJSONWriter_StringEscapingAllControlCharacters(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Test all control characters from 0x00 to 0x1F + controlChars := map[string]string{ + "null": "\x00", // NUL + "startOfHeading": "\x01", // SOH + "startOfText": "\x02", // STX + "endOfText": "\x03", // ETX + "endOfTransmit": "\x04", // EOT + "enquiry": "\x05", // ENQ + "acknowledge": "\x06", // ACK + "bell": "\x07", // BEL + "backspace": "\x08", // BS - handled specially + "tab": "\x09", // TAB - handled specially + "lineFeed": "\x0a", // LF - handled specially + "verticalTab": "\x0b", // VT + "formFeed": "\x0c", // FF - handled specially + "carriageReturn": "\x0d", // CR - handled specially + "shiftOut": "\x0e", // SO + "shiftIn": "\x0f", // SI + "dataLinkEscape": "\x10", // DLE + "deviceCtrl1": "\x11", // DC1 + "deviceCtrl2": "\x12", // DC2 + "deviceCtrl3": "\x13", // DC3 + "deviceCtrl4": "\x14", // DC4 + "negativeAck": "\x15", // NAK + "synchronousIdle": "\x16", // SYN + "endOfTransBlock": "\x17", // ETB + "cancel": "\x18", // CAN + "endOfMedium": "\x19", // EM + "substitute": "\x1a", // SUB + "escape": "\x1b", // ESC + "fileSeparator": "\x1c", // FS + "groupSeparator": "\x1d", // GS + "recordSeparator": "\x1e", // RS + "unitSeparator": "\x1f", // US + } + + // Add all 
control characters as fields + for name, char := range controlChars { + jw.StringField(name, char) + } + + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify all control characters are properly handled + for name, expectedChar := range controlChars { + require.Equal(t, expectedChar, v[name], "Control character %s (0x%02x) not properly handled", name, expectedChar[0]) + } + + // Verify the raw JSON contains proper Unicode escape sequences for non-special control chars + jsonOutput := string(jw.buf) + t.Logf("Control chars JSON output: %q", jsonOutput) + + // Check that special control characters use their standard escape sequences + require.Contains(t, jsonOutput, `\b`) // backspace + require.Contains(t, jsonOutput, `\t`) // tab + require.Contains(t, jsonOutput, `\n`) // line feed + require.Contains(t, jsonOutput, `\f`) // form feed + require.Contains(t, jsonOutput, `\r`) // carriage return + + // Check that other control characters use Unicode escape sequences + require.Contains(t, jsonOutput, `\u0000`) // null + require.Contains(t, jsonOutput, `\u0001`) // start of heading + require.Contains(t, jsonOutput, `\u0007`) // bell + require.Contains(t, jsonOutput, `\u000b`) // vertical tab + require.Contains(t, jsonOutput, `\u001b`) // escape + require.Contains(t, jsonOutput, `\u001f`) // unit separator +} + +func TestJSONWriter_StringEscapingControlCharactersInElements(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginList() + + // Test control characters as list elements + jw.StringElement("\x00") // null + jw.StringElement("\x07") // bell + jw.StringElement("\x08") // backspace + jw.StringElement("\x09") // tab + jw.StringElement("\x0a") // line feed + jw.StringElement("\x0c") // form feed + jw.StringElement("\x0d") // carriage return + jw.StringElement("\x1b") // escape + jw.StringElement("\x1f") // unit separator + jw.EndList() + + var v []any + + require.NoError(t, json.Unmarshal(jw.buf, 
&v)) + + expected := []any{ + "\x00", // null + "\x07", // bell + "\x08", // backspace + "\x09", // tab + "\x0a", // line feed + "\x0c", // form feed + "\x0d", // carriage return + "\x1b", // escape + "\x1f", // unit separator + } + + require.Equal(t, expected, v) + + // Verify the raw JSON contains proper escape sequences + jsonOutput := string(jw.buf) + t.Logf("Control chars in elements JSON output: %q", jsonOutput) + + require.Contains(t, jsonOutput, `\u0000`) // null + require.Contains(t, jsonOutput, `\u0007`) // bell + require.Contains(t, jsonOutput, `\b`) // backspace + require.Contains(t, jsonOutput, `\t`) // tab + require.Contains(t, jsonOutput, `\n`) // line feed + require.Contains(t, jsonOutput, `\f`) // form feed + require.Contains(t, jsonOutput, `\r`) // carriage return + require.Contains(t, jsonOutput, `\u001b`) // escape + require.Contains(t, jsonOutput, `\u001f`) // unit separator +} + +func TestJSONWriter_StringEscapingMixedControlCharacters(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Test strings with mixed control characters and regular characters + jw.StringField("mixed1", "hello\x00world\x07test") + jw.StringField("mixed2", "start\x08\x09\x0a\x0c\x0dend") + jw.StringField("mixed3", "text\x1b\x1c\x1d\x1e\x1fmore") + jw.StringField("mixed4", "a\x00b\x01c\x02d\x03e") + jw.StringField("mixed5", "quotes\"and\\backslash\x00control") + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify the parsed values match the original strings + require.Equal(t, "hello\x00world\x07test", v["mixed1"]) + require.Equal(t, "start\x08\x09\x0a\x0c\x0dend", v["mixed2"]) + require.Equal(t, "text\x1b\x1c\x1d\x1e\x1fmore", v["mixed3"]) + require.Equal(t, "a\x00b\x01c\x02d\x03e", v["mixed4"]) + require.Equal(t, "quotes\"and\\backslash\x00control", v["mixed5"]) + + // Verify the raw JSON contains proper escape sequences + jsonOutput := string(jw.buf) + t.Logf("Mixed control chars 
JSON output: %q", jsonOutput) + + // Check for Unicode escapes + require.Contains(t, jsonOutput, `\u0000`) // null character + require.Contains(t, jsonOutput, `\u0007`) // bell + require.Contains(t, jsonOutput, `\u0001`) // start of heading + require.Contains(t, jsonOutput, `\u0002`) // start of text + require.Contains(t, jsonOutput, `\u0003`) // end of text + require.Contains(t, jsonOutput, `\u001b`) // escape + require.Contains(t, jsonOutput, `\u001c`) // file separator + require.Contains(t, jsonOutput, `\u001d`) // group separator + require.Contains(t, jsonOutput, `\u001e`) // record separator + require.Contains(t, jsonOutput, `\u001f`) // unit separator + + // Check for standard escapes + require.Contains(t, jsonOutput, `\b`) // backspace + require.Contains(t, jsonOutput, `\t`) // tab + require.Contains(t, jsonOutput, `\n`) // line feed + require.Contains(t, jsonOutput, `\f`) // form feed + require.Contains(t, jsonOutput, `\r`) // carriage return + + // Check for quote and backslash escapes + require.Contains(t, jsonOutput, `\"`) // escaped quote + require.Contains(t, jsonOutput, `\\`) // escaped backslash +} + +func TestJSONWriter_StringEscapingBoundaryValues(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Test boundary values around control character range + jw.StringField("space", " ") // 0x20 - first non-control character + jw.StringField("del", "\x7f") // 0x7F - DEL character (not in 0x00-0x1F range) + jw.StringField("lastControl", "\x1f") // 0x1F - last control character + jw.StringField("firstNonControl", " ") // 0x20 - first non-control character + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify the parsed values + require.Equal(t, " ", v["space"]) + require.Equal(t, "\x7f", v["del"]) // DEL should not be escaped as Unicode + require.Equal(t, "\x1f", v["lastControl"]) // Last control char should be escaped + require.Equal(t, " ", v["firstNonControl"]) + + 
// Verify the raw JSON + jsonOutput := string(jw.buf) + t.Logf("Boundary values JSON output: %q", jsonOutput) + + // Space (0x20) should not be escaped + require.Contains(t, jsonOutput, `"space":" "`) + require.Contains(t, jsonOutput, `"firstNonControl":" "`) + + // DEL (0x7F) should not be escaped as Unicode (it's outside 0x00-0x1F range) + // The DEL character is output as-is in the JSON (not escaped) + require.Contains(t, jsonOutput, `"del":"`+string('\x7f')+`"`) + + // Last control character (0x1F) should be escaped as Unicode + require.Contains(t, jsonOutput, `\u001f`) +} + +func TestJSONWriter_StringEscapingUnicodeEscapeFormat(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Test specific control characters to verify Unicode escape format + jw.StringField("null", "\x00") + jw.StringField("bell", "\x07") + jw.StringField("verticalTab", "\x0b") + jw.StringField("escape", "\x1b") + jw.StringField("unitSeparator", "\x1f") + jw.EndObject() + + jsonOutput := string(jw.buf) + t.Logf("Unicode escape format JSON output: %q", jsonOutput) + + // Verify exact Unicode escape format: \u00XX where XX is the hex value + require.Contains(t, jsonOutput, `\u0000`) // null (0x00) + require.Contains(t, jsonOutput, `\u0007`) // bell (0x07) + require.Contains(t, jsonOutput, `\u000b`) // vertical tab (0x0B) + require.Contains(t, jsonOutput, `\u001b`) // escape (0x1B) + require.Contains(t, jsonOutput, `\u001f`) // unit separator (0x1F) + + // Verify the format is exactly 6 characters: \u + 4 hex digits + // This is a more specific test to ensure the format is correct + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify the values are correctly parsed back + require.Equal(t, "\x00", v["null"]) + require.Equal(t, "\x07", v["bell"]) + require.Equal(t, "\x0b", v["verticalTab"]) + require.Equal(t, "\x1b", v["escape"]) + require.Equal(t, "\x1f", v["unitSeparator"]) +} + +func 
TestJSONWriter_StringEscapingPerformanceWithManyControlChars(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Create a string with many control characters to test performance + var testString string + for i := range 100 { + testString += string(rune(i % 32)) // Mix of control chars 0x00-0x1F + } + + jw.StringField("manyControlChars", testString) + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify the string is correctly handled + require.Equal(t, testString, v["manyControlChars"]) + + // Verify the JSON is valid and contains many Unicode escapes + jsonOutput := string(jw.buf) + t.Logf("Performance test JSON output length: %d", len(jsonOutput)) + + // Should contain many Unicode escape sequences + require.Contains(t, jsonOutput, `\u0000`) + require.Contains(t, jsonOutput, `\u0001`) + require.Contains(t, jsonOutput, `\u000f`) + require.Contains(t, jsonOutput, `\u001f`) +} + +func TestJSONWriter_StringEscapingEmptyAndSingleChar(t *testing.T) { + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + + // Test edge cases with empty strings and single control characters + jw.StringField("empty", "") + jw.StringField("singleNull", "\x00") + jw.StringField("singleBell", "\x07") + jw.StringField("singleTab", "\x09") + jw.StringField("singleNewline", "\x0a") + jw.EndObject() + + var v map[string]any + + require.NoError(t, json.Unmarshal(jw.buf, &v)) + + // Verify edge cases + require.Empty(t, v["empty"]) + require.Equal(t, "\x00", v["singleNull"]) + require.Equal(t, "\x07", v["singleBell"]) + require.Equal(t, "\x09", v["singleTab"]) + require.Equal(t, "\x0a", v["singleNewline"]) + + // Verify the raw JSON + jsonOutput := string(jw.buf) + t.Logf("Edge cases JSON output: %q", jsonOutput) + + require.Contains(t, jsonOutput, `"empty":""`) + require.Contains(t, jsonOutput, `"singleNull":"\u0000"`) + require.Contains(t, jsonOutput, `"singleBell":"\u0007"`) + 
require.Contains(t, jsonOutput, `"singleTab":"\t"`) + require.Contains(t, jsonOutput, `"singleNewline":"\n"`) +} diff --git a/internal/contentlog/contentlog_logger.go b/internal/contentlog/contentlog_logger.go new file mode 100644 index 00000000000..4e211d88342 --- /dev/null +++ b/internal/contentlog/contentlog_logger.go @@ -0,0 +1,164 @@ +package contentlog + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base32" + "strings" + "time" + + "github.com/kopia/kopia/internal/clock" +) + +// WriterTo is a type that can write itself to a JSON writer. +type WriterTo interface { + WriteTo(jw *JSONWriter) +} + +type loggerParamsKeyType string + +const loggerParamsKey loggerParamsKeyType = "loggerParams" + +// Emit writes the entry to the segment writer. +// We are using this particular syntax to avoid allocating an intermediate interface value. +// This allows exactly zero non-amortized allocations in all cases. +func Emit[T WriterTo](ctx context.Context, l *Logger, entry T) { + if l == nil { + return + } + + if l.output == nil { + return + } + + jw := NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + jw.TimeField("t", l.timeFunc()) + + for _, param := range l.params { + param.WriteValueTo(jw) + } + + params := ctx.Value(loggerParamsKey) + if params != nil { + if params, ok := params.([]ParamWriter); ok { + for _, p := range params { + p.WriteValueTo(jw) + } + } + } + + entry.WriteTo(jw) + jw.EndObject() + jw.buf = append(jw.buf, '\n') + + l.output(jw.buf) +} + +// Log logs a message with no parameters. +func Log(ctx context.Context, l *Logger, text string) { + Emit(ctx, l, debugMessageWithParams[voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: text}) +} + +// Log1 logs a message with one parameter. 
+func Log1[T1 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1) { + Emit(ctx, l, debugMessageWithParams[T1, voidParamValue, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1}) +} + +// Log2 logs a message with two parameters. +func Log2[T1, T2 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2) { + Emit(ctx, l, debugMessageWithParams[T1, T2, voidParamValue, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2}) +} + +// Log3 logs a message with three parameters. +func Log3[T1, T2, T3 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3) { + Emit(ctx, l, debugMessageWithParams[T1, T2, T3, voidParamValue, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3}) +} + +// Log4 logs a message with four parameters. +func Log4[T1, T2, T3, T4 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4) { + Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, voidParamValue, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3, v4: value4}) +} + +// Log5 logs a message with five parameters. +func Log5[T1, T2, T3, T4, T5 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4, value5 T5) { + Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, T5, voidParamValue]{text: format, v1: value1, v2: value2, v3: value3, v4: value4, v5: value5}) +} + +// Log6 logs a message with six parameters. +func Log6[T1, T2, T3, T4, T5, T6 ParamWriter](ctx context.Context, l *Logger, format string, value1 T1, value2 T2, value3 T3, value4 T4, value5 T5, value6 T6) { + Emit(ctx, l, debugMessageWithParams[T1, T2, T3, T4, T5, T6]{text: format, v1: value1, v2: value2, v3: value3, v4: value4, v5: value5, v6: value6}) +} + +// WithParams returns a new context that carries the given parameters; loggers include them in every entry logged with that context. 
+func WithParams(ctx context.Context, params ...ParamWriter) context.Context { + existing := ctx.Value(loggerParamsKey) + if existing != nil { + if existing, ok := existing.([]ParamWriter); ok { + params = append(append([]ParamWriter(nil), existing...), params...) + } + } + + return context.WithValue(ctx, loggerParamsKey, params) +} + +type voidParamValue struct{} + +func (e voidParamValue) WriteValueTo(*JSONWriter) {} + +type debugMessageWithParams[T1 ParamWriter, T2 ParamWriter, T3 ParamWriter, T4 ParamWriter, T5 ParamWriter, T6 ParamWriter] struct { + text string + v1 T1 + v2 T2 + v3 T3 + v4 T4 + v5 T5 + v6 T6 +} + +func (e debugMessageWithParams[T1, T2, T3, T4, T5, T6]) WriteTo(jw *JSONWriter) { + jw.StringField("m", e.text) + e.v1.WriteValueTo(jw) + e.v2.WriteValueTo(jw) + e.v3.WriteValueTo(jw) + e.v4.WriteValueTo(jw) + e.v5.WriteValueTo(jw) + e.v6.WriteValueTo(jw) +} + +// Logger is a logger that writes log entries to the output. +type Logger struct { + params []ParamWriter // Parameters to include in each log entry. + output OutputFunc + timeFunc func() time.Time +} + +// OutputFunc is a function that writes the log entry to the output. +type OutputFunc func(data []byte) + +// NewLogger creates a new logger. +func NewLogger(out OutputFunc, params ...ParamWriter) *Logger { + return &Logger{ + params: params, + output: out, + timeFunc: clock.Now, + } +} + +// RandomSpanID generates a random span ID (40 random bits, i.e. 5 bytes, encoded as 8 unpadded base32 characters). +func RandomSpanID() string { + var runID [5]byte + + rand.Read(runID[:]) //nolint:errcheck + + return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(runID[:])) +} + +// HashSpanID hashes a given value into a Span ID. 
+func HashSpanID(v string) string { + spanID := sha256.Sum256([]byte(v)) + return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(spanID[:10])) +} diff --git a/internal/contentlog/contentlog_logger_test.go b/internal/contentlog/contentlog_logger_test.go new file mode 100644 index 00000000000..b97447c6049 --- /dev/null +++ b/internal/contentlog/contentlog_logger_test.go @@ -0,0 +1,527 @@ +package contentlog_test + +import ( + "context" + "encoding/json" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" +) + +func TestNewLogger(t *testing.T) { + t.Run("creates logger with output function", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + require.NotNil(t, logger) + }) + + t.Run("creates logger with parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + params := []contentlog.ParamWriter{ + logparam.String("service", "test"), + logparam.Int("version", 1), + } + + logger := contentlog.NewLogger(outputFunc, params...) + require.NotNil(t, logger) + }) + + t.Run("creates logger with nil output", func(t *testing.T) { + logger := contentlog.NewLogger(nil) + require.NotNil(t, logger) + }) +} + +func TestLog(t *testing.T) { + t.Run("logs message with no parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log(ctx, logger, "test message") + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "test message", logEntry["m"]) + require.Contains(t, logEntry, "t") // timestamp field + }) + + t.Run("logs message with logger parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + params := []contentlog.ParamWriter{ + logparam.String("service", "test-service"), + logparam.Int("version", 2), + } + + logger := contentlog.NewLogger(outputFunc, params...) + ctx := context.Background() + + contentlog.Log(ctx, logger, "test message") + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "test message", logEntry["m"]) + require.Equal(t, "test-service", logEntry["service"]) + require.Equal(t, float64(2), logEntry["version"]) + require.Contains(t, logEntry, "t") // timestamp field + }) + + t.Run("handles nil logger gracefully", func(t *testing.T) { + ctx := context.Background() + // This should not panic + contentlog.Log(ctx, nil, "test message") + }) + + t.Run("handles nil output gracefully", func(t *testing.T) { + logger := contentlog.NewLogger(nil) + ctx := context.Background() + // This should not panic + contentlog.Log(ctx, logger, "test message") + }) +} + +func TestLog1(t *testing.T) { + t.Run("logs message with one parameter", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log1(ctx, logger, "processing item", logparam.String("id", "item-123")) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + }) + + t.Run("logs message with different parameter types", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + testCases := []struct { + name string + message string + param contentlog.ParamWriter + key string + expected any + }{ + {"string", "string param", logparam.String("str", "hello"), "str", "hello"}, + {"int", "int param", logparam.Int("num", 42), "num", float64(42)}, + {"int64", "int64 param", logparam.Int64("big", 9223372036854775807), "big", float64(9223372036854775807)}, + {"bool", "bool param", logparam.Bool("flag", true), "flag", true}, + {"uint64", "uint64 param", logparam.UInt64("unsigned", 18446744073709551615), "unsigned", float64(18446744073709551615)}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + captured = nil // reset + + contentlog.Log1(ctx, logger, tc.message, tc.param) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, tc.message, logEntry["m"]) + require.Equal(t, tc.expected, logEntry[tc.key]) + }) + } + }) +} + +func TestLog2(t *testing.T) { + t.Run("logs message with two parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log2(ctx, logger, "processing item", + logparam.String("id", "item-123"), + logparam.Int("count", 5)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + require.Equal(t, float64(5), logEntry["count"]) + }) +} + +func TestLog3(t *testing.T) { + t.Run("logs message with three parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log3(ctx, logger, "processing item", + logparam.String("id", "item-123"), + logparam.Int("count", 5), + logparam.Bool("active", true)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + require.Equal(t, float64(5), logEntry["count"]) + require.Equal(t, true, logEntry["active"]) + }) +} + +func TestLog4(t *testing.T) { + t.Run("logs message with four parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log4(ctx, logger, "processing item", + logparam.String("id", "item-123"), + logparam.Int("count", 5), + logparam.Bool("active", true), + logparam.String("status", "processing")) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + require.Equal(t, float64(5), logEntry["count"]) + require.Equal(t, true, logEntry["active"]) + require.Equal(t, "processing", logEntry["status"]) + }) +} + +func TestLog5(t *testing.T) { + t.Run("logs message with five parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log5(ctx, logger, "processing item", + logparam.String("id", "item-123"), + logparam.Int("count", 5), + logparam.Bool("active", true), + logparam.String("status", "processing"), + logparam.Int64("size", 1024)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + require.Equal(t, float64(5), logEntry["count"]) + require.Equal(t, true, logEntry["active"]) + require.Equal(t, "processing", logEntry["status"]) + require.Equal(t, float64(1024), logEntry["size"]) + }) +} + +func TestLog6(t *testing.T) { + t.Run("logs message with six parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log6(ctx, logger, "processing item", + logparam.String("id", "item-123"), + logparam.Int("count", 5), + logparam.Bool("active", true), + logparam.String("status", "processing"), + logparam.Int64("size", 1024), + logparam.UInt64("flags", 0xFF)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "processing item", logEntry["m"]) + require.Equal(t, "item-123", logEntry["id"]) + require.Equal(t, float64(5), logEntry["count"]) + require.Equal(t, true, logEntry["active"]) + require.Equal(t, "processing", logEntry["status"]) + require.Equal(t, float64(1024), logEntry["size"]) + require.Equal(t, float64(0xFF), logEntry["flags"]) + }) +} + +func TestEmit(t *testing.T) { + t.Run("emits custom WriterTo entry", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + customEntry := &customLogEntry{ + message: "custom entry", + value: 42, + } + + contentlog.Emit(ctx, logger, customEntry) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "custom entry", logEntry["message"]) + require.Equal(t, float64(42), logEntry["value"]) + require.Contains(t, logEntry, "t") // timestamp field + }) + + t.Run("emits with logger parameters", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + params := []contentlog.ParamWriter{ + logparam.String("service", "custom-service"), + logparam.Int("version", 3), + } + + logger := contentlog.NewLogger(outputFunc, params...) 
+ ctx := context.Background() + + customEntry := &customLogEntry{ + message: "custom entry", + value: 42, + } + + contentlog.Emit(ctx, logger, customEntry) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "custom entry", logEntry["message"]) + require.Equal(t, float64(42), logEntry["value"]) + require.Equal(t, "custom-service", logEntry["service"]) + require.Equal(t, float64(3), logEntry["version"]) + }) + + t.Run("handles nil logger gracefully", func(t *testing.T) { + ctx := context.Background() + customEntry := &customLogEntry{message: "test", value: 1} + // This should not panic + contentlog.Emit(ctx, nil, customEntry) + }) + + t.Run("handles nil output gracefully", func(t *testing.T) { + logger := contentlog.NewLogger(nil) + ctx := context.Background() + customEntry := &customLogEntry{message: "test", value: 1} + // This should not panic + contentlog.Emit(ctx, logger, customEntry) + }) +} + +func TestLoggerMultipleLogs(t *testing.T) { + t.Run("handles multiple log entries", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log(ctx, logger, "first message") + contentlog.Log1(ctx, logger, "second message", logparam.String("id", "123")) + contentlog.Log2(ctx, logger, "third message", logparam.Int("count", 5), logparam.Bool("flag", true)) + + require.NotEmpty(t, captured) + + // Split by newlines to get individual log entries + lines := strings.Split(strings.TrimSpace(string(captured)), "\n") + require.Len(t, lines, 3) + + // Check first entry + var entry1 map[string]any + + err := json.Unmarshal([]byte(lines[0]), &entry1) + require.NoError(t, err) + require.Equal(t, "first message", entry1["m"]) + + // Check second entry + var entry2 map[string]any + + err = json.Unmarshal([]byte(lines[1]), &entry2) + require.NoError(t, err) + require.Equal(t, "second message", entry2["m"]) + require.Equal(t, "123", entry2["id"]) + + // Check third entry + var entry3 map[string]any + + err = json.Unmarshal([]byte(lines[2]), &entry3) + require.NoError(t, err) + require.Equal(t, "third message", entry3["m"]) + require.Equal(t, float64(5), entry3["count"]) + require.Equal(t, true, entry3["flag"]) + }) +} + +func TestLoggerErrorHandling(t *testing.T) { + t.Run("handles nil error parameter", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) + } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + contentlog.Log1(ctx, logger, "error test", logparam.Error("err", nil)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "error test", logEntry["m"]) + require.Nil(t, logEntry["err"]) + }) + + t.Run("handles real error parameter", func(t *testing.T) { + var captured []byte + + outputFunc := func(data []byte) { + captured = append(captured, data...) 
+ } + + logger := contentlog.NewLogger(outputFunc) + ctx := context.Background() + + testErr := &testError{msg: "operation failed"} + contentlog.Log1(ctx, logger, "error test", logparam.Error("err", testErr)) + + require.NotEmpty(t, captured) + + var logEntry map[string]any + + err := json.Unmarshal(captured, &logEntry) + require.NoError(t, err) + require.Equal(t, "error test", logEntry["m"]) + require.Equal(t, "operation failed", logEntry["err"]) + }) +} + +// Helper types for testing + +type testError struct { + msg string +} + +func (e *testError) Error() string { + return e.msg +} + +type customLogEntry struct { + message string + value int +} + +func (e *customLogEntry) WriteTo(jw *contentlog.JSONWriter) { + jw.StringField("message", e.message) + jw.Int64Field("value", int64(e.value)) +} diff --git a/internal/contentlog/logparam/logparam.go b/internal/contentlog/logparam/logparam.go new file mode 100644 index 00000000000..0e1eeaada12 --- /dev/null +++ b/internal/contentlog/logparam/logparam.go @@ -0,0 +1,134 @@ +// Package logparam provides parameters for logging. +package logparam + +import ( + "time" + + "github.com/kopia/kopia/internal/contentlog" +) + +// String creates a string parameter. +// +//nolint:revive +func String(key, value string) stringParam { + return stringParam{Key: key, Value: value} +} + +// Int64 creates an int64 parameter. +// +//nolint:revive +func Int64(key string, value int64) int64Param { return int64Param{Key: key, Value: value} } + +// Int creates an int parameter. +// +//nolint:revive +func Int(key string, value int) int64Param { + return int64Param{Key: key, Value: int64(value)} +} + +// Int32 creates an int32 parameter. +// +//nolint:revive +func Int32(key string, value int32) int64Param { + return int64Param{Key: key, Value: int64(value)} +} + +// Bool creates a bool parameter. +// +//nolint:revive +func Bool(key string, value bool) boolParam { return boolParam{Key: key, Value: value} } + +// Time creates a time parameter. 
+// +//nolint:revive +func Time(key string, value time.Time) timeParam { return timeParam{Key: key, Value: value} } + +// Error creates an error parameter. +// +//nolint:revive +func Error(key string, value error) errorParam { return errorParam{Key: key, Value: value} } + +// UInt64 creates a uint64 parameter. +// +//nolint:revive +func UInt64(key string, value uint64) uint64Param { + return uint64Param{Key: key, Value: value} +} + +// UInt32 creates a uint32 parameter. +// +//nolint:revive +func UInt32(key string, value uint32) uint64Param { + return uint64Param{Key: key, Value: uint64(value)} +} + +// Duration creates a duration parameter. +// +//nolint:revive +func Duration(key string, value time.Duration) durationParam { + return durationParam{Key: key, Value: value} +} + +// int64Param is a parameter that writes a int64 value to the JSON writer. +type int64Param struct { + Key string + Value int64 +} + +func (v int64Param) WriteValueTo(jw *contentlog.JSONWriter) { + jw.Int64Field(v.Key, v.Value) +} + +type uint64Param struct { + Key string + Value uint64 +} + +func (v uint64Param) WriteValueTo(jw *contentlog.JSONWriter) { + jw.UInt64Field(v.Key, v.Value) +} + +type timeParam struct { + Key string + Value time.Time +} + +func (v timeParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.TimeField(v.Key, v.Value) +} + +type boolParam struct { + Key string + Value bool +} + +func (v boolParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BoolField(v.Key, v.Value) +} + +type durationParam struct { + Key string + Value time.Duration +} + +func (v durationParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.Int64Field(v.Key, v.Value.Microseconds()) +} + +type errorParam struct { + Key string + Value error +} + +func (v errorParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.ErrorField(v.Key, v.Value) +} + +type stringParam struct { + Key string + Value string +} + +func (v stringParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.StringField(v.Key, v.Value) 
+} diff --git a/internal/contentlog/logparam/logparam_test.go b/internal/contentlog/logparam/logparam_test.go new file mode 100644 index 00000000000..9b5cbf71874 --- /dev/null +++ b/internal/contentlog/logparam/logparam_test.go @@ -0,0 +1,644 @@ +package logparam + +import ( + "encoding/json" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/contentlog" +) + +func TestString(t *testing.T) { + tests := []struct { + name string + key string + value string + expected map[string]any + }{ + { + name: "simple string", + key: "message", + value: "hello world", + expected: map[string]any{ + "message": "hello world", + }, + }, + { + name: "empty string", + key: "empty", + value: "", + expected: map[string]any{ + "empty": "", + }, + }, + { + name: "unicode string", + key: "unicode", + value: "😄🚀🎉", + expected: map[string]any{ + "unicode": "😄🚀🎉", + }, + }, + { + name: "string with special chars", + key: "special", + value: "hello\nworld\r\t\b\f", + expected: map[string]any{ + "special": "hello\nworld\r\t\b\f", + }, + }, + { + name: "string with control chars", + key: "control", + value: "hello\x00world\x07test", + expected: map[string]any{ + "control": "hello\x00world\x07test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = String(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "String() should not allocate memory") + + // Test output format + param := String(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestInt64(t *testing.T) { + tests := []struct { + name string + key string 
+ value int64 + expected map[string]any + }{ + { + name: "positive int64", + key: "count", + value: 123456789, + expected: map[string]any{ + "count": 123456789.0, + }, + }, + { + name: "negative int64", + key: "negative", + value: -987654321, + expected: map[string]any{ + "negative": -987654321.0, + }, + }, + { + name: "zero int64", + key: "zero", + value: 0, + expected: map[string]any{ + "zero": 0.0, + }, + }, + { + name: "max int64", + key: "max", + value: 9223372036854775807, + expected: map[string]any{ + "max": 9223372036854775807.0, + }, + }, + { + name: "min int64", + key: "min", + value: -9223372036854775808, + expected: map[string]any{ + "min": -9223372036854775808.0, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Int64(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Int64() should not allocate memory") + + // Test output format + param := Int64(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestInt(t *testing.T) { + tests := []struct { + name string + key string + value int + expected map[string]any + }{ + { + name: "positive int", + key: "count", + value: 42, + expected: map[string]any{ + "count": 42.0, + }, + }, + { + name: "negative int", + key: "negative", + value: -100, + expected: map[string]any{ + "negative": -100.0, + }, + }, + { + name: "zero int", + key: "zero", + value: 0, + expected: map[string]any{ + "zero": 0.0, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Int(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Int() should not 
allocate memory") + + // Test output format + param := Int(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestInt32(t *testing.T) { + tests := []struct { + name string + key string + value int32 + expected map[string]any + }{ + { + name: "positive int32", + key: "count", + value: 2147483647, + expected: map[string]any{ + "count": 2147483647.0, + }, + }, + { + name: "negative int32", + key: "negative", + value: -2147483648, + expected: map[string]any{ + "negative": -2147483648.0, + }, + }, + { + name: "zero int32", + key: "zero", + value: 0, + expected: map[string]any{ + "zero": 0.0, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Int32(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Int32() should not allocate memory") + + // Test output format + param := Int32(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestBool(t *testing.T) { + tests := []struct { + name string + key string + value bool + expected map[string]any + }{ + { + name: "true bool", + key: "enabled", + value: true, + expected: map[string]any{ + "enabled": true, + }, + }, + { + name: "false bool", + key: "disabled", + value: false, + expected: map[string]any{ + "disabled": false, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Bool(tt.key, tt.value) + }) 
+ require.Equal(t, float64(0), allocs, "Bool() should not allocate memory") + + // Test output format + param := Bool(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestTime(t *testing.T) { + now := clock.Now() + utcTime := time.Date(2023, 12, 25, 15, 30, 45, 123456789, time.UTC) + zeroTime := time.Time{} + + tests := []struct { + name string + key string + value time.Time + expected map[string]any + }{ + { + name: "current time", + key: "now", + value: now, + expected: map[string]any{ + "now": now.UTC().Format("2006-01-02T15:04:05.000000Z"), + }, + }, + { + name: "UTC time", + key: "utc", + value: utcTime, + expected: map[string]any{ + "utc": "2023-12-25T15:30:45.123456Z", + }, + }, + { + name: "zero time", + key: "zero", + value: zeroTime, + expected: map[string]any{ + "zero": "0001-01-01T00:00:00.000000Z", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Time(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Time() should not allocate memory") + + // Test output format + param := Time(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestError(t *testing.T) { + err1 := errors.Errorf("test error") + err2 := errors.Errorf("another error") + + var nilErr error + + tests := []struct { + name string + key string + value error + expected map[string]any + }{ + { + name: "simple error", + key: "err", + value: err1, + expected: map[string]any{ + 
"err": "test error", + }, + }, + { + name: "another error", + key: "error", + value: err2, + expected: map[string]any{ + "error": "another error", + }, + }, + { + name: "nil error", + key: "nil", + value: nilErr, + expected: map[string]any{ + "nil": nil, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Error(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Error() should not allocate memory") + + // Test output format + param := Error(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestUInt64(t *testing.T) { + tests := []struct { + name string + key string + value uint64 + expected map[string]any + }{ + { + name: "positive uint64", + key: "count", + value: 18446744073709551615, + expected: map[string]any{ + "count": 18446744073709551615.0, + }, + }, + { + name: "zero uint64", + key: "zero", + value: 0, + expected: map[string]any{ + "zero": 0.0, + }, + }, + { + name: "large uint64", + key: "large", + value: 1234567890123456789, + expected: map[string]any{ + "large": 1234567890123456789.0, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = UInt64(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "UInt64() should not allocate memory") + + // Test output format + param := UInt64(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} 
+ +func TestDuration(t *testing.T) { + tests := []struct { + name string + key string + value time.Duration + expected map[string]any + }{ + { + name: "positive duration", + key: "duration", + value: 5 * time.Second, + expected: map[string]any{ + "duration": 5000000.0, // microseconds + }, + }, + { + name: "negative duration", + key: "negative", + value: -2 * time.Minute, + expected: map[string]any{ + "negative": -120000000.0, // microseconds + }, + }, + { + name: "zero duration", + key: "zero", + value: 0, + expected: map[string]any{ + "zero": 0.0, + }, + }, + { + name: "microsecond duration", + key: "micro", + value: 123 * time.Microsecond, + expected: map[string]any{ + "micro": 123.0, + }, + }, + { + name: "nanosecond duration", + key: "nano", + value: 500 * time.Nanosecond, + expected: map[string]any{ + "nano": 0.0, // rounds down to microseconds + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test memory allocations + allocs := testing.AllocsPerRun(100, func() { + _ = Duration(tt.key, tt.value) + }) + require.Equal(t, float64(0), allocs, "Duration() should not allocate memory") + + // Test output format + param := Duration(tt.key, tt.value) + + jw := contentlog.NewJSONWriter() + defer jw.Release() + + jw.BeginObject() + param.WriteValueTo(jw) + jw.EndObject() + + var result map[string]any + + require.NoError(t, json.Unmarshal(jw.GetBufferForTesting(), &result)) + require.Equal(t, tt.expected, result) + }) + } +} + +// TestWriteValueToMemoryAllocations tests that WriteValueTo methods don't allocate memory. 
+func TestWriteValueToMemoryAllocations(t *testing.T) { + jw := contentlog.NewJSONWriter() + defer jw.Release() + + tests := []struct { + name string + param contentlog.ParamWriter + }{ + { + name: "stringParam", + param: String("key", "value"), + }, + { + name: "int64Param", + param: Int64("key", 123), + }, + { + name: "boolParam", + param: Bool("key", true), + }, + { + name: "timeParam", + param: Time("key", clock.Now()), + }, + { + name: "errorParam", + param: Error("key", errors.New("test")), + }, + { + name: "uint64Param", + param: UInt64("key", 123), + }, + { + name: "durationParam", + param: Duration("key", time.Second), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + allocs := testing.AllocsPerRun(100, func() { + jw.BeginObject() + tt.param.WriteValueTo(jw) + jw.EndObject() + }) + require.Equal(t, float64(0), allocs, "%s.WriteValueTo() should not allocate memory", tt.name) + }) + } +} diff --git a/internal/contentparam/contentid_params.go b/internal/contentparam/contentid_params.go new file mode 100644 index 00000000000..c3b228b70a7 --- /dev/null +++ b/internal/contentparam/contentid_params.go @@ -0,0 +1,27 @@ +// Package contentparam provides parameters for logging content-related operations. +package contentparam + +import ( + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/repo/content/index" +) + +const maxLoggedContentIDLength = 5 + +type contentIDParam struct { + Key string + Value index.ID +} + +func (e contentIDParam) WriteValueTo(jw *contentlog.JSONWriter) { + var buf [128]byte + + jw.RawJSONField(e.Key, e.Value.AppendToJSON(buf[:0], maxLoggedContentIDLength)) +} + +// ContentID is a parameter that writes a content ID to the JSON writer. 
+// +//nolint:revive +func ContentID(key string, value index.ID) contentIDParam { + return contentIDParam{Key: key, Value: value} +} diff --git a/internal/crypto/aesgcm.go b/internal/crypto/aesgcm.go index 50e08c107a1..791f444fb35 100644 --- a/internal/crypto/aesgcm.go +++ b/internal/crypto/aesgcm.go @@ -11,9 +11,9 @@ import ( ) //nolint:gochecknoglobals -var ( - purposeAESKey = []byte("AES") - purposeAuthData = []byte("CHECKSUM") +const ( + purposeAESKey = "AES" + purposeAuthData = "CHECKSUM" ) func initCrypto(masterKey, salt []byte) (cipher.AEAD, []byte, error) { @@ -40,6 +40,8 @@ func initCrypto(masterKey, salt []byte) (cipher.AEAD, []byte, error) { return aead, authData, nil } +var errPlaintextTooLarge = errors.New("plaintext data is too large to be encrypted") + // EncryptAes256Gcm encrypts data with AES 256 GCM. func EncryptAes256Gcm(data, masterKey, salt []byte) ([]byte, error) { aead, authData, err := initCrypto(masterKey, salt) @@ -48,8 +50,14 @@ func EncryptAes256Gcm(data, masterKey, salt []byte) ([]byte, error) { } nonceLength := aead.NonceSize() - noncePlusContentLength := nonceLength + len(data) - cipherText := make([]byte, noncePlusContentLength+aead.Overhead()) + noncePlusOverhead := nonceLength + aead.Overhead() + + const maxInt = int(^uint(0) >> 1) + if len(data) > maxInt-noncePlusOverhead { + return nil, errPlaintextTooLarge + } + + cipherText := make([]byte, len(data)+noncePlusOverhead) // Store nonce at the beginning of ciphertext. nonce := cipherText[0:nonceLength] @@ -70,11 +78,12 @@ func DecryptAes256Gcm(data, masterKey, salt []byte) ([]byte, error) { return nil, errors.Wrap(err, "cannot initialize cipher") } - data = append([]byte(nil), data...) if len(data) < aead.NonceSize() { return nil, errors.New("invalid encrypted payload, too short") } + data = append([]byte(nil), data...) 
+ nonce := data[0:aead.NonceSize()] payload := data[aead.NonceSize():] diff --git a/internal/crypto/key_derivation.go b/internal/crypto/key_derivation.go index 1709c0aa424..3afc878c11b 100644 --- a/internal/crypto/key_derivation.go +++ b/internal/crypto/key_derivation.go @@ -10,12 +10,12 @@ import ( var errInvalidMasterKey = errors.New("invalid primary key") // DeriveKeyFromMasterKey computes a key for a specific purpose and length using HKDF based on the master key. -func DeriveKeyFromMasterKey(masterKey, salt, purpose []byte, length int) (derivedKey []byte, err error) { +func DeriveKeyFromMasterKey(masterKey, salt []byte, purpose string, length int) (derivedKey []byte, err error) { if len(masterKey) == 0 { return nil, errors.Wrap(errInvalidMasterKey, "empty key") } - if derivedKey, err = hkdf.Key(sha256.New, masterKey, salt, string(purpose), length); err != nil { + if derivedKey, err = hkdf.Key(sha256.New, masterKey, salt, purpose, length); err != nil { return nil, errors.Wrap(err, "unable to derive key") } diff --git a/internal/crypto/key_derivation_test.go b/internal/crypto/key_derivation_test.go index 50df28a7616..576ac884e40 100644 --- a/internal/crypto/key_derivation_test.go +++ b/internal/crypto/key_derivation_test.go @@ -9,15 +9,16 @@ import ( "github.com/kopia/kopia/internal/crypto" ) -var ( - TestMasterKey = []byte("ABCDEFGHIJKLMNOP") - TestSalt = []byte("0123456789012345") - TestPurpose = []byte("the-test-purpose") -) - func TestDeriveKeyFromMasterKey(t *testing.T) { + const testPurpose = "the-test-purpose" + + var ( + testMasterKey = []byte("ABCDEFGHIJKLMNOP") + testSalt = []byte("0123456789012345") + ) + t.Run("ReturnsKey", func(t *testing.T) { - key, err := crypto.DeriveKeyFromMasterKey(TestMasterKey, TestSalt, TestPurpose, 32) + key, err := crypto.DeriveKeyFromMasterKey(testMasterKey, testSalt, testPurpose, 32) require.NoError(t, err) expected := "828769ee8969bc37f11dbaa32838f8db6c19daa6e3ae5f5eed2da2d94d8faddb" @@ -26,13 +27,13 @@ func 
TestDeriveKeyFromMasterKey(t *testing.T) { }) t.Run("ErrorOnNilMasterKey", func(t *testing.T) { - k, err := crypto.DeriveKeyFromMasterKey(nil, TestSalt, TestPurpose, 32) + k, err := crypto.DeriveKeyFromMasterKey(nil, testSalt, testPurpose, 32) require.Error(t, err) require.Nil(t, k) }) t.Run("ErrorOnEmptyMasterKey", func(t *testing.T) { - k, err := crypto.DeriveKeyFromMasterKey([]byte{}, TestSalt, TestPurpose, 32) + k, err := crypto.DeriveKeyFromMasterKey([]byte{}, testSalt, testPurpose, 32) require.Error(t, err) require.Nil(t, k) }) diff --git a/internal/crypto/pb_key_deriver_insecure_testing.go b/internal/crypto/pb_key_deriver_insecure_testing.go index 961e71f22a0..698eb732225 100644 --- a/internal/crypto/pb_key_deriver_insecure_testing.go +++ b/internal/crypto/pb_key_deriver_insecure_testing.go @@ -1,5 +1,4 @@ //go:build testing -// +build testing package crypto diff --git a/internal/diff/diff.go b/internal/diff/diff.go index 1b9f7bcb928..73985b824ff 100644 --- a/internal/diff/diff.go +++ b/internal/diff/diff.go @@ -59,14 +59,11 @@ type Comparer struct { // Compare compares two filesystem entries and emits their diff information. func (c *Comparer) Compare(ctx context.Context, e1, e2 fs.Entry) (Stats, error) { - c.stats = Stats{} + c.stats = Stats{} // reset stats err := c.compareEntry(ctx, e1, e2, ".") - if err != nil { - return c.stats, err - } - return c.stats, errors.Wrap(err, "error comparing fs entries") + return c.stats, err } // Close removes all temporary files used by the comparer. @@ -357,6 +354,7 @@ func (c *Comparer) compareFiles(ctx context.Context, f1, f2 fs.File, fname strin } var args []string + args = append(args, c.DiffArguments...) 
args = append(args, oldName, newName) diff --git a/internal/diff/diff_test.go b/internal/diff/diff_test.go index 66bd7d1d828..7d76bc55beb 100644 --- a/internal/diff/diff_test.go +++ b/internal/diff/diff_test.go @@ -550,11 +550,13 @@ func TestGetTwoLatestSnapshots(t *testing.T) { intermediateSnapshotManifestID := mustSaveSnapshot(t, env.RepositoryWriter, manifests["intermediate_snapshot"]) var expectedManifestIDs []manifest.ID + expectedManifestIDs = append(expectedManifestIDs, initialSnapshotManifestID, intermediateSnapshotManifestID) secondLastSnapshot, lastSnapshot, err := diff.GetTwoLatestSnapshotsForASource(ctx, env.RepositoryWriter, snapshotSrc) var gotManifestIDs []manifest.ID + gotManifestIDs = append(gotManifestIDs, secondLastSnapshot.ID, lastSnapshot.ID) require.NoError(t, err) diff --git a/internal/editor/editor.go b/internal/editor/editor.go index 2870ce72a6f..74ec75685e7 100644 --- a/internal/editor/editor.go +++ b/internal/editor/editor.go @@ -105,7 +105,7 @@ var EditFile = func(ctx context.Context, file string) error { args = append(args, editorArgs...) args = append(args, file) - cmd := exec.Command(editor, args...) //nolint:gosec + cmd := exec.CommandContext(ctx, editor, args...) 
//nolint:gosec cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout diff --git a/internal/epoch/epoch_manager.go b/internal/epoch/epoch_manager.go index 18e7b03c0ee..f6149875413 100644 --- a/internal/epoch/epoch_manager.go +++ b/internal/epoch/epoch_manager.go @@ -13,10 +13,13 @@ import ( "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "github.com/kopia/kopia/internal/blobparam" "github.com/kopia/kopia/internal/completeset" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" - "github.com/kopia/kopia/repo/logging" + "github.com/kopia/kopia/repo/maintenancestats" ) // LatestEpoch represents the current epoch number in GetCompleteIndexSet. @@ -172,8 +175,12 @@ type CurrentSnapshot struct { DeletionWatermarkBlobs []blob.Metadata `json:"deletionWatermarkBlobs"` // list of deletion watermark blobs } +func (cs *CurrentSnapshot) lastSettledEpochNumber() int { + return cs.WriteEpoch - numUnsettledEpochs +} + func (cs *CurrentSnapshot) isSettledEpochNumber(epoch int) bool { - return epoch <= cs.WriteEpoch-numUnsettledEpochs + return epoch <= cs.lastSettledEpochNumber() } // Manager manages repository epochs. @@ -182,7 +189,7 @@ type Manager struct { st blob.Storage compact CompactionFunc - log logging.Logger + log *contentlog.Logger timeFunc func() time.Time // wait group that waits for all compaction and cleanup goroutines. @@ -232,27 +239,31 @@ func (e *Manager) Current(ctx context.Context) (CurrentSnapshot, error) { // AdvanceDeletionWatermark moves the deletion watermark time to a given timestamp // this causes all deleted content entries before given time to be treated as non-existent. 
-func (e *Manager) AdvanceDeletionWatermark(ctx context.Context, ts time.Time) error { +func (e *Manager) AdvanceDeletionWatermark(ctx context.Context, ts time.Time) (bool, error) { cs, err := e.committedState(ctx, 0) if err != nil { - return err + return false, err } if ts.Before(cs.DeletionWatermark) { - e.log.Debugf("ignoring attempt to move deletion watermark time backwards (%v < %v)", ts.Format(time.RFC3339), cs.DeletionWatermark.Format(time.RFC3339)) + contentlog.Log2(ctx, e.log, + "ignoring attempt to move deletion watermark time backwards", + logparam.Time("ts", ts), + logparam.Time("deletionWatermark", cs.DeletionWatermark), + ) - return nil + return false, nil } blobID := blob.ID(fmt.Sprintf("%v%v", string(DeletionWatermarkBlobPrefix), ts.Unix())) if err := e.st.PutBlob(ctx, blobID, gather.FromSlice([]byte("deletion-watermark")), blob.PutOptions{}); err != nil { - return errors.Wrap(err, "error writing deletion watermark") + return false, errors.Wrap(err, "error writing deletion watermark") } e.Invalidate() - return nil + return true, nil } // Refresh refreshes information about current epoch. @@ -284,21 +295,21 @@ func (e *Manager) maxCleanupTime(cs CurrentSnapshot) time.Time { } // CleanupMarkers removes superseded watermarks and epoch markers. 
-func (e *Manager) CleanupMarkers(ctx context.Context) error { +func (e *Manager) CleanupMarkers(ctx context.Context) (*maintenancestats.CleanupMarkersStats, error) { cs, err := e.committedState(ctx, 0) if err != nil { - return err + return nil, err } p, err := e.getParameters(ctx) if err != nil { - return err + return nil, err } return e.cleanupInternal(ctx, cs, p) } -func (e *Manager) cleanupInternal(ctx context.Context, cs CurrentSnapshot, p *Parameters) error { +func (e *Manager) cleanupInternal(ctx context.Context, cs CurrentSnapshot, p *Parameters) (*maintenancestats.CleanupMarkersStats, error) { eg, ctx := errgroup.WithContext(ctx) // find max timestamp recently written to the repository to establish storage clock. @@ -306,7 +317,7 @@ func (e *Manager) cleanupInternal(ctx context.Context, cs CurrentSnapshot, p *Pa // to this max time. This assumes that storage clock moves forward somewhat reasonably. maxTime := e.maxCleanupTime(cs) if maxTime.IsZero() { - return nil + return nil, nil } // only delete blobs if a suitable replacement exists and has been written sufficiently @@ -314,18 +325,35 @@ func (e *Manager) cleanupInternal(ctx context.Context, cs CurrentSnapshot, p *Pa // may have not observed them yet. 
maxReplacementTime := maxTime.Add(-p.CleanupSafetyMargin) + var deletedEpochMarkersCount, deletedWatermarksCount atomic.Int64 + eg.Go(func() error { - return e.cleanupEpochMarkers(ctx, cs) + deleted, err := e.cleanupEpochMarkers(ctx, cs) + deletedEpochMarkersCount.Store(int64(deleted)) + + return err }) eg.Go(func() error { - return e.cleanupWatermarks(ctx, cs, p, maxReplacementTime) + deleted, err := e.cleanupWatermarks(ctx, cs, p, maxReplacementTime) + deletedWatermarksCount.Store(int64(deleted)) + + return err }) - return errors.Wrap(eg.Wait(), "error cleaning up index blobs") + if err := eg.Wait(); err != nil { + return nil, errors.Wrap(err, "error cleaning up index blobs") + } + + result := &maintenancestats.CleanupMarkersStats{ + DeletedEpochMarkerBlobCount: int(deletedEpochMarkersCount.Load()), + DeletedWatermarkBlobCount: int(deletedWatermarksCount.Load()), + } + + return result, nil } -func (e *Manager) cleanupEpochMarkers(ctx context.Context, cs CurrentSnapshot) error { +func (e *Manager) cleanupEpochMarkers(ctx context.Context, cs CurrentSnapshot) (int, error) { // delete epoch markers for epoch < current-1 var toDelete []blob.ID @@ -339,13 +367,13 @@ func (e *Manager) cleanupEpochMarkers(ctx context.Context, cs CurrentSnapshot) e p, err := e.getParameters(ctx) if err != nil { - return err + return 0, err } - return errors.Wrap(blob.DeleteMultiple(ctx, e.st, toDelete, p.DeleteParallelism), "error deleting index blob marker") + return len(toDelete), errors.Wrap(blob.DeleteMultiple(ctx, e.st, toDelete, p.DeleteParallelism), "error deleting index blob marker") } -func (e *Manager) cleanupWatermarks(ctx context.Context, cs CurrentSnapshot, p *Parameters, maxReplacementTime time.Time) error { +func (e *Manager) cleanupWatermarks(ctx context.Context, cs CurrentSnapshot, p *Parameters, maxReplacementTime time.Time) (int, error) { var toDelete []blob.ID for _, bm := range cs.DeletionWatermarkBlobs { @@ -363,19 +391,19 @@ func (e *Manager) cleanupWatermarks(ctx 
context.Context, cs CurrentSnapshot, p * } } - return errors.Wrap(blob.DeleteMultiple(ctx, e.st, toDelete, p.DeleteParallelism), "error deleting watermark blobs") + return len(toDelete), errors.Wrap(blob.DeleteMultiple(ctx, e.st, toDelete, p.DeleteParallelism), "error deleting watermark blobs") } // CleanupSupersededIndexes cleans up the indexes which have been superseded by compacted ones. -func (e *Manager) CleanupSupersededIndexes(ctx context.Context) error { +func (e *Manager) CleanupSupersededIndexes(ctx context.Context) (*maintenancestats.CleanupSupersededIndexesStats, error) { cs, err := e.committedState(ctx, 0) if err != nil { - return err + return nil, err } p, err := e.getParameters(ctx) if err != nil { - return err + return nil, err } // find max timestamp recently written to the repository to establish storage clock. @@ -383,7 +411,7 @@ func (e *Manager) CleanupSupersededIndexes(ctx context.Context) error { // to this max time. This assumes that storage clock moves forward somewhat reasonably. maxTime := e.maxCleanupTime(cs) if maxTime.IsZero() { - return nil + return nil, nil } // only delete blobs if a suitable replacement exists and has been written sufficiently @@ -391,31 +419,38 @@ func (e *Manager) CleanupSupersededIndexes(ctx context.Context) error { // may have not observed them yet. maxReplacementTime := maxTime.Add(-p.CleanupSafetyMargin) - e.log.Debugw("Cleaning up superseded index blobs...", - "maxReplacementTime", maxReplacementTime) + contentlog.Log1(ctx, e.log, "Cleaning up superseded index blobs...", + logparam.Time("maxReplacementTime", maxReplacementTime)) // delete uncompacted indexes for epochs that already have single-epoch compaction // that was written sufficiently long ago. 
blobs, err := blob.ListAllBlobs(ctx, e.st, UncompactedIndexBlobPrefix) if err != nil { - return errors.Wrap(err, "error listing uncompacted blobs") + return nil, errors.Wrap(err, "error listing uncompacted blobs") } var toDelete []blob.ID + var deletedTotalSize int64 + for _, bm := range blobs { if epoch, ok := epochNumberFromBlobID(bm.BlobID); ok { if blobSetWrittenEarlyEnough(cs.SingleEpochCompactionSets[epoch], maxReplacementTime) { toDelete = append(toDelete, bm.BlobID) + deletedTotalSize += bm.Length } } } if err := blob.DeleteMultiple(ctx, e.st, toDelete, p.DeleteParallelism); err != nil { - return errors.Wrap(err, "unable to delete uncompacted blobs") + return nil, errors.Wrap(err, "unable to delete uncompacted blobs") } - return nil + return &maintenancestats.CleanupSupersededIndexesStats{ + MaxReplacementTime: maxReplacementTime, + DeletedBlobCount: len(toDelete), + DeletedTotalSize: deletedTotalSize, + }, nil } func blobSetWrittenEarlyEnough(replacementSet []blob.Metadata, maxReplacementTime time.Time) bool { @@ -459,14 +494,10 @@ func (e *Manager) refreshLocked(ctx context.Context) error { return errors.Wrap(ctx.Err(), "refreshAttemptLocked") } - e.log.Debugf("refresh attempt failed: %v, sleeping %v before next retry", err, nextDelayTime) + contentlog.Log2(ctx, e.log, "refresh attempt failed", logparam.Error("error", err), logparam.Duration("nextDelayTime", nextDelayTime)) time.Sleep(nextDelayTime) - nextDelayTime = time.Duration(float64(nextDelayTime) * maxRefreshAttemptSleepExponent) - - if nextDelayTime > maxRefreshAttemptSleep { - nextDelayTime = maxRefreshAttemptSleep - } + nextDelayTime = min(time.Duration(float64(nextDelayTime)*maxRefreshAttemptSleepExponent), maxRefreshAttemptSleep) } return nil @@ -500,7 +531,7 @@ func (e *Manager) loadDeletionWatermark(ctx context.Context, cs *CurrentSnapshot for _, b := range blobs { t, ok := deletionWatermarkFromBlobID(b.BlobID) if !ok { - e.log.Debugf("ignoring malformed deletion watermark: %v", b.BlobID) + 
contentlog.Log1(ctx, e.log, "ignoring malformed deletion watermark", blobparam.BlobID("blobID", b.BlobID)) continue } @@ -520,7 +551,10 @@ func (e *Manager) loadRangeCheckpoints(ctx context.Context, cs *CurrentSnapshot) return errors.Wrap(err, "error loading full checkpoints") } - e.log.Debugf("ranges: %v", blobs) + contentlog.Log2(ctx, e.log, + "ranges", + logparam.Int("numRanges", len(blobs)), + blobparam.BlobMetadataList("blobs", blobs)) var rangeCheckpointSets []*RangeMetadata @@ -561,33 +595,36 @@ func (e *Manager) loadSingleEpochCompactions(ctx context.Context, cs *CurrentSna // MaybeGenerateRangeCheckpoint may create a new range index for all the // individual epochs covered by the new range. If there are not enough epochs // to create a new range, then a range index is not created. -func (e *Manager) MaybeGenerateRangeCheckpoint(ctx context.Context) error { +func (e *Manager) MaybeGenerateRangeCheckpoint(ctx context.Context) (*maintenancestats.GenerateRangeCheckpointStats, error) { p, err := e.getParameters(ctx) if err != nil { - return err + return nil, err } cs, err := e.committedState(ctx, 0) if err != nil { - return err + return nil, err } - latestSettled, firstNonRangeCompacted, compact := getRangeToCompact(cs, *p) + firstNonRangeCompacted, latestSettled, compact := getRangeToCompact(cs, *p) if !compact { - e.log.Debug("not generating range checkpoint") + contentlog.Log(ctx, e.log, "not generating range checkpoint") - return nil + return nil, nil } if err := e.generateRangeCheckpointFromCommittedState(ctx, cs, firstNonRangeCompacted, latestSettled); err != nil { - return errors.Wrap(err, "unable to generate full checkpoint, performance will be affected") + return nil, errors.Wrap(err, "unable to generate full checkpoint, performance will be affected") } - return nil + return &maintenancestats.GenerateRangeCheckpointStats{ + RangeMinEpoch: firstNonRangeCompacted, + RangeMaxEpoch: latestSettled, + }, nil } func getRangeToCompact(cs CurrentSnapshot, p 
Parameters) (low, high int, compactRange bool) { - latestSettled := cs.WriteEpoch - numUnsettledEpochs + latestSettled := cs.lastSettledEpochNumber() if latestSettled < 0 { return -1, -1, false } @@ -601,7 +638,7 @@ func getRangeToCompact(cs CurrentSnapshot, p Parameters) (low, high int, compact return -1, -1, false } - return latestSettled, firstNonRangeCompacted, true + return firstNonRangeCompacted, latestSettled, true } func (e *Manager) loadUncompactedEpochs(ctx context.Context, first, last int) (map[int][]blob.Metadata, error) { @@ -641,7 +678,11 @@ func (e *Manager) loadUncompactedEpochs(ctx context.Context, first, last int) (m // refreshAttemptLocked attempts to load the committedState of // the index and updates `lastKnownState` state atomically when complete. func (e *Manager) refreshAttemptLocked(ctx context.Context) error { - e.log.Debug("refreshAttemptLocked") + ctx = contentlog.WithParams(ctx, + logparam.String("span:emr", contentlog.RandomSpanID())) + + contentlog.Log(ctx, e.log, "refreshAttempt started") + defer contentlog.Log(ctx, e.log, "refreshAttempt finished") p, perr := e.getParameters(ctx) if perr != nil { @@ -682,12 +723,14 @@ func (e *Manager) refreshAttemptLocked(ctx context.Context) error { cs.UncompactedEpochSets = ues - e.log.Debugf("current epoch %v, uncompacted epoch sets %v %v %v, valid until %v", - cs.WriteEpoch, - len(ues[cs.WriteEpoch-1]), - len(ues[cs.WriteEpoch]), - len(ues[cs.WriteEpoch+1]), - cs.ValidUntil.Format(time.RFC3339Nano)) + contentlog.Log5(ctx, e.log, + "epochs determined", + logparam.Int("writeEpoch", cs.WriteEpoch), + logparam.Int("ues1", len(ues[cs.WriteEpoch-1])), + logparam.Int("ues2", len(ues[cs.WriteEpoch])), + logparam.Int("ues3", len(ues[cs.WriteEpoch+1])), + logparam.Time("validUntil", cs.ValidUntil), + ) if now := e.timeFunc(); now.After(cs.ValidUntil) { atomic.AddInt32(e.committedStateRefreshTooSlow, 1) @@ -702,21 +745,30 @@ func (e *Manager) refreshAttemptLocked(ctx context.Context) error { // 
MaybeAdvanceWriteEpoch writes a new write epoch marker when a new write // epoch should be started, otherwise it does not do anything. -func (e *Manager) MaybeAdvanceWriteEpoch(ctx context.Context) error { +func (e *Manager) MaybeAdvanceWriteEpoch(ctx context.Context) (*maintenancestats.AdvanceEpochStats, error) { p, err := e.getParameters(ctx) if err != nil { - return err + return nil, err } e.mu.Lock() cs := e.lastKnownState e.mu.Unlock() + result := &maintenancestats.AdvanceEpochStats{ + CurrentEpoch: cs.WriteEpoch, + } + if shouldAdvance(cs.UncompactedEpochSets[cs.WriteEpoch], p.MinEpochDuration, p.EpochAdvanceOnCountThreshold, p.EpochAdvanceOnTotalSizeBytesThreshold) { - return errors.Wrap(e.advanceEpochMarker(ctx, cs), "error advancing epoch") + if err := e.advanceEpochMarker(ctx, cs); err != nil { + return nil, errors.Wrap(err, "error advancing epoch") + } + + result.CurrentEpoch = cs.WriteEpoch + 1 + result.WasAdvanced = true } - return nil + return result, nil } func (e *Manager) advanceEpochMarker(ctx context.Context, cs CurrentSnapshot) error { @@ -734,7 +786,12 @@ func (e *Manager) committedState(ctx context.Context, ensureMinTime time.Duratio defer e.mu.Unlock() if now := e.timeFunc().Add(ensureMinTime); now.After(e.lastKnownState.ValidUntil) { - e.log.Debugf("refreshing committed state because it's no longer valid (now %v, valid until %v)", now, e.lastKnownState.ValidUntil.Format(time.RFC3339Nano)) + contentlog.Log2( + ctx, e.log, + "refreshing committed state because it's no longer valid", + logparam.Time("now", now), + logparam.Time("validUntil", e.lastKnownState.ValidUntil), + ) if err := e.refreshLocked(ctx); err != nil { return CurrentSnapshot{}, err @@ -758,7 +815,14 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob result, err := e.getCompleteIndexSetForCommittedState(ctx, cs, 0, maxEpoch) if e.timeFunc().Before(cs.ValidUntil) { - e.log.Debugf("Complete Index Set for [%v..%v]: %v, deletion watermark %v", 0, 
maxEpoch, blob.IDsFromMetadata(result), cs.DeletionWatermark) + contentlog.Log4(ctx, e.log, + "complete index set", + logparam.Int("maxEpoch", maxEpoch), + logparam.Int("resultLength", len(result)), + blobparam.BlobIDList("result", blob.IDsFromMetadata(result)), + logparam.Time("deletionWatermark", cs.DeletionWatermark), + ) + return result, cs.DeletionWatermark, err } @@ -770,7 +834,7 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob // indexes that are still treated as authoritative according to old committed state. // // Retrying will re-examine the state of the world and re-do the logic. - e.log.Debug("GetCompleteIndexSet took too long, retrying to ensure correctness") + contentlog.Log(ctx, e.log, "GetCompleteIndexSet took too long, retrying to ensure correctness") atomic.AddInt32(e.getCompleteIndexSetTooSlow, 1) } } @@ -778,12 +842,15 @@ func (e *Manager) GetCompleteIndexSet(ctx context.Context, maxEpoch int) ([]blob var errWriteIndexTryAgain = errors.New("try again") // WriteIndex writes new index blob by picking the appropriate prefix based on current epoch. 
-func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.Bytes) ([]blob.Metadata, error) { +func (e *Manager) WriteIndex(ctx0 context.Context, dataShards map[blob.ID]blob.Bytes) ([]blob.Metadata, error) { written := map[blob.ID]blob.Metadata{} writtenForEpoch := -1 + attempt := 0 + for { - e.log.Debug("WriteIndex") + ctx := contentlog.WithParams(ctx0, + logparam.String("span:writeEpochIndex", fmt.Sprintf("attempt-%v", attempt))) p, err := e.getParameters(ctx) if err != nil { @@ -812,7 +879,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By } if err != nil { - e.log.Debugw("index-write-error", "error", err) + contentlog.Log1(ctx, e.log, "index-write-error", logparam.Error("error", err)) return nil, err } @@ -821,16 +888,16 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By break } - cs, err := e.committedState(ctx, 0) + cs, err := e.committedState(ctx0, 0) if err != nil { return nil, errors.Wrap(err, "error getting committed state") } if cs.WriteEpoch >= writtenForEpoch+2 { - e.log.Debugw("index-write-extremely-slow") + contentlog.Log(ctx0, e.log, "index-write-extremely-slow") - if err = e.deletePartiallyWrittenShards(ctx, written); err != nil { - e.log.Debugw("index-write-extremely-slow-cleanup-failed", "error", err) + if err = e.deletePartiallyWrittenShards(ctx0, written); err != nil { + contentlog.Log1(ctx0, e.log, "index-write-extremely-slow-cleanup-failed", logparam.Error("error", err)) } return nil, ErrVerySlowIndexWrite @@ -842,7 +909,7 @@ func (e *Manager) WriteIndex(ctx context.Context, dataShards map[blob.ID]blob.By results = append(results, v) } - e.log.Debugw("index-write-success", "results", results) + contentlog.Log1(ctx0, e.log, "index-write-success", blobparam.BlobMetadataList("results", results)) return results, nil } @@ -858,22 +925,23 @@ func (e *Manager) deletePartiallyWrittenShards(ctx context.Context, blobs map[bl } func (e *Manager) writeIndexShards(ctx 
context.Context, dataShards map[blob.ID]blob.Bytes, written map[blob.ID]blob.Metadata, cs CurrentSnapshot) error { - e.log.Debugw("writing index shards", - "shardCount", len(dataShards), - "validUntil", cs.ValidUntil, - "remaining", cs.ValidUntil.Sub(e.timeFunc())) + contentlog.Log3(ctx, e.log, "writing index shards", + logparam.Int("shardCount", len(dataShards)), + logparam.Time("validUntil", cs.ValidUntil), + logparam.Duration("remaining", cs.ValidUntil.Sub(e.timeFunc()))) for unprefixedBlobID, data := range dataShards { blobID := UncompactedEpochBlobPrefix(cs.WriteEpoch) + unprefixedBlobID if _, ok := written[blobID]; ok { - e.log.Debugw("already written", - "blobID", blobID) + contentlog.Log1(ctx, e.log, + "already written", + blobparam.BlobID("blobID", blobID)) + continue } if now := e.timeFunc(); !now.Before(cs.ValidUntil) { - e.log.Debugw("write was too slow, retrying", - "validUntil", cs.ValidUntil) + contentlog.Log1(ctx, e.log, "write was too slow, retrying", logparam.Time("validUntil", cs.ValidUntil)) atomic.AddInt32(e.writeIndexTooSlow, 1) return errWriteIndexTryAgain @@ -884,7 +952,7 @@ func (e *Manager) writeIndexShards(ctx context.Context, dataShards map[blob.ID]b return errors.Wrap(err, "error writing index blob") } - e.log.Debugw("wrote-index-shard", "metadata", bm) + contentlog.Log1(ctx, e.log, "wrote-index-shard", blobparam.BlobMetadata("metadata", bm)) written[bm.BlobID] = bm } @@ -914,7 +982,12 @@ func (e *Manager) getCompleteIndexSetForCommittedState(ctx context.Context, cs C eg, ctx := errgroup.WithContext(ctx) - e.log.Debugf("adding incremental state for epochs %v..%v on top of %v", startEpoch, maxEpoch, result) + contentlog.Log3(ctx, e.log, "adding incremental state for epochs", + logparam.Int("startEpoch", startEpoch), + logparam.Int("maxEpoch", maxEpoch), + blobparam.BlobIDList("result", blob.IDsFromMetadata(result)), + ) + cnt := maxEpoch - startEpoch + 1 tmp := make([][]blob.Metadata, cnt) @@ -947,21 +1020,21 @@ func (e *Manager) 
getCompleteIndexSetForCommittedState(ctx context.Context, cs C // MaybeCompactSingleEpoch compacts the oldest epoch that is eligible for // compaction if there is one. -func (e *Manager) MaybeCompactSingleEpoch(ctx context.Context) error { +func (e *Manager) MaybeCompactSingleEpoch(ctx context.Context) (*maintenancestats.CompactSingleEpochStats, error) { cs, err := e.committedState(ctx, 0) if err != nil { - return err + return nil, err } uncompacted, err := oldestUncompactedEpoch(cs) if err != nil { - return err + return nil, err } if !cs.isSettledEpochNumber(uncompacted) { - e.log.Debugw("there are no uncompacted epochs eligible for compaction", "oldestUncompactedEpoch", uncompacted) + contentlog.Log1(ctx, e.log, "there are no uncompacted epochs eligible for compaction", logparam.Int("oldestUncompactedEpoch", uncompacted)) - return nil + return nil, nil } uncompactedBlobs, ok := cs.UncompactedEpochSets[uncompacted] @@ -969,19 +1042,30 @@ func (e *Manager) MaybeCompactSingleEpoch(ctx context.Context) error { // blobs for this epoch were not loaded in the current snapshot, get the list of blobs for this epoch ue, err := blob.ListAllBlobs(ctx, e.st, UncompactedEpochBlobPrefix(uncompacted)) if err != nil { - return errors.Wrapf(err, "error listing uncompacted indexes for epoch %v", uncompacted) + return nil, errors.Wrapf(err, "error listing uncompacted indexes for epoch %v", uncompacted) } uncompactedBlobs = ue } - e.log.Debugf("starting single-epoch compaction for epoch %v", uncompacted) + var uncompactedSize int64 + for _, b := range uncompactedBlobs { + uncompactedSize += b.Length + } + + result := &maintenancestats.CompactSingleEpochStats{ + SupersededIndexBlobCount: len(uncompactedBlobs), + SupersededIndexTotalSize: uncompactedSize, + Epoch: uncompacted, + } + + contentlog.Log1(ctx, e.log, "starting single-epoch compaction for epoch", result) if err := e.compact(ctx, blob.IDsFromMetadata(uncompactedBlobs), compactedEpochBlobPrefix(uncompacted)); err != nil { - 
return errors.Wrapf(err, "unable to compact blobs for epoch %v: performance will be affected", uncompacted) + return nil, errors.Wrapf(err, "unable to compact blobs for epoch %v: performance will be affected", uncompacted) } - return nil + return result, nil } func (e *Manager) getIndexesFromEpochInternal(ctx context.Context, cs CurrentSnapshot, epoch int) ([]blob.Metadata, error) { @@ -1007,7 +1091,10 @@ func (e *Manager) getIndexesFromEpochInternal(ctx context.Context, cs CurrentSna } func (e *Manager) generateRangeCheckpointFromCommittedState(ctx context.Context, cs CurrentSnapshot, minEpoch, maxEpoch int) error { - e.log.Debugf("generating range checkpoint for %v..%v", minEpoch, maxEpoch) + contentlog.Log2(ctx, e.log, + "generating range checkpoint", + logparam.Int("minEpoch", minEpoch), + logparam.Int("maxEpoch", maxEpoch)) completeSet, err := e.getCompleteIndexSetForCommittedState(ctx, cs, minEpoch, maxEpoch) if err != nil { @@ -1039,7 +1126,7 @@ func rangeCheckpointBlobPrefix(epoch1, epoch2 int) blob.ID { } // NewManager creates new epoch manager. 
-func NewManager(st blob.Storage, paramProvider ParametersProvider, compactor CompactionFunc, log logging.Logger, timeNow func() time.Time) *Manager { +func NewManager(st blob.Storage, paramProvider ParametersProvider, compactor CompactionFunc, log *contentlog.Logger, timeNow func() time.Time) *Manager { return &Manager{ st: st, log: log, diff --git a/internal/epoch/epoch_manager_test.go b/internal/epoch/epoch_manager_test.go index 6753fe15135..c1a83aff791 100644 --- a/internal/epoch/epoch_manager_test.go +++ b/internal/epoch/epoch_manager_test.go @@ -25,6 +25,7 @@ import ( "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob/logging" "github.com/kopia/kopia/repo/blob/readonly" + "github.com/kopia/kopia/repo/maintenancestats" ) type fakeIndex struct { @@ -90,7 +91,7 @@ func newTestEnv(t *testing.T) *epochManagerTestEnv { ft := faketime.NewClockTimeWithOffset(0) ms := blobtesting.NewMapStorage(data, nil, ft.NowFunc()) fs := blobtesting.NewFaultyStorage(ms) - st := logging.NewWrapper(fs, testlogging.NewTestLogger(t), "[STORAGE] ") + st := logging.NewWrapper(fs, testlogging.NewTestLogger(t), nil, "[STORAGE] ") te := &epochManagerTestEnv{unloggedst: ms, st: st, ft: ft} m := NewManager(te.st, parameterProvider{&Parameters{ Enabled: true, @@ -102,7 +103,7 @@ func newTestEnv(t *testing.T) *epochManagerTestEnv { EpochAdvanceOnCountThreshold: 15, EpochAdvanceOnTotalSizeBytesThreshold: 20 << 20, DeleteParallelism: 1, - }}, te.compact, testlogging.NewTestLogger(t), te.ft.NowFunc()) + }}, te.compact, nil, te.ft.NowFunc()) te.mgr = m te.faultyStorage = fs te.data = data @@ -379,7 +380,7 @@ func TestIndexEpochManager_NoCompactionInReadOnly(t *testing.T) { te2 := &epochManagerTestEnv{ data: te.data, unloggedst: st, - st: logging.NewWrapper(fs, testlogging.NewTestLogger(t), "[OTHER STORAGE] "), + st: logging.NewWrapper(fs, testlogging.NewTestLogger(t), nil, "[OTHER STORAGE] "), ft: te.ft, faultyStorage: fs, } @@ -620,9 +621,11 @@ func TestMaybeAdvanceEpoch_Empty(t 
*testing.T) { te.verifyCurrentWriteEpoch(t, 0) // this should be a no-op - err := te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, 0, stats.CurrentEpoch) + require.False(t, stats.WasAdvanced) // check current epoch again te.verifyCurrentWriteEpoch(t, 0) @@ -668,9 +671,11 @@ func TestMaybeAdvanceEpoch(t *testing.T) { require.NoError(t, err) te.verifyCurrentWriteEpoch(t, 0) - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, 1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) err = te.mgr.Refresh(ctx) // force state refresh @@ -695,10 +700,11 @@ func TestMaybeAdvanceEpoch_GetParametersError(t *testing.T) { paramsError := errors.New("no parameters error") te.mgr.paramProvider = faultyParamsProvider{err: paramsError} - err := te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.Error(t, err) require.ErrorIs(t, err, paramsError) + require.Nil(t, stats) } func TestMaybeAdvanceEpoch_Error(t *testing.T) { @@ -738,10 +744,11 @@ func TestMaybeAdvanceEpoch_Error(t *testing.T) { te.faultyStorage.AddFaults(blobtesting.MethodPutBlob, fault.New().ErrorInstead(berr)) - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.Error(t, err) require.ErrorIs(t, err, berr) + require.Nil(t, stats) } func TestForceAdvanceEpoch(t *testing.T) { @@ -806,8 +813,9 @@ func TestInvalid_Cleanup(t *testing.T) { ctx, cancel := context.WithCancel(testlogging.Context(t)) cancel() - err := te.mgr.CleanupSupersededIndexes(ctx) + stats, err := te.mgr.CleanupSupersededIndexes(ctx) require.ErrorIs(t, err, ctx.Err()) + require.Nil(t, stats) } //nolint:thelper @@ -843,8 +851,14 @@ func verifySequentialWrites(t *testing.T, te *epochManagerTestEnv) { if indexNum%13 == 0 { ts := te.ft.NowFunc()().Truncate(time.Second) - require.NoError(t, 
te.mgr.AdvanceDeletionWatermark(ctx, ts)) - require.NoError(t, te.mgr.AdvanceDeletionWatermark(ctx, ts.Add(-time.Second))) + advanced, err := te.mgr.AdvanceDeletionWatermark(ctx, ts) + require.NoError(t, err) + require.True(t, advanced) + + advanced, err = te.mgr.AdvanceDeletionWatermark(ctx, ts.Add(-time.Second)) + require.NoError(t, err) + require.False(t, advanced) + lastDeletionWatermark = ts } } @@ -887,9 +901,10 @@ func TestMaybeCompactSingleEpoch_Empty(t *testing.T) { ctx := testlogging.Context(t) // this should be a no-op - err := te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) + require.Nil(t, stats) } func TestMaybeCompactSingleEpoch_GetParametersError(t *testing.T) { @@ -901,10 +916,11 @@ func TestMaybeCompactSingleEpoch_GetParametersError(t *testing.T) { paramsError := errors.New("no parameters error") te.mgr.paramProvider = faultyParamsProvider{err: paramsError} - err := te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.Error(t, err) require.ErrorIs(t, err, paramsError) + require.Nil(t, stats) } func TestMaybeCompactSingleEpoch_CompactionError(t *testing.T) { @@ -918,7 +934,7 @@ func TestMaybeCompactSingleEpoch_CompactionError(t *testing.T) { idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. 
- for range 4 { + for j := range 4 { for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force @@ -929,7 +945,14 @@ func TestMaybeCompactSingleEpoch_CompactionError(t *testing.T) { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(i)) } - require.NoError(t, te.mgr.MaybeAdvanceWriteEpoch(ctx)) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) + require.NoError(t, err) + require.Equal(t, j+1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) + + err = te.mgr.Refresh(ctx) // force state refresh + + require.NoError(t, err) } compactionError := errors.New("test compaction error") @@ -937,10 +960,11 @@ func TestMaybeCompactSingleEpoch_CompactionError(t *testing.T) { return compactionError } - err = te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.Error(t, err) require.ErrorIs(t, err, compactionError) + require.Nil(t, stats) } func TestMaybeCompactSingleEpoch(t *testing.T) { @@ -974,8 +998,10 @@ func TestMaybeCompactSingleEpoch(t *testing.T) { te.verifyCurrentWriteEpoch(t, j) - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, j+1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) err = te.mgr.Refresh(ctx) // force state refresh @@ -994,10 +1020,12 @@ func TestMaybeCompactSingleEpoch(t *testing.T) { require.Empty(t, cs.SingleEpochCompactionSets) // perform single-epoch compaction for settled epochs - newestEpochToCompact := cs.WriteEpoch - numUnsettledEpochs + 1 + newestEpochToCompact := cs.lastSettledEpochNumber() + 1 for j := range newestEpochToCompact { - err = te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) + require.Equal(t, idxCount, stats.SupersededIndexBlobCount) + require.Equal(t, j, stats.Epoch) err = te.mgr.Refresh(ctx) // force state refresh require.NoError(t, err) @@ -1011,8 +1039,9 @@ 
func TestMaybeCompactSingleEpoch(t *testing.T) { require.Len(t, cs.SingleEpochCompactionSets, newestEpochToCompact) // no more epochs should be compacted at this point - err = te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) + require.Nil(t, stats) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1030,9 +1059,10 @@ func TestMaybeGenerateRangeCheckpoint_Empty(t *testing.T) { ctx := testlogging.Context(t) // this should be a no-op - err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.NoError(t, err) + require.Nil(t, stats) } func TestMaybeGenerateRangeCheckpoint_GetParametersError(t *testing.T) { @@ -1044,10 +1074,11 @@ func TestMaybeGenerateRangeCheckpoint_GetParametersError(t *testing.T) { paramsError := errors.New("no parameters error") te.mgr.paramProvider = faultyParamsProvider{err: paramsError} - err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.Error(t, err) require.ErrorIs(t, err, paramsError) + require.Nil(t, stats) } func TestMaybeGenerateRangeCheckpoint_FailToReadState(t *testing.T) { @@ -1060,9 +1091,10 @@ func TestMaybeGenerateRangeCheckpoint_FailToReadState(t *testing.T) { cancel() - err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.Error(t, err) + require.Nil(t, stats) } func TestMaybeGenerateRangeCheckpoint_CompactionError(t *testing.T) { @@ -1080,7 +1112,7 @@ func TestMaybeGenerateRangeCheckpoint_CompactionError(t *testing.T) { var k int // Create sufficient indexes blobs and move clock forward to advance epoch. 
- for range epochsToWrite { + for j := range epochsToWrite { for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force @@ -1093,8 +1125,10 @@ func TestMaybeGenerateRangeCheckpoint_CompactionError(t *testing.T) { k++ } - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, j+1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1110,10 +1144,11 @@ func TestMaybeGenerateRangeCheckpoint_CompactionError(t *testing.T) { return compactionError } - err = te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.Error(t, err) require.ErrorIs(t, err, compactionError) + require.Nil(t, stats) } func TestMaybeGenerateRangeCheckpoint_FromUncompactedEpochs(t *testing.T) { @@ -1130,7 +1165,7 @@ func TestMaybeGenerateRangeCheckpoint_FromUncompactedEpochs(t *testing.T) { epochsToWrite := p.FullCheckpointFrequency + 3 idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. 
- for range epochsToWrite { + for j := range epochsToWrite { for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force @@ -1141,8 +1176,10 @@ func TestMaybeGenerateRangeCheckpoint_FromUncompactedEpochs(t *testing.T) { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(k)) } - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, j+1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1154,8 +1191,10 @@ func TestMaybeGenerateRangeCheckpoint_FromUncompactedEpochs(t *testing.T) { require.Equal(t, epochsToWrite, cs.WriteEpoch) require.Empty(t, cs.LongestRangeCheckpointSets) - err = te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.NoError(t, err) + require.Equal(t, 0, stats.RangeMinEpoch) + require.Equal(t, 8, stats.RangeMaxEpoch) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1181,7 +1220,7 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { epochsToWrite := p.FullCheckpointFrequency + 3 idxCount := p.GetEpochAdvanceOnCountThreshold() // Create sufficient indexes blobs and move clock forward to advance epoch. 
- for range epochsToWrite { + for j := range epochsToWrite { for i := range idxCount { if i == idxCount-1 { // Advance the time so that the difference in times for writes will force @@ -1192,8 +1231,10 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { te.mustWriteIndexFiles(ctx, t, newFakeIndexWithEntries(k)) } - err = te.mgr.MaybeAdvanceWriteEpoch(ctx) + stats, err := te.mgr.MaybeAdvanceWriteEpoch(ctx) require.NoError(t, err) + require.Equal(t, j+1, stats.CurrentEpoch) + require.True(t, stats.WasAdvanced) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1205,10 +1246,12 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { require.Equal(t, epochsToWrite, cs.WriteEpoch) // perform single-epoch compaction for settled epochs - newestEpochToCompact := cs.WriteEpoch - numUnsettledEpochs + 1 + newestEpochToCompact := cs.lastSettledEpochNumber() + 1 for j := range newestEpochToCompact { - err = te.mgr.MaybeCompactSingleEpoch(ctx) + stats, err := te.mgr.MaybeCompactSingleEpoch(ctx) require.NoError(t, err) + require.Equal(t, idxCount, stats.SupersededIndexBlobCount) + require.Equal(t, j, stats.Epoch) err = te.mgr.Refresh(ctx) // force state refresh require.NoError(t, err) @@ -1225,8 +1268,10 @@ func TestMaybeGenerateRangeCheckpoint_FromCompactedEpochs(t *testing.T) { require.Equal(t, epochsToWrite, cs.WriteEpoch) require.Empty(t, cs.LongestRangeCheckpointSets) - err = te.mgr.MaybeGenerateRangeCheckpoint(ctx) + stats, err := te.mgr.MaybeGenerateRangeCheckpoint(ctx) require.NoError(t, err) + require.Equal(t, 0, stats.RangeMinEpoch) + require.Equal(t, 8, stats.RangeMaxEpoch) err = te.mgr.Refresh(ctx) require.NoError(t, err) @@ -1319,9 +1364,10 @@ func TestCleanupMarkers_Empty(t *testing.T) { ctx := testlogging.Context(t) // this should be a no-op - err := te.mgr.CleanupMarkers(ctx) + stats, err := te.mgr.CleanupMarkers(ctx) require.NoError(t, err) + require.Nil(t, stats) } func TestCleanupMarkers_GetParametersError(t 
*testing.T) { @@ -1333,10 +1379,11 @@ func TestCleanupMarkers_GetParametersError(t *testing.T) { paramsError := errors.New("no parameters error") te.mgr.paramProvider = faultyParamsProvider{err: paramsError} - err := te.mgr.CleanupMarkers(ctx) + stats, err := te.mgr.CleanupMarkers(ctx) require.Error(t, err) require.ErrorIs(t, err, paramsError) + require.Nil(t, stats) } func TestCleanupMarkers_FailToReadState(t *testing.T) { @@ -1349,9 +1396,10 @@ func TestCleanupMarkers_FailToReadState(t *testing.T) { cancel() - err := te.mgr.CleanupMarkers(ctx) + stats, err := te.mgr.CleanupMarkers(ctx) require.Error(t, err) + require.Nil(t, stats) } func TestCleanupMarkers_AvoidCleaningUpSingleEpochMarker(t *testing.T) { @@ -1369,8 +1417,10 @@ func TestCleanupMarkers_AvoidCleaningUpSingleEpochMarker(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, cs.WriteEpoch) - err = te.mgr.CleanupMarkers(ctx) + stats, err := te.mgr.CleanupMarkers(ctx) + require.NoError(t, err) + require.Nil(t, stats) require.NoError(t, te.mgr.Refresh(ctx)) @@ -1408,8 +1458,12 @@ func TestCleanupMarkers_CleanUpManyMarkers(t *testing.T) { require.NoError(t, err) require.Len(t, cs.EpochMarkerBlobs, epochsToAdvance) - err = te.mgr.CleanupMarkers(ctx) + stats, err := te.mgr.CleanupMarkers(ctx) require.NoError(t, err) + require.Equal(t, &maintenancestats.CleanupMarkersStats{ + DeletedEpochMarkerBlobCount: 3, + DeletedWatermarkBlobCount: 0, + }, stats) // is the epoch marker preserved? require.NoError(t, te.mgr.Refresh(ctx)) diff --git a/internal/feature/feature.go b/internal/feature/feature.go index e1788520570..35e09dff63b 100644 --- a/internal/feature/feature.go +++ b/internal/feature/feature.go @@ -4,6 +4,7 @@ package feature import ( "fmt" + "slices" ) // IfNotUnderstood describes the behavior of Kopia when a required feature is not understood. 
@@ -58,11 +59,5 @@ func GetUnsupportedFeatures(required []Required, supported []Feature) []Required } func isSupported(req Required, supported []Feature) bool { - for _, s := range supported { - if req.Feature == s { - return true - } - } - - return false + return slices.Contains(supported, req.Feature) } diff --git a/internal/fshasher/fshasher.go b/internal/fshasher/fshasher.go index 45b7b58bd49..937ebf7fc8a 100644 --- a/internal/fshasher/fshasher.go +++ b/internal/fshasher/fshasher.go @@ -41,7 +41,6 @@ func Hash(ctx context.Context, e fs.Entry) ([]byte, error) { return h.Sum(nil), nil } -//nolint:interfacer func write(ctx context.Context, tw *tar.Writer, fullpath string, e fs.Entry) error { h, err := header(ctx, fullpath, e) if err != nil { diff --git a/internal/fusemount/fusefs.go b/internal/fusemount/fusefs.go index 3c39af253c9..44cf0b4b879 100644 --- a/internal/fusemount/fusefs.go +++ b/internal/fusemount/fusefs.go @@ -1,5 +1,4 @@ //go:build !windows && !openbsd && !freebsd -// +build !windows,!openbsd,!freebsd // Package fusemount implements FUSE filesystem nodes for mounting contents of filesystem stored in repository. 
// @@ -7,6 +6,7 @@ package fusemount import ( + "context" "io" "os" "sync" @@ -15,7 +15,6 @@ import ( gofusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" "github.com/pkg/errors" - "golang.org/x/net/context" "github.com/kopia/kopia/fs" "github.com/kopia/kopia/repo/logging" diff --git a/internal/gather/gather_bytes.go b/internal/gather/gather_bytes.go index 8fb34974fc5..8907fc109d8 100644 --- a/internal/gather/gather_bytes.go +++ b/internal/gather/gather_bytes.go @@ -85,10 +85,7 @@ func (b *Bytes) AppendSectionTo(w io.Writer, offset, size int) error { s := b.Slices[sliceNdx] // l is how many bytes we consume out of the current slice - l := size - if l > len(s) { - l = len(s) - } + l := min(size, len(s)) if _, err := w.Write(s[0:l]); err != nil { return errors.Wrap(err, "error appending") diff --git a/internal/gather/gather_write_buffer.go b/internal/gather/gather_write_buffer.go index 6e1f0b4e92d..ea5e9b70d53 100644 --- a/internal/gather/gather_write_buffer.go +++ b/internal/gather/gather_write_buffer.go @@ -140,10 +140,7 @@ func (b *WriteBuffer) Append(data []byte) { remaining = cap(b.inner.Slices[ndx]) - len(b.inner.Slices[ndx]) } - chunkSize := remaining - if chunkSize > len(data) { - chunkSize = len(data) - } + chunkSize := min(remaining, len(data)) b.inner.Slices[ndx] = append(b.inner.Slices[ndx], data[0:chunkSize]...) 
data = data[chunkSize:] @@ -164,6 +161,7 @@ func (b *WriteBuffer) Dup() *WriteBuffer { b.mu.Lock() defer b.mu.Unlock() + dup.alloc = b.alloc dup.inner = FromSlice(b.inner.ToByteSlice()) diff --git a/internal/hmac/hmac.go b/internal/hmac/hmac.go index 6bfd022db53..087c1fc05bd 100644 --- a/internal/hmac/hmac.go +++ b/internal/hmac/hmac.go @@ -39,6 +39,7 @@ func VerifyAndStrip(input gather.Bytes, secret []byte, output *gather.WriteBuffe } var sigBuf, actualSignature [sha256.Size]byte + validSignature := h.Sum(sigBuf[:0]) n, err := r.Read(actualSignature[:]) diff --git a/internal/indextest/indextest.go b/internal/indextest/indextest.go index a405db962c2..29122d2e0ec 100644 --- a/internal/indextest/indextest.go +++ b/internal/indextest/indextest.go @@ -61,7 +61,7 @@ func InfoDiff(i1, i2 index.Info, ignore ...string) []string { // dear future reader, if this fails because the number of methods has changed, // you need to add additional verification above. - if cnt := reflect.TypeOf(index.Info{}).NumMethod(); cnt != 1 { + if cnt := reflect.TypeFor[index.Info]().NumMethod(); cnt != 1 { diffs = append(diffs, fmt.Sprintf("unexpected number of methods on content.Info: %v, must update the test", cnt)) } diff --git a/internal/iocopy/iocopy_test.go b/internal/iocopy/iocopy_test.go index c880e98af79..5d13fc85a2c 100644 --- a/internal/iocopy/iocopy_test.go +++ b/internal/iocopy/iocopy_test.go @@ -31,6 +31,7 @@ func TestGetBuffer(t *testing.T) { func TestReleaseBuffer(t *testing.T) { buf := iocopy.GetBuffer() iocopy.ReleaseBuffer(buf) + buf2 := iocopy.GetBuffer() require.Equal(t, &buf[0], &buf2[0], "Buffer was not recycled after ReleaseBuffer") } diff --git a/internal/listcache/listcache.go b/internal/listcache/listcache.go index 8dea982a1d8..8cef5cfdb70 100644 --- a/internal/listcache/listcache.go +++ b/internal/listcache/listcache.go @@ -5,6 +5,7 @@ package listcache import ( "context" "encoding/json" + "slices" "strings" "time" @@ -140,13 +141,7 @@ func (s *listCacheStorage) 
DeleteBlob(ctx context.Context, blobID blob.ID) error } func (s *listCacheStorage) isCachedPrefix(prefix blob.ID) bool { - for _, p := range s.prefixes { - if prefix == p { - return true - } - } - - return false + return slices.Contains(s.prefixes, prefix) } func (s *listCacheStorage) invalidateAfterUpdate(ctx context.Context, blobID blob.ID) { diff --git a/internal/logfile/logfile.go b/internal/logfile/logfile.go index 6f3cedd5db8..51f310e1b8a 100644 --- a/internal/logfile/logfile.go +++ b/internal/logfile/logfile.go @@ -4,6 +4,7 @@ package logfile import ( "context" "fmt" + "io" "math" "os" "path/filepath" @@ -23,7 +24,6 @@ import ( "github.com/kopia/kopia/internal/clock" "github.com/kopia/kopia/internal/ospath" "github.com/kopia/kopia/internal/zaplogutil" - "github.com/kopia/kopia/repo/content" "github.com/kopia/kopia/repo/logging" ) @@ -53,6 +53,7 @@ type loggingFlags struct { consoleLogTimestamps bool waitForLogSweep bool disableFileLogging bool + disableContentLogs bool cliApp *cli.App } @@ -61,6 +62,7 @@ func (c *loggingFlags) setup(cliApp *cli.App, app *kingpin.Application) { app.Flag("log-file", "Override log file.").StringVar(&c.logFile) app.Flag("content-log-file", "Override content log file.").Hidden().StringVar(&c.contentLogFile) app.Flag("disable-file-logging", "Disable file-based logging.").BoolVar(&c.disableFileLogging) + app.Flag("disable-content-log", "Disable creation of content logs.").BoolVar(&c.disableContentLogs) app.Flag("log-dir", "Directory where log files should be written.").Envar(cliApp.EnvName("KOPIA_LOG_DIR")).Default(ospath.LogsDir()).StringVar(&c.logDir) app.Flag("log-dir-max-files", "Maximum number of log files to retain").Envar(cliApp.EnvName("KOPIA_LOG_DIR_MAX_FILES")).Default("1000").Hidden().IntVar(&c.logDirMaxFiles) @@ -75,7 +77,7 @@ func (c *loggingFlags) setup(cliApp *cli.App, app *kingpin.Application) { app.Flag("json-log-console", "JSON log file").Hidden().BoolVar(&c.jsonLogConsole) app.Flag("json-log-file", "JSON log 
file").Hidden().BoolVar(&c.jsonLogFile) app.Flag("file-log-level", "File log level").Default("debug").EnumVar(&c.fileLogLevel, logLevels...) - app.Flag("file-log-local-tz", "When logging to a file, use local timezone").Hidden().Envar(cliApp.EnvName("KOPIA_FILE_LOG_LOCAL_TZ")).BoolVar(&c.fileLogLocalTimezone) + app.Flag("file-log-local-tz", "When logging to a file, use local timezone").Default("false").Hidden().Envar(cliApp.EnvName("KOPIA_FILE_LOG_LOCAL_TZ")).BoolVar(&c.fileLogLocalTimezone) app.Flag("force-color", "Force color output").Hidden().Envar(cliApp.EnvName("KOPIA_FORCE_COLOR")).BoolVar(&c.forceColor) app.Flag("disable-color", "Disable color output").Hidden().Envar(cliApp.EnvName("KOPIA_DISABLE_COLOR")).BoolVar(&c.disableColor) app.Flag("console-timestamps", "Log timestamps to stderr.").Hidden().Default("false").Envar(cliApp.EnvName("KOPIA_CONSOLE_TIMESTAMPS")).BoolVar(&c.consoleLogTimestamps) @@ -122,22 +124,15 @@ func (c *loggingFlags) initialize(ctx *kingpin.ParseContext) error { rootLogger := zap.New(zapcore.NewTee(rootCores...), zap.WithClock(zaplogutil.Clock())) - var contentCore zapcore.Core - if c.disableFileLogging { - contentCore = c.setupConsoleCore() - } else { - contentCore = c.setupContentLogFileBackend(now, suffix) - } + var contentLogWriter io.Writer - contentLogger := zap.New(contentCore, zap.WithClock(zaplogutil.Clock())).Sugar() + if !c.disableFileLogging && !c.disableContentLogs { + contentLogWriter = c.setupLogFileBasedLogger(now, "content-logs", suffix, c.contentLogFile, c.contentLogDirMaxFiles, c.contentLogDirMaxTotalSizeMB, c.contentLogDirMaxAge) + } c.cliApp.SetLoggerFactory(func(module string) logging.Logger { - if module == content.FormatLogModule { - return contentLogger - } - return rootLogger.Named(module).Sugar() - }) + }, contentLogWriter) if c.forceColor { color.NoColor = false @@ -152,7 +147,6 @@ func (c *loggingFlags) initialize(ctx *kingpin.ParseContext) error { func (c *loggingFlags) setupConsoleCore() zapcore.Core { ec 
:= zapcore.EncoderConfig{ - LevelKey: "l", MessageKey: "m", LineEnding: zapcore.DefaultLineEnding, EncodeTime: zapcore.RFC3339NanoTimeEncoder, @@ -185,10 +179,11 @@ func (c *loggingFlags) setupConsoleCore() zapcore.Core { if c.jsonLogConsole { ec.EncodeLevel = zapcore.CapitalLevelEncoder + stec.EmitLogLevel = false ec.NameKey = "n" ec.EncodeName = zapcore.FullNameEncoder } else { - stec.EmitLogLevel = true + stec.EmitLogLevel = false stec.DoNotEmitInfoLevel = true stec.ColoredLogLevel = !c.disableColor } @@ -244,15 +239,7 @@ func (c *loggingFlags) setupLogFileBasedLogger(now time.Time, subdir, suffix, lo logFileBaseName: logFileBaseName, symlinkName: symlinkName, maxSegmentSize: c.logFileMaxSegmentSize, - startSweep: func() { - sweepLogWG.Add(1) - - go func() { - defer sweepLogWG.Done() - - doSweep() - }() - }, + startSweep: func() { sweepLogWG.Go(doSweep) }, } if c.waitForLogSweep { @@ -302,17 +289,6 @@ func (c *loggingFlags) jsonOrConsoleEncoder(ec zaplogutil.StdConsoleEncoderConfi return zaplogutil.NewStdConsoleEncoder(ec) } -func (c *loggingFlags) setupContentLogFileBackend(now time.Time, suffix string) zapcore.Core { - return zapcore.NewCore( - zaplogutil.NewStdConsoleEncoder(zaplogutil.StdConsoleEncoderConfig{ - TimeLayout: zaplogutil.PreciseLayout, - LocalTime: false, - }, - ), - c.setupLogFileBasedLogger(now, "content-logs", suffix, c.contentLogFile, c.contentLogDirMaxFiles, c.contentLogDirMaxTotalSizeMB, c.contentLogDirMaxAge), - zap.DebugLevel) -} - func shouldSweepLog(maxFiles int, maxAge time.Duration) bool { return maxFiles > 0 || maxAge > 0 } diff --git a/internal/logfile/logfile_test.go b/internal/logfile/logfile_test.go index f4633dcc0ff..08d7b3a725b 100644 --- a/internal/logfile/logfile_test.go +++ b/internal/logfile/logfile_test.go @@ -2,6 +2,7 @@ package logfile_test import ( "bufio" + "encoding/json" "fmt" "os" "path/filepath" @@ -22,7 +23,6 @@ import ( var ( cliLogFormat = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}Z 
(DEBUG|INFO|WARN) [a-z/]+ .*$`) - contentLogFormat = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}Z .*$`) cliLogFormatLocalTimezone = regexp.MustCompile(`^\d{4}-\d\d\-\d\dT\d\d:\d\d:\d\d\.\d{6}[^Z][^ ]+ (DEBUG|INFO|WARN) [a-z/]+ .*$`) ) @@ -56,7 +56,7 @@ func TestLoggingFlags(t *testing.T) { } verifyFileLogFormat(t, filepath.Join(tmpLogDir, "cli-logs", "latest.log"), cliLogFormat) - verifyFileLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"), contentLogFormat) + verifyJSONLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log")) _, stderr, err = env.Run(t, false, "snap", "create", dir1, "--file-log-local-tz", "--no-progress", "--log-level=debug", "--disable-color", @@ -69,7 +69,7 @@ func TestLoggingFlags(t *testing.T) { verifyFileLogFormat(t, filepath.Join(tmpLogDir, "cli-logs", "latest.log"), cliLogFormatLocalTimezone) } - verifyFileLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log"), contentLogFormat) + verifyJSONLogFormat(t, filepath.Join(tmpLogDir, "content-logs", "latest.log")) for _, l := range stderr { require.NotContains(t, l, "INFO") // INFO is omitted @@ -256,3 +256,18 @@ func getTotalDirSize(t *testing.T, dir string) int { return totalSize } + +func verifyJSONLogFormat(t *testing.T, fname string) { + t.Helper() + + f, err := os.Open(fname) + require.NoError(t, err) + + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + require.True(t, json.Valid(s.Bytes()), "log line is not valid JSON: %q", s.Text()) + } +} diff --git a/internal/metricid/id_mapping.go b/internal/metricid/id_mapping.go index 6d8f9f6f1d3..36019d72b84 100644 --- a/internal/metricid/id_mapping.go +++ b/internal/metricid/id_mapping.go @@ -1,4 +1,3 @@ -// Package metricid provides mapping between metric names and persistent IDs. 
package metricid // Mapping contains translation of names to indexes and vice versa, which allows maps diff --git a/internal/metrics/metrics_counter_test.go b/internal/metrics/metrics_counter_test.go index 3f1b2f55077..df69d928be0 100644 --- a/internal/metrics/metrics_counter_test.go +++ b/internal/metrics/metrics_counter_test.go @@ -11,6 +11,7 @@ import ( func TestCounter_Nil(t *testing.T) { var e *metrics.Registry + cnt := e.CounterInt64("aaa", "bbb", nil) require.Nil(t, cnt) cnt.Add(33) diff --git a/internal/metrics/metrics_duration_distribution_test.go b/internal/metrics/metrics_duration_distribution_test.go index 5d7e687b64b..85f47f240d7 100644 --- a/internal/metrics/metrics_duration_distribution_test.go +++ b/internal/metrics/metrics_duration_distribution_test.go @@ -12,6 +12,7 @@ import ( func TestDurationDistribution_Nil(t *testing.T) { var e *metrics.Registry + dist := e.DurationDistribution("aaa", "bbb", metrics.IOLatencyThresholds, nil) require.Nil(t, dist) dist.Observe(time.Second) @@ -20,6 +21,7 @@ func TestDurationDistribution_Nil(t *testing.T) { func TestSizeDistribution_Nil(t *testing.T) { var e *metrics.Registry + cnt := e.SizeDistribution("aaa", "bbb", metrics.ISOBytesThresholds, nil) require.Nil(t, cnt) cnt.Observe(333) diff --git a/internal/metrics/metrics_timeseries.go b/internal/metrics/metrics_timeseries.go index 5dbf47a7e02..8b5be7608bf 100644 --- a/internal/metrics/metrics_timeseries.go +++ b/internal/metrics/metrics_timeseries.go @@ -47,7 +47,7 @@ type AggregateMetricsOptions struct { // SnapshotValueAggregator extracts and aggregates counter or distribution values from snapshots. 
type SnapshotValueAggregator[T any] interface { FromSnapshot(s *Snapshot) (T, bool) - Aggregate(previousAggregate T, incoming T, ratio float64) T + Aggregate(previousAggregate, incoming T, ratio float64) T } // CreateTimeSeries computes time series which represent aggregations of a given diff --git a/internal/mount/mount_fuse.go b/internal/mount/mount_fuse.go index 1d811fb22c5..97c6d596547 100644 --- a/internal/mount/mount_fuse.go +++ b/internal/mount/mount_fuse.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd && !openbsd -// +build !windows,!freebsd,!openbsd package mount diff --git a/internal/mount/mount_net_use.go b/internal/mount/mount_net_use.go index 83374b58a45..b5987d94b8f 100644 --- a/internal/mount/mount_net_use.go +++ b/internal/mount/mount_net_use.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package mount diff --git a/internal/mount/mount_posix_webdav_helper.go b/internal/mount/mount_posix_webdav_helper.go index 43cd899b639..04732a2ce26 100644 --- a/internal/mount/mount_posix_webdav_helper.go +++ b/internal/mount/mount_posix_webdav_helper.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd && !openbsd -// +build !windows,!freebsd,!openbsd package mount @@ -37,7 +36,7 @@ func newPosixWedavController(ctx context.Context, entry fs.Directory, mountPoint } func (c posixWedavController) Unmount(ctx context.Context) error { - if err := unmountWebDevHelper(ctx, c.mountPoint); err != nil { + if err := unmountWebDavHelper(ctx, c.mountPoint); err != nil { return err } diff --git a/internal/mount/mount_posix_webdav_helper_darwin.go b/internal/mount/mount_posix_webdav_helper_darwin.go index a7c2a7b013f..d6b673f4165 100644 --- a/internal/mount/mount_posix_webdav_helper_darwin.go +++ b/internal/mount/mount_posix_webdav_helper_darwin.go @@ -7,8 +7,8 @@ import ( "github.com/pkg/errors" ) -func mountWebDavHelper(_ context.Context, url, path string) error { - mount := exec.Command("/sbin/mount", "-t", "webdav", "-r", url, path) +func mountWebDavHelper(ctx 
context.Context, url, path string) error { + mount := exec.CommandContext(ctx, "/sbin/mount", "-t", "webdav", "-r", url, path) if err := mount.Run(); err != nil { return errors.Errorf("webdav mount %q on %q failed: %v", url, path, err) } @@ -16,8 +16,8 @@ func mountWebDavHelper(_ context.Context, url, path string) error { return nil } -func unmountWebDevHelper(_ context.Context, path string) error { - unmount := exec.Command("/usr/sbin/diskutil", "unmount", path) +func unmountWebDavHelper(ctx context.Context, path string) error { + unmount := exec.CommandContext(ctx, "/usr/sbin/diskutil", "unmount", path) if err := unmount.Run(); err != nil { return errors.Errorf("unmount %q failed: %v", path, err) } diff --git a/internal/mount/mount_posix_webdav_helper_linux.go b/internal/mount/mount_posix_webdav_helper_linux.go index 8d68560dc04..b17a4cf8e48 100644 --- a/internal/mount/mount_posix_webdav_helper_linux.go +++ b/internal/mount/mount_posix_webdav_helper_linux.go @@ -6,7 +6,7 @@ import ( ) func mountWebDavHelper(ctx context.Context, url, path string) error { - mount := exec.Command("/usr/bin/mount", "-t", "davfs", "-r", url, path) + mount := exec.CommandContext(ctx, "/usr/bin/mount", "-t", "davfs", "-r", url, path) if err := mount.Run(); err != nil { log(ctx).Errorf("mount command failed: %v. Cowardly refusing to run with root permissions. Try \"sudo /usr/bin/mount -t davfs -r %s %s\"\n", err, url, path) } @@ -14,8 +14,8 @@ func mountWebDavHelper(ctx context.Context, url, path string) error { return nil } -func unmountWebDevHelper(ctx context.Context, path string) error { - unmount := exec.Command("/usr/bin/umount", path) +func unmountWebDavHelper(ctx context.Context, path string) error { + unmount := exec.CommandContext(ctx, "/usr/bin/umount", path) if err := unmount.Run(); err != nil { log(ctx).Errorf("umount command failed: %v. Cowardly refusing to run with root permissions. 
Try \"sudo /usr/bin/umount %s\"\n", err, path) } diff --git a/internal/mount/mount_unsupported.go b/internal/mount/mount_unsupported.go index cc4fb24f9e7..9258c20684d 100644 --- a/internal/mount/mount_unsupported.go +++ b/internal/mount/mount_unsupported.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd -// +build freebsd openbsd package mount diff --git a/internal/mount/mount_webdav.go b/internal/mount/mount_webdav.go index 9e9f7efaa5f..10179bb7e33 100644 --- a/internal/mount/mount_webdav.go +++ b/internal/mount/mount_webdav.go @@ -46,7 +46,7 @@ func DirectoryWebDAV(ctx context.Context, entry fs.Directory) (Controller, error Logger: logger, }) - l, err := net.Listen("tcp", "127.0.0.1:0") + l, err := (&net.ListenConfig{}).Listen(ctx, "tcp", "127.0.0.1:0") if err != nil { return nil, errors.Wrap(err, "listen error") } diff --git a/internal/osexec/osexec_unix.go b/internal/osexec/osexec_unix.go index 5446b9f1a49..9f74debc29e 100644 --- a/internal/osexec/osexec_unix.go +++ b/internal/osexec/osexec_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package osexec diff --git a/internal/ospath/ospath_xdg.go b/internal/ospath/ospath_xdg.go index 875b496364b..3f7e81121e7 100644 --- a/internal/ospath/ospath_xdg.go +++ b/internal/ospath/ospath_xdg.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin && !openbsd -// +build !windows,!darwin,!openbsd package ospath diff --git a/internal/parallelwork/parallel_work_queue.go b/internal/parallelwork/parallel_work_queue.go index da5d048782d..3a220e20be7 100644 --- a/internal/parallelwork/parallel_work_queue.go +++ b/internal/parallelwork/parallel_work_queue.go @@ -151,8 +151,10 @@ func OnNthCompletion(n int, callback CallbackFunc) CallbackFunc { return func() error { mu.Lock() + n-- call := n == 0 + mu.Unlock() if call { diff --git a/internal/parallelwork/parallel_work_queue_test.go b/internal/parallelwork/parallel_work_queue_test.go index 7ef32f7cddd..64d485db89d 100644 --- 
a/internal/parallelwork/parallel_work_queue_test.go +++ b/internal/parallelwork/parallel_work_queue_test.go @@ -110,6 +110,7 @@ func TestWaitForActiveWorkers(t *testing.T) { results <- 2 return nil }) + results <- 1 return nil diff --git a/internal/passwordpersist/passwordpersist_multiple.go b/internal/passwordpersist/passwordpersist_multiple.go index 575a7406c23..49448020ef4 100644 --- a/internal/passwordpersist/passwordpersist_multiple.go +++ b/internal/passwordpersist/passwordpersist_multiple.go @@ -34,7 +34,6 @@ func (m Multiple) GetPassword(ctx context.Context, configFile string) (string, e func (m Multiple) PersistPassword(ctx context.Context, configFile, password string) error { for _, s := range m { err := s.PersistPassword(ctx, configFile, password) - if err == nil { return nil } diff --git a/internal/pproflogging/pproflogging.go b/internal/pproflogging/pproflogging.go deleted file mode 100644 index 182fc0168d0..00000000000 --- a/internal/pproflogging/pproflogging.go +++ /dev/null @@ -1,445 +0,0 @@ -// Package pproflogging for pproflogging helper functions. -package pproflogging - -import ( - "bufio" - "bytes" - "context" - "encoding/pem" - "errors" - "fmt" - "io" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "sync" - "time" - - "github.com/kopia/kopia/repo/logging" -) - -var log = logging.Module("kopia/pproflogging") - -// ProfileName the name of the profile (see: runtime/pprof/Lookup). -type ProfileName string - -const ( - pair = 2 - // PPROFDumpTimeout when dumping PPROF data, set an upper bound on the time it can take to log. - PPROFDumpTimeout = 15 * time.Second -) - -const ( - // DefaultDebugProfileRate default sample/data fraction for profile sample collection rates (1/x, where x is the - // data fraction sample rate). - DefaultDebugProfileRate = 100 - // DefaultDebugProfileDumpBufferSizeB default size of the pprof output buffer. 
- DefaultDebugProfileDumpBufferSizeB = 1 << 17 -) - -const ( - // EnvVarKopiaDebugPprof environment variable that contains the pprof dump configuration. - EnvVarKopiaDebugPprof = "KOPIA_PPROF_LOGGING_CONFIG" -) - -// flags used to configure profiling in EnvVarKopiaDebugPprof. -const ( - // KopiaDebugFlagForceGc force garbage collection before dumping heap data. - KopiaDebugFlagForceGc = "forcegc" - // KopiaDebugFlagDebug value of the profiles `debug` parameter. - KopiaDebugFlagDebug = "debug" - // KopiaDebugFlagRate rate setting for the named profile (if available). always an integer. - KopiaDebugFlagRate = "rate" -) - -const ( - // ProfileNameBlock block profile key. - ProfileNameBlock ProfileName = "block" - // ProfileNameMutex mutex profile key. - ProfileNameMutex = "mutex" - // ProfileNameCPU cpu profile key. - ProfileNameCPU = "cpu" -) - -var ( - // ErrEmptyProfileName returned when a profile configuration flag has no argument. - ErrEmptyProfileName = errors.New("empty profile flag") - - //nolint:gochecknoglobals - pprofConfigs = newProfileConfigs(os.Stderr) -) - -// Writer interface supports destination for PEM output. -type Writer interface { - io.Writer - io.StringWriter -} - -// ProfileConfigs configuration flags for all requested profiles. -type ProfileConfigs struct { - mu sync.Mutex - // wrt represents the final destination for the PPROF PEM output. Typically, - // this is attached to stderr or log output. A custom writer is used because - // not all loggers support line oriented output through the io.Writer interface... - // support is often attached th a io.StringWriter. 
- // +checklocks:mu - wrt Writer - // +checklocks:mu - pcm map[ProfileName]*ProfileConfig -} - -type pprofSetRate struct { - setter func(int) - defaultValue int -} - -//nolint:gochecknoglobals -var pprofProfileRates = map[ProfileName]pprofSetRate{ - ProfileNameBlock: { - setter: func(x int) { runtime.SetBlockProfileRate(x) }, - defaultValue: DefaultDebugProfileRate, - }, - ProfileNameMutex: { - setter: func(x int) { runtime.SetMutexProfileFraction(x) }, - defaultValue: DefaultDebugProfileRate, - }, -} - -func newProfileConfigs(wrt Writer) *ProfileConfigs { - q := &ProfileConfigs{ - wrt: wrt, - } - - return q -} - -// LoadProfileConfig configure PPROF profiling from the config in ppconfigss. -func LoadProfileConfig(ctx context.Context, ppconfigss string) (map[ProfileName]*ProfileConfig, error) { - // if empty, then don't bother configuring but emit a log message - user might be expecting them to be configured - if ppconfigss == "" { - log(ctx).Debug("no profile configuration. skipping PPROF setup") - return nil, nil - } - - bufSizeB := DefaultDebugProfileDumpBufferSizeB - - // look for matching services. "*" signals all services for profiling - log(ctx).Info("configuring profile buffers") - - // acquire global lock when performing operations with global side-effects - return parseProfileConfigs(bufSizeB, ppconfigss) -} - -// ProfileConfig configuration flags for a profile. -type ProfileConfig struct { - flags []string - buf *bytes.Buffer -} - -// GetValue get the value of the named flag, `s`. False will be returned -// if the flag does not exist. True will be returned if flag exists without -// a value. 
-func (p ProfileConfig) GetValue(s string) (string, bool) { - for _, f := range p.flags { - kvs := strings.SplitN(f, "=", pair) - if kvs[0] != s { - continue - } - - if len(kvs) == 1 { - return "", true - } - - return kvs[1], true - } - - return "", false -} - -func parseProfileConfigs(bufSizeB int, ppconfigs string) (map[ProfileName]*ProfileConfig, error) { - pbs := map[ProfileName]*ProfileConfig{} - allProfileOptions := strings.Split(ppconfigs, ":") - - for _, profileOptionWithFlags := range allProfileOptions { - // of those, see if any have profile specific settings - profileFlagNameValuePairs := strings.SplitN(profileOptionWithFlags, "=", pair) - flagValue := "" - - if len(profileFlagNameValuePairs) > 1 { - // only = allowed - flagValue = profileFlagNameValuePairs[1] - } - - flagKey := ProfileName(profileFlagNameValuePairs[0]) - if flagKey == "" { - return nil, ErrEmptyProfileName - } - - pbs[flagKey] = newProfileConfig(bufSizeB, flagValue) - } - - return pbs, nil -} - -// newProfileConfig create a new profiling configuration. 
-func newProfileConfig(bufSizeB int, ppconfig string) *ProfileConfig { - q := &ProfileConfig{ - buf: bytes.NewBuffer(make([]byte, 0, bufSizeB)), - } - - flgs := strings.Split(ppconfig, ",") - if len(flgs) > 0 && flgs[0] != "" { // len(flgs) > 1 && flgs[0] == "" should never happen - q.flags = flgs - } - - return q -} - -func setupProfileFractions(ctx context.Context, profileBuffers map[ProfileName]*ProfileConfig) { - for k, pprofset := range pprofProfileRates { - v, ok := profileBuffers[k] - if !ok { - // profile not configured - leave it alone - continue - } - - if v == nil { - // profile configured, but no rate - set to default - pprofset.setter(pprofset.defaultValue) - continue - } - - s, _ := v.GetValue(KopiaDebugFlagRate) - if s == "" { - // flag without an argument - set to default - pprofset.setter(pprofset.defaultValue) - continue - } - - n1, err := strconv.Atoi(s) - if err != nil { - log(ctx).With("cause", err).Warnf("invalid PPROF rate, %q, for '%s'", s, k) - continue - } - - log(ctx).Debugf("setting PPROF rate, %d, for %s", n1, k) - pprofset.setter(n1) - } -} - -// clearProfileFractions set the profile fractions to their zero values. -func clearProfileFractions(profileBuffers map[ProfileName]*ProfileConfig) { - for k, pprofset := range pprofProfileRates { - v := profileBuffers[k] - if v == nil { // fold missing values and empty values - continue - } - - _, ok := v.GetValue(KopiaDebugFlagRate) - if !ok { // only care if a value might have been set before - continue - } - - pprofset.setter(0) - } -} - -// StartProfileBuffers start profile buffers for enabled profiles/trace. Buffers -// are returned in an slice of buffers: CPU, Heap and trace respectively. class is used to distinguish profiles -// external to kopia. 
-func StartProfileBuffers(ctx context.Context) { - ppconfigs := os.Getenv(EnvVarKopiaDebugPprof) - // if empty, then don't bother configuring but emit a log message - use might be expecting them to be configured - if ppconfigs == "" { - log(ctx).Warn("no profile buffers enabled") - return - } - - bufSizeB := DefaultDebugProfileDumpBufferSizeB - - // look for matching services. "*" signals all services for profiling - log(ctx).Debug("configuring profile buffers") - - // acquire global lock when performing operations with global side-effects - pprofConfigs.mu.Lock() - defer pprofConfigs.mu.Unlock() - - var err error - - pprofConfigs.pcm, err = parseProfileConfigs(bufSizeB, ppconfigs) - if err != nil { - log(ctx).With("cause", err).Warnf("cannot start PPROF config, %q, due to parse error", ppconfigs) - return - } - - // profiling rates need to be set before starting profiling - setupProfileFractions(ctx, pprofConfigs.pcm) - - // cpu has special initialization - v, ok := pprofConfigs.pcm[ProfileNameCPU] - if ok { - err := pprof.StartCPUProfile(v.buf) - if err != nil { - log(ctx).With("cause", err).Warn("cannot start cpu PPROF") - delete(pprofConfigs.pcm, ProfileNameCPU) - } - } -} - -// DumpPem dump a PEM version of the byte slice, bs, into writer, wrt. -func DumpPem(bs []byte, types string, wrt *os.File) error { - // err0 for background process - var err0 error - - blk := &pem.Block{ - Type: types, - Bytes: bs, - } - // wrt is likely a line oriented writer, so writing individual lines - // will make best use of output buffer and help prevent overflows or - // stalls in the output path. - pr, pw := io.Pipe() - - // ensure read-end of the pipe is close - //nolint:errcheck - defer pr.Close() - - // encode PEM in the background and output in a line oriented - // fashion - this prevents the need for a large buffer to hold - // the encoded PEM. 
- go func() { - // writer close on exit of background process - // pipe writer will not return a meaningful error - //nolint:errcheck - defer pw.Close() - - // do the encoding - err0 = pem.Encode(pw, blk) - }() - - // connect rdr to pipe reader - rdr := bufio.NewReader(pr) - - // err1 for reading - // err2 for writing - var err1, err2 error - for err1 == nil && err2 == nil { - var ln []byte - ln, err1 = rdr.ReadBytes('\n') - // err1 can return ln and non-nil err1, so always call write - _, err2 = wrt.Write(ln) - } - - // got a write error. this has precedent - if err2 != nil { - return fmt.Errorf("could not write PEM: %w", err2) - } - - if err0 != nil { - return fmt.Errorf("could not write PEM: %w", err0) - } - - if err1 == nil { - return nil - } - - // if file does not end in newline, then output one - if errors.Is(err1, io.EOF) { - _, err2 = wrt.WriteString("\n") - if err2 != nil { - return fmt.Errorf("could not write PEM: %w", err2) - } - - return io.EOF - } - - return fmt.Errorf("error reading bytes: %w", err1) -} - -func parseDebugNumber(v *ProfileConfig) (int, error) { - debugs, ok := v.GetValue(KopiaDebugFlagDebug) - if !ok { - return 0, nil - } - - debug, err := strconv.Atoi(debugs) - if err != nil { - return 0, fmt.Errorf("could not parse number %q: %w", debugs, err) - } - - return debug, nil -} - -// StopProfileBuffers stop and dump the contents of the buffers to the log as PEMs. Buffers -// supplied here are from StartProfileBuffers. 
-func StopProfileBuffers(ctx context.Context) { - pprofConfigs.mu.Lock() - defer pprofConfigs.mu.Unlock() - - if pprofConfigs == nil { - log(ctx).Debug("profile buffers not configured") - return - } - - log(ctx).Debug("saving PEM buffers for output") - // cpu and heap profiles requires special handling - for k, v := range pprofConfigs.pcm { - log(ctx).Debugf("stopping PPROF profile %q", k) - - if v == nil { - continue - } - - if k == ProfileNameCPU { - pprof.StopCPUProfile() - continue - } - - _, ok := v.GetValue(KopiaDebugFlagForceGc) - if ok { - log(ctx).Debug("performing GC before PPROF dump ...") - runtime.GC() - } - - debug, err := parseDebugNumber(v) - if err != nil { - log(ctx).With("cause", err).Warn("invalid PPROF configuration debug number") - continue - } - - pent := pprof.Lookup(string(k)) - if pent == nil { - log(ctx).Warnf("no system PPROF entry for %q", k) - delete(pprofConfigs.pcm, k) - - continue - } - - err = pent.WriteTo(v.buf, debug) - if err != nil { - log(ctx).With("cause", err).Warn("error writing PPROF buffer") - - continue - } - } - // dump the profiles out into their respective PEMs - for k, v := range pprofConfigs.pcm { - if v == nil { - continue - } - - unm := strings.ToUpper(string(k)) - log(ctx).Infof("dumping PEM for %q", unm) - - err := DumpPem(v.buf.Bytes(), unm, os.Stderr) - if err != nil { - log(ctx).With("cause", err).Error("cannot write PEM") - } - } - - // clear the profile rates and fractions to effectively stop profiling - clearProfileFractions(pprofConfigs.pcm) - pprofConfigs.pcm = map[ProfileName]*ProfileConfig{} -} diff --git a/internal/pproflogging/pproflogging_test.go b/internal/pproflogging/pproflogging_test.go deleted file mode 100644 index 44ab703c953..00000000000 --- a/internal/pproflogging/pproflogging_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package pproflogging - -import ( - "bytes" - "context" - "fmt" - "io" - "maps" - "os" - "regexp" - "slices" - "testing" - - "github.com/stretchr/testify/require" - - 
"github.com/kopia/kopia/repo/logging" -) - -func TestDebug_StartProfileBuffers(t *testing.T) { - saveLockEnv(t) - // placeholder to make coverage happy - tcs := []struct { - in string - rx *regexp.Regexp - }{ - { - in: "", - rx: regexp.MustCompile("no profile buffers enabled"), - }, - { - in: ":", - rx: regexp.MustCompile(`cannot start PPROF config, ".*", due to parse error`), - }, - } - for _, tc := range tcs { - lg := &bytes.Buffer{} - ctx := logging.WithLogger(context.Background(), logging.ToWriter(lg)) - - t.Setenv(EnvVarKopiaDebugPprof, tc.in) - StartProfileBuffers(ctx) - require.Regexp(t, tc.rx, lg.String()) - } -} - -func TestDebug_parseProfileConfigs(t *testing.T) { - saveLockEnv(t) - - tcs := []struct { - in string - key ProfileName - expect []string - expectError error - expectMissing bool - n int - }{ - { - in: "foo", - key: "foo", - expect: nil, - n: 1, - }, - { - in: "foo=bar", - key: "foo", - expect: []string{ - "bar", - }, - n: 1, - }, - { - in: "first=one=1", - key: "first", - expect: []string{ - "one=1", - }, - n: 1, - }, - { - in: "foo=bar:first=one=1", - key: "first", - expect: []string{ - "one=1", - }, - n: 2, - }, - { - in: "foo=bar:first=one=1,two=2", - key: "first", - expect: []string{ - "one=1", - "two=2", - }, - n: 2, - }, - { - in: "foo=bar:first=one=1,two=2:second:third", - key: "first", - expect: []string{ - "one=1", - "two=2", - }, - n: 4, - }, - { - in: "foo=bar:first=one=1,two=2:second:third", - key: "foo", - expect: []string{ - "bar", - }, - n: 4, - }, - { - in: "foo=bar:first=one=1,two=2:second:third", - key: "second", - expect: nil, - n: 4, - }, - { - in: "foo=bar:first=one=1,two=2:second:third", - key: "third", - expect: nil, - n: 4, - }, - { - in: "=", - key: "", - expectMissing: true, - expectError: ErrEmptyProfileName, - }, - { - in: ":", - key: "", - expectMissing: true, - expectError: ErrEmptyProfileName, - }, - { - in: ",", - key: ",", - expect: nil, - n: 1, - }, - { - in: "=,:", - key: "", - expectMissing: true, - 
expectError: ErrEmptyProfileName, - }, - { - in: "", - key: "", - expectMissing: true, - expectError: ErrEmptyProfileName, - }, - { - in: ":=", - key: "cpu", - expectMissing: true, - expectError: ErrEmptyProfileName, - }, - } - for i, tc := range tcs { - t.Run(fmt.Sprintf("%d %s", i, tc.in), func(t *testing.T) { - pbs, err := parseProfileConfigs(1<<10, tc.in) - require.ErrorIs(t, tc.expectError, err) - require.Len(t, pbs, tc.n) - pb, ok := pbs[tc.key] // no negative testing for missing keys (see newProfileConfigs) - require.Equalf(t, !tc.expectMissing, ok, "key %q for set %q expect missing %t", tc.key, mapKeys(pbs), tc.expectMissing) - - if tc.expectMissing { - return - } - - require.Equal(t, 1<<10, pb.buf.Cap()) // bufsize is always 1024 - require.Equal(t, 0, pb.buf.Len()) - require.Equal(t, tc.expect, pb.flags) - }) - } -} - -func mapKeys[Map ~map[K]V, K comparable, V any](m Map) []K { - return slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)) -} - -func TestDebug_newProfileConfigs(t *testing.T) { - saveLockEnv(t) - - tcs := []struct { - in string - key string - expect string - ok bool - }{ - { - in: "foo=bar", - key: "foo", - ok: true, - expect: "bar", - }, - { - in: "foo=", - key: "foo", - ok: true, - expect: "", - }, - { - in: "", - key: "foo", - ok: false, - expect: "", - }, - { - in: "foo=bar", - key: "bar", - ok: false, - expect: "", - }, - } - for i, tc := range tcs { - t.Run(fmt.Sprintf("%d %s", i, tc.in), func(t *testing.T) { - pb := newProfileConfig(1<<10, tc.in) - require.NotNil(t, pb) // always not nil - require.Equal(t, 1<<10, pb.buf.Cap()) // bufsize is always 1024 - v, ok := pb.GetValue(tc.key) - require.Equal(t, tc.ok, ok) - require.Equal(t, tc.expect, v) - }) - } -} - -func TestDebug_LoadProfileConfigs(t *testing.T) { - // save environment and restore after testing - saveLockEnv(t) - - ctx := context.Background() - - tcs := []struct { - inArgs string - profileKey ProfileName - profileFlagKey string - expectProfileFlagValue string - 
expectProfileFlagExists bool - expectConfigurationCount int - expectError error - expectProfileConfigNotExists bool - }{ - { - inArgs: "", - expectConfigurationCount: 0, - profileKey: "", - expectError: nil, - expectProfileConfigNotExists: true, - }, - { - inArgs: "block=rate=10:cpu:mutex=10", - expectConfigurationCount: 3, - profileKey: "block", - profileFlagKey: "rate", - expectProfileFlagExists: true, - expectProfileFlagValue: "10", - expectError: nil, - }, - { - inArgs: "block=rate=10:cpu:mutex=10", - expectConfigurationCount: 3, - profileKey: "cpu", - profileFlagKey: "rate", - expectProfileFlagExists: false, - }, - { - inArgs: "block=rate=10:cpu:mutex=10", - expectConfigurationCount: 3, - profileKey: "mutex", - profileFlagKey: "10", - expectProfileFlagExists: true, - }, - { - inArgs: "mutex=10", - expectConfigurationCount: 1, - profileKey: "cpu", - profileFlagKey: "10", - expectProfileConfigNotExists: true, - }, - } - - for i, tc := range tcs { - t.Run(fmt.Sprintf("%d: %q", i, tc.inArgs), func(t *testing.T) { - pmp, err := LoadProfileConfig(ctx, tc.inArgs) - require.ErrorIs(t, tc.expectError, err) - - if err != nil { - return - } - - val, ok := pmp[tc.profileKey] - require.Equalf(t, tc.expectProfileConfigNotExists, !ok, "expecting key %q to %t exist", tc.profileKey, !tc.expectProfileConfigNotExists) - - if tc.expectProfileConfigNotExists { - return - } - - flagValue, ok := val.GetValue(tc.profileFlagKey) - require.Equal(t, tc.expectProfileFlagExists, ok, "expecting key %q to %t exist", tc.profileKey, tc.expectProfileFlagExists) - - if tc.expectProfileFlagExists { - return - } - - require.Equal(t, tc.expectProfileFlagValue, flagValue) - }) - } -} - -//nolint:gocritic -func saveLockEnv(t *testing.T) { - t.Helper() - - oldEnv := os.Getenv(EnvVarKopiaDebugPprof) - - t.Cleanup(func() { - // restore the old environment - t.Setenv(EnvVarKopiaDebugPprof, oldEnv) - }) -} - -func TestErrorWriter(t *testing.T) { - eww := &ErrorWriter{mx: 5, err: io.EOF} - n, err := 
eww.WriteString("Hello World") - require.ErrorIs(t, io.EOF, err) - require.Equal(t, 5, n) - require.Equal(t, "Hello", string(eww.bs)) -} - -// ErrorWriter allows injection of errors into the write stream. There are a few -// failures in PPROF dumps that are worth modeling for tests ([io.EOF] is one) -// For use specify the error, ErrorWriter.err, and byte index, ErrorWriter.mx, -// in which it should occur. -type ErrorWriter struct { - bs []byte - mx int - err error -} - -func (p *ErrorWriter) Write(bs []byte) (int, error) { - n := len(bs) - - if len(bs)+len(p.bs) > p.mx { - // error will be produced at p.mx - // so don't return any more than - // n - n = p.mx - len(p.bs) - } - - // append the bytes to the local buffer just - // in case someone wants to know. - p.bs = append(p.bs, bs[:n]...) - if n < len(bs) { - // here we assume that any less than len(bs) - // bytes written returns an error. This - // allows setting ErrorWriter up once - // to produce an error after multiple - // writes - return n, p.err - } - - return n, nil -} - -//nolint:gocritic -func (p *ErrorWriter) WriteString(s string) (int, error) { - return p.Write([]byte(s)) -} diff --git a/internal/providervalidation/providervalidation.go b/internal/providervalidation/providervalidation.go index 8106efe8cd7..da79185b439 100644 --- a/internal/providervalidation/providervalidation.go +++ b/internal/providervalidation/providervalidation.go @@ -92,7 +92,7 @@ func openEquivalentStorageConnections(ctx context.Context, st blob.Storage, n in log(ctx).Debugw("opened equivalent storage connection", "connectionID", i) - result = append(result, loggingwrapper.NewWrapper(c, log(ctx), fmt.Sprintf("[STORAGE-%v] ", i))) + result = append(result, loggingwrapper.NewWrapper(c, log(ctx), nil, fmt.Sprintf("[STORAGE-%v] ", i))) } return result, nil diff --git a/internal/releasable/releaseable_tracker.go b/internal/releasable/releaseable_tracker.go index efeeb70dda2..9b0a8a5d44e 100644 --- 
a/internal/releasable/releaseable_tracker.go +++ b/internal/releasable/releaseable_tracker.go @@ -4,6 +4,7 @@ package releasable import ( "bytes" "fmt" + "maps" "runtime/debug" "sync" @@ -91,12 +92,7 @@ func (s *perKindTracker) active() map[any]string { s.mu.Lock() defer s.mu.Unlock() - res := map[any]string{} - for k, v := range s.items { - res[k] = v - } - - return res + return maps.Clone(s.items) } var ( diff --git a/internal/repodiag/blob_writer.go b/internal/repodiag/blob_writer.go index 3948b8b37b2..714d85807c2 100644 --- a/internal/repodiag/blob_writer.go +++ b/internal/repodiag/blob_writer.go @@ -37,10 +37,7 @@ func (w *BlobWriter) EncryptAndWriteBlobAsync(ctx context.Context, prefix blob.I b := encrypted.Bytes() - w.wg.Add(1) - - go func() { - defer w.wg.Done() + w.wg.Go(func() { defer encrypted.Close() defer closeFunc() @@ -49,7 +46,7 @@ func (w *BlobWriter) EncryptAndWriteBlobAsync(ctx context.Context, prefix blob.I log(ctx).Warnf("unable to write diagnostics blob: %v", err) return } - }() + }) } // Wait waits for all the writes to complete. diff --git a/internal/repodiag/log_manager.go b/internal/repodiag/log_manager.go index c05f260c65c..44c0a20b70e 100644 --- a/internal/repodiag/log_manager.go +++ b/internal/repodiag/log_manager.go @@ -2,28 +2,29 @@ package repodiag import ( + "compress/gzip" "context" "crypto/rand" "fmt" + "io" + "sync" "sync/atomic" "time" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" - "github.com/kopia/kopia/internal/zaplogutil" "github.com/kopia/kopia/repo/blob" ) -const blobLoggerFlushThreshold = 4 << 20 - // LogBlobPrefix is a prefix given to text logs stored in repository. const LogBlobPrefix = "_log_" // LogManager manages writing encrypted log blobs to the repository. 
type LogManager struct { + disableRepositoryLog bool + // Set by Enable(). Log blobs are not written to the repository until // Enable() is called. enabled atomic.Bool @@ -35,34 +36,28 @@ type LogManager struct { writer *BlobWriter timeFunc func() time.Time - flushThreshold int -} + flushThreshold int // +checklocksignore + prefix blob.ID -func (m *LogManager) encryptAndWriteLogBlob(prefix blob.ID, data gather.Bytes, closeFunc func()) { - m.writer.EncryptAndWriteBlobAsync(m.ctx, prefix, data, closeFunc) + mu sync.Mutex + currentSegment *gather.WriteBuffer + startTime int64 + params []contentlog.ParamWriter + textWriter io.Writer + + nextChunkNumber atomic.Uint64 + gz *gzip.Writer } // NewLogger creates new logger. -func (m *LogManager) NewLogger() *zap.SugaredLogger { +func (m *LogManager) NewLogger(name string) *contentlog.Logger { if m == nil { - return zap.NewNop().Sugar() + return nil } - var rnd [2]byte - - rand.Read(rnd[:]) //nolint:errcheck - - w := &logWriteSyncer{ - m: m, - prefix: blob.ID(fmt.Sprintf("%v%v_%x", LogBlobPrefix, clock.Now().Local().Format("20060102150405"), rnd)), - } - - return zap.New(zapcore.NewCore( - zaplogutil.NewStdConsoleEncoder(zaplogutil.StdConsoleEncoderConfig{ - TimeLayout: zaplogutil.PreciseLayout, - LocalTime: false, - }), - w, zap.DebugLevel), zap.WithClock(zaplogutil.Clock())).Sugar() + return contentlog.NewLogger( + m.outputEntry, + append(append([]contentlog.ParamWriter(nil), m.params...), logparam.String("n", name))...) } // Enable enables writing log blobs to repository. @@ -75,12 +70,96 @@ func (m *LogManager) Enable() { m.enabled.Store(true) } +// Disable disables writing log blobs to repository. 
+func (m *LogManager) Disable() { + if m == nil { + return + } + + m.enabled.Store(false) +} + +func (m *LogManager) outputEntry(data []byte) { + if m.textWriter != nil { + m.textWriter.Write(data) //nolint:errcheck + } + + if !m.enabled.Load() || m.disableRepositoryLog { + return + } + + m.mu.Lock() + + var ( + flushBuffer *gather.WriteBuffer + flushBlobID blob.ID + ) + + if m.currentSegment == nil || m.currentSegment.Length() > m.flushThreshold { + flushBuffer, flushBlobID = m.initNewBuffer() + } + + m.gz.Write(data) //nolint:errcheck + m.mu.Unlock() + + if flushBuffer != nil { + m.flushNextBuffer(flushBlobID, flushBuffer) + } +} + +func (m *LogManager) flushNextBuffer(blobID blob.ID, buf *gather.WriteBuffer) { + m.writer.EncryptAndWriteBlobAsync(m.ctx, blobID, buf.Bytes(), buf.Close) +} + +// Sync flushes the current buffer to the repository. +func (m *LogManager) Sync() { + if m == nil { + return + } + + m.mu.Lock() + flushBuffer, flushBlobID := m.initNewBuffer() + m.mu.Unlock() + + if flushBuffer != nil { + m.flushNextBuffer(flushBlobID, flushBuffer) + } +} + +func (m *LogManager) initNewBuffer() (flushBuffer *gather.WriteBuffer, flushBlobID blob.ID) { + if m.gz != nil { + m.gz.Close() //nolint:errcheck + } + + flushBuffer = m.currentSegment + + if flushBuffer != nil { + flushBlobID = blob.ID(fmt.Sprintf("%v_%v_%v_%v_", m.prefix, m.startTime, m.timeFunc().Unix(), m.nextChunkNumber.Add(1))) + } else { + flushBlobID = blob.ID("") + } + + m.startTime = m.timeFunc().Unix() + m.currentSegment = gather.NewWriteBuffer() + m.gz = gzip.NewWriter(m.currentSegment) + + return flushBuffer, flushBlobID +} + // NewLogManager creates a new LogManager that will emit logs as repository blobs. 
-func NewLogManager(ctx context.Context, w *BlobWriter) *LogManager { +func NewLogManager(ctx context.Context, w *BlobWriter, disableRepositoryLog bool, textWriter io.Writer, params ...contentlog.ParamWriter) *LogManager { + var rnd [2]byte + + rand.Read(rnd[:]) //nolint:errcheck + return &LogManager{ - ctx: context.WithoutCancel(ctx), - writer: w, - flushThreshold: blobLoggerFlushThreshold, - timeFunc: clock.Now, + ctx: context.WithoutCancel(ctx), + writer: w, + timeFunc: clock.Now, + params: params, + flushThreshold: 4 << 20, //nolint:mnd + disableRepositoryLog: disableRepositoryLog, + prefix: blob.ID(fmt.Sprintf("%v%v_%x", LogBlobPrefix, clock.Now().Local().Format("20060102150405"), rnd)), + textWriter: textWriter, } } diff --git a/internal/repodiag/log_manager_test.go b/internal/repodiag/log_manager_test.go index 4cf79719fb0..745cda2f26c 100644 --- a/internal/repodiag/log_manager_test.go +++ b/internal/repodiag/log_manager_test.go @@ -4,12 +4,14 @@ import ( "context" "crypto/rand" "encoding/hex" + "io" "strings" "testing" "github.com/stretchr/testify/require" "github.com/kopia/kopia/internal/blobtesting" + "github.com/kopia/kopia/internal/contentlog" "github.com/kopia/kopia/internal/repodiag" "github.com/kopia/kopia/internal/testlogging" ) @@ -19,14 +21,14 @@ func TestLogManager_Enabled(t *testing.T) { st := blobtesting.NewMapStorage(d, nil, nil) w := repodiag.NewWriter(st, newStaticCrypter(t)) ctx := testlogging.Context(t) - lm := repodiag.NewLogManager(ctx, w) + lm := repodiag.NewLogManager(ctx, w, false, io.Discard) lm.Enable() - l := lm.NewLogger() - l.Info("hello") + l := lm.NewLogger("test") + contentlog.Log(ctx, l, "hello") require.Empty(t, d) - l.Sync() + lm.Sync() w.Wait(ctx) // make sure log messages are written @@ -43,10 +45,10 @@ func TestLogManager_AutoFlush(t *testing.T) { st := blobtesting.NewMapStorage(d, nil, nil) w := repodiag.NewWriter(st, newStaticCrypter(t)) ctx := testlogging.Context(t) - lm := repodiag.NewLogManager(ctx, w) + lm := 
repodiag.NewLogManager(ctx, w, false, io.Discard) lm.Enable() - l := lm.NewLogger() + l := lm.NewLogger("test") // flush happens after 4 << 20 bytes (4MB) after compression, // write ~10MB of base16 data which compresses to ~5MB and writes 1 blob @@ -54,14 +56,14 @@ func TestLogManager_AutoFlush(t *testing.T) { var b [1024]byte rand.Read(b[:]) - l.Info(hex.EncodeToString(b[:])) + contentlog.Log(ctx, l, hex.EncodeToString(b[:])) } w.Wait(ctx) require.Len(t, d, 1) - l.Sync() + lm.Sync() w.Wait(ctx) require.Len(t, d, 2) @@ -72,13 +74,13 @@ func TestLogManager_NotEnabled(t *testing.T) { st := blobtesting.NewMapStorage(d, nil, nil) w := repodiag.NewWriter(st, newStaticCrypter(t)) ctx := testlogging.Context(t) - lm := repodiag.NewLogManager(ctx, w) + lm := repodiag.NewLogManager(ctx, w, false, io.Discard) - l := lm.NewLogger() - l.Info("hello") + l := lm.NewLogger("test") + contentlog.Log(ctx, l, "hello") require.Empty(t, d) - l.Sync() + lm.Sync() w.Wait(ctx) // make sure log messages are not written @@ -91,18 +93,18 @@ func TestLogManager_CancelledContext(t *testing.T) { w := repodiag.NewWriter(st, newStaticCrypter(t)) ctx := testlogging.Context(t) cctx, cancel := context.WithCancel(ctx) - lm := repodiag.NewLogManager(cctx, w) + lm := repodiag.NewLogManager(cctx, w, false, io.Discard) // cancel context, logs should still be written cancel() lm.Enable() - l := lm.NewLogger() - l.Info("hello") + l := lm.NewLogger("test") + contentlog.Log(ctx, l, "hello") require.Empty(t, d) - l.Sync() + lm.Sync() w.Wait(ctx) // make sure log messages are written @@ -112,9 +114,11 @@ func TestLogManager_CancelledContext(t *testing.T) { func TestLogManager_Null(t *testing.T) { var lm *repodiag.LogManager + ctx := testlogging.Context(t) + lm.Enable() - l := lm.NewLogger() - l.Info("hello") - l.Sync() + l := lm.NewLogger("test") + contentlog.Log(ctx, l, "hello") + lm.Sync() } diff --git a/internal/repodiag/log_write_syncer.go b/internal/repodiag/log_write_syncer.go deleted file mode 100644 
index a347deb58fb..00000000000 --- a/internal/repodiag/log_write_syncer.go +++ /dev/null @@ -1,121 +0,0 @@ -package repodiag - -import ( - "compress/gzip" - "fmt" - "io" - "sync" - "sync/atomic" - - "github.com/kopia/kopia/internal/gather" - "github.com/kopia/kopia/repo/blob" -) - -// logWriteSyncer writes a sequence of log messages as blobs in the repository. -type logWriteSyncer struct { - nextChunkNumber atomic.Int32 - - m *LogManager - mu sync.Mutex - - // +checklocks:mu - buf *gather.WriteBuffer - // +checklocks:mu - gzw *gzip.Writer - - // +checklocks:mu - startTime int64 // unix timestamp of the first log - - prefix blob.ID // +checklocksignore -} - -func (l *logWriteSyncer) Write(b []byte) (int, error) { - if l != nil { - l.maybeEncryptAndWriteChunkUnlocked(l.addAndMaybeFlush(b)) - } - - return len(b), nil -} - -func (l *logWriteSyncer) maybeEncryptAndWriteChunkUnlocked(data gather.Bytes, closeFunc func()) { - if data.Length() == 0 { - closeFunc() - return - } - - if !l.m.enabled.Load() { - closeFunc() - return - } - - endTime := l.m.timeFunc().Unix() - - l.mu.Lock() - st := l.startTime - l.mu.Unlock() - - prefix := blob.ID(fmt.Sprintf("%v_%v_%v_%v_", l.prefix, st, endTime, l.nextChunkNumber.Add(1))) - - l.m.encryptAndWriteLogBlob(prefix, data, closeFunc) -} - -func (l *logWriteSyncer) addAndMaybeFlush(b []byte) (payload gather.Bytes, closeFunc func()) { - l.mu.Lock() - defer l.mu.Unlock() - - w := l.ensureWriterInitializedLocked() - - _, err := w.Write(b) - l.logUnexpectedError(err) - - if l.buf.Length() < l.m.flushThreshold { - return gather.Bytes{}, func() {} - } - - return l.flushAndResetLocked() -} - -// +checklocks:l.mu -func (l *logWriteSyncer) ensureWriterInitializedLocked() io.Writer { - if l.gzw == nil { - l.buf = gather.NewWriteBuffer() - l.gzw = gzip.NewWriter(l.buf) - l.startTime = l.m.timeFunc().Unix() - } - - return l.gzw -} - -// +checklocks:l.mu -func (l *logWriteSyncer) flushAndResetLocked() (payload gather.Bytes, closeFunc func()) { - if 
l.gzw == nil { - return gather.Bytes{}, func() {} - } - - l.logUnexpectedError(l.gzw.Flush()) - l.logUnexpectedError(l.gzw.Close()) - - closeBuf := l.buf.Close - res := l.buf.Bytes() - - l.buf = nil - l.gzw = nil - - return res, closeBuf -} - -func (l *logWriteSyncer) logUnexpectedError(err error) { - if err == nil { - return - } -} - -func (l *logWriteSyncer) Sync() error { - l.mu.Lock() - data, closeFunc := l.flushAndResetLocked() - l.mu.Unlock() - - l.maybeEncryptAndWriteChunkUnlocked(data, closeFunc) - - return nil -} diff --git a/internal/retry/retry.go b/internal/retry/retry.go index c2b78b45ca7..6310c601dc3 100644 --- a/internal/retry/retry.go +++ b/internal/retry/retry.go @@ -84,11 +84,7 @@ func internalRetry[T any](ctx context.Context, desc string, attempt func() (T, e log(ctx).Debugf("got error %v when %v (#%v), sleeping for %v before retrying", err, desc, i, sleepAmount) time.Sleep(sleepAmount) - sleepAmount = time.Duration(float64(sleepAmount) * factor) - - if sleepAmount > maxSleep { - sleepAmount = maxSleep - } + sleepAmount = min(time.Duration(float64(sleepAmount)*factor), maxSleep) } return defaultT, errors.Wrapf(lastError, "unable to complete %v despite %v retries", desc, i) diff --git a/internal/retry/retry_test.go b/internal/retry/retry_test.go index a51d4d69085..e744ca1bc4c 100644 --- a/internal/retry/retry_test.go +++ b/internal/retry/retry_test.go @@ -37,6 +37,7 @@ func TestRetry(t *testing.T) { if cnt < 2 { return 0, errRetriable } + return 4, nil }, 4, nil}, {"retriable-never-succeeds", func() (int, error) { return 0, errRetriable }, 0, errors.New("unable to complete retriable-never-succeeds despite 3 retries")}, diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index c5a54984be3..5da05aeb9e5 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -61,13 +61,9 @@ func Start(ctx context.Context, getItems GetItemsFunc, opts Options) *Scheduler Debug: opts.Debug, } - s.wg.Add(1) - - go 
func() { - defer s.wg.Done() - + s.wg.Go(func() { s.run(context.WithoutCancel(ctx)) - }() + }) return s } diff --git a/internal/server/api_estimate.go b/internal/server/api_estimate.go index 775dbf1623f..7ecffdcc5a6 100644 --- a/internal/server/api_estimate.go +++ b/internal/server/api_estimate.go @@ -30,9 +30,9 @@ func (p estimateTaskProgress) Processing(_ context.Context, dirname string) { func (p estimateTaskProgress) Error(ctx context.Context, dirname string, err error, isIgnored bool) { if isIgnored { - log(ctx).Errorf("ignored error in %v: %v", dirname, err) + userLog(ctx).Errorf("ignored error in %v: %v", dirname, err) } else { - log(ctx).Errorf("error in %v: %v", dirname, err) + userLog(ctx).Errorf("error in %v: %v", dirname, err) } } @@ -76,7 +76,7 @@ func logBucketSamples(ctx context.Context, buckets upload.SampleBuckets, prefix units.BytesString(buckets[i-1].MinSize)) } - log(ctx).Infof("%v files %v: %7v files, total size %v\n", + userLog(ctx).Infof("%v files %v: %7v files, total size %v\n", prefix, sizeRange, bucket.Count, units.BytesString(bucket.TotalSize)) @@ -84,16 +84,16 @@ func logBucketSamples(ctx context.Context, buckets upload.SampleBuckets, prefix hasAny = true if showExamples && len(bucket.Examples) > 0 { - log(ctx).Info("Examples:") + userLog(ctx).Info("Examples:") for _, sample := range bucket.Examples { - log(ctx).Infof(" - %v\n", sample) + userLog(ctx).Infof(" - %v\n", sample) } } } if !hasAny { - log(ctx).Infof("%v files: None", prefix) + userLog(ctx).Infof("%v files: None", prefix) } } @@ -115,7 +115,7 @@ func handleEstimate(ctx context.Context, rc requestContext) (any, *apiError) { dir, ok := e.(fs.Directory) if !ok { - return nil, internalServerError(errors.Wrap(err, "estimation is only supported on directories")) + return nil, internalServerError(errors.New("estimation is only supported on directories")) } taskIDChan := make(chan string) diff --git a/internal/server/api_mount.go b/internal/server/api_mount.go index 
cce7329fcff..a0ada646396 100644 --- a/internal/server/api_mount.go +++ b/internal/server/api_mount.go @@ -24,7 +24,7 @@ func handleMountCreate(ctx context.Context, rc requestContext) (any, *apiError) return nil, internalServerError(err) } - log(ctx).Debugf("mount for %v => %v", oid, c.MountPath()) + userLog(ctx).Debugf("mount for %v => %v", oid, c.MountPath()) return &serverapi.MountedSnapshot{ Path: c.MountPath(), diff --git a/internal/server/api_sources.go b/internal/server/api_sources.go index b8f2c956f5c..220983f0428 100644 --- a/internal/server/api_sources.go +++ b/internal/server/api_sources.go @@ -85,7 +85,7 @@ func handleSourcesCreate(ctx context.Context, rc requestContext) (any, *apiError if req.CreateSnapshot { resp.SnapshotStarted = true - log(ctx).Debugf("scheduling snapshot of %v immediately...", sourceInfo) + userLog(ctx).Debugf("scheduling snapshot of %v immediately...", sourceInfo) manager.scheduleSnapshotNow() } diff --git a/internal/server/grpc_session.go b/internal/server/grpc_session.go index ec87bf855f3..9be16e9fdc7 100644 --- a/internal/server/grpc_session.go +++ b/internal/server/grpc_session.go @@ -19,6 +19,8 @@ import ( "google.golang.org/grpc/status" "github.com/kopia/kopia/internal/auth" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/internal/grpcapi" "github.com/kopia/kopia/notification" @@ -38,6 +40,10 @@ type grpcServerState struct { grpcapi.UnimplementedKopiaRepositoryServer sem *semaphore.Weighted + + // GRPC server instance for graceful shutdown + grpcServer *grpc.Server + grpcMutex sync.RWMutex } // send sends the provided session response with the provided request ID. 
@@ -86,6 +92,9 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error { return status.Errorf(codes.Unavailable, "not connected to a direct repository") } + log := dr.LogManager().NewLogger("grpc-session") + ctx = contentlog.WithParams(ctx, logparam.String("span:server-session", contentlog.RandomSpanID())) + usernameAtHostname, err := s.authenticateGRPCSession(ctx, dr) if err != nil { return err @@ -101,12 +110,12 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error { return status.Errorf(codes.PermissionDenied, "peer not found in context") } - log(ctx).Infof("starting session for user %q from %v", usernameAtHostname, p.Addr) - defer log(ctx).Infof("session ended for user %q from %v", usernameAtHostname, p.Addr) + userLog(ctx).Infof("starting session for user %q from %v", usernameAtHostname, p.Addr) + defer userLog(ctx).Infof("session ended for user %q from %v", usernameAtHostname, p.Addr) opt, err := s.handleInitialSessionHandshake(srv, dr) if err != nil { - log(ctx).Errorf("session handshake error: %v", err) + userLog(ctx).Errorf("session handshake error: %v", err) return err } @@ -119,7 +128,12 @@ func (s *Server) Session(srv grpcapi.KopiaRepository_SessionServer) error { // propagate any error from the goroutines select { case err := <-lastErr: - log(ctx).Errorf("error handling session request: %v", err) + userLog(ctx).Errorf("error handling session request: %v", err) + + contentlog.Log1(ctx, log, + "error handling session request", + logparam.Error("error", err)) + return err default: @@ -153,6 +167,7 @@ var tracer = otel.Tracer("kopia/grpc") func (s *Server) handleSessionRequest(ctx context.Context, dw repo.DirectRepositoryWriter, authz auth.AuthorizationInfo, usernameAtHostname string, req *grpcapi.SessionRequest, respond func(*grpcapi.SessionResponse)) { if req.GetTraceContext() != nil { var tc propagation.TraceContext + ctx = tc.Extract(ctx, propagation.MapCarrier(req.GetTraceContext())) } @@ -622,18 +637,36 @@ 
func makeGRPCServerState(maxConcurrency int) grpcServerState { // GRPCRouterHandler returns HTTP handler that supports GRPC services and // routes non-GRPC calls to the provided handler. func (s *Server) GRPCRouterHandler(handler http.Handler) http.Handler { - grpcServer := grpc.NewServer( - grpc.MaxSendMsgSize(repo.MaxGRPCMessageSize), - grpc.MaxRecvMsgSize(repo.MaxGRPCMessageSize), - ) + s.grpcMutex.Lock() + defer s.grpcMutex.Unlock() - s.RegisterGRPCHandlers(grpcServer) + if s.grpcServer == nil { + s.grpcServer = grpc.NewServer( + grpc.MaxSendMsgSize(repo.MaxGRPCMessageSize), + grpc.MaxRecvMsgSize(repo.MaxGRPCMessageSize), + ) + + s.RegisterGRPCHandlers(s.grpcServer) + } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - grpcServer.ServeHTTP(w, r) + s.grpcServer.ServeHTTP(w, r) } else { handler.ServeHTTP(w, r) } }) } + +// ShutdownGRPCServer shuts down the GRPC server. +// Note: Since the GRPC server runs over HTTP handler transport, +// GracefulStop() doesn't work. We use Stop() instead. 
+func (s *Server) ShutdownGRPCServer() { + s.grpcMutex.Lock() + defer s.grpcMutex.Unlock() + + if s.grpcServer != nil { + s.grpcServer.Stop() + s.grpcServer = nil + } +} diff --git a/internal/server/htmlui_embed.go b/internal/server/htmlui_embed.go index fbd6eb11d0e..762986d1d7c 100644 --- a/internal/server/htmlui_embed.go +++ b/internal/server/htmlui_embed.go @@ -1,5 +1,4 @@ //go:build !nohtmlui -// +build !nohtmlui package server diff --git a/internal/server/htmlui_fallback.go b/internal/server/htmlui_fallback.go index 102ade08327..d0f2018a0c6 100644 --- a/internal/server/htmlui_fallback.go +++ b/internal/server/htmlui_fallback.go @@ -1,5 +1,4 @@ //go:build nohtmlui -// +build nohtmlui package server diff --git a/internal/server/server.go b/internal/server/server.go index 00f709ddde6..cb9b75fc5f4 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -8,6 +8,7 @@ import ( "fmt" "html" "io" + "maps" "net/http" "net/url" "strings" @@ -38,7 +39,7 @@ import ( "github.com/kopia/kopia/snapshot/snapshotmaintenance" ) -var log = logging.Module("kopia/server") +var userLog = logging.Module("kopia/server") const ( // retry initialization of repository starting at 1s doubling delay each time up to max 5 minutes @@ -220,7 +221,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool { http.Error(rc.w, "Access denied.\n", http.StatusUnauthorized) // Log failed authentication attempt - log(rc.req.Context()).Warnf("failed login attempt by client %s for user %s", rc.req.RemoteAddr, username) + userLog(rc.req.Context()).Warnf("failed login attempt by client %s for user %s", rc.req.RemoteAddr, username) return false } @@ -229,7 +230,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool { ac, err := rc.srv.generateShortTermAuthCookie(username, now) if err != nil { - log(rc.req.Context()).Errorf("unable to generate short-term auth cookie: %v", err) + userLog(rc.req.Context()).Errorf("unable to generate short-term auth cookie: %v", err) } else { 
http.SetCookie(rc.w, &http.Cookie{ Name: kopiaAuthCookie, @@ -240,7 +241,7 @@ func (s *Server) isAuthenticated(rc requestContext) bool { if s.options.LogRequests { // Log successful authentication - log(rc.req.Context()).Infof("successful login by client %s for user %s", rc.req.RemoteAddr, username) + userLog(rc.req.Context()).Infof("successful login by client %s for user %s", rc.req.RemoteAddr, username) } } @@ -370,7 +371,7 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc rc.body = body if s.options.LogRequests { - log(ctx).Debugf("request %v (%v bytes)", rc.req.URL, len(body)) + userLog(ctx).Debugf("request %v (%v bytes)", rc.req.URL, len(body)) } rc.w.Header().Set("Content-Type", "application/json") @@ -397,10 +398,10 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc if err == nil { if b, ok := v.([]byte); ok { if _, err := rc.w.Write(b); err != nil { - log(ctx).Errorf("error writing response: %v", err) + userLog(ctx).Errorf("error writing response: %v", err) } } else if err := e.Encode(v); err != nil { - log(ctx).Errorf("error encoding response: %v", err) + userLog(ctx).Errorf("error encoding response: %v", err) } return @@ -411,7 +412,7 @@ func (s *Server) handleRequestPossiblyNotConnected(isAuthorized isAuthorizedFunc rc.w.WriteHeader(err.httpErrorCode) if s.options.LogRequests && err.apiErrorCode == serverapi.ErrorNotConnected { - log(ctx).Debugf("%v: error code %v message %v", rc.req.URL, err.apiErrorCode, err.message) + userLog(ctx).Debugf("%v: error code %v message %v", rc.req.URL, err.apiErrorCode, err.message) } _ = e.Encode(&serverapi.ErrorResponse{ @@ -438,7 +439,7 @@ func (s *Server) Refresh() { ctx := s.rootctx if err := s.refreshLocked(ctx); err != nil { - log(s.rootctx).Warnw("refresh error", "err", err) + userLog(s.rootctx).Warnw("refresh error", "err", err) } } @@ -458,13 +459,13 @@ func (s *Server) refreshLocked(ctx context.Context) error { if s.authenticator != nil { if err 
:= s.authenticator.Refresh(ctx); err != nil { - log(ctx).Errorf("unable to refresh authenticator: %v", err) + userLog(ctx).Errorf("unable to refresh authenticator: %v", err) } } if s.authorizer != nil { if err := s.authorizer.Refresh(ctx); err != nil { - log(ctx).Errorf("unable to refresh authorizer: %v", err) + userLog(ctx).Errorf("unable to refresh authorizer: %v", err) } } @@ -498,7 +499,7 @@ func handleFlush(ctx context.Context, rc requestContext) (any, *apiError) { } func handleShutdown(ctx context.Context, rc requestContext) (any, *apiError) { - log(ctx).Info("shutting down due to API request") + userLog(ctx).Info("shutting down due to API request") rc.srv.requestShutdown(ctx) @@ -509,7 +510,7 @@ func (s *Server) requestShutdown(ctx context.Context) { if f := s.OnShutdown; f != nil { go func() { if err := f(ctx); err != nil { - log(ctx).Errorf("shutdown failed: %v", err) + userLog(ctx).Errorf("shutdown failed: %v", err) } }() } @@ -528,7 +529,7 @@ func (s *Server) beginUpload(ctx context.Context, src snapshot.SourceInfo) bool defer s.parallelSnapshotsMutex.Unlock() for s.currentParallelSnapshots >= s.maxParallelSnapshots && ctx.Err() == nil { - log(ctx).Debugf("waiting on for parallel snapshot upload slot to be available %v", src) + userLog(ctx).Debugf("waiting on for parallel snapshot upload slot to be available %v", src) s.parallelSnapshotsChanged.Wait() } @@ -547,7 +548,7 @@ func (s *Server) endUpload(ctx context.Context, src snapshot.SourceInfo, mwe *no s.parallelSnapshotsMutex.Lock() defer s.parallelSnapshotsMutex.Unlock() - log(ctx).Debugf("finished uploading %v", src) + userLog(ctx).Debugf("finished uploading %v", src) s.currentParallelSnapshots-- @@ -617,9 +618,9 @@ func (s *Server) SetRepository(ctx context.Context, rep repo.Repository) error { s.unmountAllLocked(ctx) // close previous source managers - log(ctx).Debug("stopping all source managers") + userLog(ctx).Debug("stopping all source managers") s.stopAllSourceManagersLocked(ctx) - 
log(ctx).Debug("stopped all source managers") + userLog(ctx).Debug("stopped all source managers") if err := s.rep.Close(ctx); err != nil { return errors.Wrap(err, "unable to close previous repository") @@ -707,10 +708,7 @@ func (s *Server) syncSourcesLocked(ctx context.Context) error { // copy existing sources to a map, from which we will remove sources that are found // in the repository - oldSourceManagers := map[snapshot.SourceInfo]*sourceManager{} - for k, v := range s.sourceManagers { - oldSourceManagers[k] = v - } + oldSourceManagers := maps.Clone(s.sourceManagers) for src := range sources { if sm, ok := oldSourceManagers[src]; ok { @@ -919,7 +917,7 @@ func (s *Server) InitRepositoryAsync(ctx context.Context, mode string, initializ } if rep == nil { - log(ctx).Info("Repository not configured.") + userLog(ctx).Info("Repository not configured.") } if err = s.SetRepository(ctx, rep); err != nil { @@ -958,7 +956,7 @@ func RetryInitRepository(initialize InitRepositoryFunc) InitRepositoryFunc { return rep, nil } - log(ctx).Warnf("unable to open repository: %v, will keep trying until canceled. Sleeping for %v", rerr, nextSleepTime) + userLog(ctx).Warnf("unable to open repository: %v, will keep trying until canceled. 
Sleeping for %v", rerr, nextSleepTime) if !clock.SleepInterruptibly(ctx, nextSleepTime) { return nil, ctx.Err() @@ -978,6 +976,7 @@ func (s *Server) runSnapshotTask(ctx context.Context, src snapshot.SourceInfo, i } var result notifydata.ManifestWithError + result.Manifest.Source = src defer s.endUpload(ctx, src, &result) @@ -1016,7 +1015,7 @@ func (s *Server) getOrCreateSourceManager(ctx context.Context, src snapshot.Sour defer s.serverMutex.Unlock() if s.sourceManagers[src] == nil { - log(ctx).Debugf("creating source manager for %v", src) + userLog(ctx).Debugf("creating source manager for %v", src) sm := newSourceManager(src, s, s.rep) s.sourceManagers[src] = sm @@ -1046,13 +1045,7 @@ func (s *Server) snapshotAllSourceManagers() map[snapshot.SourceInfo]*sourceMana s.serverMutex.RLock() defer s.serverMutex.RUnlock() - result := map[snapshot.SourceInfo]*sourceManager{} - - for k, v := range s.sourceManagers { - result[k] = v - } - - return result + return maps.Clone(s.sourceManagers) } func (s *Server) getSchedulerItems(ctx context.Context, now time.Time) []scheduler.Item { @@ -1097,7 +1090,7 @@ func (s *Server) getSchedulerItems(ctx context.Context, now time.Time) []schedul NextTime: nst, }) } else { - log(ctx).Debugf("no snapshot scheduled for %v %v %v", sm.src, nst, now) + userLog(ctx).Debugf("no snapshot scheduled for %v %v %v", sm.src, nst, now) } } diff --git a/internal/server/server_authz_checks.go b/internal/server/server_authz_checks.go index eae68296316..cb00c33daae 100644 --- a/internal/server/server_authz_checks.go +++ b/internal/server/server_authz_checks.go @@ -36,7 +36,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool { sessionCookie, err := r.Cookie(kopiaSessionCookie) if err != nil { - log(ctx).Warnf("missing or invalid session cookie for %q: %v", path, err) + userLog(ctx).Warnf("missing or invalid session cookie for %q: %v", path, err) return false } @@ -45,7 +45,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool { token := 
r.Header.Get(apiclient.CSRFTokenHeader) if token == "" { - log(ctx).Warnf("missing CSRF token for %v", path) + userLog(ctx).Warnf("missing CSRF token for %v", path) return false } @@ -53,7 +53,7 @@ func (s *Server) validateCSRFToken(r *http.Request) bool { return true } - log(ctx).Warnf("got invalid CSRF token for %v: %v, want %v, session %v", path, token, validToken, sessionCookie.Value) + userLog(ctx).Warnf("got invalid CSRF token for %v: %v, want %v, session %v", path, token, validToken, sessionCookie.Value) return false } diff --git a/internal/server/server_maintenance.go b/internal/server/server_maintenance.go index 6daba6c27f9..63acd7da6be 100644 --- a/internal/server/server_maintenance.go +++ b/internal/server/server_maintenance.go @@ -56,7 +56,7 @@ func (s *srvMaintenance) stop(ctx context.Context) { close(s.closed) s.wg.Wait() - log(ctx).Debug("maintenance manager stopped") + userLog(ctx).Debug("maintenance manager stopped") } func (s *srvMaintenance) beforeRun() { @@ -84,7 +84,7 @@ func (s *srvMaintenance) refresh(ctx context.Context, notify bool) { defer s.mu.Unlock() if err := s.refreshLocked(ctx); err != nil { - log(ctx).Debugw("unable to refresh maintenance manager", "err", err) + userLog(ctx).Debugw("unable to refresh maintenance manager", "err", err) } } @@ -130,7 +130,7 @@ func maybeStartMaintenanceManager( } if rep.ClientOptions().ReadOnly { - log(ctx).Warnln("the repository connection is read-only, maintenance tasks will not be performed on this repository") + userLog(ctx).Warnln("the repository connection is read-only, maintenance tasks will not be performed on this repository") return nil } @@ -148,7 +148,7 @@ func maybeStartMaintenanceManager( m.wg.Add(1) - log(ctx).Debug("starting maintenance manager") + userLog(ctx).Debug("starting maintenance manager") m.refresh(ctx, false) @@ -158,14 +158,14 @@ func maybeStartMaintenanceManager( for { select { case <-m.triggerChan: - log(ctx).Debug("starting maintenance task") + 
userLog(ctx).Debug("starting maintenance task") m.beforeRun() t0 := clock.Now() if err := srv.runMaintenanceTask(mctx, dr); err != nil { - log(ctx).Debugw("maintenance task failed", "err", err) + userLog(ctx).Debugw("maintenance task failed", "err", err) m.afterFailedRun() if srv.enableErrorNotifications() { @@ -182,7 +182,7 @@ func maybeStartMaintenanceManager( m.refresh(mctx, true) case <-m.closed: - log(ctx).Debug("stopping maintenance manager") + userLog(ctx).Debug("stopping maintenance manager") return } } diff --git a/internal/server/server_mount_manager.go b/internal/server/server_mount_manager.go index 02cf1d9f9f5..c5cc53367e5 100644 --- a/internal/server/server_mount_manager.go +++ b/internal/server/server_mount_manager.go @@ -2,6 +2,7 @@ package server import ( "context" + "maps" "github.com/pkg/errors" @@ -24,7 +25,7 @@ func (s *Server) getMountController(ctx context.Context, rep repo.Repository, oi return nil, nil } - log(ctx).Debugf("mount controller for %v not found, starting", oid) + userLog(ctx).Debugf("mount controller for %v not found, starting", oid) c, err := mount.Directory(ctx, snapshotfs.DirectoryEntry(rep, oid, nil), "*", mount.Options{}) if err != nil { @@ -40,13 +41,7 @@ func (s *Server) listMounts() map[object.ID]mount.Controller { s.serverMutex.RLock() defer s.serverMutex.RUnlock() - result := map[object.ID]mount.Controller{} - - for oid, c := range s.mounts { - result[oid] = c - } - - return result + return maps.Clone(s.mounts) } func (s *Server) deleteMount(oid object.ID) { @@ -60,7 +55,7 @@ func (s *Server) deleteMount(oid object.ID) { func (s *Server) unmountAllLocked(ctx context.Context) { for oid, c := range s.mounts { if err := c.Unmount(ctx); err != nil { - log(ctx).Errorf("unable to unmount %v", oid) + userLog(ctx).Errorf("unable to unmount %v", oid) } delete(s.mounts, oid) diff --git a/internal/server/source_manager.go b/internal/server/source_manager.go index 7683ef0263c..0a83eacfb7a 100644 --- 
a/internal/server/source_manager.go +++ b/internal/server/source_manager.go @@ -23,7 +23,6 @@ import ( const ( failedSnapshotRetryInterval = 5 * time.Minute refreshTimeout = 30 * time.Second // max amount of time to refresh a single source - oneDay = 24 * time.Hour ) type sourceManagerServerInterface interface { @@ -155,6 +154,7 @@ func (s *sourceManager) setUploader(u *upload.Uploader) { func (s *sourceManager) start(ctx context.Context, isLocal bool) { s.refreshStatus(ctx) + go s.run(ctx, isLocal) } @@ -196,10 +196,10 @@ func (s *sourceManager) runLocal(ctx context.Context) { s.setStatus("PENDING") - log(ctx).Debugw("snapshotting", "source", s.src) + userLog(ctx).Debugw("snapshotting", "source", s.src) if err := s.server.runSnapshotTask(ctx, s.src, s.snapshotInternal); err != nil { - log(ctx).Errorf("snapshot error: %v", err) + userLog(ctx).Errorf("snapshot error: %v", err) s.backoffBeforeNextSnapshot() } else { @@ -252,17 +252,17 @@ func (s *sourceManager) scheduleSnapshotNow() { } func (s *sourceManager) upload(ctx context.Context) serverapi.SourceActionResponse { - log(ctx).Infof("upload triggered via API: %v", s.src) + userLog(ctx).Infof("upload triggered via API: %v", s.src) s.scheduleSnapshotNow() return serverapi.SourceActionResponse{Success: true} } func (s *sourceManager) cancel(ctx context.Context) serverapi.SourceActionResponse { - log(ctx).Debugw("cancel triggered via API", "source", s.src) + userLog(ctx).Debugw("cancel triggered via API", "source", s.src) if u := s.currentUploader(); u != nil { - log(ctx).Info("canceling current upload") + userLog(ctx).Info("canceling current upload") u.Cancel() } @@ -270,14 +270,14 @@ func (s *sourceManager) cancel(ctx context.Context) serverapi.SourceActionRespon } func (s *sourceManager) pause(ctx context.Context) serverapi.SourceActionResponse { - log(ctx).Debugw("pause triggered via API", "source", s.src) + userLog(ctx).Debugw("pause triggered via API", "source", s.src) s.sourceMutex.Lock() s.paused = true 
s.sourceMutex.Unlock() if u := s.currentUploader(); u != nil { - log(ctx).Info("canceling current upload") + userLog(ctx).Info("canceling current upload") u.Cancel() } @@ -287,7 +287,7 @@ func (s *sourceManager) pause(ctx context.Context) serverapi.SourceActionRespons } func (s *sourceManager) resume(ctx context.Context) serverapi.SourceActionResponse { - log(ctx).Debugw("resume triggered via API", "source", s.src) + userLog(ctx).Debugw("resume triggered via API", "source", s.src) s.sourceMutex.Lock() s.paused = false @@ -300,7 +300,7 @@ func (s *sourceManager) resume(ctx context.Context) serverapi.SourceActionRespon func (s *sourceManager) stop(ctx context.Context) { if u := s.currentUploader(); u != nil { - log(ctx).Infow("canceling current upload", "src", s.src) + userLog(ctx).Infow("canceling current upload", "src", s.src) u.Cancel() } @@ -320,7 +320,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro // check if we got closed while waiting on semaphore select { case <-s.closed: - log(ctx).Infof("not snapshotting %v because source manager is shutting down", s.src) + userLog(ctx).Infof("not snapshotting %v because source manager is shutting down", s.src) return nil default: @@ -351,7 +351,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro onUpload(numBytes) }, }, func(ctx context.Context, w repo.RepositoryWriter) error { - log(ctx).Debugf("uploading %v", s.src) + userLog(ctx).Debugf("uploading %v", s.src) u := upload.NewUploader(w) @@ -372,7 +372,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro u.Progress.UploadedBytes(numBytes) } - log(ctx).Debugf("starting upload of %v", s.src) + userLog(ctx).Debugf("starting upload of %v", s.src) s.setUploader(u) manifest, err := u.Upload(ctx, localEntry, policyTree, s.src, manifestsSinceLastCompleteSnapshot...) 
@@ -389,7 +389,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro ignoreIdenticalSnapshot := policyTree.EffectivePolicy().RetentionPolicy.IgnoreIdenticalSnapshots.OrDefault(false) if ignoreIdenticalSnapshot && len(manifestsSinceLastCompleteSnapshot) > 0 { if manifestsSinceLastCompleteSnapshot[0].RootObjectID() == manifest.RootObjectID() { - log(ctx).Debug("Not saving snapshot because no files have been changed since previous snapshot") + userLog(ctx).Debug("Not saving snapshot because no files have been changed since previous snapshot") return nil } } @@ -403,7 +403,7 @@ func (s *sourceManager) snapshotInternal(ctx context.Context, ctrl uitask.Contro return errors.Wrap(err, "unable to apply retention policy") } - log(ctx).Debugf("created snapshot %v", snapshotID) + userLog(ctx).Debugf("created snapshot %v", snapshotID) return nil }) diff --git a/internal/serverapi/serverapi.go b/internal/serverapi/serverapi.go index d0adda34262..e7b1761dd6d 100644 --- a/internal/serverapi/serverapi.go +++ b/internal/serverapi/serverapi.go @@ -295,4 +295,5 @@ type UIPreferences struct { FontSize string `json:"fontSize"` // Specifies the font size used by the UI PageSize int `json:"pageSize"` // A page size; the actual possible values will only be provided by the frontend Language string `json:"language"` // Specifies the language used by the UI + Locale string `json:"locale"` // Specifies the locale used by the UI for formatting numbers and dates } diff --git a/internal/sleepable/sleepable_timer.go b/internal/sleepable/sleepable_timer.go index c2ac1ee5c9f..3de5a1bcc7a 100644 --- a/internal/sleepable/sleepable_timer.go +++ b/internal/sleepable/sleepable_timer.go @@ -54,10 +54,7 @@ func NewTimer(nowFunc func() time.Time, until time.Time) *Timer { return } - nextSleepTime := until.Sub(now) - if nextSleepTime > maxSleepTime { - nextSleepTime = maxSleepTime - } + nextSleepTime := min(until.Sub(now), maxSleepTime) if currentTimer != nil { 
currentTimer.Stop() diff --git a/internal/sleepable/sleepable_timer_test.go b/internal/sleepable/sleepable_timer_test.go index f2cc369b978..a4e163794be 100644 --- a/internal/sleepable/sleepable_timer_test.go +++ b/internal/sleepable/sleepable_timer_test.go @@ -100,6 +100,7 @@ func TestTimerStop(t *testing.T) { timer := NewTimer(clock.Now, target) timer.Stop() time.Sleep(20 * time.Millisecond) + select { case <-timer.C: t.Error("timer triggered after being stopped") @@ -129,16 +130,12 @@ func TestTimerConcurrentStop(t *testing.T) { var wg sync.WaitGroup for range 10 { - wg.Add(1) - - go func() { - defer wg.Done() - timer.Stop() - }() + wg.Go(timer.Stop) } wg.Wait() time.Sleep(20 * time.Millisecond) + select { case <-timer.C: t.Error("timer triggered after being stopped") @@ -154,6 +151,7 @@ func TestTimerEdgeCases(t *testing.T) { t.Run("past time", func(t *testing.T) { start := clock.Now() target := start.Add(-1 * time.Second) + timer := NewTimer(clock.Now, target) select { case <-timer.C: @@ -165,6 +163,7 @@ func TestTimerEdgeCases(t *testing.T) { t.Run("exactly now", func(t *testing.T) { start := clock.Now() target := start + timer := NewTimer(clock.Now, target) select { case <-timer.C: @@ -176,6 +175,7 @@ func TestTimerEdgeCases(t *testing.T) { t.Run("very long duration", func(t *testing.T) { start := clock.Now() target := start.Add(100 * time.Millisecond) // Use a shorter duration for testing + timer := NewTimer(clock.Now, target) select { case <-timer.C: @@ -201,6 +201,7 @@ func TestTimerChannelBehavior(t *testing.T) { <-timer.C <-timer.C <-timer.C + select { case <-timer.C: default: @@ -214,6 +215,7 @@ func TestTimerChannelBehavior(t *testing.T) { timer := NewTimer(clock.Now, target) timer.Stop() time.Sleep(20 * time.Millisecond) + select { case <-timer.C: t.Error("stopped timer channel should not be closed") diff --git a/internal/stat/stat_bsd.go b/internal/stat/stat_bsd.go index 92b4a5e7dff..056f43030c0 100644 --- a/internal/stat/stat_bsd.go +++ 
b/internal/stat/stat_bsd.go @@ -1,5 +1,4 @@ //go:build openbsd -// +build openbsd // Package stat provides a cross-platform abstraction for // common stat commands. diff --git a/internal/stat/stat_test.go b/internal/stat/stat_test.go index 143920764cb..168c2cdb151 100644 --- a/internal/stat/stat_test.go +++ b/internal/stat/stat_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package stat diff --git a/internal/stat/stat_unix.go b/internal/stat/stat_unix.go index a0555cc9857..4745140a938 100644 --- a/internal/stat/stat_unix.go +++ b/internal/stat/stat_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin // Package stat provides a cross-platform abstraction for common stat commands. package stat diff --git a/internal/stat/stat_windows.go b/internal/stat/stat_windows.go index 7220b72076d..29d6872c580 100644 --- a/internal/stat/stat_windows.go +++ b/internal/stat/stat_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows // Package stat provides a cross-platform abstraction for // common stat commands. diff --git a/internal/stats/count_map.go b/internal/stats/count_map.go new file mode 100644 index 00000000000..9ce159e51b2 --- /dev/null +++ b/internal/stats/count_map.go @@ -0,0 +1,82 @@ +package stats + +import ( + "sync" + "sync/atomic" +) + +// CountersMap is a concurrency-safe map from keys of type K to uint32 counters. +// It allows increments and retrievals of counts from concurrent go routines +// without additional concurrency coordination. +// Added counters cannot be removed. +type CountersMap[K comparable] struct { + // Stores map[K]*atomic.Uint32 + // The counter is stored as pointer so it can be updated with atomic operations. + data sync.Map + length atomic.Uint32 +} + +// Increment increases the counter for the specified key by 1. +// Returns true if the key already existed, false if it was newly created. 
+func (m *CountersMap[K]) Increment(key K) bool { + return m.add(key, 1) +} + +// add increases the counter for the specified key by the value of v. +// Returns true if the key already existed, false if it was newly created. +// Note: if this function is directly exported at some point, then overflow +// checks should be performed. +func (m *CountersMap[K]) add(key K, v uint32) bool { + // Attempt looking for an already existing entry first to avoid spurious + // (value) allocations in workloads where the entry likely exists already. + actual, found := m.data.Load(key) + if !found { + actual, found = m.data.LoadOrStore(key, &atomic.Uint32{}) + if !found { + m.length.Add(1) + } + } + + actual.(*atomic.Uint32).Add(v) //nolint:forcetypeassert + + return found +} + +// Length returns the approximate number of keys in the map. The actual number +// of keys can be equal or larger to the returned value, but not less. +func (m *CountersMap[K]) Length() uint { + return uint(m.length.Load()) +} + +// Get returns the current value of the counter for key, or 0 when key does not exist. +func (m *CountersMap[K]) Get(key K) (uint32, bool) { + actual, ok := m.data.Load(key) + if !ok { + return 0, false // Key not found, return 0 + } + + return actual.(*atomic.Uint32).Load(), true //nolint:forcetypeassert +} + +// Range iterates over all key/count pairs in the map, calling f for each item. +// If f returns false, iteration stops. +func (m *CountersMap[K]) Range(f func(key K, count uint32) bool) { + m.data.Range(func(k, v any) bool { + return f(k.(K), v.(*atomic.Uint32).Load()) //nolint:forcetypeassert + }) +} + +// CountMap returns the current value of the counters. The counters do not +// correspond to a consistent snapshot of the map, the counters may change +// while the returned map is built. 
+func (m *CountersMap[K]) CountMap() map[K]uint32 { + r := map[K]uint32{} + + m.Range(func(key K, count uint32) bool { + r[key] = count + + return true + }) + + return r +} diff --git a/internal/stats/count_map_test.go b/internal/stats/count_map_test.go new file mode 100644 index 00000000000..839128539c0 --- /dev/null +++ b/internal/stats/count_map_test.go @@ -0,0 +1,155 @@ +package stats + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConcurrentCountMap_Get_MissingKey(t *testing.T) { + var m CountersMap[string] + + v, found := m.Get("missing") + + require.False(t, found) + require.EqualValues(t, 0, v, "expected 0 for missing key") +} + +func TestConcurrentCountMap_IncrementAndGet_NewAndExistingKey(t *testing.T) { + var m CountersMap[string] + + found := m.Increment("foo") + require.False(t, found, "Increment on new key should return false") + + got, found := m.Get("foo") + require.True(t, found) + require.EqualValues(t, 1, got, "expected 1 after first increment") + + found = m.Increment("foo") + require.True(t, found, "Increment on existing key should return true") + + got, found = m.Get("foo") + require.True(t, found) + require.EqualValues(t, 2, got, "expected 2 after second increment") +} + +func TestConcurrentCountMap_Add(t *testing.T) { + var m CountersMap[int] + + found := m.add(42, 5) + require.False(t, found, "Add on new key should return false to indicate 'key not previously found'") + + v, found := m.Get(42) + + require.EqualValues(t, 5, v, "expected 5 after add") + require.True(t, found) + + found = m.add(42, 3) + require.True(t, found, "Add on existing key should return true") + + v, found = m.Get(42) + + require.EqualValues(t, 8, v, "expected 8 after second add") + require.True(t, found) +} + +func TestConcurrentCountMap_Range(t *testing.T) { + var m CountersMap[string] + + expectedMap := map[string]uint32{ + "a": 1, + "b": 2, + "c": 3, + } + + for k, v := range expectedMap { + require.False(t, m.add(k, v)) + } 
+ + m.Range(func(key string, gotCount uint32) bool { + expectedCount, found := expectedMap[key] + + require.True(t, found) + require.Equal(t, expectedCount, gotCount) + + return true + }) +} + +func TestConcurrentCountMap_Length(t *testing.T) { + var m CountersMap[string] + + require.False(t, m.add("a", 1)) + require.False(t, m.add("b", 2)) + + require.EqualValues(t, 2, m.Length()) + require.True(t, m.add("b", 2)) + require.EqualValues(t, 2, m.Length()) + + require.False(t, m.add("c", 3)) + require.EqualValues(t, 3, m.Length()) +} + +func TestConcurrentCountMap_Range_StopEarly(t *testing.T) { + var m CountersMap[string] + + require.False(t, m.add("a", 1)) + require.False(t, m.add("b", 2)) + require.False(t, m.add("c", 3)) + + count := 0 + + m.Range(func(key string, v uint32) bool { + count++ + + return false // stop after first + }) + + require.Equal(t, 1, count, "Range should stop after one iteration") +} + +func TestConcurrentCountMap_CountMap_Snapshot(t *testing.T) { + var m CountersMap[string] + + require.False(t, m.add("x", 10)) + require.False(t, m.add("y", 20)) + + expected := map[string]uint32{ + "x": 10, + "y": 20, + } + + require.Equal(t, expected, m.CountMap()) +} + +func TestConcurrentCountMap_ConcurrentIncrement(t *testing.T) { + const ( + key = 7 + concurrency = 8 + inc = 1000 + ) + + var m CountersMap[int] + + var wg sync.WaitGroup + + wg.Add(concurrency) + + for range concurrency { + go func() { + for range inc { + m.Increment(key) + } + + wg.Done() + }() + } + + wg.Wait() + + got, found := m.Get(key) + + require.True(t, found) + require.EqualValues(t, concurrency*inc, got) +} diff --git a/internal/stats/countsum.go b/internal/stats/countsum.go index aaf25cd5d07..66b9d7d9b0d 100644 --- a/internal/stats/countsum.go +++ b/internal/stats/countsum.go @@ -1,7 +1,9 @@ // Package stats provides helpers for simple stats package stats -import "sync/atomic" +import ( + "sync/atomic" +) // CountSum holds sum and count values. 
type CountSum struct { diff --git a/internal/tempfile/tempfile_linux_fallback_test.go b/internal/tempfile/tempfile_linux_fallback_test.go index dbc88e48eb9..58973a08464 100644 --- a/internal/tempfile/tempfile_linux_fallback_test.go +++ b/internal/tempfile/tempfile_linux_fallback_test.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux package tempfile diff --git a/internal/tempfile/tempfile_unix_fallback.go b/internal/tempfile/tempfile_unix_fallback.go index f1ac3cb0011..96d62c49d03 100644 --- a/internal/tempfile/tempfile_unix_fallback.go +++ b/internal/tempfile/tempfile_unix_fallback.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package tempfile diff --git a/internal/tempfile/tempfile_unix_nonlinux.go b/internal/tempfile/tempfile_unix_nonlinux.go index b0f28acc451..3a4ae850650 100644 --- a/internal/tempfile/tempfile_unix_nonlinux.go +++ b/internal/tempfile/tempfile_unix_nonlinux.go @@ -1,5 +1,4 @@ //go:build freebsd || darwin || openbsd -// +build freebsd darwin openbsd package tempfile diff --git a/internal/testlogging/ctx.go b/internal/testlogging/ctx.go index 2fc235aae77..44b3164b75e 100644 --- a/internal/testlogging/ctx.go +++ b/internal/testlogging/ctx.go @@ -11,6 +11,7 @@ import ( ) type testingT interface { + Context() context.Context Helper() Errorf(msg string, args ...any) Fatalf(msg string, args ...any) @@ -40,23 +41,35 @@ func Context(t testingT) context.Context { return ContextWithLevel(t, LevelDebug) } +// ContextForCleanup returns a context with attached logger that emits all log entries to go testing.T log output. +// This context is not canceled when the test finishes, so it is suitable to be used in cleanup functions. 
+func ContextForCleanup(t testingT) context.Context { + return contextWithLevelForCleanup(t, LevelDebug) +} + +func contextWithLevelForCleanup(t testingT, level Level) context.Context { + return logging.WithLogger(context.WithoutCancel(t.Context()), func(module string) logging.Logger { + return PrintfLevel(t.Logf, "["+module+"] ", level) + }) +} + // ContextWithLevel returns a context with attached logger that emits all log entries with given log level or above. func ContextWithLevel(t testingT, level Level) context.Context { - return logging.WithLogger(context.Background(), func(module string) logging.Logger { + return logging.WithLogger(t.Context(), func(module string) logging.Logger { return PrintfLevel(t.Logf, "["+module+"] ", level) }) } // ContextWithLevelAndPrefix returns a context with attached logger that emits all log entries with given log level or above. func ContextWithLevelAndPrefix(t testingT, level Level, prefix string) context.Context { - return logging.WithLogger(context.Background(), func(module string) logging.Logger { + return logging.WithLogger(t.Context(), func(module string) logging.Logger { return PrintfLevel(t.Logf, "["+module+"] "+prefix, level) }) } // ContextWithLevelAndPrefixFunc returns a context with attached logger that emits all log entries with given log level or above. 
func ContextWithLevelAndPrefixFunc(t testingT, level Level, prefixFunc func() string) context.Context { - return logging.WithLogger(context.Background(), func(module string) logging.Logger { + return logging.WithLogger(t.Context(), func(module string) logging.Logger { return PrintfLevel(t.Logf, "["+module+"] "+prefixFunc(), level) }) } diff --git a/internal/testutil/dockertestutil.go b/internal/testutil/dockertestutil.go index bdd739a04b2..6b69d8ae7ef 100644 --- a/internal/testutil/dockertestutil.go +++ b/internal/testutil/dockertestutil.go @@ -2,22 +2,30 @@ package testutil import ( "bytes" + "context" "net/url" "os" "os/exec" "strings" "testing" + + "github.com/kopia/kopia/internal/testlogging" ) // RunDockerAndGetOutputOrSkip runs Docker and returns the output as a string. func RunDockerAndGetOutputOrSkip(tb testing.TB, args ...string) string { tb.Helper() - tb.Logf("running docker %v", args) - c := exec.Command("docker", args...) + return runDockerAndGetOutputOrSkip(testlogging.Context(tb), tb, args...) +} + +func runDockerAndGetOutputOrSkip(ctx context.Context, tb testing.TB, args ...string) string { + tb.Helper() + tb.Logf("running docker %v", args) var stderr bytes.Buffer + c := exec.CommandContext(ctx, "docker", args...) c.Stderr = &stderr out, err := c.Output() @@ -41,7 +49,8 @@ func RunContainerAndKillOnCloseOrSkip(t *testing.T, args ...string) string { containerID := RunDockerAndGetOutputOrSkip(t, args...) 
t.Cleanup(func() { - RunDockerAndGetOutputOrSkip(t, "kill", containerID) + // t.Context() is canceled by the time cleanup executes, so it cannot be used here + runDockerAndGetOutputOrSkip(context.WithoutCancel(t.Context()), t, "kill", containerID) }) return containerID diff --git a/internal/testutil/norace.go b/internal/testutil/norace.go index d1855196e45..0d242f18c14 100644 --- a/internal/testutil/norace.go +++ b/internal/testutil/norace.go @@ -1,5 +1,4 @@ //go:build !race -// +build !race package testutil diff --git a/internal/testutil/race.go b/internal/testutil/race.go index 2e50295f2bd..bb636cd0999 100644 --- a/internal/testutil/race.go +++ b/internal/testutil/race.go @@ -1,5 +1,4 @@ //go:build race -// +build race package testutil diff --git a/internal/testutil/serverparameters.go b/internal/testutil/serverparameters.go index 75a1d577a9e..66cc2ff0138 100644 --- a/internal/testutil/serverparameters.go +++ b/internal/testutil/serverparameters.go @@ -21,21 +21,21 @@ type ServerParameters struct { // ProcessOutput processes output lines from a server that's starting up. 
func (s *ServerParameters) ProcessOutput(l string) bool { - if strings.HasPrefix(l, serverOutputAddress) { - s.BaseURL = strings.TrimPrefix(l, serverOutputAddress) + if after, ok := strings.CutPrefix(l, serverOutputAddress); ok { + s.BaseURL = after return false } - if strings.HasPrefix(l, serverOutputCertSHA256) { - s.SHA256Fingerprint = strings.TrimPrefix(l, serverOutputCertSHA256) + if after, ok := strings.CutPrefix(l, serverOutputCertSHA256); ok { + s.SHA256Fingerprint = after } - if strings.HasPrefix(l, serverOutputPassword) { - s.Password = strings.TrimPrefix(l, serverOutputPassword) + if after, ok := strings.CutPrefix(l, serverOutputPassword); ok { + s.Password = after } - if strings.HasPrefix(l, serverOutputControlPassword) { - s.ServerControlPassword = strings.TrimPrefix(l, serverOutputControlPassword) + if after, ok := strings.CutPrefix(l, serverOutputControlPassword); ok { + s.ServerControlPassword = after } return true diff --git a/internal/testutil/tmpdir.go b/internal/testutil/tmpdir.go index ca7fb5d7bd3..1d39e1332af 100644 --- a/internal/testutil/tmpdir.go +++ b/internal/testutil/tmpdir.go @@ -92,12 +92,12 @@ func TempDirectoryShort(tb testing.TB) string { // TempLogDirectory returns a temporary directory used for storing logs. // If KOPIA_LOGS_DIR is provided. 
-func TempLogDirectory(t *testing.T) string { - t.Helper() +func TempLogDirectory(tb testing.TB) string { + tb.Helper() - cleanName := strings.NewReplacer("/", "_", "\\", "_", ":", "_").Replace(t.Name()) + cleanName := strings.NewReplacer("/", "_", "\\", "_", ":", "_").Replace(tb.Name()) - t.Helper() + tb.Helper() logsBaseDir := os.Getenv("KOPIA_LOGS_DIR") if logsBaseDir == "" { @@ -106,16 +106,16 @@ func TempLogDirectory(t *testing.T) string { logsDir := filepath.Join(logsBaseDir, cleanName+"."+clock.Now().Local().Format("20060102150405")) - require.NoError(t, os.MkdirAll(logsDir, logsDirPermissions)) + require.NoError(tb, os.MkdirAll(logsDir, logsDirPermissions)) - t.Cleanup(func() { + tb.Cleanup(func() { if os.Getenv("KOPIA_KEEP_LOGS") != "" { - t.Logf("logs preserved in %v", logsDir) + tb.Logf("logs preserved in %v", logsDir) return } - if t.Failed() && os.Getenv("KOPIA_DISABLE_LOG_DUMP_ON_FAILURE") == "" { - dumpLogs(t, logsDir) + if tb.Failed() && os.Getenv("KOPIA_DISABLE_LOG_DUMP_ON_FAILURE") == "" { + dumpLogs(tb, logsDir) } os.RemoveAll(logsDir) //nolint:errcheck @@ -124,36 +124,36 @@ func TempLogDirectory(t *testing.T) string { return logsDir } -func dumpLogs(t *testing.T, dirname string) { - t.Helper() +func dumpLogs(tb testing.TB, dirname string) { + tb.Helper() entries, err := os.ReadDir(dirname) if err != nil { - t.Errorf("unable to read %v: %v", dirname, err) + tb.Errorf("unable to read %v: %v", dirname, err) return } for _, e := range entries { if e.IsDir() { - dumpLogs(t, filepath.Join(dirname, e.Name())) + dumpLogs(tb, filepath.Join(dirname, e.Name())) continue } - dumpLogFile(t, filepath.Join(dirname, e.Name())) + dumpLogFile(tb, filepath.Join(dirname, e.Name())) } } -func dumpLogFile(t *testing.T, fname string) { - t.Helper() +func dumpLogFile(tb testing.TB, fname string) { + tb.Helper() data, err := os.ReadFile(fname) //nolint:gosec if err != nil { - t.Error(err) + tb.Error(err) return } - t.Logf("LOG FILE: %v %v", fname, 
trimOutput(string(data))) + tb.Logf("LOG FILE: %v %v", fname, trimOutput(string(data))) } func trimOutput(s string) string { @@ -176,7 +176,7 @@ func splitLines(s string) []string { } var result []string - for _, l := range strings.Split(s, "\n") { + for l := range strings.SplitSeq(s, "\n") { result = append(result, strings.TrimRight(l, "\r")) } diff --git a/internal/timetrack/estimator.go b/internal/timetrack/estimator.go index 617abad5506..c9869e845bc 100644 --- a/internal/timetrack/estimator.go +++ b/internal/timetrack/estimator.go @@ -1,4 +1,3 @@ -// Package timetrack tracks the progress and estimates completion of a task. package timetrack import ( @@ -39,10 +38,7 @@ func (v Estimator) Estimate(completed, total float64) (Timings, bool) { predictedSeconds := elapsed.Seconds() / completedRatio predictedEndTime := v.startTime.Add(time.Duration(predictedSeconds) * time.Second) - dt := predictedEndTime.Sub(clock.Now()).Truncate(time.Second) - if dt < 0 { - dt = 0 - } + dt := max(predictedEndTime.Sub(clock.Now()).Truncate(time.Second), 0) return Timings{ PercentComplete: 100 * completed / total, diff --git a/internal/tlsutil/tlsutil.go b/internal/tlsutil/tlsutil.go index 4e624d408ed..b7f1e69fe29 100644 --- a/internal/tlsutil/tlsutil.go +++ b/internal/tlsutil/tlsutil.go @@ -11,6 +11,7 @@ import ( "crypto/x509/pkix" "encoding/hex" "encoding/pem" + stderrors "errors" "math/big" "net" "net/http" @@ -86,12 +87,15 @@ func GenerateServerCertificate(ctx context.Context, keySize int, certValid time. } // WritePrivateKeyToFile writes the private key to a given file. 
-func WritePrivateKeyToFile(fname string, priv *rsa.PrivateKey) error { +func WritePrivateKeyToFile(fname string, priv *rsa.PrivateKey) (err error) { f, err := os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, privateKeyFileMode) //nolint:gosec if err != nil { return errors.Wrap(err, "error opening private key file") } - defer f.Close() //nolint:errcheck + + defer func() { + err = stderrors.Join(err, f.Close()) + }() privBytes, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { @@ -106,12 +110,15 @@ func WritePrivateKeyToFile(fname string, priv *rsa.PrivateKey) error { } // WriteCertificateToFile writes the certificate to a given file. -func WriteCertificateToFile(fname string, cert *x509.Certificate) error { +func WriteCertificateToFile(fname string, cert *x509.Certificate) (err error) { f, err := os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, certificateFileMode) //nolint:gosec if err != nil { return errors.Wrap(err, "error opening certificate file") } - defer f.Close() //nolint:errcheck + + defer func() { + err = stderrors.Join(err, f.Close()) + }() if err := pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { return errors.Wrap(err, "Failed to write data") diff --git a/internal/uitask/uitask.go b/internal/uitask/uitask.go index 61313975a34..3260c442431 100644 --- a/internal/uitask/uitask.go +++ b/internal/uitask/uitask.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "maps" "sync" "time" @@ -121,7 +122,7 @@ func (t *runningTaskInfo) ReportCounters(c map[string]CounterValue) { t.mu.Lock() defer t.mu.Unlock() - t.Counters = cloneCounters(c) + t.Counters = maps.Clone(c) } // info returns a copy of task information while holding a lock. 
@@ -130,7 +131,7 @@ func (t *runningTaskInfo) info() Info { defer t.mu.Unlock() i := t.Info - i.Counters = cloneCounters(i.Counters) + i.Counters = maps.Clone(i.Counters) return i } diff --git a/internal/uitask/uitask_counter.go b/internal/uitask/uitask_counter.go index 25eb13dbd20..00b6746a9c9 100644 --- a/internal/uitask/uitask_counter.go +++ b/internal/uitask/uitask_counter.go @@ -46,12 +46,3 @@ func ErrorBytesCounter(v int64) CounterValue { func ErrorCounter(v int64) CounterValue { return CounterValue{v, "", "error"} } - -func cloneCounters(c map[string]CounterValue) map[string]CounterValue { - newCounters := map[string]CounterValue{} - for k, v := range c { - newCounters[k] = v - } - - return newCounters -} diff --git a/internal/uitask/uitask_manager.go b/internal/uitask/uitask_manager.go index 3876959b6d6..45121092387 100644 --- a/internal/uitask/uitask_manager.go +++ b/internal/uitask/uitask_manager.go @@ -106,14 +106,7 @@ func (m *Manager) WaitForTask(ctx context.Context, taskID string, maxWaitTime ti deadline := clock.Now().Add(maxWaitTime) - sleepInterval := maxWaitTime / 10 //nolint:mnd - if sleepInterval > maxWaitInterval { - sleepInterval = maxWaitInterval - } - - if sleepInterval < minWaitInterval { - sleepInterval = minWaitInterval - } + sleepInterval := max(min(maxWaitTime/10, maxWaitInterval), minWaitInterval) //nolint:mnd for maxWaitTime < 0 || clock.Now().Before(deadline) { if !clock.SleepInterruptibly(ctx, sleepInterval) { diff --git a/internal/wcmatch/wcmatch_test.go b/internal/wcmatch/wcmatch_test.go index c8ab1f4bdf4..30aa0646157 100644 --- a/internal/wcmatch/wcmatch_test.go +++ b/internal/wcmatch/wcmatch_test.go @@ -51,7 +51,6 @@ func TestMatchWithBaseDir(t *testing.T) { } matcher, err := NewWildcardMatcher(tc.pattern, BaseDir(tc.baseDir)) - if err != nil { t.Errorf("(%v) unexpected error returned for pattern %#v: %v", i, tc.pattern, err) } else { diff --git a/internal/webdavmount/webdavmount.go b/internal/webdavmount/webdavmount.go index 
81639d68147..1eb187a448a 100644 --- a/internal/webdavmount/webdavmount.go +++ b/internal/webdavmount/webdavmount.go @@ -2,13 +2,13 @@ package webdavmount import ( + "context" "os" "strings" "sync" "sync/atomic" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/net/webdav" "github.com/kopia/kopia/fs" @@ -26,7 +26,7 @@ var ( type webdavFile struct { // webdavFile implements webdav.File but needs context // +checklocks:mu - ctx context.Context + ctx context.Context //nolint:containedctx entry fs.File @@ -100,7 +100,7 @@ func (f *webdavFile) Close() error { type webdavDir struct { // webdavDir implements webdav.File but needs context - ctx context.Context + ctx context.Context //nolint:containedctx w *webdavFS info os.FileInfo diff --git a/internal/workshare/workshare_pool.go b/internal/workshare/workshare_pool.go index fee2cb821e5..5ddd7203887 100644 --- a/internal/workshare/workshare_pool.go +++ b/internal/workshare/workshare_pool.go @@ -1,4 +1,3 @@ -// Package workshare implements work sharing worker pool. 
package workshare import ( @@ -49,11 +48,7 @@ func NewPool[T any](numWorkers int) *Pool[T] { } for range numWorkers { - w.wg.Add(1) - - go func() { - defer w.wg.Done() - + w.wg.Go(func() { for { select { case it := <-w.work: @@ -67,7 +62,7 @@ func NewPool[T any](numWorkers int) *Pool[T] { return } } - }() + }) } return w diff --git a/internal/workshare/workshare_test.go b/internal/workshare/workshare_test.go index de537b48cfe..6384fe89af0 100644 --- a/internal/workshare/workshare_test.go +++ b/internal/workshare/workshare_test.go @@ -154,9 +154,7 @@ func BenchmarkComputeTreeSum(b *testing.B) { w := workshare.NewPool[*computeTreeSumRequest](10) defer w.Close() - b.ResetTimer() - - for range b.N { + for b.Loop() { computeTreeSum(w, treeToWalk) } } diff --git a/notification/notifytemplate/embeddedtemplate.go b/notification/notifytemplate/embeddedtemplate.go index a975a0bd6f9..04c26029aa2 100644 --- a/notification/notifytemplate/embeddedtemplate.go +++ b/notification/notifytemplate/embeddedtemplate.go @@ -93,6 +93,7 @@ func functions(opt Options) template.FuncMap { sort.Slice(res, func(i, j int) bool { return res[i].Manifest.Source.String() < res[j].Manifest.Source.String() }) + return res }, "formatTime": func(t time.Time) string { diff --git a/notification/sender/email/email_sender_test.go b/notification/sender/email/email_sender_test.go index 6473ba5d861..c5f4481be52 100644 --- a/notification/sender/email/email_sender_test.go +++ b/notification/sender/email/email_sender_test.go @@ -20,6 +20,7 @@ func TestEmailProvider(t *testing.T) { LogServerActivity: true, LogToStdout: true, }) + require.NoError(t, srv.Start()) defer srv.Stop() @@ -83,6 +84,7 @@ func TestEmailProvider_Text(t *testing.T) { LogServerActivity: true, LogToStdout: true, }) + require.NoError(t, srv.Start()) defer srv.Stop() @@ -144,6 +146,7 @@ func TestEmailProvider_AUTH(t *testing.T) { LogServerActivity: true, LogToStdout: true, }) + require.NoError(t, srv.Start()) defer srv.Stop() diff --git 
a/notification/sender/webhook/webhook_sender.go b/notification/sender/webhook/webhook_sender.go index 1e860de916b..6e996ebcba4 100644 --- a/notification/sender/webhook/webhook_sender.go +++ b/notification/sender/webhook/webhook_sender.go @@ -34,7 +34,7 @@ func (p *webhookProvider) Send(ctx context.Context, msg *sender.Message) error { req.Header.Set("Subject", msg.Subject) // add extra headers from options - for _, l := range strings.Split(p.opt.Headers, "\n") { + for l := range strings.SplitSeq(p.opt.Headers, "\n") { const numParts = 2 if parts := strings.SplitN(strings.TrimSpace(l), ":", numParts); len(parts) == numParts { req.Header.Set(parts[0], strings.TrimSpace(parts[1])) diff --git a/repo/blob/azure/azure_options.go b/repo/blob/azure/azure_options.go index 2731e14812b..a8eea64ca1d 100644 --- a/repo/blob/azure/azure_options.go +++ b/repo/blob/azure/azure_options.go @@ -36,6 +36,9 @@ type Options struct { StorageDomain string `json:"storageDomain,omitempty"` + // DoNotUseTLS connects to Azure storage over HTTP instead of HTTPS + DoNotUseTLS bool `json:"doNotUseTLS,omitempty"` + throttling.Limits // PointInTime specifies a view of the (versioned) store at that time diff --git a/repo/blob/azure/azure_storage.go b/repo/blob/azure/azure_storage.go index 3d810f31585..e82fef3656a 100644 --- a/repo/blob/azure/azure_storage.go +++ b/repo/blob/azure/azure_storage.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" @@ -410,11 +411,27 @@ func getAZService(opt *Options, storageHostname string) (*azblob.Client, error) serviceErr error ) + protocol := "https" + coreClientOptions := azcore.ClientOptions{ + Telemetry: policy.TelemetryOptions{ + ApplicationID: blob.ApplicationID, + }, + } + + if opt.DoNotUseTLS { + protocol = "http" + 
coreClientOptions.InsecureAllowCredentialWithHTTP = true + } + + clientOptions := &azblob.ClientOptions{ + ClientOptions: coreClientOptions, + } + switch { // shared access signature case opt.SASToken != "": service, serviceErr = azblob.NewClientWithNoCredential( - fmt.Sprintf("https://%s?%s", storageHostname, opt.SASToken), nil) + fmt.Sprintf("%s://%s?%s", protocol, storageHostname, opt.SASToken), clientOptions) // storage account access key case opt.StorageKey != "": // create a credentials object. @@ -424,7 +441,7 @@ func getAZService(opt *Options, storageHostname string) (*azblob.Client, error) } service, serviceErr = azblob.NewClientWithSharedKeyCredential( - fmt.Sprintf("https://%s/", storageHostname), cred, nil, + fmt.Sprintf("%s://%s/", protocol, storageHostname), cred, clientOptions, ) // client secret case opt.TenantID != "" && opt.ClientID != "" && opt.ClientSecret != "": @@ -433,7 +450,7 @@ func getAZService(opt *Options, storageHostname string) (*azblob.Client, error) return nil, errors.Wrap(err, "unable to initialize client secret credential") } - service, serviceErr = azblob.NewClient(fmt.Sprintf("https://%s/", storageHostname), cred, nil) + service, serviceErr = azblob.NewClient(fmt.Sprintf("%s://%s/", protocol, storageHostname), cred, clientOptions) // client certificate case opt.TenantID != "" && opt.ClientID != "" && opt.ClientCertificate != "": certs, key, certErr := azidentity.ParseCertificates([]byte(opt.ClientCertificate), nil) @@ -446,7 +463,7 @@ func getAZService(opt *Options, storageHostname string) (*azblob.Client, error) return nil, errors.Wrap(credErr, "unable to initialize client cert credential") } - service, serviceErr = azblob.NewClient(fmt.Sprintf("https://%s/", storageHostname), cred, nil) + service, serviceErr = azblob.NewClient(fmt.Sprintf("%s://%s/", protocol, storageHostname), cred, clientOptions) // Azure Federated Token case opt.TenantID != "" && opt.ClientID != "" && opt.AzureFederatedTokenFile != "": cred, err := 
azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ @@ -458,7 +475,7 @@ func getAZService(opt *Options, storageHostname string) (*azblob.Client, error) return nil, errors.Wrap(err, "unable to initialize Azure Federated Identity workload identity credential") } - service, serviceErr = azblob.NewClient(fmt.Sprintf("https://%s/", storageHostname), cred, nil) + service, serviceErr = azblob.NewClient(fmt.Sprintf("%s://%s/", protocol, storageHostname), cred, clientOptions) default: return nil, errors.New("one of the storage key, SAS token, client secret, client certificate, or Azure Federated Token must be provided") } diff --git a/repo/blob/azure/azure_storage_test.go b/repo/blob/azure/azure_storage_test.go index 089893b1397..9c30ee23f6a 100644 --- a/repo/blob/azure/azure_storage_test.go +++ b/repo/blob/azure/azure_storage_test.go @@ -29,7 +29,6 @@ const ( testImmutableContainerEnv = "KOPIA_AZURE_TEST_IMMUTABLE_CONTAINER" testImmutableStorageAccountEnv = "KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_ACCOUNT" testImmutableStorageKeyEnv = "KOPIA_AZURE_TEST_IMMUTABLE_STORAGE_KEY" - testImmutableStorageSASTokenEnv = "KOPIA_AZURE_TEST_IMMUTABLE_SAS_TOKEN" testStorageTenantIDEnv = "KOPIA_AZURE_TEST_TENANT_ID" testStorageClientIDEnv = "KOPIA_AZURE_TEST_CLIENT_ID" testStorageClientSecretEnv = "KOPIA_AZURE_TEST_CLIENT_SECRET" @@ -52,16 +51,12 @@ func createContainer(t *testing.T, container, storageAccount, storageKey string) t.Helper() credential, err := azblob.NewSharedKeyCredential(storageAccount, storageKey) - if err != nil { - t.Fatalf("failed to create Azure credentials: %v", err) - } + require.NoError(t, err, "failed to create Azure credentials") serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net", storageAccount) client, err := azblob.NewClientWithSharedKeyCredential(serviceURL, credential, nil) - if err != nil { - t.Fatalf("failed to get client: %v", err) - } + require.NoError(t, err, "failed to get azblob client") _, err = 
client.CreateContainer(context.Background(), container, nil) if err == nil { @@ -91,7 +86,9 @@ func TestCleanupOldData(t *testing.T) { require.NoError(t, err) - defer st.Close(ctx) + t.Cleanup(func() { + st.Close(testlogging.ContextForCleanup(t)) + }) blobtesting.CleanupOldData(ctx, t, st, blobtesting.MinCleanupAge) } @@ -157,8 +154,12 @@ func TestAzureStorageSASToken(t *testing.T) { require.NoError(t, err) cancel() - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ -195,8 +196,12 @@ func TestAzureStorageClientSecret(t *testing.T) { require.NoError(t, err) cancel() - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ -233,8 +238,12 @@ func TestAzureStorageClientCertificate(t *testing.T) { require.NoError(t, err) cancel() - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ -271,8 +280,12 @@ func TestAzureFederatedIdentity(t *testing.T) { require.NoError(t, err) cancel() - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ 
-286,16 +299,15 @@ func TestAzureStorageInvalidBlob(t *testing.T) { storageAccount := getEnvOrSkip(t, testStorageAccountEnv) storageKey := getEnvOrSkip(t, testStorageKeyEnv) - ctx := context.Background() + ctx := testlogging.Context(t) st, err := azure.New(ctx, &azure.Options{ Container: container, StorageAccount: storageAccount, StorageKey: storageKey, }, false) - if err != nil { - t.Fatalf("unable to connect to Azure container: %v", err) - } + + require.NoError(t, err, "unable to connect to Azure container") defer st.Close(ctx) @@ -303,9 +315,7 @@ func TestAzureStorageInvalidBlob(t *testing.T) { defer tmp.Close() err = st.GetBlob(ctx, "xxx", 0, 30, &tmp) - if err == nil { - t.Errorf("unexpected success when adding to non-existent container") - } + require.Error(t, err, "unexpected success when adding to non-existent container") } func TestAzureStorageInvalidContainer(t *testing.T) { @@ -315,16 +325,15 @@ func TestAzureStorageInvalidContainer(t *testing.T) { storageAccount := getEnvOrSkip(t, testStorageAccountEnv) storageKey := getEnvOrSkip(t, testStorageKeyEnv) - ctx := context.Background() + ctx := testlogging.Context(t) + _, err := azure.New(ctx, &azure.Options{ Container: container, StorageAccount: storageAccount, StorageKey: storageKey, }, false) - if err == nil { - t.Errorf("unexpected success connecting to Azure container, wanted error") - } + require.Error(t, err, "unexpected success connecting to Azure container, expected error") } func TestAzureStorageInvalidCreds(t *testing.T) { @@ -334,16 +343,15 @@ func TestAzureStorageInvalidCreds(t *testing.T) { storageKey := "invalid-key" container := "invalid-container" - ctx := context.Background() + ctx := testlogging.Context(t) + _, err := azure.New(ctx, &azure.Options{ Container: container, StorageAccount: storageAccount, StorageKey: storageKey, }, false) - if err == nil { - t.Errorf("unexpected success connecting to Azure blob storage, wanted error") - } + require.Error(t, err, "unexpected success connecting 
to Azure blob storage, expected error") } func getBlobCount(ctx context.Context, t *testing.T, st blob.Storage, prefix blob.ID) int { diff --git a/repo/blob/azure/azure_test.go b/repo/blob/azure/azure_test.go new file mode 100644 index 00000000000..bee3c8ae8fb --- /dev/null +++ b/repo/blob/azure/azure_test.go @@ -0,0 +1,71 @@ +package azure + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/testlogging" + "github.com/kopia/kopia/repo/blob" +) + +func TestUserAgent(t *testing.T) { + ctx := testlogging.Context(t) + container := "testContainer" + storageAccount := "testAccount" + storageKey := base64.StdEncoding.EncodeToString([]byte("testKey")) + + var seenKopiaUserAgent atomic.Bool + + handler := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.Header.Get("User-Agent"), blob.ApplicationID) { + seenKopiaUserAgent.Store(true) + } + + w.WriteHeader(http.StatusOK) + w.Write([]byte("test")) + } + + server := httptest.NewServer(http.HandlerFunc(handler)) + t.Cleanup(server.Close) + + opt := &Options{ + Container: container, + StorageAccount: storageAccount, + StorageKey: storageKey, + DoNotUseTLS: true, + } + serverURL, err := url.Parse(server.URL) + require.NoError(t, err) + + client, err := getAZService(opt, serverURL.Host) + require.NoError(t, err) + require.NotNil(t, client) + + raw := &azStorage{ + Options: *opt, + container: opt.Container, + service: client, + } + + // Test that the User-Agent is set correctly by calling ListBlobs. 
+ nonExistentPrefix := fmt.Sprintf("kopia-azure-storage-initializing-%v", clock.Now().UnixNano()) + err = raw.ListBlobs(ctx, blob.ID(nonExistentPrefix), func(_ blob.Metadata) error { + return nil + }) + require.Error(t, err) + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + require.True(collect, seenKopiaUserAgent.Load()) + }, time.Minute, 100*time.Millisecond) +} diff --git a/repo/blob/azure/azure_versioned_test.go b/repo/blob/azure/azure_versioned_test.go index e6fe6ea304d..9dd703f3729 100644 --- a/repo/blob/azure/azure_versioned_test.go +++ b/repo/blob/azure/azure_versioned_test.go @@ -103,11 +103,12 @@ func TestGetBlobVersions(t *testing.T) { latestData = "latest version" ) - dataBlobs := []string{originalData, updatedData, latestData} - const blobName = "TestGetBlobVersions" + blobID := blob.ID(blobName) + dataBlobs := []string{originalData, updatedData, latestData} dataTimestamps, err := putBlobs(ctx, st, blobID, dataBlobs) + require.NoError(t, err) pastPIT := dataTimestamps[0].Add(-1 * time.Second) @@ -162,6 +163,7 @@ func TestGetBlobVersions(t *testing.T) { require.NoError(t, err) var tmp gather.WriteBuffer + err = st.GetBlob(ctx, blobID, 0, -1, &tmp) require.ErrorIs(t, err, tt.expectedError) require.Equal(t, tt.expectedBlobData, string(tmp.ToByteSlice())) @@ -212,6 +214,7 @@ func TestGetBlobVersionsWithDeletion(t *testing.T) { dataBlobs := []string{originalData, updatedData} const blobName = "TestGetBlobVersionsWithDeletion" + blobID := blob.ID(blobName) dataTimestamps, err := putBlobs(ctx, st, blobID, dataBlobs) require.NoError(t, err) @@ -235,6 +238,7 @@ func TestGetBlobVersionsWithDeletion(t *testing.T) { require.Equal(t, 1, count) var tmp gather.WriteBuffer + err = st.GetBlob(ctx, blobID, 0, -1, &tmp) require.NoError(t, err) require.Equal(t, updatedData, string(tmp.ToByteSlice())) diff --git a/repo/blob/b2/b2_storage_test.go b/repo/blob/b2/b2_storage_test.go index 96a019836c4..1b6edd411d7 100644 --- a/repo/blob/b2/b2_storage_test.go +++ 
b/repo/blob/b2/b2_storage_test.go @@ -81,8 +81,12 @@ func TestB2Storage(t *testing.T) { cancel() require.NoError(t, err) - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) diff --git a/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go b/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go index 8eaf31ae3be..c26a877d1a2 100644 --- a/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go +++ b/repo/blob/filesystem/filesystem_storage_capacity_openbsd.go @@ -1,5 +1,4 @@ //go:build openbsd -// +build openbsd package filesystem diff --git a/repo/blob/filesystem/filesystem_storage_capacity_unix.go b/repo/blob/filesystem/filesystem_storage_capacity_unix.go index d3057c34590..b62431696c0 100644 --- a/repo/blob/filesystem/filesystem_storage_capacity_unix.go +++ b/repo/blob/filesystem/filesystem_storage_capacity_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package filesystem diff --git a/repo/blob/filesystem/filesystem_storage_capacity_windows.go b/repo/blob/filesystem/filesystem_storage_capacity_windows.go index 4a2cf443900..4780befc45a 100644 --- a/repo/blob/filesystem/filesystem_storage_capacity_windows.go +++ b/repo/blob/filesystem/filesystem_storage_capacity_windows.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package filesystem diff --git a/repo/blob/filesystem/filesystem_storage_unix_test.go b/repo/blob/filesystem/filesystem_storage_unix_test.go index 362c4bf7b3e..78b49ccb9b2 100644 --- a/repo/blob/filesystem/filesystem_storage_unix_test.go +++ b/repo/blob/filesystem/filesystem_storage_unix_test.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package filesystem diff --git 
a/repo/blob/filesystem/osinterface_mock_other_test.go b/repo/blob/filesystem/osinterface_mock_other_test.go index 7d6f89913b0..f75e5987292 100644 --- a/repo/blob/filesystem/osinterface_mock_other_test.go +++ b/repo/blob/filesystem/osinterface_mock_other_test.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd && !darwin -// +build !linux,!freebsd,!darwin package filesystem diff --git a/repo/blob/filesystem/osinterface_mock_unix_test.go b/repo/blob/filesystem/osinterface_mock_unix_test.go index 271d30cb49f..d2ae3143ae7 100644 --- a/repo/blob/filesystem/osinterface_mock_unix_test.go +++ b/repo/blob/filesystem/osinterface_mock_unix_test.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package filesystem diff --git a/repo/blob/filesystem/osinterface_realos_other.go b/repo/blob/filesystem/osinterface_realos_other.go index 8e9be018eab..3b18b56be7a 100644 --- a/repo/blob/filesystem/osinterface_realos_other.go +++ b/repo/blob/filesystem/osinterface_realos_other.go @@ -1,5 +1,4 @@ //go:build !linux && !freebsd && !darwin -// +build !linux,!freebsd,!darwin package filesystem diff --git a/repo/blob/filesystem/osinterface_realos_unix.go b/repo/blob/filesystem/osinterface_realos_unix.go index b18afbb9c57..7ce2170b1fe 100644 --- a/repo/blob/filesystem/osinterface_realos_unix.go +++ b/repo/blob/filesystem/osinterface_realos_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || darwin -// +build linux freebsd darwin package filesystem diff --git a/repo/blob/gcs/gcs_storage_test.go b/repo/blob/gcs/gcs_storage_test.go index 1c7a9472c0d..eceb8d8d030 100644 --- a/repo/blob/gcs/gcs_storage_test.go +++ b/repo/blob/gcs/gcs_storage_test.go @@ -53,8 +53,12 @@ func TestGCSStorage(t *testing.T) { cancel() require.NoError(t, err) - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) 
blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) diff --git a/repo/blob/gcs/gcs_versioned_test.go b/repo/blob/gcs/gcs_versioned_test.go index f303492849a..0378c22c2b9 100644 --- a/repo/blob/gcs/gcs_versioned_test.go +++ b/repo/blob/gcs/gcs_versioned_test.go @@ -105,6 +105,7 @@ func TestGetBlobVersions(t *testing.T) { dataBlobs := []string{originalData, updatedData, latestData} const blobName = "TestGetBlobVersions" + blobID := blob.ID(blobName) dataTimestamps, err := putBlobs(ctx, st, blobID, dataBlobs) require.NoError(t, err) @@ -161,6 +162,7 @@ func TestGetBlobVersions(t *testing.T) { require.NoError(t, err) var tmp gather.WriteBuffer + err = st.GetBlob(ctx, blobID, 0, -1, &tmp) require.ErrorIs(t, err, tt.expectedError) require.Equal(t, tt.expectedBlobData, string(tmp.ToByteSlice())) @@ -206,12 +208,11 @@ func TestGetBlobVersionsWithDeletion(t *testing.T) { const ( originalData = "original" updatedData = "some update" + blobName = "TestGetBlobVersionsWithDeletion" ) - dataBlobs := []string{originalData, updatedData} - - const blobName = "TestGetBlobVersionsWithDeletion" blobID := blob.ID(blobName) + dataBlobs := []string{originalData, updatedData} dataTimestamps, err := putBlobs(ctx, st, blobID, dataBlobs) require.NoError(t, err) @@ -234,6 +235,7 @@ func TestGetBlobVersionsWithDeletion(t *testing.T) { require.Equal(t, 1, count) var tmp gather.WriteBuffer + err = st.GetBlob(ctx, blobID, 0, -1, &tmp) require.NoError(t, err) require.Equal(t, updatedData, string(tmp.ToByteSlice())) diff --git a/repo/blob/logging/logging_storage.go b/repo/blob/logging/logging_storage.go index 7020e5140d0..4bf2186578e 100644 --- a/repo/blob/logging/logging_storage.go +++ b/repo/blob/logging/logging_storage.go @@ -8,6 +8,9 @@ import ( "go.opentelemetry.io/otel" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/timetrack" 
"github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/logging" @@ -22,15 +25,19 @@ type loggingStorage struct { base blob.Storage prefix string logger logging.Logger + clog *contentlog.Logger } -func (s *loggingStorage) beginConcurrency() { +func (s *loggingStorage) beginConcurrency(ctx context.Context) { v := s.concurrency.Add(1) if mv := s.maxConcurrency.Load(); v > mv { if s.maxConcurrency.CompareAndSwap(mv, v) && v > 0 { s.logger.Debugw(s.prefix+"concurrency level reached", "maxConcurrency", v) + + contentlog.Log1(ctx, s.clog, "concurrency level reached", + logparam.Int32("maxConcurrency", v)) } } } @@ -43,7 +50,7 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length ctx, span := tracer.Start(ctx, "GetBlob") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -59,6 +66,15 @@ func (s *loggingStorage) GetBlob(ctx context.Context, id blob.ID, offset, length "duration", dt, ) + contentlog.Log6(ctx, s.clog, + "GetBlob", + blobparam.BlobID("blobID", id), + logparam.Int64("offset", offset), + logparam.Int64("length", length), + logparam.Int("outputLength", output.Length()), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -78,6 +94,13 @@ func (s *loggingStorage) GetCapacity(ctx context.Context) (blob.Capacity, error) "duration", dt, ) + contentlog.Log4(ctx, s.clog, + "GetCapacity", + logparam.UInt64("sizeBytes", c.SizeB), + logparam.UInt64("freeBytes", c.FreeB), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return c, err } @@ -90,7 +113,7 @@ func (s *loggingStorage) GetMetadata(ctx context.Context, id blob.ID) (blob.Meta ctx, span := tracer.Start(ctx, "GetMetadata") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -104,6 +127,13 @@ func (s *loggingStorage) GetMetadata(ctx 
context.Context, id blob.ID) (blob.Meta "duration", dt, ) + contentlog.Log4(ctx, s.clog, + "GetMetadata", + blobparam.BlobID("blobID", id), + blobparam.BlobMetadata("result", result), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return result, err } @@ -112,7 +142,7 @@ func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Byte ctx, span := tracer.Start(ctx, "PutBlob") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -126,6 +156,13 @@ func (s *loggingStorage) PutBlob(ctx context.Context, id blob.ID, data blob.Byte "duration", dt, ) + contentlog.Log4(ctx, s.clog, + "PutBlob", + blobparam.BlobID("blobID", id), + logparam.Int("length", data.Length()), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -134,7 +171,7 @@ func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error { ctx, span := tracer.Start(ctx, "DeleteBlob") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -146,6 +183,13 @@ func (s *loggingStorage) DeleteBlob(ctx context.Context, id blob.ID) error { "error", s.translateError(err), "duration", dt, ) + + contentlog.Log3(ctx, s.clog, + "DeleteBlob", + blobparam.BlobID("blobID", id), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -154,7 +198,7 @@ func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback ctx, span := tracer.Start(ctx, "ListBlobs") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -172,6 +216,13 @@ func (s *loggingStorage) ListBlobs(ctx context.Context, prefix blob.ID, callback "duration", dt, ) + contentlog.Log4(ctx, s.clog, + "ListBlobs", + blobparam.BlobID("prefix", prefix), + 
logparam.Int("resultCount", cnt), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -189,6 +240,11 @@ func (s *loggingStorage) Close(ctx context.Context) error { "duration", dt, ) + contentlog.Log2(ctx, s.clog, + "Close", + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -211,6 +267,11 @@ func (s *loggingStorage) FlushCaches(ctx context.Context) error { "duration", dt, ) + contentlog.Log2(ctx, s.clog, + "FlushCaches", + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -219,7 +280,7 @@ func (s *loggingStorage) ExtendBlobRetention(ctx context.Context, b blob.ID, opt ctx, span := tracer.Start(ctx, "ExtendBlobRetention") defer span.End() - s.beginConcurrency() + s.beginConcurrency(ctx) defer s.endConcurrency() timer := timetrack.StartTimer() @@ -231,6 +292,13 @@ func (s *loggingStorage) ExtendBlobRetention(ctx context.Context, b blob.ID, opt "error", err, "duration", dt, ) + + contentlog.Log3(ctx, s.clog, + "ExtendBlobRetention", + blobparam.BlobID("blobID", b), + logparam.Error("error", err), + logparam.Duration("duration", dt)) + //nolint:wrapcheck return err } @@ -248,6 +316,6 @@ func (s *loggingStorage) translateError(err error) any { } // NewWrapper returns a Storage wrapper that logs all storage commands. 
-func NewWrapper(wrapped blob.Storage, logger logging.Logger, prefix string) blob.Storage { - return &loggingStorage{base: wrapped, logger: logger, prefix: prefix} +func NewWrapper(wrapped blob.Storage, logger logging.Logger, clog *contentlog.Logger, prefix string) blob.Storage { + return &loggingStorage{base: wrapped, logger: logger, clog: clog, prefix: prefix} } diff --git a/repo/blob/logging/logging_storage_test.go b/repo/blob/logging/logging_storage_test.go index a30c2af5842..fba79290a60 100644 --- a/repo/blob/logging/logging_storage_test.go +++ b/repo/blob/logging/logging_storage_test.go @@ -1,13 +1,17 @@ package logging_test import ( + "encoding/json" "fmt" "strings" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/kopia/kopia/internal/blobtesting" + "github.com/kopia/kopia/internal/contentlog" "github.com/kopia/kopia/internal/testlogging" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob/logging" @@ -31,7 +35,15 @@ func TestLoggingStorage(t *testing.T) { kt := map[blob.ID]time.Time{} underlying := blobtesting.NewMapStorage(data, kt, nil) - st := logging.NewWrapper(underlying, testlogging.Printf(myOutput, ""), myPrefix) + var jsonOutputCount atomic.Int32 + + myJSONOutput := func(data []byte) { + v := map[string]any{} + require.NoError(t, json.Unmarshal(data, &v)) + jsonOutputCount.Add(1) + } + + st := logging.NewWrapper(underlying, testlogging.Printf(myOutput, ""), contentlog.NewLogger(myJSONOutput), myPrefix) if st == nil { t.Fatalf("unexpected result: %v", st) } @@ -47,6 +59,10 @@ func TestLoggingStorage(t *testing.T) { t.Errorf("did not write any output!") } + if jsonOutputCount.Load() == 0 { + t.Errorf("did not write any JSON output!") + } + if got, want := st.ConnectionInfo().Type, underlying.ConnectionInfo().Type; got != want { t.Errorf("unexpected connection info %v, want %v", got, want) } diff --git a/repo/blob/rclone/rclone_storage.go b/repo/blob/rclone/rclone_storage.go index 
def876aa824..b59bed2b679 100644 --- a/repo/blob/rclone/rclone_storage.go +++ b/repo/blob/rclone/rclone_storage.go @@ -339,7 +339,7 @@ func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) "--vfs-write-back=0s", // disable write-back, critical for correctness ) - r.cmd = exec.Command(rcloneExe, arguments...) //nolint:gosec + r.cmd = exec.CommandContext(context.WithoutCancel(ctx), rcloneExe, arguments...) //nolint:gosec r.cmd.Env = append(r.cmd.Env, opt.RCloneEnv...) // https://github.com/kopia/kopia/issues/1934 @@ -361,6 +361,7 @@ func New(ctx context.Context, opt *Options, isCreate bool) (blob.Storage, error) fingerprintHexString := hex.EncodeToString(fingerprintBytes[:]) var cli http.Client + cli.Transport = &http.Transport{ TLSClientConfig: tlsutil.TLSConfigTrustingSingleCertificate(fingerprintHexString), } diff --git a/repo/blob/rclone/rclone_storage_test.go b/repo/blob/rclone/rclone_storage_test.go index 0196787354a..50b3bc81da2 100644 --- a/repo/blob/rclone/rclone_storage_test.go +++ b/repo/blob/rclone/rclone_storage_test.go @@ -47,7 +47,8 @@ func mustGetRcloneExeOrSkip(t *testing.T) string { rcloneExe = "rclone" } - if err := exec.Command(rcloneExe, "version").Run(); err != nil { + ctx := testlogging.Context(t) + if err := exec.CommandContext(ctx, rcloneExe, "version").Run(); err != nil { if os.Getenv("CI") == "" { t.Skipf("rclone not installed: %v", err) } else { @@ -61,6 +62,38 @@ func mustGetRcloneExeOrSkip(t *testing.T) string { return rcloneExe } +func TestRCloneStorageCancelContext(t *testing.T) { + t.Parallel() + testutil.ProviderTest(t) + + rcloneExe := mustGetRcloneExeOrSkip(t) + dataDir := testutil.TempDirectory(t) + ctx := testlogging.Context(t) + + // use context that gets canceled after opening storage to ensure it's not used beyond New(). + newCtx, cancel := context.WithCancel(ctx) + st, err := rclone.New(newCtx, &rclone.Options{ + // pass local file as remote path. 
+ RemotePath: dataDir, + RCloneExe: rcloneExe, + }, true) + + cancel() + + require.NoError(t, err, "unable to connect to rclone backend") + require.NotNil(t, st, "unable to connect to rclone backend") + + t.Cleanup(func() { + st.Close(testlogging.ContextForCleanup(t)) + }) + + var tmp gather.WriteBuffer + defer tmp.Close() + + err = st.GetBlob(ctx, blob.ID(uuid.New().String()), 0, -1, &tmp) + require.ErrorIs(t, err, blob.ErrBlobNotFound, "unexpected error when downloading non-existent blob") +} + func TestRCloneStorage(t *testing.T) { t.Parallel() testutil.ProviderTest(t) @@ -71,8 +104,8 @@ func TestRCloneStorage(t *testing.T) { dataDir := testutil.TempDirectory(t) // use context that gets canceled after opening storage to ensure it's not used beyond New(). - newctx, cancel := context.WithCancel(ctx) - st, err := rclone.New(newctx, &rclone.Options{ + newCtx, cancel := context.WithCancel(ctx) + st, err := rclone.New(newCtx, &rclone.Options{ // pass local file as remote path. RemotePath: dataDir, RCloneExe: rcloneExe, @@ -250,7 +283,7 @@ func TestRCloneProviders(t *testing.T) { k, ok := st.(Killable) require.True(t, ok, "not killable") - blobtesting.VerifyStorage(ctx, t, logging.NewWrapper(st, testlogging.NewTestLogger(t), "[RCLONE-STORAGE] "), + blobtesting.VerifyStorage(ctx, t, logging.NewWrapper(st, testlogging.NewTestLogger(t), nil, "[RCLONE-STORAGE] "), blob.PutOptions{}) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ -263,15 +296,11 @@ func TestRCloneProviders(t *testing.T) { prefix := uuid.NewString() for i := range 10 { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for j := range 3 { assert.NoError(t, st.PutBlob(ctx, blob.ID(fmt.Sprintf("%v-%v-%v", prefix, i, j)), gather.FromSlice([]byte{1, 2, 3}), blob.PutOptions{})) } - }() + }) } wg.Wait() @@ -330,7 +359,8 @@ func cleanupOldData(t *testing.T, rcloneExe, remotePath string) { } } - c := exec.Command(rcloneExe, "--config", configFile, "lsjson", remotePath) + ctx := 
testlogging.Context(t) + c := exec.CommandContext(ctx, rcloneExe, "--config", configFile, "lsjson", remotePath) b, err := c.Output() require.NoError(t, err) @@ -351,7 +381,7 @@ func cleanupOldData(t *testing.T, rcloneExe, remotePath string) { if age > cleanupAge { t.Logf("purging: %v %v", e.Name, age) - if err := exec.Command(rcloneExe, "--config", configFile, "purge", remotePath+"/"+e.Name).Run(); err != nil { + if err := exec.CommandContext(ctx, rcloneExe, "--config", configFile, "purge", remotePath+"/"+e.Name).Run(); err != nil { t.Logf("error purging %v: %v", e.Name, err) } } diff --git a/repo/blob/s3/s3_storage_test.go b/repo/blob/s3/s3_storage_test.go index bd4c01a405d..0ad693bc2ef 100644 --- a/repo/blob/s3/s3_storage_test.go +++ b/repo/blob/s3/s3_storage_test.go @@ -537,10 +537,9 @@ func TestNeedMD5AWS(t *testing.T) { getOrMakeBucket(t, cli, options, true) // ensure it is a bucket with object locking enabled - want := "Enabled" - if got, _, _, _, _ := cli.GetObjectLockConfig(ctx, options.BucketName); got != want { - t.Fatalf("object locking is not enabled: got '%s', want '%s'", got, want) - } + got, _, _, _, _ := cli.GetObjectLockConfig(ctx, options.BucketName) //nolint:dogsled + + require.Equal(t, "Enabled", got, "object locking is not enabled") // ensure a locking configuration is in place lockingMode := minio.Governance @@ -555,7 +554,7 @@ func TestNeedMD5AWS(t *testing.T) { require.NoError(t, err, "could not create storage") t.Cleanup(func() { - blobtesting.CleanupOldData(ctx, t, s, 0) + blobtesting.CleanupOldData(testlogging.ContextForCleanup(t), t, s, 0) }) err = s.PutBlob(ctx, blob.ID("test-put-blob-0"), gather.FromSlice([]byte("xxyasdf243z")), blob.PutOptions{}) @@ -586,8 +585,12 @@ func testStorage(t *testing.T, options *Options, runValidationTest bool, opts bl cancel() require.NoError(t, err) - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + 
blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) blobtesting.VerifyStorage(ctx, t, st, opts) blobtesting.AssertConnectionInfoRoundTrips(ctx, t, st) @@ -608,8 +611,12 @@ func testPutBlobWithInvalidRetention(t *testing.T, options Options, opts blob.Pu st, err := newStorage(ctx, &options) require.NoError(t, err) - defer st.Close(ctx) - defer blobtesting.CleanupOldData(ctx, t, st, 0) + t.Cleanup(func() { + ctx := testlogging.ContextForCleanup(t) + + blobtesting.CleanupOldData(ctx, t, st, 0) + st.Close(ctx) + }) // Now attempt to add a block and expect to fail require.Error(t, diff --git a/repo/blob/s3/s3_versioned_test.go b/repo/blob/s3/s3_versioned_test.go index 96f8a3720f2..200f5025405 100644 --- a/repo/blob/s3/s3_versioned_test.go +++ b/repo/blob/s3/s3_versioned_test.go @@ -663,7 +663,9 @@ func randLongHex(tb testing.TB, length int) string { b := make([]byte, byteLength) rMu.Lock() + n, err := r.Read(b) + rMu.Unlock() require.NoError(tb, err) @@ -758,13 +760,7 @@ func compareMetadata(tb testing.TB, a, b versionMetadata) { func compareVersionSlices(tb testing.TB, a, b []versionMetadata) { tb.Helper() - l := len(a) - - if len(b) < l { - l = len(b) - } - - for i := range a[:l] { + for i := range min(len(a), len(b)) { compareMetadata(tb, a[i], b[i]) } @@ -853,25 +849,25 @@ func isRetriable(err error) bool { func getVersionedTestStore(tb testing.TB, envName string) *s3Storage { tb.Helper() - ctx := testlogging.Context(tb) o := getProviderOptions(tb, envName) o.Prefix = path.Join(tb.Name(), uuid.NewString()) + "/" - s, err := newStorage(ctx, o) + s, err := newStorage(testlogging.Context(tb), o) require.NoError(tb, err, "error creating versioned store client") tb.Cleanup(func() { - cleanupVersions(tb, s) + ctx := testlogging.ContextForCleanup(tb) + + cleanupVersions(ctx, tb, s) blobtesting.CleanupOldData(ctx, tb, s, 0) }) return s } -func cleanupVersions(tb testing.TB, s *s3Storage) { +func cleanupVersions(ctx context.Context, tb testing.TB, s *s3Storage) { 
tb.Helper() - ctx := testlogging.Context(tb) ch := make(chan minio.ObjectInfo, 4) errChan := s.cli.RemoveObjects(ctx, s.BucketName, ch, minio.RemoveObjectsOptions{}) diff --git a/repo/blob/sftp/sftp_storage.go b/repo/blob/sftp/sftp_storage.go index 9bca5996eda..a4d2cb527ac 100644 --- a/repo/blob/sftp/sftp_storage.go +++ b/repo/blob/sftp/sftp_storage.go @@ -465,7 +465,7 @@ func getSFTPClientExternal(ctx context.Context, opt *Options) (*sftpConnection, log(ctx).Debugf("launching external SSH process %v %v", sshCommand, strings.Join(cmdArgs, " ")) - cmd := exec.Command(sshCommand, cmdArgs...) //nolint:gosec + cmd := exec.CommandContext(ctx, sshCommand, cmdArgs...) //nolint:gosec // send errors from ssh to stderr cmd.Stderr = os.Stderr @@ -553,7 +553,9 @@ func New(ctx context.Context, opts *Options, isCreate bool) (blob.Storage, error impl.rec = connection.NewReconnector(impl) - conn, err := impl.rec.GetOrOpenConnection(ctx) + // removing cancelation from ctx since ctx is likely to be canceled after + // New returns, causing the initial connection to be closed and not reused + conn, err := impl.rec.GetOrOpenConnection(context.WithoutCancel(ctx)) if err != nil { return nil, errors.Wrap(err, "unable to open SFTP storage") } diff --git a/repo/blob/sftp/sftp_storage_test.go b/repo/blob/sftp/sftp_storage_test.go index 603c577dbd0..306473ca26e 100644 --- a/repo/blob/sftp/sftp_storage_test.go +++ b/repo/blob/sftp/sftp_storage_test.go @@ -58,7 +58,8 @@ func runAndGetOutput(t *testing.T, cmd string, args ...string) ([]byte, error) { var stderr bytes.Buffer - c := exec.Command(cmd, args...) + ctx := testlogging.Context(t) + c := exec.CommandContext(ctx, cmd, args...) 
c.Stderr = &stderr o, err := c.Output() @@ -102,12 +103,14 @@ func startDockerSFTPServerOrSkip(t *testing.T, idRSA string) (host string, port sftpUsernameWithPasswordAuth+":"+sftpUserPassword+":::upload2") sftpEndpoint := testutil.GetContainerMappedPortAddress(t, shortContainerID, "22") + ctx := testlogging.Context(t) + // wait for SFTP server to come up. deadline := clock.Now().Add(dialTimeout) for clock.Now().Before(deadline) { t.Logf("waiting for SFTP server to come up on '%v'...", sftpEndpoint) - conn, err := net.DialTimeout("tcp", sftpEndpoint, time.Second) + conn, err := (&net.Dialer{Timeout: time.Second}).DialContext(ctx, "tcp", sftpEndpoint) if err != nil { t.Logf("err: %v", err) time.Sleep(time.Second) @@ -151,13 +154,12 @@ func startDockerSFTPServerOrSkip(t *testing.T, idRSA string) (host string, port t.Logf("SFTP server OK on host:%q port:%v. Known hosts file: %v", host, port, knownHostsFile) - //nolint:nakedret - return + return host, port, knownHostsFile } t.Skipf("SFTP server did not start!") - return //nolint:nakedret + return "", -1, "" } func TestSFTPStorageValid(t *testing.T) { @@ -210,8 +212,9 @@ func TestSFTPStorageValid(t *testing.T) { t.Run("PasswordCreds", func(t *testing.T) { ctx := testlogging.Context(t) + newctx, cancel := context.WithCancel(ctx) - st, err := createSFTPStorage(ctx, t, sftp.Options{ + st, err := createSFTPStorage(newctx, t, sftp.Options{ Path: "/upload2", Host: host, Username: sftpUsernameWithPasswordAuth, @@ -224,6 +227,8 @@ func TestSFTPStorageValid(t *testing.T) { t.Fatalf("unable to connect to SSH: %v", err) } + cancel() + deleteBlobs(ctx, t, st) blobtesting.VerifyStorage(ctx, t, st, blob.PutOptions{}) diff --git a/repo/blob/sharded/sharded.go b/repo/blob/sharded/sharded.go index 96a1331ae04..e0dbdbfd0de 100644 --- a/repo/blob/sharded/sharded.go +++ b/repo/blob/sharded/sharded.go @@ -79,7 +79,7 @@ func (s *Storage) ListBlobs(ctx context.Context, prefix blob.ID, callback func(b var walkDir func(string, string) error - 
walkDir = func(directory string, currentPrefix string) error { + walkDir = func(directory, currentPrefix string) error { select { case <-finished: // already finished return nil @@ -259,12 +259,12 @@ func (s *Storage) getShardDirectory(ctx context.Context, blobID blob.ID) (string func (s *Storage) GetShardedPathAndFilePath(ctx context.Context, blobID blob.ID) (shardPath, filePath string, err error) { shardPath, blobID, err = s.getShardDirectory(ctx, blobID) if err != nil { - return + return "", "", err } filePath = path.Join(shardPath, s.makeFileName(blobID)) - return + return shardPath, filePath, nil } // New returns new sharded.Storage helper. diff --git a/repo/blob/sharded/sharded_parameters.go b/repo/blob/sharded/sharded_parameters.go index adf37b6420b..e6169ff4238 100644 --- a/repo/blob/sharded/sharded_parameters.go +++ b/repo/blob/sharded/sharded_parameters.go @@ -1,4 +1,3 @@ -// Package sharded implements common support for sharded blob providers, such as filesystem or webdav. package sharded import ( diff --git a/repo/blob/storage.go b/repo/blob/storage.go index 9a35db5d1c4..28a24a9e469 100644 --- a/repo/blob/storage.go +++ b/repo/blob/storage.go @@ -45,6 +45,10 @@ var ErrNotAVolume = errors.New("unsupported method, storage is not a volume") // function on a storage implementation that does not have the intended functionality. var ErrUnsupportedObjectLock = errors.New("object locking unsupported") +// ApplicationID is sent to storage providers as metadata in the User-Agent of requests. +// It is used to identify the application making the request. +var ApplicationID = "kopia" + // Bytes encapsulates a sequence of bytes, possibly stored in a non-contiguous buffers, // which can be written sequentially or treated as a io.Reader. type Bytes interface { @@ -398,12 +402,12 @@ func PutBlobAndGetMetadata(ctx context.Context, st Storage, blobID ID, data Byte } // ReadBlobMap reads the map of all the blobs indexed by ID. 
-func ReadBlobMap(ctx context.Context, br Reader) (map[ID]Metadata, error) { +func ReadBlobMap(ctx context.Context, bl Lister) (map[ID]Metadata, error) { blobMap := map[ID]Metadata{} log(ctx).Info("Listing blobs...") - if err := br.ListBlobs(ctx, "", func(bm Metadata) error { + if err := bl.ListBlobs(ctx, "", func(bm Metadata) error { blobMap[bm.BlobID] = bm if len(blobMap)%10000 == 0 { log(ctx).Infof(" %v blobs...", len(blobMap)) diff --git a/repo/blob/throttling/throttler_test.go b/repo/blob/throttling/throttler_test.go index 2e26b5e0bd6..38f5055ecff 100644 --- a/repo/blob/throttling/throttler_test.go +++ b/repo/blob/throttling/throttler_test.go @@ -112,15 +112,11 @@ func testRateLimiting(t *testing.T, name string, wantRate float64, worker func(t var wg sync.WaitGroup for range numWorkers { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for clock.Now().Before(deadline) { worker(total) } - }() + }) } wg.Wait() diff --git a/repo/blob/throttling/throttling_semaphore_test.go b/repo/blob/throttling/throttling_semaphore_test.go index b255435b454..26389df47d2 100644 --- a/repo/blob/throttling/throttling_semaphore_test.go +++ b/repo/blob/throttling/throttling_semaphore_test.go @@ -27,15 +27,12 @@ func TestThrottlingSemaphore(t *testing.T) { ) for range 10 { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for range 10 { s.Acquire() mu.Lock() + concurrency++ if concurrency > maxConcurrency { @@ -47,12 +44,14 @@ func TestThrottlingSemaphore(t *testing.T) { time.Sleep(10 * time.Millisecond) mu.Lock() + concurrency-- + mu.Unlock() s.Release() } - }() + }) } wg.Wait() diff --git a/repo/blob/throttling/throttling_storage_test.go b/repo/blob/throttling/throttling_storage_test.go index 76c5e234c6f..dfe7c9751aa 100644 --- a/repo/blob/throttling/throttling_storage_test.go +++ b/repo/blob/throttling/throttling_storage_test.go @@ -55,7 +55,7 @@ func TestThrottling(t *testing.T) { ctx := testlogging.Context(t) m := &mockThrottler{} st := 
blobtesting.NewMapStorage(blobtesting.DataMap{}, nil, nil) - l := bloblogging.NewWrapper(st, testlogging.Printf(m.Printf, ""), "inner.") + l := bloblogging.NewWrapper(st, testlogging.Printf(m.Printf, ""), nil, "inner.") wrapped := throttling.NewWrapper(l, m) var tmp gather.WriteBuffer diff --git a/repo/blob/webdav/webdav_storage_test.go b/repo/blob/webdav/webdav_storage_test.go index e9ec4e81600..fa581465698 100644 --- a/repo/blob/webdav/webdav_storage_test.go +++ b/repo/blob/webdav/webdav_storage_test.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "io" + "maps" "net/http" "net/http/httptest" "os" @@ -136,9 +137,7 @@ func transformMissingPUTs(next http.Handler) http.HandlerFunc { w.WriteHeader(http.StatusForbidden) } else { // Passthrough recorded response headers, status code, and body - for header, values := range rec.Header() { - w.Header()[header] = values - } + maps.Copy(w.Header(), rec.Header()) w.WriteHeader(result.StatusCode) io.Copy(w, result.Body) diff --git a/repo/buildinfo.go b/repo/buildinfo.go index c6f7a05467a..66c266c8a2a 100644 --- a/repo/buildinfo.go +++ b/repo/buildinfo.go @@ -1,7 +1,7 @@ package repo import ( - stdlib "log" + stdlog "log" "runtime/debug" "strings" ) @@ -24,7 +24,7 @@ func getBuildInfoAndVersion(linkedInfo, linkedVersion string) (info, version str info, version = linkedInfo, linkedVersion if info != "" && version != "" { - return // use the values specified at link time + return info, version // use the values specified at link time } // a value was not set at link time, set it from the executable's build @@ -32,8 +32,10 @@ func getBuildInfoAndVersion(linkedInfo, linkedVersion string) (info, version str bi, ok := debug.ReadBuildInfo() if !ok { // logging not yet set up, use stdlib's logging - stdlib.Println("executable build information is not available") - return // executable's build info is not available, use values set at link time, if any + stdlog.Println("executable build information is not available") + + // executable's 
build info is not available, use values set at link time, if any + return info, version } if version == "" { @@ -48,7 +50,7 @@ func getBuildInfoAndVersion(linkedInfo, linkedVersion string) (info, version str info = getRevisionString(bi.Settings) } - return + return info, version } func getRevisionString(s []debug.BuildSetting) string { diff --git a/repo/compression/compressor_gzip.go b/repo/compression/compressor_gzip.go index b73b3a4f0b9..25ebe28a16f 100644 --- a/repo/compression/compressor_gzip.go +++ b/repo/compression/compressor_gzip.go @@ -22,6 +22,7 @@ func newGZipCompressor(id HeaderID, level int) Compressor { New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) mustSucceed(err) + return w }, }} diff --git a/repo/compression/compressor_pgzip.go b/repo/compression/compressor_pgzip.go index 1d4399a91eb..05ab0992348 100644 --- a/repo/compression/compressor_pgzip.go +++ b/repo/compression/compressor_pgzip.go @@ -23,6 +23,7 @@ func newpgzipCompressor(id HeaderID, level int) Compressor { New: func() any { w, err := pgzip.NewWriterLevel(bytes.NewBuffer(nil), level) mustSucceed(err) + return w }, }} diff --git a/repo/compression/compressor_test.go b/repo/compression/compressor_test.go index 60c4f4200f2..024bebc82a2 100644 --- a/repo/compression/compressor_test.go +++ b/repo/compression/compressor_test.go @@ -4,7 +4,7 @@ import ( "bytes" "crypto/rand" "fmt" - "sort" + "slices" "testing" "github.com/kopia/kopia/internal/testutil" @@ -98,9 +98,7 @@ func BenchmarkCompressor(b *testing.B) { sortedNames = append(sortedNames, id) } - sort.Slice(sortedNames, func(i, j int) bool { - return sortedNames[i] < sortedNames[j] - }) + slices.Sort(sortedNames) for _, id := range sortedNames { comp := ByName[id] @@ -134,7 +132,7 @@ func compressionBenchmark(b *testing.B, comp Compressor, input []byte, output *b rdr := bytes.NewReader(input) - for range b.N { + for b.Loop() { output.Reset() rdr.Reset(input) @@ -151,7 +149,7 @@ func decompressionBenchmark(b *testing.B, 
comp Compressor, input []byte, output rdr := bytes.NewReader(input) - for range b.N { + for b.Loop() { output.Reset() rdr.Reset(input) diff --git a/repo/compression/compressor_zstd.go b/repo/compression/compressor_zstd.go index 3d4720a461f..9b29d60a93c 100644 --- a/repo/compression/compressor_zstd.go +++ b/repo/compression/compressor_zstd.go @@ -23,6 +23,7 @@ func newZstdCompressor(id HeaderID, level zstd.EncoderLevel) Compressor { New: func() any { w, err := zstd.NewWriter(io.Discard, zstd.WithEncoderLevel(level)) mustSucceed(err) + return w }, }} diff --git a/repo/content/committed_content_index.go b/repo/content/committed_content_index.go index d479852bab8..055e8648655 100644 --- a/repo/content/committed_content_index.go +++ b/repo/content/committed_content_index.go @@ -11,12 +11,14 @@ import ( "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "github.com/kopia/kopia/internal/blobparam" "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/format" - "github.com/kopia/kopia/repo/logging" ) // smallIndexEntryCountThreshold is the threshold to determine whether an @@ -45,7 +47,7 @@ type committedContentIndex struct { // fetchIndexBlob retrieves one index blob from storage fetchIndexBlob func(ctx context.Context, blobID blob.ID, output *gather.WriteBuffer) error - log logging.Logger + log *contentlog.Logger } type committedContentIndexCache interface { @@ -113,7 +115,7 @@ func (c *committedContentIndex) addIndexBlob(ctx context.Context, indexBlobID bl return nil } - c.log.Debugf("use-new-committed-index %v", indexBlobID) + contentlog.Log1(ctx, c.log, "use-new-committed-index", blobparam.BlobID("indexBlobID", indexBlobID)) ndx, err := c.cache.openIndex(ctx, indexBlobID) if err != nil { @@ -195,7 +197,7 @@ func (c 
*committedContentIndex) merge(ctx context.Context, indexFiles []blob.ID) return nil, nil, errors.Wrap(err, "unable to combine small indexes") } - c.log.Debugw("combined index segments", "original", len(newMerged), "merged", len(mergedAndCombined)) + contentlog.Log2(ctx, c.log, "combined index segments", logparam.Int("original", len(newMerged)), logparam.Int("merged", len(mergedAndCombined))) return mergedAndCombined, newUsedMap, nil } @@ -212,7 +214,8 @@ func (c *committedContentIndex) use(ctx context.Context, indexFiles []blob.ID, i return nil } - c.log.Debugf("use-indexes %v", indexFiles) + contentlog.Log1(ctx, c.log, "use-indexes", + blobparam.BlobIDList("indexFiles", indexFiles)) mergedAndCombined, newInUse, err := c.merge(ctx, indexFiles) if err != nil { @@ -229,13 +232,17 @@ func (c *committedContentIndex) use(ctx context.Context, indexFiles []blob.ID, i for k, old := range oldInUse { if newInUse[k] == nil { if err := old.Close(); err != nil { - c.log.Errorf("unable to close unused index file: %v", err) + contentlog.Log1(ctx, c.log, + "unable to close unused index file", + logparam.Error("err", err)) } } } if err := c.cache.expireUnused(ctx, indexFiles); err != nil { - c.log.Errorf("unable to expire unused index files: %v", err) + contentlog.Log1(ctx, c.log, + "unable to expire unused index files", + logparam.Error("err", err)) } return nil @@ -309,7 +316,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv return nil } - c.log.Debugf("Downloading %v new index blobs...", len(indexBlobs)) + contentlog.Log1(ctx, c.log, "Downloading new index blobs", logparam.Int("len", len(indexBlobs))) eg, ctx := errgroup.WithContext(ctx) @@ -323,7 +330,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv if err := c.fetchIndexBlob(ctx, indexBlobID, &data); err != nil { if isPermissiveCacheLoading { - c.log.Errorf("skipping bad read of index blob %v", indexBlobID) + contentlog.Log1(ctx, c.log, "skipping bad read 
of index blob", blobparam.BlobID("indexBlobID", indexBlobID)) continue } @@ -343,7 +350,7 @@ func (c *committedContentIndex) fetchIndexBlobs(ctx context.Context, isPermissiv return errors.Wrap(err, "error downloading indexes") } - c.log.Debug("Index blobs downloaded.") + contentlog.Log(ctx, c.log, "Index blobs downloaded") return nil } @@ -372,7 +379,7 @@ func newCommittedContentIndex(caching *CachingOptions, formatProvider format.Provider, permissiveCacheLoading bool, fetchIndexBlob func(ctx context.Context, blobID blob.ID, output *gather.WriteBuffer) error, - log logging.Logger, + log *contentlog.Logger, minSweepAge time.Duration, ) *committedContentIndex { var cache committedContentIndexCache diff --git a/repo/content/committed_content_index_cache_test.go b/repo/content/committed_content_index_cache_test.go index ee0ef96986f..cb066a42c40 100644 --- a/repo/content/committed_content_index_cache_test.go +++ b/repo/content/committed_content_index_cache_test.go @@ -20,7 +20,12 @@ func TestCommittedContentIndexCache_Disk(t *testing.T) { ta := faketime.NewClockTimeWithOffset(0) - testCache(t, &diskCommittedContentIndexCache{testutil.TempDirectory(t), ta.NowFunc(), func() int { return 3 }, testlogging.Printf(t.Logf, ""), DefaultIndexCacheSweepAge}, ta) + testCache(t, &diskCommittedContentIndexCache{ + testutil.TempDirectory(t), + ta.NowFunc(), + func() int { return 3 }, + nil, DefaultIndexCacheSweepAge, + }, ta) } func TestCommittedContentIndexCache_Memory(t *testing.T) { diff --git a/repo/content/committed_content_index_disk_cache.go b/repo/content/committed_content_index_disk_cache.go index cdddca03373..6a65908c920 100644 --- a/repo/content/committed_content_index_disk_cache.go +++ b/repo/content/committed_content_index_disk_cache.go @@ -7,14 +7,15 @@ import ( "strings" "time" - "github.com/edsrzf/mmap-go" "github.com/pkg/errors" + "github.com/kopia/kopia/internal/blobparam" "github.com/kopia/kopia/internal/cache" + "github.com/kopia/kopia/internal/contentlog" + 
"github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" - "github.com/kopia/kopia/repo/logging" ) const ( @@ -25,7 +26,7 @@ type diskCommittedContentIndexCache struct { dirname string timeNow func() time.Time v1PerContentOverhead func() int - log logging.Logger + log *contentlog.Logger minSweepAge time.Duration } @@ -33,10 +34,10 @@ func (c *diskCommittedContentIndexCache) indexBlobPath(indexBlobID blob.ID) stri return filepath.Join(c.dirname, string(indexBlobID)+simpleIndexSuffix) } -func (c *diskCommittedContentIndexCache) openIndex(_ context.Context, indexBlobID blob.ID) (index.Index, error) { +func (c *diskCommittedContentIndexCache) openIndex(ctx context.Context, indexBlobID blob.ID) (index.Index, error) { fullpath := c.indexBlobPath(indexBlobID) - f, closeMmap, err := c.mmapOpenWithRetry(fullpath) + f, closeMmap, err := c.mmapFile(ctx, fullpath) if err != nil { return nil, err } @@ -50,52 +51,6 @@ func (c *diskCommittedContentIndexCache) openIndex(_ context.Context, indexBlobI return ndx, nil } -// mmapOpenWithRetry attempts mmap.Open() with exponential back-off to work around rare issue specific to Windows where -// we can't open the file right after it has been written. 
-func (c *diskCommittedContentIndexCache) mmapOpenWithRetry(path string) (mmap.MMap, func() error, error) { - const ( - maxRetries = 8 - startingDelay = 10 * time.Millisecond - ) - - // retry milliseconds: 10, 20, 40, 80, 160, 320, 640, 1280, total ~2.5s - f, err := os.Open(path) //nolint:gosec - nextDelay := startingDelay - - retryCount := 0 - for err != nil && retryCount < maxRetries { - retryCount++ - c.log.Debugf("retry #%v unable to mmap.Open(): %v", retryCount, err) - time.Sleep(nextDelay) - nextDelay *= 2 - - f, err = os.Open(path) //nolint:gosec - } - - if err != nil { - return nil, nil, errors.Wrap(err, "unable to open file despite retries") - } - - mm, err := mmap.Map(f, mmap.RDONLY, 0) - if err != nil { - f.Close() //nolint:errcheck - - return nil, nil, errors.Wrap(err, "mmap error") - } - - return mm, func() error { - if err2 := mm.Unmap(); err2 != nil { - return errors.Wrapf(err2, "error unmapping index %v", path) - } - - if err2 := f.Close(); err2 != nil { - return errors.Wrapf(err2, "error closing index %v", path) - } - - return nil - }, nil -} - func (c *diskCommittedContentIndexCache) hasIndexBlobID(_ context.Context, indexBlobID blob.ID) (bool, error) { _, err := os.Stat(c.indexBlobPath(indexBlobID)) if err == nil { @@ -165,10 +120,10 @@ func writeTempFileAtomic(dirname string, data []byte) (string, error) { return tf.Name(), nil } -func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used []blob.ID) error { - c.log.Debugw("expireUnused", - "except", used, - "minSweepAge", c.minSweepAge) +func (c *diskCommittedContentIndexCache) expireUnused(ctx context.Context, used []blob.ID) error { + contentlog.Log2(ctx, c.log, "expireUnused", + blobparam.BlobIDList("except", used), + logparam.Duration("minSweepAge", c.minSweepAge)) entries, err := os.ReadDir(c.dirname) if err != nil { @@ -188,8 +143,7 @@ func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used [] return errors.Wrap(err, "failed to read file info") } - 
if strings.HasSuffix(ent.Name(), simpleIndexSuffix) { - n := strings.TrimSuffix(ent.Name(), simpleIndexSuffix) + if n, ok := strings.CutSuffix(ent.Name(), simpleIndexSuffix); ok { remaining[blob.ID(n)] = fi } } @@ -200,18 +154,20 @@ func (c *diskCommittedContentIndexCache) expireUnused(_ context.Context, used [] for _, rem := range remaining { if c.timeNow().Sub(rem.ModTime()) > c.minSweepAge { - c.log.Debugw("removing unused", - "name", rem.Name(), - "mtime", rem.ModTime()) + contentlog.Log2(ctx, c.log, "removing unused", + logparam.String("name", rem.Name()), + logparam.Time("mtime", rem.ModTime())) if err := os.Remove(filepath.Join(c.dirname, rem.Name())); err != nil { - c.log.Errorf("unable to remove unused index file: %v", err) + contentlog.Log1(ctx, c.log, + "unable to remove unused index file", + logparam.Error("err", err)) } } else { - c.log.Debugw("keeping unused index because it's too new", - "name", rem.Name(), - "mtime", rem.ModTime(), - "threshold", c.minSweepAge) + contentlog.Log3(ctx, c.log, "keeping unused index because it's too new", + logparam.String("name", rem.Name()), + logparam.Time("mtime", rem.ModTime()), + logparam.Duration("threshold", c.minSweepAge)) } } diff --git a/repo/content/committed_content_index_disk_cache_unix.go b/repo/content/committed_content_index_disk_cache_unix.go new file mode 100644 index 00000000000..9c6f7e62bcb --- /dev/null +++ b/repo/content/committed_content_index_disk_cache_unix.go @@ -0,0 +1,50 @@ +//go:build !windows + +package content + +import ( + "context" + "os" + + "github.com/edsrzf/mmap-go" + "github.com/pkg/errors" +) + +// mmapFile opens the named file and mmaps it. +// Unix semantics: Close the file descriptor immediately after a successful mmap so the +// process does not retain FDs for all mapped index files. The mapping remains valid until +// Unmap is called. 
+func (c *diskCommittedContentIndexCache) mmapFile(_ context.Context, filename string) (mmap.MMap, func() error, error) { + f, err := os.Open(filename) //nolint:gosec + if err != nil { + return nil, nil, errors.Wrap(err, "unable to open file despite retries") + } + + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + _ = f.Close() + return nil, nil, errors.Wrap(err, "mmap error") + } + + // On Unix, it's safe to close the FD now; the mapping remains valid. + if err := f.Close(); err != nil { + // If close fails, still return mapping, but report error on closer to surface the issue later. + closeErr := errors.Wrapf(err, "error closing index %v after mmap", filename) + + return mm, func() error { + if err2 := mm.Unmap(); err2 != nil { + return errors.Wrapf(err2, "error unmapping index %v (also had close error: %v)", filename, closeErr) + } + + return closeErr + }, nil + } + + return mm, func() error { + if err2 := mm.Unmap(); err2 != nil { + return errors.Wrapf(err2, "error unmapping index %v", filename) + } + + return nil + }, nil +} diff --git a/repo/content/committed_content_index_disk_cache_windows.go b/repo/content/committed_content_index_disk_cache_windows.go new file mode 100644 index 00000000000..4815dc67737 --- /dev/null +++ b/repo/content/committed_content_index_disk_cache_windows.go @@ -0,0 +1,62 @@ +//go:build windows + +package content + +import ( + "context" + "os" + "time" + + "github.com/edsrzf/mmap-go" + "github.com/pkg/errors" + + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" +) + +// mmapFile attempts mmap.Open() with exponential back-off to work around a rare issue +// where Windows can't open the file right after it has been written. +// +// Windows semantics: keep the file descriptor open until Unmap due to OS requirements. 
+func (c *diskCommittedContentIndexCache) mmapFile(ctx context.Context, filename string) (mmap.MMap, func() error, error) { + const ( + maxRetries = 8 + startingDelay = 10 * time.Millisecond + ) + + // retry milliseconds: 10, 20, 40, 80, 160, 320, 640, 1280, total ~2.5s + f, err := os.Open(filename) //nolint:gosec + nextDelay := startingDelay + + retryCount := 0 + for err != nil && retryCount < maxRetries { + retryCount++ + contentlog.Log2(ctx, c.log, "retry unable to mmap.Open()", + logparam.Int("retryCount", retryCount), + logparam.Error("err", err)) + time.Sleep(nextDelay) + nextDelay *= 2 + + f, err = os.Open(filename) //nolint:gosec + } + + if err != nil { + return nil, nil, errors.Wrap(err, "unable to open file despite retries") + } + + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + _ = f.Close() + return nil, nil, errors.Wrap(err, "mmap error") + } + + return mm, func() error { + if err2 := mm.Unmap(); err2 != nil { + return errors.Wrapf(err2, "error unmapping index %v", filename) + } + if err2 := f.Close(); err2 != nil { + return errors.Wrapf(err2, "error closing index %v", filename) + } + return nil + }, nil +} diff --git a/repo/content/committed_content_index_fd_linux_test.go b/repo/content/committed_content_index_fd_linux_test.go new file mode 100644 index 00000000000..14fd8d4bf37 --- /dev/null +++ b/repo/content/committed_content_index_fd_linux_test.go @@ -0,0 +1,79 @@ +//go:build linux + +package content + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/faketime" + "github.com/kopia/kopia/internal/repodiag" + "github.com/kopia/kopia/internal/testlogging" + "github.com/kopia/kopia/internal/testutil" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/content/index" +) + +// countFDsLinux returns the number of open file descriptors for the current process on Linux. 
// countFDsLinux returns the number of open file descriptors for the current
// process by counting the entries in /proc/self/fd.
func countFDsLinux(t *testing.T) int {
	t.Helper()

	entries, err := os.ReadDir("/proc/self/fd")
	require.NoError(t, err, "unable to read /proc/self/fd")

	return len(entries)
}

// Test that opening many indexes on Linux does not retain a file descriptor per index.
//
// The Linux mmapFile implementation closes the FD right after mmap succeeds,
// so the process-wide FD count should stay roughly flat even with many
// mappings held open simultaneously.
func TestCommittedContentIndexCache_Disk_FDsNotGrowingOnOpen_Linux(t *testing.T) {
	// Do not run in parallel to avoid fd count noise from other tests.
	// NOTE(review): lm is deliberately left nil; this assumes
	// (*repodiag.LogManager)(nil).NewLogger returns a usable no-op logger -
	// confirm against repodiag.
	var lm *repodiag.LogManager

	ctx := testlogging.Context(t)
	ft := faketime.NewClockTimeWithOffset(0)
	cache := &diskCommittedContentIndexCache{
		testutil.TempDirectory(t),
		ft.NowFunc(),
		func() int { return 3 },
		lm.NewLogger("test"),
		DefaultIndexCacheSweepAge,
	}

	const indexCount = 200

	// Prepare N small indexes in the cache directory.
	for i := range indexCount {
		b := index.Builder{
			mustParseID(t, fmt.Sprintf("c%03d", i)): Info{PackBlobID: blob.ID(fmt.Sprintf("p%03d", i)), ContentID: mustParseID(t, fmt.Sprintf("c%03d", i))},
		}
		require.NoError(t, cache.addContentToCache(ctx, blob.ID(fmt.Sprintf("ndx%03d", i)), mustBuildIndex(t, b)))
	}

	before := countFDsLinux(t)

	var opened []index.Index

	// Open all indexes and keep them open to maximize pressure.
	for i := range indexCount {
		ndx, err := cache.openIndex(ctx, blob.ID(fmt.Sprintf("ndx%03d", i)))
		require.NoError(t, err)

		opened = append(opened, ndx)
	}

	after := countFDsLinux(t)

	// Despite keeping many mappings alive, the FD count should not grow proportionally.
	// Allow some slack for incidental FDs opened by runtime or test harness.
	const maxDelta = 32

	require.LessOrEqualf(t, after-before, maxDelta, "fd count grew too much after opening %d indexes", indexCount)

	// Cleanup: close every mapping so the test leaves no FDs behind.
	for _, ndx := range opened {
		require.NoError(t, ndx.Close())
	}
}
- contextLogger logging.Logger repoLogManager *repodiag.LogManager - internalLogger *zap.SugaredLogger // backing logger for 'sharedBaseLogger' metricsStruct } @@ -143,15 +145,20 @@ func (sm *SharedManager) readPackFileLocalIndex(ctx context.Context, packFile bl if packFileLength >= indexRecoverPostambleSize { if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, packFileLength-indexRecoverPostambleSize, indexRecoverPostambleSize, output); err == nil { - sm.log.Debugf("recovered %v index bytes from blob %v using optimized method", output.Length(), packFile) + contentlog.Log2(ctx, sm.log, "recovered index bytes from blob using optimized method", logparam.Int("length", output.Length()), blobparam.BlobID("packFile", packFile)) return nil } - sm.log.Debugf("unable to recover using optimized method: %v", err) + contentlog.Log1(ctx, sm.log, + "unable to recover using optimized method", + logparam.Error("err", err)) } if err = sm.attemptReadPackFileLocalIndex(ctx, packFile, 0, -1, output); err == nil { - sm.log.Debugf("recovered %v index bytes from blob %v using full blob read", output.Length(), packFile) + contentlog.Log2(ctx, sm.log, + "recovered index bytes from blob using full blob read", + logparam.Int("length", output.Length()), + blobparam.BlobID("packFile", packFile)) return nil } @@ -202,9 +209,15 @@ func (sm *SharedManager) attemptReadPackFileLocalIndex(ctx context.Context, pack // +checklocks:sm.indexesLock func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error { + ctx0 := contentlog.WithParams(ctx, + logparam.String("span:loadindex", contentlog.RandomSpanID())) + nextSleepTime := 100 * time.Millisecond //nolint:mnd for i := range indexLoadAttempts { + ctx := contentlog.WithParams(ctx0, + logparam.Int("loadAttempt", i)) + ibm, err0 := sm.indexBlobManager(ctx) if err0 != nil { return err0 @@ -217,11 +230,13 @@ func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error { if i > 0 { // invalidate any list caches. 
- if err := sm.st.FlushCaches(ctx); err != nil { - sm.log.Errorw("unable to flush caches", "err", err) - } + flushTimer := timetrack.StartTimer() + flushErr := sm.st.FlushCaches(ctx) + + contentlog.Log2(ctx, sm.log, "flushCaches", + logparam.Duration("latency", flushTimer.Elapsed()), + logparam.Error("error", flushErr)) - sm.log.Debugf("encountered NOT_FOUND when loading, sleeping %v before retrying #%v", nextSleepTime, i) time.Sleep(nextSleepTime) nextSleepTime *= 2 } @@ -244,7 +259,9 @@ func (sm *SharedManager) loadPackIndexesLocked(ctx context.Context) error { } if len(indexBlobs) > indexBlobCompactionWarningThreshold { - sm.log.Errorf("Found too many index blobs (%v), this may result in degraded performance.\n\nPlease ensure periodic repository maintenance is enabled or run 'kopia maintenance'.", len(indexBlobs)) + log(ctx).Errorf("Found too many index blobs (%v), this may result in degraded performance.\n\nPlease ensure periodic repository maintenance is enabled or run 'kopia maintenance'.", len(indexBlobs)) + + contentlog.Log1(ctx, sm.log, "Found too many index blobs", logparam.Int("len", len(indexBlobs))) } sm.refreshIndexesAfter = sm.timeNow().Add(indexRefreshFrequency) @@ -411,14 +428,8 @@ func newCacheBackingStorage(ctx context.Context, caching *CachingOptions, subdir }, false) } -func (sm *SharedManager) namedLogger(n string) logging.Logger { - if sm.internalLogger != nil { - return logging.Broadcast( - sm.contextLogger, - sm.internalLogger.Named("["+n+"]")) - } - - return sm.contextLogger +func (sm *SharedManager) namedLogger(n string) *contentlog.Logger { + return sm.repoLogManager.NewLogger(n) } func contentCacheSweepSettings(caching *CachingOptions) cache.SweepSettings { @@ -574,25 +585,12 @@ func (sm *SharedManager) CloseShared(ctx context.Context) error { sm.metadataCache.Close(ctx) sm.indexBlobCache.Close(ctx) - if sm.internalLogger != nil { - sm.internalLogger.Sync() //nolint:errcheck - } - sm.indexBlobManagerV1.EpochManager().Flush() + 
sm.repoLogManager.Sync() return nil } -// AlsoLogToContentLog wraps the provided content so that all logs are also sent to -// internal content log. -func (sm *SharedManager) AlsoLogToContentLog(ctx context.Context) context.Context { - sm.repoLogManager.Enable() - - return logging.WithAdditionalLogger(ctx, func(_ string) logging.Logger { - return sm.log - }) -} - func (sm *SharedManager) shouldRefreshIndexes() bool { sm.indexesLock.RLock() defer sm.indexesLock.RUnlock() @@ -624,16 +622,11 @@ func NewSharedManager(ctx context.Context, st blob.Storage, prov format.Provider paddingUnit: defaultPaddingUnit, checkInvariantsOnUnlock: os.Getenv("KOPIA_VERIFY_INVARIANTS") != "", repoLogManager: repoLogManager, - contextLogger: logging.Module(FormatLogModule)(ctx), metricsStruct: initMetricsStruct(mr), } - if !opts.DisableInternalLog { - sm.internalLogger = sm.repoLogManager.NewLogger() - } - - sm.log = sm.namedLogger("shared-manager") + sm.log = sm.repoLogManager.NewLogger("shared-manager") caching = caching.CloneOrDefault() diff --git a/repo/content/content_formatter_test.go b/repo/content/content_formatter_test.go index fda3d8587c1..2efd44cc049 100644 --- a/repo/content/content_formatter_test.go +++ b/repo/content/content_formatter_test.go @@ -59,10 +59,7 @@ func TestFormatters(t *testing.T) { require.NoError(t, enc.Decrypt(cipherText.Bytes(), contentID, &plainText)) h1 := sha1.Sum(plainText.ToByteSlice()) - - if !bytes.Equal(h0[:], h1[:]) { - t.Errorf("Encrypt()/Decrypt() does not round-trip: %x %x", h0, h1) - } + assert.Equal(t, h0[:], h1[:], "Encrypt()/Decrypt() does not round-trip") verifyEndToEndFormatter(ctx, t, hashAlgo, encryptionAlgo) }) @@ -87,10 +84,7 @@ func verifyEndToEndFormatter(ctx context.Context, t *testing.T, hashAlgo, encryp }, MasterKey: make([]byte, 32), // zero key, does not matter }), nil, nil) - if err != nil { - t.Errorf("can't create content manager with hash %v and encryption %v: %v", hashAlgo, encryptionAlgo, err.Error()) - return - } + 
require.NoErrorf(t, err, "can't create content manager with hash %v and encryption %v", hashAlgo, encryptionAlgo) defer bm.CloseShared(ctx) @@ -103,37 +97,22 @@ func verifyEndToEndFormatter(ctx context.Context, t *testing.T, hashAlgo, encryp for _, b := range cases { contentID, err := bm.WriteContent(ctx, b, "", NoCompression) - if err != nil { - t.Errorf("err: %v", err) - } + require.NoError(t, err) t.Logf("contentID %v", contentID) b2, err := bm.GetContent(ctx, contentID) - if err != nil { - t.Fatalf("unable to read content %q: %v", contentID, err) - return - } - if got, want := b2, b.ToByteSlice(); !bytes.Equal(got, want) { - t.Errorf("content %q data mismatch: got %x, wanted %x", contentID, got, want) - return - } + require.NoErrorf(t, err, "unable to read content %q", contentID) + require.Equalf(t, b.ToByteSlice(), b2, "content %q data mismatch", contentID) - if err = bm.Flush(ctx); err != nil { - t.Errorf("flush error: %v", err) - } + err = bm.Flush(ctx) + require.NoError(t, err, "flush error") b3, err := bm.GetContent(ctx, contentID) - if err != nil { - t.Fatalf("unable to read content after flush %q: %v", contentID, err) - return - } - - if got, want := b3, b.ToByteSlice(); !bytes.Equal(got, want) { - t.Errorf("content %q data mismatch: got %x, wanted %x", contentID, got, want) - return - } + + require.NoErrorf(t, err, "unable to read content after flush %q", contentID) + require.Equalf(t, b.ToByteSlice(), b3, "content %q data mismatch", contentID) } } @@ -141,7 +120,7 @@ func mustCreateFormatProvider(t *testing.T, f *format.ContentFormat) format.Prov t.Helper() fop, err := format.NewFormattingOptionsProvider(f, nil) - assert.NoError(t, err) + require.NoError(t, err) return fop } diff --git a/repo/content/content_manager.go b/repo/content/content_manager.go index 06bd41634b4..ff6c7e4335a 100644 --- a/repo/content/content_manager.go +++ b/repo/content/content_manager.go @@ -13,8 +13,12 @@ import ( "github.com/pkg/errors" "go.opentelemetry.io/otel" + 
"github.com/kopia/kopia/internal/blobparam" "github.com/kopia/kopia/internal/cache" "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/repo/blob" @@ -23,7 +27,6 @@ import ( "github.com/kopia/kopia/repo/content/indexblob" "github.com/kopia/kopia/repo/format" "github.com/kopia/kopia/repo/hashing" - "github.com/kopia/kopia/repo/logging" ) // Prefixes for pack blobs. @@ -98,7 +101,7 @@ type WriteManager struct { *SharedManager - log logging.Logger + log *contentlog.Logger } type pendingPackInfo struct { @@ -125,7 +128,9 @@ func (bm *WriteManager) DeleteContent(ctx context.Context, contentID ID) error { bm.revision.Add(1) - bm.log.Debugf("delete-content %v", contentID) + contentlog.Log1(ctx, bm.log, + "delete-content", + contentparam.ContentID("contentID", contentID)) // remove from all pending packs for _, pp := range bm.pendingPacks { @@ -228,7 +233,7 @@ func (bm *WriteManager) maybeRetryWritingFailedPacksUnlocked(ctx context.Context // do not start new uploads while flushing for bm.flushing { - bm.log.Debug("wait-before-retry") + contentlog.Log(ctx, bm.log, "wait-before-retry") bm.cond.Wait() } @@ -239,7 +244,7 @@ func (bm *WriteManager) maybeRetryWritingFailedPacksUnlocked(ctx context.Context // will remove from it on success. fp := append([]*pendingPackInfo(nil), bm.failedPacks...) 
for _, pp := range fp { - bm.log.Debugf("retry-write %v", pp.packBlobID) + contentlog.Log1(ctx, bm.log, "retry-write", blobparam.BlobID("packBlobID", pp.packBlobID)) if err := bm.writePackAndAddToIndexLocked(ctx, pp); err != nil { return errors.Wrap(err, "error writing previously failed pack") @@ -280,7 +285,7 @@ func (bm *WriteManager) addToPackUnlocked(ctx context.Context, contentID ID, dat // do not start new uploads while flushing for bm.flushing { - bm.log.Debug("wait-before-flush") + contentlog.Log(ctx, bm.log, "wait-before-flush") bm.cond.Wait() } @@ -291,7 +296,8 @@ func (bm *WriteManager) addToPackUnlocked(ctx context.Context, contentID ID, dat // will remove from it on success. fp := append([]*pendingPackInfo(nil), bm.failedPacks...) for _, pp := range fp { - bm.log.Debugf("retry-write %v", pp.packBlobID) + contentlog.Log1(ctx, bm.log, "retry-write", + blobparam.BlobID("packBlobID", pp.packBlobID)) if err = bm.writePackAndAddToIndexLocked(ctx, pp); err != nil { bm.unlock(ctx) @@ -351,7 +357,7 @@ func (bm *WriteManager) DisableIndexFlush(ctx context.Context) { bm.lock() defer bm.unlock(ctx) - bm.log.Debug("DisableIndexFlush()") + contentlog.Log(ctx, bm.log, "DisableIndexFlush()") bm.disableIndexFlushCount++ } @@ -362,7 +368,7 @@ func (bm *WriteManager) EnableIndexFlush(ctx context.Context) { bm.lock() defer bm.unlock(ctx) - bm.log.Debug("EnableIndexFlush()") + contentlog.Log(ctx, bm.log, "EnableIndexFlush()") bm.disableIndexFlushCount-- } @@ -444,7 +450,7 @@ func (bm *WriteManager) flushPackIndexesLocked(ctx context.Context, mp format.Mu defer span.End() if bm.disableIndexFlushCount > 0 { - bm.log.Debug("not flushing index because flushes are currently disabled") + contentlog.Log(ctx, bm.log, "not flushing index because flushes are currently disabled") return nil } @@ -551,23 +557,23 @@ func (bm *WriteManager) processWritePackResultLocked(pp *pendingPackInfo, packFi } func (sm *SharedManager) prepareAndWritePackInternal(ctx context.Context, pp 
*pendingPackInfo, onUpload func(int64)) (index.Builder, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:writePack", string(pp.packBlobID))) + mp, mperr := sm.format.GetMutableParameters(ctx) if mperr != nil { return nil, errors.Wrap(mperr, "mutable parameters") } - packFileIndex, err := sm.preparePackDataContent(mp, pp) + packFileIndex, err := sm.preparePackDataContent(ctx, mp, pp) if err != nil { return nil, errors.Wrap(err, "error preparing data content") } if pp.currentPackData.Length() > 0 { if err := sm.writePackFileNotLocked(ctx, pp.packBlobID, pp.currentPackData.Bytes(), onUpload); err != nil { - sm.log.Debugf("failed-pack %v %v", pp.packBlobID, err) return nil, errors.Wrapf(err, "can't save pack data blob %v", pp.packBlobID) } - - sm.log.Debugf("wrote-pack %v %v", pp.packBlobID, pp.currentPackData.Length()) } return packFileIndex, nil @@ -599,6 +605,9 @@ func (bm *WriteManager) setFlushingLocked(v bool) { // Any pending writes completed before Flush() has started are guaranteed to be committed to the // repository before Flush() returns. func (bm *WriteManager) Flush(ctx context.Context) error { + ctx = contentlog.WithParams(ctx, + logparam.String("span:flush", contentlog.RandomSpanID())) + mp, mperr := bm.format.GetMutableParameters(ctx) if mperr != nil { return errors.Wrap(mperr, "mutable parameters") @@ -607,8 +616,6 @@ func (bm *WriteManager) Flush(ctx context.Context) error { bm.lock() defer bm.unlock(ctx) - bm.log.Debug("flush") - // when finished flushing, notify goroutines that were waiting for it. defer bm.cond.Broadcast() @@ -622,7 +629,7 @@ func (bm *WriteManager) Flush(ctx context.Context) error { // will remove from it on success. fp := append([]*pendingPackInfo(nil), bm.failedPacks...) 
for _, pp := range fp { - bm.log.Debugf("retry-write %v", pp.packBlobID) + contentlog.Log1(ctx, bm.log, "retry-write", blobparam.BlobID("packBlobID", pp.packBlobID)) if err := bm.writePackAndAddToIndexLocked(ctx, pp); err != nil { return errors.Wrap(err, "error writing previously failed pack") @@ -630,7 +637,7 @@ func (bm *WriteManager) Flush(ctx context.Context) error { } for len(bm.writingPacks) > 0 { - bm.log.Debugf("waiting for %v in-progress packs to finish", len(bm.writingPacks)) + contentlog.Log1(ctx, bm.log, "waiting for in-progress packs to finish", logparam.Int("len", len(bm.writingPacks))) // wait packs that are currently writing in other goroutines to finish bm.cond.Wait() @@ -652,7 +659,8 @@ func (bm *WriteManager) Flush(ctx context.Context) error { // TODO(jkowalski): this will currently always re-encrypt and re-compress data, perhaps consider a // pass-through mode that preserves encrypted/compressed bits. func (bm *WriteManager) RewriteContent(ctx context.Context, contentID ID) error { - bm.log.Debugf("rewrite-content %v", contentID) + contentlog.Log1(ctx, bm.log, "rewrite-content", + contentparam.ContentID("contentID", contentID)) mp, mperr := bm.format.GetMutableParameters(ctx) if mperr != nil { @@ -683,7 +691,8 @@ func (bm *WriteManager) getContentDataAndInfo(ctx context.Context, contentID ID, // and is mark deleted. If the content exists and is not marked deleted, this // operation is a no-op. func (bm *WriteManager) UndeleteContent(ctx context.Context, contentID ID) error { - bm.log.Debugf("UndeleteContent(%q)", contentID) + contentlog.Log1(ctx, bm.log, "UndeleteContent", + contentparam.ContentID("contentID", contentID)) mp, mperr := bm.format.GetMutableParameters(ctx) if mperr != nil { @@ -696,9 +705,8 @@ func (bm *WriteManager) UndeleteContent(ctx context.Context, contentID ID) error // When onlyRewriteDelete is true, the content is only rewritten if the existing // content is marked as deleted. The new content is NOT marked deleted. 
// -// When onlyRewriteDelete is false, the content is unconditionally rewritten -// -// and the content's deleted status is preserved. +// When onlyRewriteDelete is false, the content is unconditionally rewritten and +// the content's deleted status is preserved. func (bm *WriteManager) rewriteContent(ctx context.Context, contentID ID, onlyRewriteDeleted bool, mp format.MutableParameters) error { var data gather.WriteBuffer defer data.Close() @@ -812,12 +820,6 @@ func (bm *WriteManager) WriteContent(ctx context.Context, data gather.Bytes, pre _, bi, err := bm.getContentInfoReadLocked(ctx, contentID) bm.mu.RUnlock() - logbuf := logging.GetBuffer() - defer logbuf.Release() - - logbuf.AppendString("write-content ") - contentID.AppendToLogBuffer(logbuf) - // content already tracked if err == nil { if !bi.Deleted { @@ -829,12 +831,14 @@ func (bm *WriteManager) WriteContent(ctx context.Context, data gather.Bytes, pre previousWriteTime = bi.TimestampSeconds - logbuf.AppendString(" previously-deleted:") - logbuf.AppendInt64(previousWriteTime) + contentlog.Log2(ctx, bm.log, "write-content-previously-deleted", + contentparam.ContentID("cid", contentID), + logparam.Int64("previousWriteTime", previousWriteTime)) + } else { + contentlog.Log1(ctx, bm.log, "write-content-new", + contentparam.ContentID("cid", contentID)) } - bm.log.Debug(logbuf.String()) - return contentID, bm.addToPackUnlocked(ctx, contentID, data, false, comp, previousWriteTime, mp) } @@ -858,7 +862,11 @@ func (bm *WriteManager) GetContent(ctx context.Context, contentID ID) (v []byte, _, err = bm.getContentDataAndInfo(ctx, contentID, &tmp) if err != nil { - bm.log.Debugf("getContentDataAndInfo(%v) error %v", contentID, err) + contentlog.Log2(ctx, bm.log, + "getContentDataAndInfo", + contentparam.ContentID("contentID", contentID), + logparam.Error("err", err)) + return nil, err } @@ -912,7 +920,10 @@ func (bm *WriteManager) ContentInfo(ctx context.Context, contentID ID) (Info, er _, bi, err := 
bm.getContentInfoReadLocked(ctx, contentID) if err != nil { - bm.log.Debugf("ContentInfo(%q) - error %v", contentID, err) + contentlog.Log2(ctx, bm.log, "ContentInfo", + contentparam.ContentID("contentID", contentID), + logparam.Error("err", err)) + return Info{}, err } @@ -949,7 +960,6 @@ func (bm *WriteManager) MetadataCache() cache.ContentCache { // ManagerOptions are the optional parameters for manager creation. type ManagerOptions struct { TimeNow func() time.Time // Time provider - DisableInternalLog bool PermissiveCacheLoading bool } @@ -1005,7 +1015,7 @@ func NewWriteManager(_ context.Context, sm *SharedManager, options SessionOption sm.uploadedBytes.Add(numBytes) }, - log: sm.namedLogger(writeManagerID), + log: sm.repoLogManager.NewLogger(writeManagerID), } wm.cond = sync.NewCond(&wm.mu) diff --git a/repo/content/content_manager_indexes.go b/repo/content/content_manager_indexes.go index 5677eb3306d..b70fe8c52e0 100644 --- a/repo/content/content_manager_indexes.go +++ b/repo/content/content_manager_indexes.go @@ -6,11 +6,14 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/blobcrypto" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/content/indexblob" + "github.com/kopia/kopia/repo/maintenancestats" ) // Refresh reloads the committed content indexes. 
@@ -18,7 +21,7 @@ func (sm *SharedManager) Refresh(ctx context.Context) error { sm.indexesLock.Lock() defer sm.indexesLock.Unlock() - sm.log.Debug("Refresh started") + timer := timetrack.StartTimer() ibm, err := sm.indexBlobManager(ctx) if err != nil { @@ -27,38 +30,44 @@ func (sm *SharedManager) Refresh(ctx context.Context) error { ibm.Invalidate() - timer := timetrack.StartTimer() - err = sm.loadPackIndexesLocked(ctx) - sm.log.Debugf("Refresh completed in %v", timer.Elapsed()) + + contentlog.Log2(ctx, sm.log, "refreshIndexes", + logparam.Duration("latency", timer.Elapsed()), + logparam.Error("error", err)) return err } // CompactIndexes performs compaction of index blobs ensuring that # of small index blobs is below opt.maxSmallBlobs. -func (sm *SharedManager) CompactIndexes(ctx context.Context, opt indexblob.CompactOptions) error { +func (sm *SharedManager) CompactIndexes(ctx context.Context, opt indexblob.CompactOptions) (*maintenancestats.CompactIndexesStats, error) { // we must hold the lock here to avoid the race with Refresh() which can reload the // current set of indexes while we process them. sm.indexesLock.Lock() defer sm.indexesLock.Unlock() - sm.log.Debugf("CompactIndexes(%+v)", opt) + contentlog.Log4(ctx, sm.log, "CompactIndexes", + logparam.Bool("allIndexes", opt.AllIndexes), + logparam.Int64("maxSmallBlobs", int64(opt.MaxSmallBlobs)), + logparam.Time("dropDeletedBefore", opt.DropDeletedBefore), + logparam.Bool("disableEventualConsistencySafety", opt.DisableEventualConsistencySafety)) ibm, err := sm.indexBlobManager(ctx) if err != nil { - return err + return nil, err } - if err := ibm.Compact(ctx, opt); err != nil { - return errors.Wrap(err, "error performing compaction") + stats, err := ibm.Compact(ctx, opt) + if err != nil { + return nil, errors.Wrap(err, "error performing compaction") } // reload indexes after compaction. 
if err := sm.loadPackIndexesLocked(ctx); err != nil { - return errors.Wrap(err, "error re-loading indexes") + return nil, errors.Wrap(err, "error re-loading indexes") } - return nil + return stats, nil } // ParseIndexBlob loads entries in a given index blob and returns them. diff --git a/repo/content/content_manager_iterate.go b/repo/content/content_manager_iterate.go index 0f5e0e047de..48ef4496f52 100644 --- a/repo/content/content_manager_iterate.go +++ b/repo/content/content_manager_iterate.go @@ -10,6 +10,9 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/bigmap" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" ) @@ -65,11 +68,7 @@ func maybeParallelExecutor(parallel int, originalCallback IterateCallback) (Iter // start N workers, each fetching from the shared channel and invoking the provided callback. // cleanup() must be called to for worker completion for range parallel { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for i := range workch { if err := originalCallback(i); err != nil { select { @@ -78,7 +77,7 @@ func maybeParallelExecutor(parallel int, originalCallback IterateCallback) (Iter } } } - }() + }) } return callback, cleanup @@ -204,10 +203,9 @@ func (bm *WriteManager) IteratePacks(ctx context.Context, options IteratePackOpt pi := packUsage[ci.PackBlobID] if pi == nil { - pi = &PackInfo{} + pi = &PackInfo{PackID: ci.PackBlobID} packUsage[ci.PackBlobID] = pi } - pi.PackID = ci.PackBlobID pi.ContentCount++ pi.TotalSize += int64(ci.PackedLength) if options.IncludeContentInfos { @@ -227,8 +225,8 @@ func (bm *WriteManager) IteratePacks(ctx context.Context, options IteratePackOpt return nil } -// IterateUnreferencedBlobs returns the list of unreferenced storage blobs. 
-func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefixes []blob.ID, parallelism int, callback func(blob.Metadata) error) error { +// IterateUnreferencedPacks returns the list of unreferenced storage blobs. +func (bm *WriteManager) IterateUnreferencedPacks(ctx context.Context, blobPrefixes []blob.ID, parallelism int, callback func(blob.Metadata) error) error { usedPacks, err := bigmap.NewSet(ctx) if err != nil { return errors.Wrap(err, "new set") @@ -236,7 +234,7 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix defer usedPacks.Close(ctx) - bm.log.Debug("determining blobs in use") + contentlog.Log(ctx, bm.log, "determining blobs in use") // find packs in use if err := bm.IteratePacks( ctx, @@ -253,8 +251,6 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix return errors.Wrap(err, "error iterating packs") } - unusedCount := new(int32) - if len(blobPrefixes) == 0 { blobPrefixes = PackBlobIDPrefixes } @@ -272,7 +268,10 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix } } - bm.log.Debugf("scanning prefixes %v", prefixes) + contentlog.Log1(ctx, bm.log, "scanning prefixes", + blobparam.BlobIDList("prefixes", prefixes)) + + var unusedCount atomic.Int32 if err := blob.IterateAllPrefixesInParallel(ctx, parallelism, bm.st, prefixes, func(bm blob.Metadata) error { @@ -280,14 +279,14 @@ func (bm *WriteManager) IterateUnreferencedBlobs(ctx context.Context, blobPrefix return nil } - atomic.AddInt32(unusedCount, 1) + unusedCount.Add(1) return callback(bm) }); err != nil { return errors.Wrap(err, "error iterating blobs") } - bm.log.Debugf("found %v pack blobs not in use", *unusedCount) + contentlog.Log1(ctx, bm.log, "found pack blobs not in use", logparam.Int("unusedCount", int(unusedCount.Load()))) return nil } diff --git a/repo/content/content_manager_lock_free.go b/repo/content/content_manager_lock_free.go index e85ffae65e0..1319dc933ec 100644 --- 
a/repo/content/content_manager_lock_free.go +++ b/repo/content/content_manager_lock_free.go @@ -12,6 +12,10 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/repo/blob" @@ -19,7 +23,6 @@ import ( "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/format" "github.com/kopia/kopia/repo/hashing" - "github.com/kopia/kopia/repo/logging" ) const indexBlobCompactionWarningThreshold = 1000 @@ -120,30 +123,25 @@ func (sm *SharedManager) getContentDataReadLocked(ctx context.Context, pp *pendi return sm.decryptContentAndVerify(payload.Bytes(), bi, output) } -func (sm *SharedManager) preparePackDataContent(mp format.MutableParameters, pp *pendingPackInfo) (index.Builder, error) { +func (sm *SharedManager) preparePackDataContent(ctx context.Context, mp format.MutableParameters, pp *pendingPackInfo) (index.Builder, error) { packFileIndex := index.Builder{} haveContent := false - sb := logging.GetBuffer() - defer sb.Release() - for _, info := range pp.currentPackItems { if info.PackBlobID == pp.packBlobID { haveContent = true - } - sb.Reset() - sb.AppendString("add-to-pack ") - sb.AppendString(string(pp.packBlobID)) - sb.AppendString(" ") - info.ContentID.AppendToLogBuffer(sb) - sb.AppendString(" p:") - sb.AppendString(string(info.PackBlobID)) - sb.AppendString(" ") - sb.AppendUint32(info.PackedLength) - sb.AppendString(" d:") - sb.AppendBoolean(info.Deleted) - sm.log.Debug(sb.String()) + contentlog.Log3(ctx, sm.log, "add-to-pack", + contentparam.ContentID("cid", info.ContentID), + logparam.UInt32("len", info.PackedLength), + logparam.Bool("del", info.Deleted)) + } else { + contentlog.Log4(ctx, sm.log, "move-to-pack", + 
contentparam.ContentID("cid", info.ContentID), + blobparam.BlobID("sourcePack", info.PackBlobID), + logparam.UInt32("len", info.PackedLength), + logparam.Bool("del", info.Deleted)) + } packFileIndex.Add(info) } diff --git a/repo/content/content_manager_test.go b/repo/content/content_manager_test.go index bda1cccba35..f992ea5d846 100644 --- a/repo/content/content_manager_test.go +++ b/repo/content/content_manager_test.go @@ -85,6 +85,7 @@ func (s *contentManagerSuite) TestContentManagerEmptyFlush(t *testing.T) { bm := s.newTestContentManager(t, st) defer bm.CloseShared(ctx) + bm.Flush(ctx) if got, want := len(data), 0; got != want { @@ -99,6 +100,7 @@ func (s *contentManagerSuite) TestContentZeroBytes1(t *testing.T) { bm := s.newTestContentManager(t, st) defer bm.CloseShared(ctx) + contentID := writeContentAndVerify(ctx, t, bm, []byte{}) bm.Flush(ctx) @@ -442,10 +444,12 @@ func (s *contentManagerSuite) TestIndexCompactionDropsContent(t *testing.T) { bm = s.newTestContentManagerWithCustomTime(t, st, timeFunc) // this drops deleted entries, including from index #1 - require.NoError(t, bm.CompactIndexes(ctx, indexblob.CompactOptions{ + _, err := bm.CompactIndexes(ctx, indexblob.CompactOptions{ DropDeletedBefore: deleteThreshold, AllIndexes: true, - })) + }) + require.NoError(t, err) + require.NoError(t, bm.Flush(ctx)) require.NoError(t, bm.CloseShared(ctx)) @@ -522,7 +526,7 @@ func (s *contentManagerSuite) TestContentManagerConcurrency(t *testing.T) { validateIndexCount(t, data, 4, 0) - if err := bm4.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { + if _, err := bm4.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { t.Errorf("compaction error: %v", err) } @@ -540,7 +544,7 @@ func (s *contentManagerSuite) TestContentManagerConcurrency(t *testing.T) { verifyContent(ctx, t, bm5, bm2content, seededRandomData(32, 100)) verifyContent(ctx, t, bm5, bm3content, seededRandomData(33, 100)) - if err := 
bm5.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { + if _, err := bm5.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { t.Errorf("compaction error: %v", err) } } @@ -1027,85 +1031,70 @@ func (s *contentManagerSuite) TestParallelWrites(t *testing.T) { defer bm.CloseShared(ctx) numWorkers := 8 - closeWorkers := make(chan bool) // workerLock allows workers to append to their own list of IDs (when R-locked) in parallel. // W-lock allows flusher to capture the state without any worker being able to modify it. workerWritten := make([][]ID, numWorkers) + var stopWorker atomic.Bool + + // ensure the worker routines are stopped even if the test fails early + t.Cleanup(func() { + stopWorker.Store(true) + }) + // start numWorkers, each writing random block and recording it for workerID := range numWorkers { - workersWG.Add(1) + workersWG.Go(func() { + for !stopWorker.Load() { + id := writeContentAndVerify(ctx, t, bm, seededRandomData(rand.Int(), 100)) - go func() { - defer workersWG.Done() + workerLock.RLock() - for { - select { - case <-closeWorkers: - return - case <-time.After(1 * time.Nanosecond): - id := writeContentAndVerify(ctx, t, bm, seededRandomData(rand.Int(), 100)) + workerWritten[workerID] = append(workerWritten[workerID], id) - workerLock.RLock() - workerWritten[workerID] = append(workerWritten[workerID], id) - workerLock.RUnlock() - } + workerLock.RUnlock() } - }() + }) } - closeFlusher := make(chan bool) - - var flusherWG sync.WaitGroup - - flusherWG.Add(1) + flush := func() { + t.Logf("about to flush") - go func() { - defer flusherWG.Done() + // capture snapshot of all content IDs while holding a writer lock + allWritten := map[ID]bool{} - for { - select { - case <-closeFlusher: - t.Logf("closing flusher goroutine") - return - case <-time.After(2 * time.Second): - t.Logf("about to flush") + workerLock.Lock() - // capture snapshot of all content IDs while holding a writer lock - allWritten := 
map[ID]bool{} - - workerLock.Lock() - - for _, ww := range workerWritten { - for _, id := range ww { - allWritten[id] = true - } - } + for _, ww := range workerWritten { + for _, id := range ww { + allWritten[id] = true + } + } - workerLock.Unlock() + workerLock.Unlock() - t.Logf("captured %v contents", len(allWritten)) + t.Logf("captured %v contents", len(allWritten)) - if err := bm.Flush(ctx); err != nil { - t.Errorf("flush error: %v", err) - } + err := bm.Flush(ctx) - // open new content manager and verify all contents are visible there. - s.verifyAllDataPresent(ctx, t, st, allWritten) - } - } - }() + require.NoError(t, err, "flush error") + // open new content manager and verify all contents are visible there. + s.verifyAllDataPresent(ctx, t, st, allWritten) + } - // run workers and flushers for some time, enough for 2 flushes to complete - time.Sleep(5 * time.Second) + // flush a couple of times + for range 2 { + time.Sleep(2 * time.Second) + flush() + } // shut down workers and wait for them - close(closeWorkers) + stopWorker.Store(true) workersWG.Wait() - close(closeFlusher) - flusherWG.Wait() + // flush and check once more + flush() } func (s *contentManagerSuite) TestFlushResumesWriters(t *testing.T) { @@ -1128,17 +1117,14 @@ func (s *contentManagerSuite) TestFlushResumesWriters(t *testing.T) { bm := s.newTestContentManagerWithTweaks(t, fs, nil) defer bm.CloseShared(ctx) + first := writeContentAndVerify(ctx, t, bm, []byte{1, 2, 3}) var second ID var writeWG sync.WaitGroup - writeWG.Add(1) - - go func() { - defer writeWG.Done() - + writeWG.Go(func() { // start a write while flush is ongoing, the write will block on the condition variable <-resumeWrites t.Logf("write started") @@ -1146,7 +1132,7 @@ func (s *contentManagerSuite) TestFlushResumesWriters(t *testing.T) { second = writeContentAndVerify(ctx, t, bm, []byte{3, 4, 5}) t.Logf("write finished") - }() + }) // flush will take 5 seconds, 1 second into that we will start a write bm.Flush(ctx) @@ -1651,7 
+1637,9 @@ func (s *contentManagerSuite) TestIterateContents(t *testing.T) { } mu.Lock() + got[ci.ContentID] = true + mu.Unlock() return nil @@ -1774,7 +1762,7 @@ func verifyUnreferencedBlobsCount(ctx context.Context, t *testing.T, bm *WriteMa var unrefCount int32 - err := bm.IterateUnreferencedBlobs(ctx, nil, 1, func(_ blob.Metadata) error { + err := bm.IterateUnreferencedPacks(ctx, nil, 1, func(_ blob.Metadata) error { atomic.AddInt32(&unrefCount, 1) return nil }) @@ -1974,7 +1962,7 @@ func (s *contentManagerSuite) verifyVersionCompat(t *testing.T, writeVersion for // make sure we can read everything verifyContentManagerDataSet(ctx, t, mgr, dataSet) - if err := mgr.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { + if _, err := mgr.CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { t.Fatalf("unable to compact indexes: %v", err) } @@ -1987,6 +1975,7 @@ func (s *contentManagerSuite) verifyVersionCompat(t *testing.T, writeVersion for // now open one more manager mgr = s.newTestContentManager(t, st) defer mgr.CloseShared(ctx) + verifyContentManagerDataSet(ctx, t, mgr, dataSet) } @@ -1998,7 +1987,7 @@ func (s *contentManagerSuite) TestReadsOwnWritesWithEventualConsistencyPersisten cacheKeyTime := map[blob.ID]time.Time{} cacheSt := blobtesting.NewMapStorage(cacheData, cacheKeyTime, timeNow) ecst := blobtesting.NewEventuallyConsistentStorage( - logging.NewWrapper(st, testlogging.NewTestLogger(t), "[STORAGE] "), + logging.NewWrapper(st, testlogging.NewTestLogger(t), nil, "[STORAGE] "), 3*time.Second, timeNow) @@ -2132,7 +2121,7 @@ func (s *contentManagerSuite) TestCompression_NonCompressibleData(t *testing.T) nonCompressibleData := make([]byte, 65000) headerID := compression.ByName["pgzip"].HeaderID() - randRead(nonCompressibleData) + randRead(t, nonCompressibleData) cid, err := bm.WriteContent(ctx, gather.FromSlice(nonCompressibleData), "", headerID) require.NoError(t, err) @@ -2221,6 +2210,7 @@ func (s 
*contentManagerSuite) TestPrefetchContent(t *testing.T) { }) defer bm.CloseShared(ctx) + bm.Flush(ctx) // write 6 x 6 MB content in 2 blobs. @@ -2604,9 +2594,7 @@ func flushWithRetries(ctx context.Context, t *testing.T, bm *WriteManager) int { retryCount++ } - if err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, err) return retryCount } @@ -2678,9 +2666,7 @@ func makeRandomHexID(t *testing.T, length int) index.ID { t.Helper() b := make([]byte, length/2) - if _, err := randRead(b); err != nil { - t.Fatal("Could not read random bytes", err) - } + randRead(t, b) id, err := IDFromHash("", b) require.NoError(t, err) @@ -2691,18 +2677,15 @@ func makeRandomHexID(t *testing.T, length int) index.ID { func deleteContent(ctx context.Context, t *testing.T, bm *WriteManager, c ID) { t.Helper() - if err := bm.DeleteContent(ctx, c); err != nil { - t.Fatalf("Unable to delete content %v: %v", c, err) - } + err := bm.DeleteContent(ctx, c) + require.NoErrorf(t, err, "Unable to delete content %v", c) } func getContentInfo(t *testing.T, bm *WriteManager, c ID) Info { t.Helper() i, err := bm.ContentInfo(testlogging.Context(t), c) - if err != nil { - t.Fatalf("Unable to get content info for %q: %v", c, err) - } + require.NoErrorf(t, err, "Unable to get content info for %q", c) return i } @@ -2733,12 +2716,16 @@ var ( rMu sync.Mutex ) -func randRead(b []byte) (n int, err error) { +func randRead(t *testing.T, b []byte) { + t.Helper() + rMu.Lock() - n, err = r.Read(b) - rMu.Unlock() + defer rMu.Unlock() + + n, err := r.Read(b) - return + require.NoError(t, err, "unable to read random bytes") + require.Equal(t, len(b), n) } func dirMetadataContent() gather.Bytes { diff --git a/repo/content/content_prefetch.go b/repo/content/content_prefetch.go index e08f8300977..2a60375ead0 100644 --- a/repo/content/content_prefetch.go +++ b/repo/content/content_prefetch.go @@ -6,6 +6,10 @@ import ( "strings" "sync" + "github.com/kopia/kopia/internal/blobparam" + 
"github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" ) @@ -104,11 +108,7 @@ func (bm *WriteManager) PrefetchContents(ctx context.Context, contentIDs []ID, h }() for range parallelFetches { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { var tmp gather.WriteBuffer defer tmp.Close() @@ -116,21 +116,30 @@ func (bm *WriteManager) PrefetchContents(ctx context.Context, contentIDs []ID, h switch { case strings.HasPrefix(string(w.blobID), string(PackBlobIDPrefixRegular)): if err := bm.contentCache.PrefetchBlob(ctx, w.blobID); err != nil { - bm.log.Debugw("error prefetching data blob", "blobID", w.blobID, "err", err) + contentlog.Log2(ctx, bm.log, + "error prefetching data blob", + blobparam.BlobID("blobID", w.blobID), + logparam.Error("err", err)) } case strings.HasPrefix(string(w.blobID), string(PackBlobIDPrefixSpecial)): if err := bm.metadataCache.PrefetchBlob(ctx, w.blobID); err != nil { - bm.log.Debugw("error prefetching metadata blob", "blobID", w.blobID, "err", err) + contentlog.Log2(ctx, bm.log, + "error prefetching metadata blob", + blobparam.BlobID("blobID", w.blobID), + logparam.Error("err", err)) } case w.contentID != EmptyID: tmp.Reset() if _, err := bm.getContentDataAndInfo(ctx, w.contentID, &tmp); err != nil { - bm.log.Debugw("error prefetching content", "contentID", w.contentID, "err", err) + contentlog.Log2(ctx, bm.log, + "error prefetching content", + contentparam.ContentID("contentID", w.contentID), + logparam.Error("err", err)) } } } - }() + }) } wg.Wait() diff --git a/repo/content/content_reader.go b/repo/content/content_reader.go index 91af67b7124..4fc7367092b 100644 --- a/repo/content/content_reader.go +++ b/repo/content/content_reader.go @@ -20,4 +20,5 @@ type Reader interface { IteratePacks(ctx context.Context, opts IteratePackOptions, callback 
IteratePacksCallback) error ListActiveSessions(ctx context.Context) (map[SessionID]*SessionInfo, error) EpochManager(ctx context.Context) (*epoch.Manager, bool, error) + VerifyContents(ctx context.Context, o VerifyOptions) error } diff --git a/repo/content/index/id.go b/repo/content/index/id.go index 52cb4bf0f88..78312fd081d 100644 --- a/repo/content/index/id.go +++ b/repo/content/index/id.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/repo/hashing" - "github.com/kopia/kopia/repo/logging" ) // IDPrefix represents a content ID prefix (empty string or single character between 'g' and 'z'). @@ -98,16 +97,25 @@ func (i ID) less(other ID) bool { return bytes.Compare(i.data[:i.idLen], other.data[:other.idLen]) < 0 } -// AppendToLogBuffer appends content ID to log buffer. -func (i ID) AppendToLogBuffer(sb *logging.Buffer) { - var buf [128]byte - +// AppendToJSON appends content ID to JSON buffer. +func (i ID) AppendToJSON(buf []byte, maxLength uint8) []byte { + buf = append(buf, '"') if i.prefix != 0 { - sb.AppendByte(i.prefix) + buf = append(buf, i.prefix) } - hex.Encode(buf[0:i.idLen*2], i.data[0:i.idLen]) - sb.AppendBytes(buf[0 : i.idLen*2]) + if maxLength > i.idLen { + maxLength = i.idLen + } + + var tmp [128]byte + + hex.Encode(tmp[0:maxLength*2], i.data[0:maxLength]) + + buf = append(buf, tmp[0:maxLength*2]...) + buf = append(buf, '"') + + return buf } // Append appends content ID to the slice. 
diff --git a/repo/content/index/id_test.go b/repo/content/index/id_test.go index ebcdb66cdaa..23e9cccafc4 100644 --- a/repo/content/index/id_test.go +++ b/repo/content/index/id_test.go @@ -6,8 +6,6 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/kopia/kopia/repo/logging" ) func TestIDValid(t *testing.T) { @@ -26,18 +24,26 @@ func TestIDValid(t *testing.T) { var validContentIDsOrdered []ID - sb := logging.GetBuffer() - defer sb.Release() - for _, s := range validIDsOrdered { cid, err := ParseID(s) require.NoError(t, err) require.Equal(t, s, cid.String()) - sb.Reset() - cid.AppendToLogBuffer(sb) - require.Equal(t, s, sb.String()) + jb := cid.AppendToJSON(nil, 10) + require.Equal(t, "\""+s+"\"", string(jb)) + + if s != "" { + // limit to 3 bytes + jb2 := cid.AppendToJSON(nil, 3) + if len(s)%2 == 0 { + // no prefix - 6 chars + require.Equal(t, "\""+s[:6]+"\"", string(jb2)) + } else { + // with prefix - 7 chars + require.Equal(t, "\""+s[:7]+"\"", string(jb2)) + } + } validContentIDsOrdered = append(validContentIDsOrdered, cid) } diff --git a/repo/content/index/index_builder.go b/repo/content/index/index_builder.go index e6216d8d266..4eec3e31240 100644 --- a/repo/content/index/index_builder.go +++ b/repo/content/index/index_builder.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "hash/fnv" "io" + "maps" "runtime" "sort" "sync" @@ -18,24 +19,9 @@ const randomSuffixSize = 32 // number of random bytes to append at the end to ma // Builder prepares and writes content index. type Builder map[ID]Info -// BuilderCreator is an interface for caller to add indexes to builders. -type BuilderCreator interface { - Add(info Info) -} - // Clone returns a deep Clone of the Builder. func (b Builder) Clone() Builder { - if b == nil { - return nil - } - - r := Builder{} - - for k, v := range b { - r[k] = v - } - - return r + return maps.Clone(b) } // Add adds a new entry to the builder or conditionally replaces it if the timestamp is greater. 
@@ -89,11 +75,7 @@ func (b Builder) sortedContents() []*Info { numWorkers := runtime.NumCPU() for worker := range numWorkers { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for i := range buckets { if i%numWorkers == worker { buck := buckets[i] @@ -103,7 +85,7 @@ func (b Builder) sortedContents() []*Info { }) } } - }() + }) } wg.Wait() @@ -111,7 +93,7 @@ func (b Builder) sortedContents() []*Info { // Phase 3 - merge results from all buckets. result := make([]*Info, 0, len(b)) - for i := range len(buckets) { //nolint:intrange + for i := range buckets { result = append(result, buckets[i]...) } @@ -120,7 +102,7 @@ func (b Builder) sortedContents() []*Info { // Build writes the pack index to the provided output. func (b Builder) Build(output io.Writer, version int) error { - if err := b.BuildStable(output, version); err != nil { + if err := b.buildStable(output, version); err != nil { return err } @@ -137,8 +119,8 @@ func (b Builder) Build(output io.Writer, version int) error { return nil } -// BuildStable writes the pack index to the provided output. -func (b Builder) BuildStable(output io.Writer, version int) error { +// buildStable writes the pack index to the provided output. 
+func (b Builder) buildStable(output io.Writer, version int) error { return buildSortedContents(b.sortedContents(), output, version) } @@ -215,7 +197,7 @@ func (b Builder) BuildShards(indexVersion int, stable bool, shardSize int) ([]ga dataShardsBuf = append(dataShardsBuf, buf) - if err := s.BuildStable(buf, indexVersion); err != nil { + if err := s.buildStable(buf, indexVersion); err != nil { closeShards() return nil, nil, errors.Wrap(err, "error building index shard") diff --git a/repo/content/index/index_v1.go b/repo/content/index/index_v1.go index 1f117062ed7..fa3ef3c816a 100644 --- a/repo/content/index/index_v1.go +++ b/repo/content/index/index_v1.go @@ -92,6 +92,7 @@ func (b *indexV1) entryToInfoStruct(contentID ID, data []byte, result *Info) err result.Deleted = data[12]&0x80 != 0 //nolint:mnd const packOffsetMask = 1<<31 - 1 + result.PackOffset = decodeBigEndianUint32(data[12:]) & packOffsetMask result.PackedLength = decodeBigEndianUint32(data[16:]) result.OriginalLength = result.PackedLength - b.v1PerContentOverhead diff --git a/repo/content/index/index_v2.go b/repo/content/index/index_v2.go index acd50995caf..ff658697599 100644 --- a/repo/content/index/index_v2.go +++ b/repo/content/index/index_v2.go @@ -412,20 +412,12 @@ func buildPackIDToIndexMap(sortedInfos []*Info) map[blob.ID]int { // maxContentLengths computes max content lengths in the builder. 
func maxContentLengths(sortedInfos []*Info) (maxPackedLength, maxOriginalLength, maxPackOffset uint32) { for _, v := range sortedInfos { - if l := v.PackedLength; l > maxPackedLength { - maxPackedLength = l - } - - if l := v.OriginalLength; l > maxOriginalLength { - maxOriginalLength = l - } - - if l := v.PackOffset; l > maxPackOffset { - maxPackOffset = l - } + maxPackedLength = max(maxPackedLength, v.PackedLength) + maxOriginalLength = max(maxOriginalLength, v.OriginalLength) + maxPackOffset = max(maxPackOffset, v.PackOffset) } - return + return maxPackedLength, maxOriginalLength, maxPackOffset } func newIndexBuilderV2(sortedInfos []*Info) (*indexBuilderV2, error) { diff --git a/repo/content/index/merged_test.go b/repo/content/index/merged_test.go index 609980cf807..d4bf407e6d0 100644 --- a/repo/content/index/merged_test.go +++ b/repo/content/index/merged_test.go @@ -166,6 +166,7 @@ func TestMergedGetInfoError(t *testing.T) { m := Merged{failingIndex{nil, someError}} var info Info + ok, err := m.GetInfo(mustParseID(t, "xabcdef"), &info) require.ErrorIs(t, err, someError) require.False(t, ok) diff --git a/repo/content/index/one_use_index_builder.go b/repo/content/index/one_use_index_builder.go index c7255321f62..b0bc3c57ac6 100644 --- a/repo/content/index/one_use_index_builder.go +++ b/repo/content/index/one_use_index_builder.go @@ -86,11 +86,6 @@ func (b *OneUseBuilder) shard(maxShardSize int) [][]*Info { return nonEmpty } -// BuildStable writes the pack index to the provided output. -func (b *OneUseBuilder) BuildStable(output io.Writer, version int) error { - return buildSortedContents(b.sortedContents(), output, version) -} - // BuildShards builds the set of index shards ensuring no more than the provided number of contents are in each index. // Returns shard bytes and function to clean up after the shards have been written. 
func (b *OneUseBuilder) BuildShards(indexVersion int, stable bool, shardSize int) ([]gather.Bytes, func(), error) { diff --git a/repo/content/index/packindex_test.go b/repo/content/index/packindex_test.go index dd29b058563..b674310300a 100644 --- a/repo/content/index/packindex_test.go +++ b/repo/content/index/packindex_test.go @@ -162,10 +162,10 @@ func testPackIndex(t *testing.T, version int) { err = b2.Build(&buf2, version) require.NoError(t, err) - err = b3.BuildStable(&buf3, version) + err = b3.buildStable(&buf3, version) require.NoError(t, err) - err = b4.BuildStable(&buf4, version) + err = buildSortedContents(b4.sortedContents(), &buf4, version) require.NoError(t, err) data1 := buf1.Bytes() diff --git a/repo/content/indexblob/index_blob.go b/repo/content/indexblob/index_blob.go index d01ac6b32aa..da0a9618f0f 100644 --- a/repo/content/indexblob/index_blob.go +++ b/repo/content/indexblob/index_blob.go @@ -8,13 +8,14 @@ import ( "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" + "github.com/kopia/kopia/repo/maintenancestats" ) // Manager is the API of index blob manager as used by content manager. 
type Manager interface { WriteIndexBlobs(ctx context.Context, data []gather.Bytes, suffix blob.ID) ([]blob.Metadata, error) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time.Time, error) - Compact(ctx context.Context, opts CompactOptions) error + Compact(ctx context.Context, opts CompactOptions) (*maintenancestats.CompactIndexesStats, error) Invalidate() } diff --git a/repo/content/indexblob/index_blob_encryption.go b/repo/content/indexblob/index_blob_encryption.go index 067aea405e6..071520d66e2 100644 --- a/repo/content/indexblob/index_blob_encryption.go +++ b/repo/content/indexblob/index_blob_encryption.go @@ -6,10 +6,13 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/blobcrypto" + "github.com/kopia/kopia/internal/blobparam" "github.com/kopia/kopia/internal/cache" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/gather" + "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/repo/blob" - "github.com/kopia/kopia/repo/logging" ) // Metadata is an information about a single index blob managed by Manager. @@ -18,12 +21,68 @@ type Metadata struct { Superseded []blob.Metadata } +// WriteValueTo writes the metadata to a JSON writer. +func (m Metadata) WriteValueTo(jw *contentlog.JSONWriter) { + blobparam.BlobMetadata("metadata", m.Metadata).WriteValueTo(jw) + jw.BeginListField("superseded") + + for _, bm := range m.Superseded { + jw.BeginObject() + jw.StringField("blobID", string(bm.BlobID)) + jw.Int64Field("l", bm.Length) + jw.TimeField("ts", bm.Timestamp) + jw.EndObject() + } +} + +type metadataParam struct { + key string + val Metadata +} + +func (v metadataParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(v.key) + v.val.WriteValueTo(jw) + jw.EndObject() +} + +// MetadataParam creates a parameter for a metadata. 
+// +//nolint:revive +func MetadataParam(name string, bm Metadata) metadataParam { + return metadataParam{key: name, val: bm} +} + +type metadataListParam struct { + key string + list []Metadata +} + +func (v metadataListParam) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginListField(v.key) + + for _, bm := range v.list { + jw.BeginObject() + bm.WriteValueTo(jw) + jw.EndObject() + } + + jw.EndList() +} + +// MetadataListParam creates a parameter for a list of metadata. +// +//nolint:revive +func MetadataListParam(name string, list []Metadata) metadataListParam { + return metadataListParam{key: name, list: list} +} + // EncryptionManager manages encryption and caching of index blobs. type EncryptionManager struct { st blob.Storage crypter blobcrypto.Crypter indexBlobCache *cache.PersistentCache - log logging.Logger + log *contentlog.Logger } // GetEncryptedBlob fetches and decrypts the contents of a given encrypted blob @@ -52,15 +111,18 @@ func (m *EncryptionManager) EncryptAndWriteBlob(ctx context.Context, data gather return blob.Metadata{}, errors.Wrap(err, "error encrypting") } + t0 := timetrack.StartTimer() + bm, err := blob.PutBlobAndGetMetadata(ctx, m.st, blobID, data2.Bytes(), blob.PutOptions{}) - if err != nil { - m.log.Debugf("write-index-blob %v failed %v", blobID, err) - return blob.Metadata{}, errors.Wrapf(err, "error writing blob %v", blobID) - } - m.log.Debugf("write-index-blob %v %v %v", blobID, bm.Length, bm.Timestamp) + contentlog.Log5(ctx, m.log, "write-index-blob", + blobparam.BlobID("indexBlobID", blobID), + logparam.Int("len", data2.Length()), + logparam.Time("timestamp", bm.Timestamp), + logparam.Duration("latency", t0.Elapsed()), + logparam.Error("error", err)) - return bm, nil + return bm, errors.Wrapf(err, "error writing blob %v", blobID) } // NewEncryptionManager creates new encryption manager. 
@@ -68,7 +130,7 @@ func NewEncryptionManager( st blob.Storage, crypter blobcrypto.Crypter, indexBlobCache *cache.PersistentCache, - log logging.Logger, + log *contentlog.Logger, ) *EncryptionManager { return &EncryptionManager{st, crypter, indexBlobCache, log} } diff --git a/repo/content/indexblob/index_blob_encryption_test.go b/repo/content/indexblob/index_blob_encryption_test.go index 88bbf6ba441..d272221b910 100644 --- a/repo/content/indexblob/index_blob_encryption_test.go +++ b/repo/content/indexblob/index_blob_encryption_test.go @@ -14,7 +14,6 @@ import ( "github.com/kopia/kopia/repo/encryption" "github.com/kopia/kopia/repo/format" "github.com/kopia/kopia/repo/hashing" - "github.com/kopia/kopia/repo/logging" ) type failingEncryptor struct { @@ -43,7 +42,7 @@ func TestEncryptedBlobManager(t *testing.T) { st: fs, crypter: blobcrypto.StaticCrypter{Hash: hf, Encryption: enc}, indexBlobCache: nil, - log: logging.NullLogger, + log: nil, } ctx := testlogging.Context(t) diff --git a/repo/content/indexblob/index_blob_manager_v0.go b/repo/content/indexblob/index_blob_manager_v0.go index 3e76df69157..7a47f287489 100644 --- a/repo/content/indexblob/index_blob_manager_v0.go +++ b/repo/content/indexblob/index_blob_manager_v0.go @@ -8,11 +8,15 @@ import ( "github.com/pkg/errors" "golang.org/x/sync/errgroup" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/format" - "github.com/kopia/kopia/repo/logging" + "github.com/kopia/kopia/repo/maintenancestats" ) // V0IndexBlobPrefix is the prefix for all legacy (v0) index blobs. 
@@ -62,14 +66,14 @@ type ManagerV0 struct { enc *EncryptionManager timeNow func() time.Time formattingOptions IndexFormattingOptions - log logging.Logger + log *contentlog.Logger } -// ListIndexBlobInfos list active blob info structs. Also returns time of latest content deletion commit. -func (m *ManagerV0) ListIndexBlobInfos(ctx context.Context) ([]Metadata, time.Time, error) { - activeIndexBlobs, t0, err := m.ListActiveIndexBlobs(ctx) +// ListIndexBlobInfos lists active blob info structs. +func (m *ManagerV0) ListIndexBlobInfos(ctx context.Context) ([]Metadata, error) { + activeIndexBlobs, _, err := m.ListActiveIndexBlobs(ctx) if err != nil { - return nil, time.Time{}, err + return nil, err } q := make([]Metadata, 0, len(activeIndexBlobs)) @@ -83,7 +87,7 @@ func (m *ManagerV0) ListIndexBlobInfos(ctx context.Context) ([]Metadata, time.Ti q = append(q, activeIndexBlob) } - return q, t0, nil + return q, nil } // ListActiveIndexBlobs lists the metadata for active index blobs and returns the cut-off time @@ -112,13 +116,9 @@ func (m *ManagerV0) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time. return nil, time.Time{}, errors.Wrap(err, "error listing indexes") } - for i, sib := range storageIndexBlobs { - m.log.Debugf("found-index-blobs[%v] = %v", i, sib) - } - - for i, clm := range compactionLogMetadata { - m.log.Debugf("found-compaction-blobs[%v] %v", i, clm) - } + contentlog.Log2(ctx, m.log, "found index blobs", + blobparam.BlobMetadataList("storageIndexBlobs", storageIndexBlobs), + blobparam.BlobMetadataList("compactionLogMetadata", compactionLogMetadata)) indexMap := map[blob.ID]*Metadata{} addBlobsToIndex(indexMap, storageIndexBlobs) @@ -129,16 +129,16 @@ func (m *ManagerV0) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time. } // remove entries from indexMap that have been compacted and replaced by other indexes. 
- m.removeCompactedIndexes(indexMap, compactionLogs) + m.removeCompactedIndexes(ctx, indexMap, compactionLogs) var results []Metadata for _, v := range indexMap { results = append(results, *v) } - for i, res := range results { - m.log.Debugf("active-index-blobs[%v] = %v", i, res) - } + contentlog.Log1(ctx, m.log, + "active index blobs", + MetadataListParam("results", results)) return results, time.Time{}, nil } @@ -149,28 +149,35 @@ func (m *ManagerV0) Invalidate() { // Compact performs compaction of index blobs by merging smaller ones into larger // and registering compaction and cleanup blobs in the repository. -func (m *ManagerV0) Compact(ctx context.Context, opt CompactOptions) error { +func (m *ManagerV0) Compact(ctx context.Context, opt CompactOptions) (*maintenancestats.CompactIndexesStats, error) { indexBlobs, _, err := m.ListActiveIndexBlobs(ctx) if err != nil { - return errors.Wrap(err, "error listing active index blobs") + return nil, errors.Wrap(err, "error listing active index blobs") } mp, mperr := m.formattingOptions.GetMutableParameters(ctx) if mperr != nil { - return errors.Wrap(mperr, "mutable parameters") + return nil, errors.Wrap(mperr, "mutable parameters") } - blobsToCompact := m.getBlobsToCompact(indexBlobs, opt, mp) + blobsToCompact := m.getBlobsToCompact(ctx, indexBlobs, opt, mp) - if err := m.compactIndexBlobs(ctx, blobsToCompact, opt); err != nil { - return errors.Wrap(err, "error performing compaction") + compacted, err := m.compactIndexBlobs(ctx, blobsToCompact, opt) + if err != nil { + return nil, errors.Wrap(err, "error performing compaction") } if err := m.cleanup(ctx, opt.maxEventualConsistencySettleTime()); err != nil { - return errors.Wrap(err, "error cleaning up index blobs") + return nil, errors.Wrap(err, "error cleaning up index blobs") } - return nil + if compacted { + return &maintenancestats.CompactIndexesStats{ + DroppedContentsDeletedBefore: opt.DropDeletedBefore, + }, nil + } + + return nil, nil } func (m *ManagerV0) 
registerCompaction(ctx context.Context, inputs, outputs []blob.Metadata, maxEventualConsistencySettleTime time.Duration) error { @@ -187,15 +194,12 @@ func (m *ManagerV0) registerCompaction(ctx context.Context, inputs, outputs []bl return errors.Wrap(err, "unable to write compaction log") } - for i, input := range inputs { - m.log.Debugf("compacted-input[%v/%v] %v", i, len(inputs), input) - } - - for i, output := range outputs { - m.log.Debugf("compacted-output[%v/%v] %v", i, len(outputs), output) - } - - m.log.Debugf("compaction-log %v %v", compactionLogBlobMetadata.BlobID, compactionLogBlobMetadata.Timestamp) + contentlog.Log4(ctx, m.log, + "registered compaction", + blobparam.BlobMetadataList("inputs", inputs), + blobparam.BlobMetadataList("outputs", outputs), + blobparam.BlobID("compactionLogBlobID", compactionLogBlobMetadata.BlobID), + logparam.Time("compactionLogBlobTimestamp", compactionLogBlobMetadata.Timestamp)) if err := m.deleteOldBlobs(ctx, compactionLogBlobMetadata, maxEventualConsistencySettleTime); err != nil { return errors.Wrap(err, "error deleting old index blobs") @@ -295,14 +299,18 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata compactionLogServerTimeCutoff := latestBlob.Timestamp.Add(-maxEventualConsistencySettleTime) compactionBlobs := blobsOlderThan(allCompactionLogBlobs, compactionLogServerTimeCutoff) - m.log.Debugf("fetching %v/%v compaction logs older than %v", len(compactionBlobs), len(allCompactionLogBlobs), compactionLogServerTimeCutoff) + contentlog.Log3(ctx, m.log, + "fetching compaction logs", + logparam.Int("compactionBlobs", len(compactionBlobs)), + logparam.Int("allCompactionLogBlobs", len(allCompactionLogBlobs)), + logparam.Time("compactionLogServerTimeCutoff", compactionLogServerTimeCutoff)) compactionBlobEntries, err := m.getCompactionLogEntries(ctx, compactionBlobs) if err != nil { return errors.Wrap(err, "unable to get compaction log entries") } - indexBlobsToDelete := 
m.findIndexBlobsToDelete(latestBlob.Timestamp, compactionBlobEntries, maxEventualConsistencySettleTime) + indexBlobsToDelete := m.findIndexBlobsToDelete(ctx, latestBlob.Timestamp, compactionBlobEntries, maxEventualConsistencySettleTime) // note that we must always delete index blobs first before compaction logs // otherwise we may inadvertently resurrect an index blob that should have been removed. @@ -310,7 +318,7 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata return errors.Wrap(err, "unable to delete compaction logs") } - compactionLogBlobsToDelayCleanup := m.findCompactionLogBlobsToDelayCleanup(compactionBlobs) + compactionLogBlobsToDelayCleanup := m.findCompactionLogBlobsToDelayCleanup(ctx, compactionBlobs) if err := m.delayCleanupBlobs(ctx, compactionLogBlobsToDelayCleanup, latestBlob.Timestamp); err != nil { return errors.Wrap(err, "unable to schedule delayed cleanup of blobs") @@ -319,18 +327,26 @@ func (m *ManagerV0) deleteOldBlobs(ctx context.Context, latestBlob blob.Metadata return nil } -func (m *ManagerV0) findIndexBlobsToDelete(latestServerBlobTime time.Time, entries map[blob.ID]*compactionLogEntry, maxEventualConsistencySettleTime time.Duration) []blob.ID { +func (m *ManagerV0) findIndexBlobsToDelete(ctx context.Context, latestServerBlobTime time.Time, entries map[blob.ID]*compactionLogEntry, maxEventualConsistencySettleTime time.Duration) []blob.ID { tmp := map[blob.ID]bool{} for _, cl := range entries { // are the input index blobs in this compaction eligible for deletion? 
if age := latestServerBlobTime.Sub(cl.metadata.Timestamp); age < maxEventualConsistencySettleTime { - m.log.Debugf("not deleting compacted index blob used as inputs for compaction %v, because it's too recent: %v < %v", cl.metadata.BlobID, age, maxEventualConsistencySettleTime) + contentlog.Log3(ctx, m.log, + "not deleting compacted index blob used as inputs for compaction", + blobparam.BlobID("blobID", cl.metadata.BlobID), + logparam.Duration("age", age), + logparam.Duration("maxEventualConsistencySettleTime", maxEventualConsistencySettleTime)) + continue } for _, b := range cl.InputMetadata { - m.log.Debugf("will delete old index %v compacted to %v", b, cl.OutputMetadata) + contentlog.Log2(ctx, m.log, + "will delete old index", + blobparam.BlobMetadata("b", b), + blobparam.BlobMetadataList("outputMetadata", cl.OutputMetadata)) tmp[b.BlobID] = true } @@ -345,11 +361,13 @@ func (m *ManagerV0) findIndexBlobsToDelete(latestServerBlobTime time.Time, entri return result } -func (m *ManagerV0) findCompactionLogBlobsToDelayCleanup(compactionBlobs []blob.Metadata) []blob.ID { +func (m *ManagerV0) findCompactionLogBlobsToDelayCleanup(ctx context.Context, compactionBlobs []blob.Metadata) []blob.ID { var result []blob.ID for _, cb := range compactionBlobs { - m.log.Debugf("will delete compaction log blob %v", cb) + contentlog.Log1(ctx, m.log, "will delete compaction log blob", + blobparam.BlobMetadata("cb", cb)) + result = append(result, cb.BlobID) } @@ -364,7 +382,7 @@ func (m *ManagerV0) findBlobsToDelete(entries map[blob.ID]*cleanupEntry, maxEven } } - return + return compactionLogs, cleanupBlobs } func (m *ManagerV0) delayCleanupBlobs(ctx context.Context, blobIDs []blob.ID, cleanupScheduleTime time.Time) error { @@ -390,11 +408,11 @@ func (m *ManagerV0) delayCleanupBlobs(ctx context.Context, blobIDs []blob.ID, cl func (m *ManagerV0) deleteBlobsFromStorageAndCache(ctx context.Context, blobIDs []blob.ID) error { for _, blobID := range blobIDs { if err := 
m.st.DeleteBlob(ctx, blobID); err != nil && !errors.Is(err, blob.ErrBlobNotFound) { - m.log.Debugf("delete-blob failed %v %v", blobID, err) + contentlog.Log2(ctx, m.log, "delete-blob failed", blobparam.BlobID("blobID", blobID), logparam.Error("err", err)) return errors.Wrapf(err, "unable to delete blob %v", blobID) } - m.log.Debugf("delete-blob succeeded %v", blobID) + contentlog.Log1(ctx, m.log, "delete-blob succeeded", blobparam.BlobID("blobID", blobID)) } return nil @@ -433,13 +451,13 @@ func (m *ManagerV0) cleanup(ctx context.Context, maxEventualConsistencySettleTim } if err := m.st.FlushCaches(ctx); err != nil { - m.log.Debugw("error flushing caches", "err", err) + contentlog.Log1(ctx, m.log, "error flushing caches", logparam.Error("err", err)) } return nil } -func (m *ManagerV0) getBlobsToCompact(indexBlobs []Metadata, opt CompactOptions, mp format.MutableParameters) []Metadata { +func (m *ManagerV0) getBlobsToCompact(ctx context.Context, indexBlobs []Metadata, opt CompactOptions, mp format.MutableParameters) []Metadata { var ( nonCompactedBlobs, verySmallBlobs []Metadata totalSizeNonCompactedBlobs, totalSizeVerySmallBlobs, totalSizeMediumSizedBlobs int64 @@ -465,39 +483,42 @@ func (m *ManagerV0) getBlobsToCompact(indexBlobs []Metadata, opt CompactOptions, if len(nonCompactedBlobs) < opt.MaxSmallBlobs { // current count is below min allowed - nothing to do - m.log.Debug("no small contents to Compact") + contentlog.Log(ctx, m.log, "no small contents to Compact") return nil } if len(verySmallBlobs) > len(nonCompactedBlobs)/2 && mediumSizedBlobCount+1 < opt.MaxSmallBlobs { - m.log.Debugf("compacting %v very small contents", len(verySmallBlobs)) + contentlog.Log1(ctx, m.log, "compacting very small contents", logparam.Int("len", len(verySmallBlobs))) return verySmallBlobs } - m.log.Debugf("compacting all %v non-compacted contents", len(nonCompactedBlobs)) + contentlog.Log1(ctx, m.log, "compacting all non-compacted contents", logparam.Int("len", 
len(nonCompactedBlobs))) return nonCompactedBlobs } -func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata, opt CompactOptions) error { +func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata, opt CompactOptions) (bool, error) { if len(indexBlobs) <= 1 && opt.DropDeletedBefore.IsZero() && len(opt.DropContents) == 0 { - return nil + return false, nil } mp, mperr := m.formattingOptions.GetMutableParameters(ctx) if mperr != nil { - return errors.Wrap(mperr, "mutable parameters") + return false, errors.Wrap(mperr, "mutable parameters") } bld := make(index.Builder) var inputs, outputs []blob.Metadata - for i, indexBlob := range indexBlobs { - m.log.Debugf("compacting-entries[%v/%v] %v", i, len(indexBlobs), indexBlob) + for _, indexBlob := range indexBlobs { + contentlog.Log2(ctx, m.log, + "compacting-entries", + blobparam.BlobMetadata("indexBlob", indexBlob.Metadata), + blobparam.BlobMetadataList("superseded", indexBlob.Superseded)) - if err := addIndexBlobsToBuilder(ctx, m.enc, bld, indexBlob.BlobID); err != nil { - return errors.Wrap(err, "error adding index to builder") + if err := addIndexBlobsToBuilder(ctx, m.enc, bld.Add, indexBlob.BlobID); err != nil { + return false, errors.Wrap(err, "error adding index to builder") } inputs = append(inputs, indexBlob.Metadata) @@ -505,52 +526,54 @@ func (m *ManagerV0) compactIndexBlobs(ctx context.Context, indexBlobs []Metadata // after we built index map in memory, drop contents from it // we must do it after all input blobs have been merged, otherwise we may resurrect contents. 
- m.dropContentsFromBuilder(bld, opt) + m.dropContentsFromBuilder(ctx, bld, opt) dataShards, cleanupShards, err := bld.BuildShards(mp.IndexVersion, false, DefaultIndexShardSize) if err != nil { - return errors.Wrap(err, "unable to build an index") + return false, errors.Wrap(err, "unable to build an index") } defer cleanupShards() compactedIndexBlobs, err := m.WriteIndexBlobs(ctx, dataShards, "") if err != nil { - return errors.Wrap(err, "unable to write compacted indexes") + return false, errors.Wrap(err, "unable to write compacted indexes") } outputs = append(outputs, compactedIndexBlobs...) if err := m.registerCompaction(ctx, inputs, outputs, opt.maxEventualConsistencySettleTime()); err != nil { - return errors.Wrap(err, "unable to register compaction") + return false, errors.Wrap(err, "unable to register compaction") } - return nil + return true, nil } -func (m *ManagerV0) dropContentsFromBuilder(bld index.Builder, opt CompactOptions) { +func (m *ManagerV0) dropContentsFromBuilder(ctx context.Context, bld index.Builder, opt CompactOptions) { for _, dc := range opt.DropContents { if _, ok := bld[dc]; ok { - m.log.Debugf("manual-drop-from-index %v", dc) + contentlog.Log1(ctx, m.log, "manual-drop-from-index", logparam.String("dc", dc.String())) delete(bld, dc) } } if !opt.DropDeletedBefore.IsZero() { - m.log.Debugf("drop-content-deleted-before %v", opt.DropDeletedBefore) + contentlog.Log1(ctx, m.log, "drop-content-deleted-before", logparam.Time("dropDeletedBefore", opt.DropDeletedBefore)) for _, i := range bld { if i.Deleted && i.Timestamp().Before(opt.DropDeletedBefore) { - m.log.Debugf("drop-from-index-old-deleted %v %v", i.ContentID, i.Timestamp()) + contentlog.Log2(ctx, m.log, "drop-from-index-old-deleted", + contentparam.ContentID("contentID", i.ContentID), + logparam.Time("timestamp", i.Timestamp())) delete(bld, i.ContentID) } } - m.log.Debugf("finished drop-content-deleted-before %v", opt.DropDeletedBefore) + contentlog.Log1(ctx, m.log, "finished 
drop-content-deleted-before", logparam.Time("dropDeletedBefore", opt.DropDeletedBefore)) } } -func addIndexBlobsToBuilder(ctx context.Context, enc *EncryptionManager, bld index.BuilderCreator, indexBlobID blob.ID) error { +func addIndexBlobsToBuilder(ctx context.Context, enc *EncryptionManager, addEntry func(index.Info), indexBlobID blob.ID) error { var data gather.WriteBuffer defer data.Close() @@ -565,7 +588,7 @@ func addIndexBlobsToBuilder(ctx context.Context, enc *EncryptionManager, bld ind } _ = ndx.Iterate(index.AllIDs, func(i index.Info) error { - bld.Add(i) + addEntry(i) return nil }) @@ -584,7 +607,7 @@ func blobsOlderThan(m []blob.Metadata, cutoffTime time.Time) []blob.Metadata { return res } -func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compactionLogs map[blob.ID]*compactionLogEntry) { +func (m *ManagerV0) removeCompactedIndexes(ctx context.Context, bimap map[blob.ID]*Metadata, compactionLogs map[blob.ID]*compactionLogEntry) { var validCompactionLogs []*compactionLogEntry for _, cl := range compactionLogs { @@ -595,7 +618,8 @@ func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compacti if bimap[o.BlobID] == nil { haveAllOutputs = false - m.log.Debugf("blob %v referenced by compaction log is not found", o.BlobID) + contentlog.Log1(ctx, m.log, "blob referenced by compaction log is not found", + blobparam.BlobID("blobID", o.BlobID)) break } @@ -610,7 +634,9 @@ func (m *ManagerV0) removeCompactedIndexes(bimap map[blob.ID]*Metadata, compacti for _, cl := range validCompactionLogs { for _, ib := range cl.InputMetadata { if md := bimap[ib.BlobID]; md != nil && md.Superseded == nil { - m.log.Debugf("ignore-index-blob %v compacted to %v", ib, cl.OutputMetadata) + contentlog.Log2(ctx, m.log, "ignore-index-blob", + blobparam.BlobMetadata("ib", ib), + blobparam.BlobMetadataList("outputMetadata", cl.OutputMetadata)) delete(bimap, ib.BlobID) } @@ -624,7 +650,7 @@ func NewManagerV0( enc *EncryptionManager, timeNow func() 
time.Time, formattingOptions IndexFormattingOptions, - log logging.Logger, + log *contentlog.Logger, ) *ManagerV0 { return &ManagerV0{st, enc, timeNow, formattingOptions, log} } diff --git a/repo/content/indexblob/index_blob_manager_v0_test.go b/repo/content/indexblob/index_blob_manager_v0_test.go index a4f4040d60a..78cf90aadaa 100644 --- a/repo/content/indexblob/index_blob_manager_v0_test.go +++ b/repo/content/indexblob/index_blob_manager_v0_test.go @@ -197,7 +197,7 @@ func TestIndexBlobManagerStress(t *testing.T) { for actorID := range numActors { loggedSt := logging.NewWrapper(st, testlogging.Printf(func(m string, args ...any) { t.Logf(fmt.Sprintf("@%v actor[%v]:", fakeTimeFunc().Format("150405.000"), actorID)+m, args...) - }, ""), "") + }, ""), nil, "") contentPrefix := fmt.Sprintf("a%v", actorID) eg.Go(func() error { @@ -285,7 +285,7 @@ func TestCompactionCreatesPreviousIndex(t *testing.T) { st = blobtesting.NewEventuallyConsistentStorage(st, testEventualConsistencySettleTime, fakeTimeFunc) st = logging.NewWrapper(st, testlogging.Printf(func(msg string, args ...any) { t.Logf("[store] "+fakeTimeFunc().Format("150405.000")+" "+msg, args...) - }, ""), "") + }, ""), nil, "") m := newIndexBlobManagerForTesting(t, st, fakeTimeFunc) numWritten := 0 @@ -363,7 +363,7 @@ func verifyIndexBlobManagerPreventsResurrectOfDeletedContents(t *testing.T, dela st = blobtesting.NewEventuallyConsistentStorage(st, testEventualConsistencySettleTime, fakeTimeFunc) st = logging.NewWrapper(st, testlogging.Printf(func(msg string, args ...any) { t.Logf(fakeTimeFunc().Format("150405.000")+" "+msg, args...) 
- }, ""), "") + }, ""), nil, "") m := newIndexBlobManagerForTesting(t, st, fakeTimeFunc) numWritten := 0 @@ -780,18 +780,16 @@ func newIndexBlobManagerForTesting(t *testing.T, st blob.Storage, localTimeNow f 15*time.Minute, ) - log := testlogging.Printf(t.Logf, "") - m := &ManagerV0{ st: st, enc: &EncryptionManager{ st: st, indexBlobCache: nil, crypter: blobcrypto.StaticCrypter{Hash: hf, Encryption: enc}, - log: log, + log: nil, }, timeNow: localTimeNow, - log: log, + log: nil, } return m diff --git a/repo/content/indexblob/index_blob_manager_v1.go b/repo/content/indexblob/index_blob_manager_v1.go index 579231c586d..31e7821144e 100644 --- a/repo/content/indexblob/index_blob_manager_v1.go +++ b/repo/content/indexblob/index_blob_manager_v1.go @@ -9,11 +9,13 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/blobcrypto" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/epoch" "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content/index" - "github.com/kopia/kopia/repo/logging" + "github.com/kopia/kopia/repo/maintenancestats" ) // ManagerV1 is the append-only implementation of indexblob.Manager @@ -23,14 +25,16 @@ type ManagerV1 struct { enc *EncryptionManager timeNow func() time.Time formattingOptions IndexFormattingOptions - log logging.Logger + log *contentlog.Logger epochMgr *epoch.Manager } -// ListIndexBlobInfos list active blob info structs. Also returns time of latest content deletion commit. -func (m *ManagerV1) ListIndexBlobInfos(ctx context.Context) ([]Metadata, time.Time, error) { - return m.ListActiveIndexBlobs(ctx) +// ListIndexBlobInfos lists active blob info structs. 
+func (m *ManagerV1) ListIndexBlobInfos(ctx context.Context) ([]Metadata, error) { + blobs, _, err := m.ListActiveIndexBlobs(ctx) + + return blobs, err } // ListActiveIndexBlobs lists the metadata for active index blobs and returns the cut-off time @@ -47,7 +51,7 @@ func (m *ManagerV1) ListActiveIndexBlobs(ctx context.Context) ([]Metadata, time. result = append(result, Metadata{Metadata: bm}) } - m.log.Debugf("total active indexes %v, deletion watermark %v", len(active), deletionWatermark) + contentlog.Log2(ctx, m.log, "total active indexes", logparam.Int("len", len(active)), logparam.Time("deletionWatermark", deletionWatermark)) return result, deletionWatermark, nil } @@ -58,12 +62,23 @@ func (m *ManagerV1) Invalidate() { } // Compact advances the deletion watermark. -func (m *ManagerV1) Compact(ctx context.Context, opt CompactOptions) error { +func (m *ManagerV1) Compact(ctx context.Context, opt CompactOptions) (*maintenancestats.CompactIndexesStats, error) { if opt.DropDeletedBefore.IsZero() { - return nil + return nil, nil + } + + advanced, err := m.epochMgr.AdvanceDeletionWatermark(ctx, opt.DropDeletedBefore) + if err != nil { + return nil, errors.Wrap(err, "error advancing deletion watermark") + } + + if !advanced { + return nil, nil } - return errors.Wrap(m.epochMgr.AdvanceDeletionWatermark(ctx, opt.DropDeletedBefore), "error advancing deletion watermark") + return &maintenancestats.CompactIndexesStats{ + DroppedContentsDeletedBefore: opt.DropDeletedBefore, + }, nil } // CompactEpoch compacts the provided index blobs and writes a new set of blobs. 
@@ -71,7 +86,7 @@ func (m *ManagerV1) CompactEpoch(ctx context.Context, blobIDs []blob.ID, outputP tmpbld := index.NewOneUseBuilder() for _, indexBlob := range blobIDs { - if err := addIndexBlobsToBuilder(ctx, m.enc, tmpbld, indexBlob); err != nil { + if err := addIndexBlobsToBuilder(ctx, m.enc, tmpbld.Add, indexBlob); err != nil { return errors.Wrap(err, "error adding index to builder") } } @@ -173,7 +188,7 @@ func NewManagerV1( epochMgr *epoch.Manager, timeNow func() time.Time, formattingOptions IndexFormattingOptions, - log logging.Logger, + log *contentlog.Logger, ) *ManagerV1 { return &ManagerV1{ st: st, diff --git a/repo/content/verify.go b/repo/content/verify.go new file mode 100644 index 00000000000..82c05f15a66 --- /dev/null +++ b/repo/content/verify.go @@ -0,0 +1,200 @@ +package content + +import ( + "context" + "math/rand" + "sync/atomic" + + "github.com/pkg/errors" + + "github.com/kopia/kopia/internal/stats" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/logging" +) + +// VerifyOptions allows specifying the optional arguments for WriteManager.VerifyContent. +type VerifyOptions struct { + ContentIDRange IDRange // defaults to AllIDs when not specified + ContentReadPercentage float64 + IncludeDeletedContents bool + + ContentIterateParallelism int + + ProgressCallback func(VerifyProgressStats) + // Number of contents that need to be processed between calls to ProgressCallback. + // For example, with a ProgressCallbackInterval of 1000, ProgressCallback + // is called once for every 1000 contents that are processed. + ProgressCallbackInterval uint32 +} + +// VerifyProgressStats contains progress counters that are passed to the +// progress callback used in WriteManager.VerifyContent. +type VerifyProgressStats struct { + ErrorCount uint32 + SuccessCount uint32 +} + +// VerifyContents checks whether contents are backed by valid blobs. 
+func (bm *WriteManager) VerifyContents(ctx context.Context, o VerifyOptions) error { + var v contentVerifier + + return v.verifyContents(ctx, bm, o) +} + +var errMissingPacks = errors.New("the repository is corrupted, it is missing pack blobs with index-referenced content") + +type contentVerifier struct { + bm *WriteManager + + existingPacks map[blob.ID]blob.Metadata + + progressCallback func(VerifyProgressStats) + progressCallbackInterval uint32 + + contentReadProbability float64 + + // content verification stats + successCount atomic.Uint32 + readContentCount atomic.Uint32 + missingPackContentCount atomic.Uint32 + truncatedPackContentCount atomic.Uint32 + errorContentCount atomic.Uint32 + + // Per pack counts for content errors. Notice that a truncated pack, can + // also appear in the corruptedPacks map. A missing pack can only be in + // missingPacks, but not in the other sets. + missingPacks stats.CountersMap[blob.ID] + truncatedPacks stats.CountersMap[blob.ID] + corruptedPacks stats.CountersMap[blob.ID] + + verifiedCount atomic.Uint32 // used for calling the progress callback at the specified interval. 
+ + log logging.Logger +} + +func (v *contentVerifier) verifyContents(ctx context.Context, bm *WriteManager, o VerifyOptions) error { + existingPacks, err := blob.ReadBlobMap(ctx, bm.st) + if err != nil { + return errors.Wrap(err, "unable to get blob metadata map") + } + + v.log = logging.Module("content/verify")(ctx) + v.bm = bm + v.existingPacks = existingPacks + v.progressCallback = o.ProgressCallback + v.contentReadProbability = max(o.ContentReadPercentage/100, 0) //nolint:mnd + + if o.ProgressCallback != nil { + v.progressCallbackInterval = o.ProgressCallbackInterval + } + + v.log.Info("Verifying contents...") + + itOpts := IterateOptions{ + Range: o.ContentIDRange, + Parallel: o.ContentIterateParallelism, + IncludeDeleted: o.IncludeDeletedContents, + } + + cb := func(ci Info) error { + v.verify(ctx, ci) + + return nil + } + + err = bm.IterateContents(ctx, itOpts, cb) + + contentInMissingPackCount := v.missingPackContentCount.Load() + contentInTruncatedPackCount := v.truncatedPackContentCount.Load() + contentErrorCount := v.errorContentCount.Load() + totalErrorCount := contentInMissingPackCount + contentInTruncatedPackCount + contentErrorCount + + contentCount := v.verifiedCount.Load() + + v.log.Info("Finished verifying contents") + v.log.Infow("verifyCounters:", + "verifiedContents", contentCount, + "totalErrorCount", totalErrorCount, + "contentsInMissingPacks", contentInMissingPackCount, + "contentsInTruncatedPacks", contentInTruncatedPackCount, + "unreadableContents", contentErrorCount, + "readContents", v.readContentCount.Load(), + "missingPacks", v.missingPacks.Length(), + "truncatedPacks", v.truncatedPacks.Length(), + "corruptedPacks", v.corruptedPacks.Length(), + ) + + logCountMap(v.log, "missingPack", &v.missingPacks) + logCountMap(v.log, "truncatedPack", &v.truncatedPacks) + logCountMap(v.log, "corruptedPack", &v.corruptedPacks) + + if err != nil { + return err + } + + if totalErrorCount != 0 { + return errors.Wrapf(errMissingPacks, "encountered %v 
errors", totalErrorCount) + } + + return nil +} + +// verifies a content, updates the corresponding counter stats and it may call +// the progress callback. +func (v *contentVerifier) verify(ctx context.Context, ci Info) { + v.verifyContentImpl(ctx, ci) + + count := v.verifiedCount.Add(1) + + if v.progressCallbackInterval > 0 && count%v.progressCallbackInterval == 0 { + s := VerifyProgressStats{ + SuccessCount: v.successCount.Load(), + ErrorCount: v.missingPackContentCount.Load() + v.truncatedPackContentCount.Load() + v.errorContentCount.Load(), + } + + v.progressCallback(s) + } +} + +func (v *contentVerifier) verifyContentImpl(ctx context.Context, ci Info) { + bi, found := v.existingPacks[ci.PackBlobID] + if !found { + v.missingPackContentCount.Add(1) + v.missingPacks.Increment(ci.PackBlobID) + v.log.Warnf("content %v depends on missing blob %v", ci.ContentID, ci.PackBlobID) + + return + } + + if int64(ci.PackOffset+ci.PackedLength) > bi.Length { + v.truncatedPackContentCount.Add(1) + v.truncatedPacks.Increment(ci.PackBlobID) + v.log.Warnf("content %v out of bounds of its pack blob %v", ci.ContentID, ci.PackBlobID) + + return + } + + //nolint:gosec + if v.contentReadProbability > 0 && rand.Float64() < v.contentReadProbability { + v.readContentCount.Add(1) + + if _, err := v.bm.GetContent(ctx, ci.ContentID); err != nil { + v.errorContentCount.Add(1) + v.corruptedPacks.Increment(ci.PackBlobID) + v.log.Warnf("content %v is invalid: %v", ci.ContentID, err) + + return + } + } + + v.successCount.Add(1) +} + +// nothing is logged for an empty map. 
+func logCountMap(log logging.Logger, mapName string, m *stats.CountersMap[blob.ID]) { + m.Range(func(packID blob.ID, contentCount uint32) bool { + log.Warnw(mapName, "packID", packID, "numberOfReferencedContents", contentCount) + + return true + }) +} diff --git a/repo/content/verify_test.go b/repo/content/verify_test.go new file mode 100644 index 00000000000..e3e27867427 --- /dev/null +++ b/repo/content/verify_test.go @@ -0,0 +1,309 @@ +package content + +import ( + "bytes" + "encoding/binary" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/internal/blobtesting" + "github.com/kopia/kopia/internal/epoch" + "github.com/kopia/kopia/internal/gather" + "github.com/kopia/kopia/internal/testlogging" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/content/index" + "github.com/kopia/kopia/repo/format" +) + +func newTestingMapStorage() blob.Storage { + data := blobtesting.DataMap{} + keyTime := map[blob.ID]time.Time{} + + return blobtesting.NewMapStorage(data, keyTime, nil) +} + +// newTestWriteManager is a helper to create a WriteManager for testing. +func newTestWriteManager(t *testing.T, st blob.Storage) *WriteManager { + t.Helper() + + fp := mustCreateFormatProvider(t, &format.ContentFormat{ + Hash: "HMAC-SHA256-128", + Encryption: "AES256-GCM-HMAC-SHA256", + HMACSecret: []byte("test-hmac"), + MasterKey: []byte("0123456789abcdef0123456789abcdef"), + MutableParameters: format.MutableParameters{ + Version: 2, + EpochParameters: epoch.DefaultParameters(), + IndexVersion: index.Version2, + MaxPackSize: 1024 * 1024, // 1 MB + }, + }) + + bm, err := NewManagerForTesting(testlogging.Context(t), st, fp, nil, nil) + + require.NoError(t, err, "cannot create content write manager") + + return bm +} + +func TestVerifyContents_NoMissingPacks(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create pack by writing contents. 
+ _, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello")), "", NoCompression) + require.NoError(t, err) + + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello prefixed")), "k", NoCompression) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + err = bm.VerifyContents(ctx, VerifyOptions{ + ContentIterateParallelism: 1, + }) + + require.NoError(t, err, "verification should pass as the packs exists") +} + +func TestVerifyContentToPackMapping_EnsureCallbackIsCalled(t *testing.T) { + const numberOfContents = 6 + + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create numberOfContents contents + var buf [4]byte + + for i := range numberOfContents { + binary.LittleEndian.PutUint32(buf[:], uint32(i)) + _, err := bm.WriteContent(ctx, gather.FromSlice(buf[:]), "", NoCompression) + require.NoError(t, err) + } + + require.NoError(t, bm.Flush(ctx)) + + var callbackCount atomic.Uint32 // use atomic to support higher parallelism + + cb := func(st VerifyProgressStats) { + callbackCount.Add(1) + } + + // verify that the callback is called twice (every numberOfContents / 2) + err := bm.VerifyContents(ctx, VerifyOptions{ + ContentIterateParallelism: 1, + ProgressCallback: cb, + ProgressCallbackInterval: numberOfContents / 2, + }) + + require.NoError(t, err, "verification should pass as the packs exists") + require.EqualValues(t, 2, callbackCount.Load(), "unexpected callback call count") + + // Delete the pack from storage so verification fails + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixRegular) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.NoError(t, st.DeleteBlob(ctx, blobs[0].BlobID)) + + callbackCount.Store(0) + + // verify the callback is called when there are errors as well. 
+ // verify that the callback is called twice (every numberOfContents / 2) + err = bm.VerifyContents(ctx, VerifyOptions{ + ContentIterateParallelism: 1, + ProgressCallback: cb, + ProgressCallbackInterval: numberOfContents / 2, + }) + + require.Error(t, err, "verification should fail as the pack is missing") + require.EqualValues(t, 2, callbackCount.Load(), "unexpected callback call count") +} + +func TestVerifyContents_Deleted(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create pack by writing contents. + cid, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello 1")), "", NoCompression) + + require.NoError(t, err) + require.NoError(t, bm.Flush(ctx)) + + // get pack id + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixRegular) + require.NoError(t, err) + require.Len(t, blobs, 1) + packId := blobs[0].BlobID + + // write another content and delete the first content + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello 2")), "", NoCompression) + require.NoError(t, err) + + err = bm.DeleteContent(ctx, cid) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + err = bm.VerifyContents(ctx, VerifyOptions{ + IncludeDeletedContents: true, + }) + require.NoError(t, err, "Verification should succeed") + + // Delete the first pack from storage so verification fails + require.NoError(t, st.DeleteBlob(ctx, packId)) + + err = bm.VerifyContents(ctx, VerifyOptions{ + IncludeDeletedContents: false, + }) + require.NoError(t, err, "Verification should succeed") + + err = bm.VerifyContents(ctx, VerifyOptions{ + IncludeDeletedContents: true, + }) + require.Error(t, err, "Verification should fail when deleted contents are included and the pack for the deleted content is missing") +} + +func TestVerifyContents_TruncatedPack(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create pack by writing contents. 
+ _, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello")), "", NoCompression) + require.NoError(t, err) + + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello prefixed")), "k", NoCompression) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + // Truncate the pack so verification fails + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixRegular) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.NoError(t, st.PutBlob(ctx, blobs[0].BlobID, gather.Bytes{}, blob.PutOptions{})) + + err = bm.VerifyContents(ctx, VerifyOptions{}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is truncated") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllNonPrefixedIDs}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is truncated and non-prefixed contents are verified") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllPrefixedIDs}) + require.NoError(t, err, "verification should succeed when a 'p' pack blob is truncated and prefixed contents are verified") +} + +func TestVerifyContents_CorruptedPack(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create pack by writing contents. 
+ _, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello")), "", NoCompression) + require.NoError(t, err) + + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello prefixed")), "k", NoCompression) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + // Corrupt the pack so verification fails + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixRegular) + require.NoError(t, err) + require.Len(t, blobs, 1) + bid := blobs[0].BlobID + + meta, err := st.GetMetadata(ctx, bid) + require.NoError(t, err) + require.NotZero(t, meta) + + bSize := meta.Length + require.NotZero(t, bSize) + + err = st.PutBlob(ctx, bid, gather.FromSlice(bytes.Repeat([]byte{1}, int(bSize))), blob.PutOptions{}) + require.NoError(t, err) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentReadPercentage: 100}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is corrupted") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllNonPrefixedIDs, ContentReadPercentage: 100}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is corrupted and non-prefixed contents are verified") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllPrefixedIDs, ContentReadPercentage: 100}) + require.NoError(t, err, "verification should succeed when a 'p' pack blob is corrupted and prefixed contents are verified") +} + +func TestVerifyContents_MissingPackP(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create pack by writing contents. 
+ _, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello")), "", NoCompression) + require.NoError(t, err) + + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello prefixed")), "k", NoCompression) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + // Delete pack so verification fails + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixRegular) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.NoError(t, st.DeleteBlob(ctx, blobs[0].BlobID)) + + err = bm.VerifyContents(ctx, VerifyOptions{}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is missing") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllNonPrefixedIDs}) + require.Error(t, err, "Verification should fail when a 'p' pack blob is missing and non-prefixed contents are verified") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllPrefixedIDs}) + require.NoError(t, err, "verification should succeed when a 'p' pack blob is missing and prefixed contents are verified") +} + +func TestVerifyContentToPackMapping_MissingPackQ(t *testing.T) { + st := newTestingMapStorage() + bm := newTestWriteManager(t, st) + ctx := testlogging.Context(t) + + // Create a 'p' pack by writing a non-prefixed content + _, err := bm.WriteContent(ctx, gather.FromSlice([]byte("hello")), "", NoCompression) + require.NoError(t, err) + + // Create a 'q' pack by writing a prefixed content + _, err = bm.WriteContent(ctx, gather.FromSlice([]byte("hello prefixed")), "k", NoCompression) + require.NoError(t, err) + + require.NoError(t, bm.Flush(ctx)) + + // Delete the pack with 'q' prefix so verification fails + blobs, err := blob.ListAllBlobs(ctx, st, PackBlobIDPrefixSpecial) + require.NoError(t, err) + require.Len(t, blobs, 1) + require.NoError(t, st.DeleteBlob(ctx, blobs[0].BlobID)) + + err = bm.VerifyContents(ctx, VerifyOptions{}) + 
require.Error(t, err, "verification should fail when a 'q' pack blob is missing") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllPrefixedIDs}) + require.Error(t, err, "verification should fail when a 'q' pack blob is missing and prefixed contents are verified") + require.ErrorIs(t, err, errMissingPacks) + + err = bm.VerifyContents(ctx, VerifyOptions{ContentIDRange: index.AllNonPrefixedIDs}) + require.NoError(t, err, "verification should succeed when a 'q' pack blob is missing and non-prefixed contents are verified") +} diff --git a/repo/ecc/ecc_rs_crc.go b/repo/ecc/ecc_rs_crc.go index ce85da8608e..430eea63a50 100644 --- a/repo/ecc/ecc_rs_crc.go +++ b/repo/ecc/ecc_rs_crc.go @@ -172,6 +172,7 @@ func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, _ []byte, output *gather // Allocate space for the input + padding var inputBuffer gather.WriteBuffer defer inputBuffer.Close() + inputBytes := inputBuffer.MakeContiguous(dataSizeInBlock * sizes.Blocks) binary.BigEndian.PutUint32(inputBytes[:lengthSize], uint32(input.Length())) //nolint:gosec @@ -185,13 +186,16 @@ func (r *ReedSolomonCrcECC) Encrypt(input gather.Bytes, _ []byte, output *gather // Compute and store ECC + checksum var crcBuffer [crcSize]byte + crcBytes := crcBuffer[:] var eccBuffer gather.WriteBuffer defer eccBuffer.Close() + eccBytes := eccBuffer.MakeContiguous(paritySizeInBlock) var maxShards [256][]byte + shards := maxShards[:sizes.DataShards+sizes.ParityShards] inputPos := 0 @@ -255,6 +259,7 @@ func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, _ []byte, output *gather // Allocate space for the input + padding var inputBuffer gather.WriteBuffer defer inputBuffer.Close() + inputBytes := inputBuffer.MakeContiguous((dataPlusCrcSizeInBlock + parityPlusCrcSizeInBlock) * sizes.Blocks) copied := input.AppendToSlice(inputBytes[:0]) @@ -268,6 +273,7 @@ func (r *ReedSolomonCrcECC) Decrypt(input gather.Bytes, _ []byte, output *gather 
dataBytes := inputBytes[parityPlusCrcSizeInBlock*sizes.Blocks:] var maxShards [256][]byte + shards := maxShards[:sizes.DataShards+sizes.ParityShards] dataPos := 0 @@ -351,7 +357,7 @@ func readLength(shards [][]byte, sizes *sizesInfo) (originalSize, startShard, st startShard = 4 startByte = 0 - for i := range 4 { + for i := range lengthBuffer { lengthBuffer[i] = shards[i][0] } @@ -384,8 +390,7 @@ func readLength(shards [][]byte, sizes *sizesInfo) (originalSize, startShard, st originalSize = int(binary.BigEndian.Uint32(lengthBuffer[:])) - //nolint:nakedret - return + return originalSize, startShard, startByte } // Overhead should not be called. It's just implemented because it is in the interface. diff --git a/repo/ecc/ecc_utils.go b/repo/ecc/ecc_utils.go index b64a2f3748e..7ec0f5dfe36 100644 --- a/repo/ecc/ecc_utils.go +++ b/repo/ecc/ecc_utils.go @@ -16,7 +16,7 @@ func computeShards(spaceOverhead float32) (data, parity int) { data = between(applyPercent(parity, 100/spaceOverhead), 128, 254) //nolint:mnd } - return + return data, parity } func between(val, minValue, maxValue int) int { diff --git a/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go b/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go index 42fc8a56138..15a5a7b8e81 100644 --- a/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go +++ b/repo/encryption/aes256_gcm_hmac_sha256_encryptor.go @@ -26,6 +26,7 @@ func (e aes256GCMHmacSha256) aeadForContent(contentID []byte) (cipher.AEAD, erro //nolint:forcetypeassert h := e.hmacPool.Get().(hash.Hash) defer e.hmacPool.Put(h) + h.Reset() if _, err := h.Write(contentID); err != nil { @@ -33,6 +34,7 @@ func (e aes256GCMHmacSha256) aeadForContent(contentID []byte) (cipher.AEAD, erro } var hashBuf [32]byte + key := h.Sum(hashBuf[:0]) c, err := aes.NewCipher(key) diff --git a/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go b/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go index b56852df705..6bb6ce744a1 100644 --- 
a/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go +++ b/repo/encryption/chacha20_poly1305_hmac_sha256_encryptor.go @@ -34,6 +34,7 @@ func (e chacha20poly1305hmacSha256Encryptor) aeadForContent(contentID []byte) (c } var hashBuf [32]byte + key := h.Sum(hashBuf[:0]) //nolint:wrapcheck diff --git a/repo/encryption/encryption_test.go b/repo/encryption/encryption_test.go index 06f023f7366..68e8314c1d0 100644 --- a/repo/encryption/encryption_test.go +++ b/repo/encryption/encryption_test.go @@ -144,6 +144,7 @@ func verifyCiphertextSamples(t *testing.T, masterKey, contentID, payload []byte, func() { var v gather.WriteBuffer defer v.Close() + require.NoError(t, enc.Encrypt(gather.FromSlice(payload), contentID, &v)) t.Errorf("missing ciphertext sample for %q: %q,", encryptionAlgo, hex.EncodeToString(payload)) @@ -186,9 +187,7 @@ func BenchmarkEncryption(b *testing.B) { require.NoError(b, enc.Encrypt(plainText, iv, &warmupOut)) warmupOut.Close() - b.ResetTimer() - - for range b.N { + for b.Loop() { var out gather.WriteBuffer enc.Encrypt(plainText, iv, &out) diff --git a/repo/format/content_format.go b/repo/format/content_format.go index cb0845fd934..a49e21fc257 100644 --- a/repo/format/content_format.go +++ b/repo/format/content_format.go @@ -63,10 +63,10 @@ func (f *ContentFormat) SupportsPasswordChange() bool { // MutableParameters represents parameters of the content manager that can be mutated after the repository // is created. type MutableParameters struct { - Version Version `json:"version,omitempty"` // version number, must be "1", "2" or "3" - MaxPackSize int `json:"maxPackSize,omitempty"` // maximum size of a pack object - IndexVersion int `json:"indexVersion,omitempty"` // force particular index format version (1,2,..) 
- EpochParameters epoch.Parameters `json:"epochParameters,omitempty"` // epoch manager parameters + Version Version `json:"version,omitempty"` // version number, must be "1", "2" or "3" + MaxPackSize int `json:"maxPackSize,omitempty"` // maximum size of a pack object + IndexVersion int `json:"indexVersion,omitempty"` // force particular index format version (1,2,..) + EpochParameters epoch.Parameters `json:"epochParameters"` // epoch manager parameters } // Validate validates the parameters. diff --git a/repo/format/format_blob.go b/repo/format/format_blob.go index 53ae50e39a5..65f35669c5f 100644 --- a/repo/format/format_blob.go +++ b/repo/format/format_blob.go @@ -106,10 +106,7 @@ func RecoverFormatBlob(ctx context.Context, st blob.Storage, blobID blob.ID, opt } func recoverFormatBlobWithLength(ctx context.Context, st blob.Storage, blobID blob.ID, length int64) ([]byte, error) { - chunkLength := int64(maxRecoverChunkLength) - if chunkLength > length { - chunkLength = length - } + chunkLength := min(int64(maxRecoverChunkLength), length) if chunkLength <= minRecoverableChunkLength { return nil, errFormatBlobNotFound diff --git a/repo/format/format_blob_key_derivation_nontesting.go b/repo/format/format_blob_key_derivation_nontesting.go index 40170ffc0ca..12079baca5e 100644 --- a/repo/format/format_blob_key_derivation_nontesting.go +++ b/repo/format/format_blob_key_derivation_nontesting.go @@ -1,5 +1,4 @@ //go:build !testing -// +build !testing package format diff --git a/repo/format/format_blob_key_derivation_testing.go b/repo/format/format_blob_key_derivation_testing.go index 171e7d4e8ba..2a75c914800 100644 --- a/repo/format/format_blob_key_derivation_testing.go +++ b/repo/format/format_blob_key_derivation_testing.go @@ -1,5 +1,4 @@ //go:build testing -// +build testing package format diff --git a/repo/format/upgrade_lock_intent.go b/repo/format/upgrade_lock_intent.go index 8b9e9fd9fe5..a65191ab976 100644 --- a/repo/format/upgrade_lock_intent.go +++ 
b/repo/format/upgrade_lock_intent.go @@ -13,7 +13,7 @@ import ( // repository. type UpgradeLockIntent struct { OwnerID string `json:"ownerID,omitempty"` - CreationTime time.Time `json:"creationTime,omitempty"` + CreationTime time.Time `json:"creationTime"` AdvanceNoticeDuration time.Duration `json:"advanceNoticeDuration,omitempty"` IODrainTimeout time.Duration `json:"ioDrainTimeout,omitempty"` StatusPollInterval time.Duration `json:"statusPollInterval,omitempty"` diff --git a/repo/grpc_repository_client.go b/repo/grpc_repository_client.go index aeaaecb7400..29000fa43ce 100644 --- a/repo/grpc_repository_client.go +++ b/repo/grpc_repository_client.go @@ -122,6 +122,7 @@ func (r *grpcInnerSession) readLoop(ctx context.Context) { r.activeRequestsMutex.Unlock() ch <- msg + if !msg.GetHasMore() { close(ch) } diff --git a/repo/initialize.go b/repo/initialize.go index fd10b1e0433..13fa50feaad 100644 --- a/repo/initialize.go +++ b/repo/initialize.go @@ -4,7 +4,6 @@ import ( "context" "crypto/rand" "io" - "os" "time" "github.com/pkg/errors" @@ -74,16 +73,7 @@ func blobCfgBlobFromOptions(opt *NewRepositoryOptions) format.BlobStorageConfigu func repositoryObjectFormatFromOptions(opt *NewRepositoryOptions) (*format.RepositoryConfig, error) { fv := opt.BlockFormat.Version if fv == 0 { - switch os.Getenv("KOPIA_REPOSITORY_FORMAT_VERSION") { - case "1": - fv = format.FormatVersion1 - case "2": - fv = format.FormatVersion2 - case "3": - fv = format.FormatVersion3 - default: - fv = format.FormatVersion3 - } + fv = format.FormatVersion3 } f := &format.RepositoryConfig{ diff --git a/repo/locking_storage.go b/repo/locking_storage.go index 3500a71a319..b7bd339c08a 100644 --- a/repo/locking_storage.go +++ b/repo/locking_storage.go @@ -2,21 +2,18 @@ package repo import ( "github.com/kopia/kopia/internal/epoch" + "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content" "github.com/kopia/kopia/repo/content/indexblob" "github.com/kopia/kopia/repo/format" ) // 
GetLockingStoragePrefixes Return all prefixes that may be maintained by Object Locking. -func GetLockingStoragePrefixes() []string { - var prefixes []string - // collect prefixes that need to be locked on put - for _, prefix := range content.PackBlobIDPrefixes { - prefixes = append(prefixes, string(prefix)) - } - - prefixes = append(prefixes, indexblob.V0IndexBlobPrefix, epoch.EpochManagerIndexUberPrefix, format.KopiaRepositoryBlobID, - format.KopiaBlobCfgBlobID) - - return prefixes +func GetLockingStoragePrefixes() []blob.ID { + return append([]blob.ID{ + blob.ID(indexblob.V0IndexBlobPrefix), + blob.ID(epoch.EpochManagerIndexUberPrefix), + blob.ID(format.KopiaRepositoryBlobID), + blob.ID(format.KopiaBlobCfgBlobID), + }, content.PackBlobIDPrefixes...) } diff --git a/repo/logging/logging_buf.go b/repo/logging/logging_buf.go deleted file mode 100644 index dcc4f9338b6..00000000000 --- a/repo/logging/logging_buf.go +++ /dev/null @@ -1,142 +0,0 @@ -package logging - -import ( - "strconv" - "sync" - "time" - "unsafe" -) - -// Buffer is a specialized buffer that can be kept in a pool used -// for constructing logging messages without allocation. -type Buffer struct { - buf [1024]byte - validLen int // valid length -} - -//nolint:gochecknoglobals -var bufPool = &sync.Pool{ - New: func() any { - return &Buffer{} - }, -} - -// GetBuffer gets a logging buffer. -func GetBuffer() *Buffer { - //nolint:forcetypeassert - return bufPool.Get().(*Buffer) -} - -// Release releases logging buffer back to the pool. -func (b *Buffer) Release() { - b.Reset() - - bufPool.Put(b) -} - -// Reset resets logging buffer back to zero length. -func (b *Buffer) Reset() { - b.validLen = 0 -} - -// AppendByte appends a single byte/character. -func (b *Buffer) AppendByte(val byte) *Buffer { - if b.validLen < len(b.buf) { - b.buf[b.validLen] = val - b.validLen++ - } - - return b -} - -// AppendString appends a string of characters. 
-func (b *Buffer) AppendString(val string) *Buffer { - vl := len(val) - - if b.validLen+vl > len(b.buf) { - vl = len(b.buf) - b.validLen - } - - if vl > 0 { - copy(b.buf[b.validLen:b.validLen+vl], val) - b.validLen += vl - } - - return b -} - -// AppendTime appends a time representation. -func (b *Buffer) AppendTime(val time.Time, layout string) *Buffer { - var buf [64]byte - - return b.AppendBytes(val.AppendFormat(buf[:0], layout)) -} - -// AppendBytes appends a slice of bytes. -func (b *Buffer) AppendBytes(val []byte) *Buffer { - vl := len(val) - - if b.validLen+vl > len(b.buf) { - vl = len(b.buf) - b.validLen - } - - if vl > 0 { - copy(b.buf[b.validLen:b.validLen+vl], val) - b.validLen += vl - } - - return b -} - -// AppendBoolean appends boolean string ("true" or "false"). -func (b *Buffer) AppendBoolean(val bool) *Buffer { - if val { - return b.AppendString("true") - } - - return b.AppendString("false") -} - -// AppendInt32 appends int32 value formatted as a decimal string. -func (b *Buffer) AppendInt32(val int32) *Buffer { - return b.AppendInt(int64(val), 10) //nolint:mnd -} - -// AppendInt64 appends int64 value formatted as a decimal string. -func (b *Buffer) AppendInt64(val int64) *Buffer { - return b.AppendInt(val, 10) //nolint:mnd -} - -// AppendInt appends integer value formatted as a string in a given base. -func (b *Buffer) AppendInt(val int64, base int) *Buffer { - var buf [64]byte - - return b.AppendBytes(strconv.AppendInt(buf[:0], val, base)) -} - -// AppendUint32 appends uint32 value formatted as a decimal string. -func (b *Buffer) AppendUint32(val uint32) *Buffer { - return b.AppendUint(uint64(val), 10) //nolint:mnd -} - -// AppendUint64 appends uint64 value formatted as a decimal string. -func (b *Buffer) AppendUint64(val uint64) *Buffer { - return b.AppendUint(val, 10) //nolint:mnd -} - -// AppendUint appends unsigned integer value formatted as a string in a given base. 
-func (b *Buffer) AppendUint(val uint64, base int) *Buffer { - var buf [64]byte - - return b.AppendBytes(strconv.AppendUint(buf[:0], val, base)) -} - -// String returns a string value of a buffer. The value is valid as long as -// string remains allocated and no Append*() methods have been called. -func (b *Buffer) String() string { - if b.validLen == 0 { - return "" - } - - return unsafe.String(&b.buf[0], b.validLen) //nolint:gosec -} diff --git a/repo/logging/logging_buf_test.go b/repo/logging/logging_buf_test.go deleted file mode 100644 index 2e5dc31a6d3..00000000000 --- a/repo/logging/logging_buf_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package logging_test - -import ( - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/kopia/kopia/repo/logging" -) - -func TestLoggingBuffer_ReusesMemory(t *testing.T) { - b := logging.GetBuffer() - defer b.Release() - - b.AppendString("xx") - - s := b.String() - require.Equal(t, "xx", s) - - // ensure we're reusing memory - b.Reset() - b.AppendString("yy") - - require.Equal(t, "yy", s) -} - -func TestLoggingBuffer_Overflow(t *testing.T) { - b := logging.GetBuffer() - defer b.Release() - - filler := strings.Repeat("x", 1020) - b.AppendString(filler) - b.AppendString("foobarbaz") - - // only room for 4 more characters - require.Equal(t, filler+"foob", b.String()) - - b.Reset() - - b.AppendString(filler) - b.AppendBytes([]byte{65, 66, 67, 68, 69}) - - // only room for 4 more characters - require.Equal(t, filler+"ABCD", b.String()) -} - -func TestLoggingBuffer_Append(t *testing.T) { - b := logging.GetBuffer() - defer b.Release() - - require.Empty(t, b.String()) - - require.Equal(t, - "xx ABC D -42 -23 true 42 false 23 2000-01-02T03:04:05Z", - b.AppendString("xx"). - AppendString(" "). - AppendBytes([]byte{65, 66, 67}). - AppendString(" "). - AppendByte('D'). - AppendString(" "). - AppendInt32(-42). - AppendString(" "). - AppendInt64(-23). - AppendString(" "). - AppendBoolean(true). 
- AppendString(" "). - AppendUint32(42). - AppendString(" "). - AppendBoolean(false). - AppendString(" "). - AppendUint64(23). - AppendString(" "). - AppendTime(time.Date(2000, 1, 2, 3, 4, 5, 6, time.UTC), time.RFC3339). - String()) -} diff --git a/repo/logging/logging_test.go b/repo/logging/logging_test.go index 9f0001975d1..443678f795a 100644 --- a/repo/logging/logging_test.go +++ b/repo/logging/logging_test.go @@ -103,9 +103,7 @@ func BenchmarkLogger(b *testing.B) { mod1 := logging.Module("mod1") ctx := logging.WithLogger(context.Background(), testlogging.PrintfFactory(b.Logf)) - b.ResetTimer() - - for range b.N { + for b.Loop() { mod1(ctx) } } diff --git a/repo/maintenance/blob_gc.go b/repo/maintenance/blob_gc.go deleted file mode 100644 index 069d5f0f52a..00000000000 --- a/repo/maintenance/blob_gc.go +++ /dev/null @@ -1,139 +0,0 @@ -package maintenance - -import ( - "context" - "time" - - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" - - "github.com/kopia/kopia/internal/stats" - "github.com/kopia/kopia/internal/units" - "github.com/kopia/kopia/repo" - "github.com/kopia/kopia/repo/blob" - "github.com/kopia/kopia/repo/content" -) - -// DeleteUnreferencedBlobsOptions provides option for blob garbage collection algorithm. -type DeleteUnreferencedBlobsOptions struct { - Parallel int - Prefix blob.ID - DryRun bool - NotAfterTime time.Time -} - -// DeleteUnreferencedBlobs deletes o was created after maintenance startederenced by index entries. -// -//nolint:gocyclo,funlen -func DeleteUnreferencedBlobs(ctx context.Context, rep repo.DirectRepositoryWriter, opt DeleteUnreferencedBlobsOptions, safety SafetyParameters) (int, error) { - if opt.Parallel == 0 { - opt.Parallel = 16 - } - - const deleteQueueSize = 100 - - var unreferenced, deleted stats.CountSum - - var eg errgroup.Group - - unused := make(chan blob.Metadata, deleteQueueSize) - - if !opt.DryRun { - // start goroutines to delete blobs as they come. 
- for range opt.Parallel { - eg.Go(func() error { - for bm := range unused { - if err := rep.BlobStorage().DeleteBlob(ctx, bm.BlobID); err != nil { - return errors.Wrapf(err, "unable to delete blob %q", bm.BlobID) - } - - cnt, del := deleted.Add(bm.Length) - if cnt%100 == 0 { - log(ctx).Infof(" deleted %v unreferenced blobs (%v)", cnt, units.BytesString(del)) - } - } - - return nil - }) - } - } - - // iterate unreferenced blobs and count them + optionally send to the channel to be deleted - log(ctx).Info("Looking for unreferenced blobs...") - - var prefixes []blob.ID - if p := opt.Prefix; p != "" { - prefixes = append(prefixes, p) - } else { - prefixes = append(prefixes, content.PackBlobIDPrefixRegular, content.PackBlobIDPrefixSpecial, content.BlobIDPrefixSession) - } - - activeSessions, err := rep.ContentManager().ListActiveSessions(ctx) - if err != nil { - return 0, errors.Wrap(err, "unable to load active sessions") - } - - cutoffTime := opt.NotAfterTime - if cutoffTime.IsZero() { - cutoffTime = rep.Time() - } - - // move the cutoff time a bit forward, because on Windows clock does not reliably move forward so we may end - // up not deleting some blobs - this only really affects tests, since BlobDeleteMinAge provides real - // protection here. - const cutoffTimeSlack = 1 * time.Second - - cutoffTime = cutoffTime.Add(cutoffTimeSlack) - - // iterate all pack blobs + session blobs and keep ones that are too young or - // belong to alive sessions. 
- if err := rep.ContentManager().IterateUnreferencedBlobs(ctx, prefixes, opt.Parallel, func(bm blob.Metadata) error { - if bm.Timestamp.After(cutoffTime) { - log(ctx).Debugf(" preserving %v because it was created after maintenance started", bm.BlobID) - return nil - } - - if age := cutoffTime.Sub(bm.Timestamp); age < safety.BlobDeleteMinAge { - log(ctx).Debugf(" preserving %v because it's too new (age: %v<%v)", bm.BlobID, age, safety.BlobDeleteMinAge) - return nil - } - - sid := content.SessionIDFromBlobID(bm.BlobID) - if s, ok := activeSessions[sid]; ok { - if age := cutoffTime.Sub(s.CheckpointTime); age < safety.SessionExpirationAge { - log(ctx).Debugf(" preserving %v because it's part of an active session (%v)", bm.BlobID, sid) - return nil - } - } - - unreferenced.Add(bm.Length) - - if !opt.DryRun { - unused <- bm - } - - return nil - }); err != nil { - return 0, errors.Wrap(err, "error looking for unreferenced blobs") - } - - close(unused) - - unreferencedCount, unreferencedSize := unreferenced.Approximate() - log(ctx).Debugf("Found %v blobs to delete (%v)", unreferencedCount, units.BytesString(unreferencedSize)) - - // wait for all delete workers to finish. 
- if err := eg.Wait(); err != nil { - return 0, errors.Wrap(err, "worker error") - } - - if opt.DryRun { - return int(unreferencedCount), nil - } - - del, cnt := deleted.Approximate() - - log(ctx).Infof("Deleted total %v unreferenced blobs (%v)", del, units.BytesString(cnt)) - - return int(del), nil -} diff --git a/repo/maintenance/blob_retain.go b/repo/maintenance/blob_retain.go index 1e10b23357b..8c45bf541ea 100644 --- a/repo/maintenance/blob_retain.go +++ b/repo/maintenance/blob_retain.go @@ -3,15 +3,20 @@ package maintenance import ( "context" "runtime" - "sync" "sync/atomic" "time" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/impossible" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/format" + "github.com/kopia/kopia/repo/maintenancestats" ) const parallelBlobRetainCPUMultiplier = 2 @@ -21,106 +26,102 @@ const minRetentionMaintenanceDiff = time.Duration(24) * time.Hour // ExtendBlobRetentionTimeOptions provides options for extending blob retention algorithm. type ExtendBlobRetentionTimeOptions struct { Parallel int - DryRun bool } -// ExtendBlobRetentionTime extends the retention time of all relevant blobs managed by storage engine with Object Locking enabled. -func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWriter, opt ExtendBlobRetentionTimeOptions) (int, error) { - const extendQueueSize = 100 - - var ( - wg sync.WaitGroup - prefixes []blob.ID - cnt = new(uint32) - toExtend = new(uint32) - failedCnt = new(uint32) - ) - - if opt.Parallel == 0 { - opt.Parallel = runtime.NumCPU() * parallelBlobRetainCPUMultiplier - } +// extendBlobRetentionTime extends the retention time of all relevant blobs managed by storage engine with Object Locking enabled. 
+func extendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWriter, opt ExtendBlobRetentionTimeOptions) (*maintenancestats.ExtendBlobRetentionStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:blob-retain", contentlog.RandomSpanID())) + log := rep.LogManager().NewLogger("maintenance-blob-retain") blobCfg, err := rep.FormatManager().BlobCfgBlob(ctx) if err != nil { - return 0, errors.Wrap(err, "blob configuration") + return nil, errors.Wrap(err, "blob configuration") } if !blobCfg.IsRetentionEnabled() { // Blob retention is disabled - log(ctx).Info("Object lock retention is disabled.") - return 0, nil + contentlog.Log(ctx, log, "Object lock retention is disabled.") + + return nil, nil } + const extendQueueSize = 100 + extend := make(chan blob.Metadata, extendQueueSize) extendOpts := blob.ExtendOptions{ RetentionMode: blobCfg.RetentionMode, RetentionPeriod: blobCfg.RetentionPeriod, } - if !opt.DryRun { - // start goroutines to extend blob retention as they come. - for range opt.Parallel { - wg.Add(1) + var ( + wg errgroup.Group + extendedCount, toExtend, failedCount atomic.Uint32 + ) + + if opt.Parallel == 0 { + opt.Parallel = runtime.NumCPU() * parallelBlobRetainCPUMultiplier + } - go func() { - defer wg.Done() + // start goroutines to extend blob retention as they come. 
+ for range opt.Parallel { + wg.Go(func() error { + for bm := range extend { + if err1 := rep.BlobStorage().ExtendBlobRetention(ctx, bm.BlobID, extendOpts); err1 != nil { + contentlog.Log2(ctx, log, + "Failed to extend blob", + blobparam.BlobID("blobID", bm.BlobID), + logparam.Error("error", err1)) - for bm := range extend { - if err1 := rep.BlobStorage().ExtendBlobRetention(ctx, bm.BlobID, extendOpts); err1 != nil { - log(ctx).Errorf("Failed to extend blob %v: %v", bm.BlobID, err1) - atomic.AddUint32(failedCnt, 1) + failedCount.Add(1) - continue - } + continue + } - curCnt := atomic.AddUint32(cnt, 1) - if curCnt%100 == 0 { - log(ctx).Infof(" extended %v blobs", curCnt) - } + if currentCount := extendedCount.Add(1); currentCount%100 == 0 { + contentlog.Log1(ctx, log, "extended blobs", logparam.UInt32("count", currentCount)) } - }() - } - } + } - // Convert prefixes from string to BlobID. - for _, pfx := range repo.GetLockingStoragePrefixes() { - prefixes = append(prefixes, blob.ID(pfx)) + return nil + }) } // iterate all relevant (active, extendable) blobs and count them + optionally send to the channel to be extended - log(ctx).Info("Extending retention time for blobs...") + contentlog.Log(ctx, log, "Extending retention time for blobs...") - err = blob.IterateAllPrefixesInParallel(ctx, opt.Parallel, rep.BlobStorage(), prefixes, func(bm blob.Metadata) error { - if !opt.DryRun { - extend <- bm - } + err = blob.IterateAllPrefixesInParallel(ctx, opt.Parallel, rep.BlobStorage(), repo.GetLockingStoragePrefixes(), func(bm blob.Metadata) error { + extend <- bm - atomic.AddUint32(toExtend, 1) + toExtend.Add(1) return nil }) close(extend) - log(ctx).Infof("Found %v blobs to extend", *toExtend) - // wait for all extend workers to finish. - wg.Wait() + contentlog.Log1(ctx, log, "Found blobs to extend", logparam.UInt32("count", toExtend.Load())) + + errWait := wg.Wait() // wait for all extend workers to finish. 
+ impossible.PanicOnError(errWait) - if *failedCnt > 0 { - return 0, errors.Errorf("Failed to extend %v blobs", *failedCnt) + if count := failedCount.Load(); count > 0 { + return nil, errors.Errorf("Failed to extend %v blobs", count) } if err != nil { - return 0, errors.Wrap(err, "error iterating packs") + return nil, errors.Wrap(err, "error iterating packs") } - if opt.DryRun { - return int(*toExtend), nil + result := &maintenancestats.ExtendBlobRetentionStats{ + ToExtendBlobCount: toExtend.Load(), + ExtendedBlobCount: extendedCount.Load(), + RetentionPeriod: extendOpts.RetentionPeriod.String(), } - log(ctx).Infof("Extended total %v blobs", *cnt) + contentlog.Log1(ctx, log, "Extended retention time for blobs", result) - return int(*cnt), nil + return result, nil } // CheckExtendRetention verifies if extension can be enabled due to maintenance and blob parameters. @@ -130,7 +131,7 @@ func CheckExtendRetention(ctx context.Context, blobCfg format.BlobStorageConfigu } if !p.FullCycle.Enabled { - log(ctx).Warn("Object Lock extension will not function because Full-Maintenance is disabled") + userLog(ctx).Warn("Object Lock extension will not function because Full-Maintenance is disabled") } if blobCfg.RetentionPeriod > 0 && blobCfg.RetentionPeriod-p.FullCycle.Interval < minRetentionMaintenanceDiff { diff --git a/repo/maintenance/blob_retain_test.go b/repo/maintenance/blob_retain_test.go index c4f657265cc..4d78cb16e7a 100644 --- a/repo/maintenance/blob_retain_test.go +++ b/repo/maintenance/blob_retain_test.go @@ -71,8 +71,11 @@ func (s *formatSpecificTestSuite) TestExtendBlobRetentionTime(t *testing.T) { earliestExpiry = ta.NowFunc()().Add(period) // extend retention time of all blobs - _, err = maintenance.ExtendBlobRetentionTime(ctx, env.RepositoryWriter, maintenance.ExtendBlobRetentionTimeOptions{}) + stats, err := maintenance.ExtendBlobRetentionTime(ctx, env.RepositoryWriter, maintenance.ExtendBlobRetentionTimeOptions{}) require.NoError(t, err) + require.Equal(t, 
uint32(4), stats.ToExtendBlobCount) + require.Equal(t, uint32(4), stats.ExtendedBlobCount) + require.Equal(t, "24h0m0s", stats.RetentionPeriod) gotMode, expiry, err = st.GetRetention(ctx, blobsBefore[lastBlobIdx].BlobID) require.NoError(t, err, "getting blob retention info") @@ -120,8 +123,9 @@ func (s *formatSpecificTestSuite) TestExtendBlobRetentionTimeDisabled(t *testing require.NoError(t, err, "Altering expired object failed") // extend retention time of all blobs - _, err = maintenance.ExtendBlobRetentionTime(ctx, env.RepositoryWriter, maintenance.ExtendBlobRetentionTimeOptions{}) + stats, err := maintenance.ExtendBlobRetentionTime(ctx, env.RepositoryWriter, maintenance.ExtendBlobRetentionTimeOptions{}) require.NoError(t, err) + require.Nil(t, stats) _, err = st.TouchBlob(ctx, blobsBefore[lastBlobIdx].BlobID, time.Hour) require.NoError(t, err, "Altering expired object failed") diff --git a/repo/maintenance/cleanup_logs.go b/repo/maintenance/cleanup_logs.go index 25a47f5617c..fb74131c64e 100644 --- a/repo/maintenance/cleanup_logs.go +++ b/repo/maintenance/cleanup_logs.go @@ -8,9 +8,11 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/clock" - "github.com/kopia/kopia/internal/units" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/maintenancestats" ) // LogRetentionOptions provides options for logs retention. @@ -42,7 +44,12 @@ func defaultLogRetention() LogRetentionOptions { } // CleanupLogs deletes old logs blobs beyond certain age, total size or count. 
-func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRetentionOptions) ([]blob.Metadata, error) { +func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRetentionOptions) (*maintenancestats.CleanupLogsStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:cleanup-logs", contentlog.RandomSpanID())) + + log := rep.LogManager().NewLogger("maintenance-cleanup-logs") + if opt.TimeFunc == nil { opt.TimeFunc = clock.Now } @@ -57,14 +64,14 @@ func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRe return allLogBlobs[i].Timestamp.After(allLogBlobs[j].Timestamp) }) - var totalSize int64 + var retainedSize int64 deletePosition := len(allLogBlobs) for i, bm := range allLogBlobs { - totalSize += bm.Length + retainedSize += bm.Length - if totalSize > opt.MaxTotalSize && opt.MaxTotalSize > 0 { + if retainedSize > opt.MaxTotalSize && opt.MaxTotalSize > 0 { deletePosition = i break } @@ -82,7 +89,21 @@ func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRe toDelete := allLogBlobs[deletePosition:] - log(ctx).Debugf("Keeping %v logs of total size %v", deletePosition, units.BytesString(totalSize)) + var toDeleteSize int64 + for _, bm := range toDelete { + toDeleteSize += bm.Length + } + + result := &maintenancestats.CleanupLogsStats{ + RetainedBlobCount: deletePosition, + RetainedBlobSize: retainedSize, + ToDeleteBlobCount: len(toDelete), + ToDeleteBlobSize: toDeleteSize, + DeletedBlobCount: 0, + DeletedBlobSize: 0, + } + + contentlog.Log1(ctx, log, "Clean up logs", result) if !opt.DryRun { for _, bm := range toDelete { @@ -90,7 +111,10 @@ func CleanupLogs(ctx context.Context, rep repo.DirectRepositoryWriter, opt LogRe return nil, errors.Wrapf(err, "error deleting log %v", bm.BlobID) } } + + result.DeletedBlobCount = result.ToDeleteBlobCount + result.DeletedBlobSize = result.ToDeleteBlobSize } - return toDelete, nil + return result, nil } diff --git 
a/repo/maintenance/content_index_to_pack_check.go b/repo/maintenance/content_index_to_pack_check.go new file mode 100644 index 00000000000..eee36cde43c --- /dev/null +++ b/repo/maintenance/content_index_to_pack_check.go @@ -0,0 +1,79 @@ +package maintenance + +import ( + "context" + "math/rand/v2" + "os" + "strconv" + + "github.com/pkg/errors" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/content/index" + "github.com/kopia/kopia/repo/maintenancestats" +) + +// Checks the consistency of the mapping from content index entries to packs, +// to verify that all the referenced packs are present in storage. +func checkContentIndexToPacks(ctx context.Context, r content.Reader) error { + const verifyContentsDefaultParallelism = 5 + + opts := content.VerifyOptions{ + ContentIDRange: index.AllIDs, + ContentReadPercentage: 0, + IncludeDeletedContents: true, + ContentIterateParallelism: verifyContentsDefaultParallelism, + } + + if err := r.VerifyContents(ctx, opts); err != nil { + return errors.Wrap(err, "maintenance verify contents") + } + + return nil +} + +func shouldRunContentIndexVerify(ctx context.Context) bool { + const envName = "KOPIA_MAINTENANCE_CONTENT_VERIFY_PERCENTAGE" + + v := os.Getenv(envName) + if v == "" { + return false + } + + percentage, err := strconv.ParseFloat(v, 64) + if err != nil { + userLog(ctx).Warnf("The '%s' environment variable appears to have a non numeric value: '%q', %s", envName, v, err) + + return false + } + + if rand.Float64() < percentage/100 { //nolint:gosec + return true + } + + return false +} + +func reportRunAndMaybeCheckContentIndex(ctx context.Context, rep repo.DirectRepositoryWriter, taskType TaskType, s *Schedule, run func() (maintenancestats.Kind, error)) error { + if !shouldRunContentIndexVerify(ctx) { + return ReportRun(ctx, rep, taskType, s, run) + } + + return ReportRun(ctx, rep, taskType, s, func() (maintenancestats.Kind, error) { + if err := 
checkContentIndexToPacks(ctx, rep.ContentReader()); err != nil { + return nil, err + } + + stats, err := run() + if err != nil { + return nil, err + } + + if err := checkContentIndexToPacks(ctx, rep.ContentReader()); err != nil { + return nil, err + } + + return stats, nil + }) +} diff --git a/repo/maintenance/content_rewrite.go b/repo/maintenance/content_rewrite.go index 360f96d6379..c94ad11175d 100644 --- a/repo/maintenance/content_rewrite.go +++ b/repo/maintenance/content_rewrite.go @@ -6,13 +6,19 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "github.com/pkg/errors" - "github.com/kopia/kopia/internal/units" + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" + "github.com/kopia/kopia/internal/stats" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/maintenancestats" ) const parallelContentRewritesCPUMultiplier = 2 @@ -37,23 +43,29 @@ type contentInfoOrError struct { // RewriteContents rewrites contents according to provided criteria and creates new // blobs and index entries to point at them. 
-func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *RewriteContentsOptions, safety SafetyParameters) error { +// +//nolint:funlen +func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt *RewriteContentsOptions, safety SafetyParameters) (*maintenancestats.RewriteContentsStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:content-rewrite", contentlog.RandomSpanID())) + + log := rep.LogManager().NewLogger("maintenance-content-rewrite") + if opt == nil { - return errors.New("missing options") + return nil, errors.New("missing options") } if opt.ShortPacks { - log(ctx).Info("Rewriting contents from short packs...") + contentlog.Log(ctx, log, "Rewriting contents from short packs...") } else { - log(ctx).Info("Rewriting contents...") + contentlog.Log(ctx, log, "Rewriting contents...") } cnt := getContentToRewrite(ctx, rep, opt) var ( - mu sync.Mutex - totalBytes int64 - failedCount int + toRewrite, retained, rewritten stats.CountSum + failedCount atomic.Uint64 ) if opt.Parallel == 0 { @@ -63,35 +75,38 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt * var wg sync.WaitGroup for range opt.Parallel { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { for c := range cnt { if c.err != nil { - mu.Lock() - failedCount++ - mu.Unlock() + failedCount.Add(1) return } - var optDeleted string - if c.Deleted { - optDeleted = " (deleted)" - } - age := rep.Time().Sub(c.Timestamp()) if age < safety.RewriteMinAge { - log(ctx).Debugf("Not rewriting content %v (%v bytes) from pack %v%v %v, because it's too new.", c.ContentID, c.PackedLength, c.PackBlobID, optDeleted, age) + contentlog.Log5(ctx, log, + "Not rewriting content", + contentparam.ContentID("contentID", c.ContentID), + logparam.UInt32("bytes", c.PackedLength), + blobparam.BlobID("packBlobID", c.PackBlobID), + logparam.Bool("deleted", c.Deleted), + logparam.Duration("age", age)) + + 
retained.Add(int64(c.PackedLength)) + continue } - log(ctx).Debugf("Rewriting content %v (%v bytes) from pack %v%v %v", c.ContentID, c.PackedLength, c.PackBlobID, optDeleted, age) - mu.Lock() - totalBytes += int64(c.PackedLength) - mu.Unlock() + contentlog.Log5(ctx, log, + "Rewriting content", + contentparam.ContentID("contentID", c.ContentID), + logparam.UInt32("bytes", c.PackedLength), + blobparam.BlobID("packBlobID", c.PackBlobID), + logparam.Bool("deleted", c.Deleted), + logparam.Duration("age", age)) + + toRewrite.Add(int64(c.PackedLength)) if opt.DryRun { continue @@ -101,28 +116,51 @@ func RewriteContents(ctx context.Context, rep repo.DirectRepositoryWriter, opt * // provide option to ignore failures when rewriting deleted contents during maintenance // this is for advanced use only if os.Getenv("KOPIA_IGNORE_MAINTENANCE_REWRITE_ERROR") != "" && c.Deleted { - log(ctx).Infof("IGNORED: unable to rewrite deleted content %q: %v", c.ContentID, err) + contentlog.Log2(ctx, log, + "IGNORED: unable to rewrite deleted content", + contentparam.ContentID("contentID", c.ContentID), + logparam.Error("error", err)) } else { - log(ctx).Infof("unable to rewrite content %q: %v", c.ContentID, err) - mu.Lock() - failedCount++ - mu.Unlock() + contentlog.Log2(ctx, log, + "unable to rewrite content", + contentparam.ContentID("contentID", c.ContentID), + logparam.Error("error", err)) + + failedCount.Add(1) } + } else { + rewritten.Add(int64(c.PackedLength)) } } - }() + }) } wg.Wait() - log(ctx).Infof("Total bytes rewritten %v", units.BytesString(totalBytes)) + toRewriteCount, toRewriteBytes := toRewrite.Approximate() + retainedCount, retainedBytes := retained.Approximate() + rewrittenCount, rewrittenBytes := rewritten.Approximate() + + result := &maintenancestats.RewriteContentsStats{ + ToRewriteContentCount: int(toRewriteCount), + ToRewriteContentSize: toRewriteBytes, + RewrittenContentCount: int(rewrittenCount), + RewrittenContentSize: rewrittenBytes, + RetainedContentCount: 
int(retainedCount), + RetainedContentSize: retainedBytes, + } + + contentlog.Log1(ctx, log, "Rewritten contents", result) + + if failedCount.Load() == 0 { + if err := rep.ContentManager().Flush(ctx); err != nil { + return nil, errors.Wrap(err, "error flushing repo") + } - if failedCount == 0 { - //nolint:wrapcheck - return rep.ContentManager().Flush(ctx) + return result, nil } - return errors.Errorf("failed to rewrite %v contents", failedCount) + return nil, errors.Errorf("failed to rewrite %v contents", failedCount.Load()) } func getContentToRewrite(ctx context.Context, rep repo.DirectRepository, opt *RewriteContentsOptions) <-chan contentInfoOrError { diff --git a/repo/maintenance/content_rewrite_test.go b/repo/maintenance/content_rewrite_test.go index 72611821610..d08a7c0e04c 100644 --- a/repo/maintenance/content_rewrite_test.go +++ b/repo/maintenance/content_rewrite_test.go @@ -12,6 +12,7 @@ import ( "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/maintenance" + "github.com/kopia/kopia/repo/maintenancestats" "github.com/kopia/kopia/repo/object" ) @@ -22,6 +23,7 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { opt *maintenance.RewriteContentsOptions wantPDelta int wantQDelta int + stats *maintenancestats.RewriteContentsStats }{ { numPContents: 2, @@ -31,6 +33,12 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { }, wantPDelta: 1, wantQDelta: 1, + stats: &maintenancestats.RewriteContentsStats{ + ToRewriteContentCount: 5, + ToRewriteContentSize: 320, + RewrittenContentCount: 5, + RewrittenContentSize: 320, + }, }, { numPContents: 2, @@ -41,6 +49,10 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { }, wantPDelta: 0, wantQDelta: 0, + stats: &maintenancestats.RewriteContentsStats{ + ToRewriteContentCount: 5, + ToRewriteContentSize: 320, + }, }, { numPContents: 2, @@ -51,6 +63,12 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { }, 
wantPDelta: 1, wantQDelta: 0, + stats: &maintenancestats.RewriteContentsStats{ + ToRewriteContentCount: 2, + ToRewriteContentSize: 128, + RewrittenContentCount: 2, + RewrittenContentSize: 128, + }, }, { numPContents: 1, @@ -60,6 +78,7 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { }, wantPDelta: 0, // single pack won't get rewritten wantQDelta: 0, + stats: &maintenancestats.RewriteContentsStats{}, }, { numPContents: 1, @@ -69,6 +88,7 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { }, wantPDelta: 0, wantQDelta: 0, + stats: &maintenancestats.RewriteContentsStats{}, }, } @@ -103,8 +123,11 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { qBlobsBefore, err := blob.ListAllBlobs(ctx, env.RepositoryWriter.BlobStorage(), "q") require.NoError(t, err) + var stats *maintenancestats.RewriteContentsStats + require.NoError(t, repo.DirectWriteSession(ctx, env.RepositoryWriter, repo.WriteSessionOptions{}, func(ctx context.Context, w repo.DirectRepositoryWriter) error { - return maintenance.RewriteContents(ctx, w, tc.opt, maintenance.SafetyNone) + stats, err = maintenance.RewriteContents(ctx, w, tc.opt, maintenance.SafetyNone) + return err })) pBlobsAfter, err := blob.ListAllBlobs(ctx, env.RepositoryWriter.BlobStorage(), "p") @@ -115,6 +138,12 @@ func (s *formatSpecificTestSuite) TestContentRewrite(t *testing.T) { require.Equal(t, tc.wantPDelta, len(pBlobsAfter)-len(pBlobsBefore), "invalid p blob count delta") require.Equal(t, tc.wantQDelta, len(qBlobsAfter)-len(qBlobsBefore), "invalid q blob count delta") + + if tc.stats == nil { + require.Nil(t, stats) + } else { + require.Equal(t, *tc.stats, *stats) + } }) } } diff --git a/repo/maintenance/drop_deleted_contents.go b/repo/maintenance/drop_deleted_contents.go index 3ed8d4cacb7..bc83c40b370 100644 --- a/repo/maintenance/drop_deleted_contents.go +++ b/repo/maintenance/drop_deleted_contents.go @@ -4,13 +4,21 @@ import ( "context" "time" + 
"github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/content/indexblob" + "github.com/kopia/kopia/repo/maintenancestats" ) // dropDeletedContents rewrites indexes while dropping deleted contents above certain age. -func dropDeletedContents(ctx context.Context, rep repo.DirectRepositoryWriter, dropDeletedBefore time.Time, safety SafetyParameters) error { - log(ctx).Infof("Dropping contents deleted before %v", dropDeletedBefore) +func dropDeletedContents(ctx context.Context, rep repo.DirectRepositoryWriter, dropDeletedBefore time.Time, safety SafetyParameters) (*maintenancestats.CompactIndexesStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:drop-deleted-contents", contentlog.RandomSpanID())) + + log := rep.LogManager().NewLogger("maintenance-drop-deleted-contents") + + contentlog.Log1(ctx, log, "Dropping deleted contents", logparam.Time("dropDeletedBefore", dropDeletedBefore)) //nolint:wrapcheck return rep.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{ diff --git a/repo/maintenance/helper_test.go b/repo/maintenance/helper_test.go new file mode 100644 index 00000000000..28518694c12 --- /dev/null +++ b/repo/maintenance/helper_test.go @@ -0,0 +1,14 @@ +package maintenance + +import ( + "context" + + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/maintenancestats" +) + +// helpers exported for tests + +func ExtendBlobRetentionTime(ctx context.Context, rep repo.DirectRepositoryWriter, opt ExtendBlobRetentionTimeOptions) (*maintenancestats.ExtendBlobRetentionStats, error) { + return extendBlobRetentionTime(ctx, rep, opt) +} diff --git a/repo/maintenance/index_compaction.go b/repo/maintenance/index_compaction.go index a8341ce0916..df852fba7f7 100644 --- a/repo/maintenance/index_compaction.go +++ b/repo/maintenance/index_compaction.go @@ -3,13 +3,17 @@ package maintenance import ( "context" + 
"github.com/kopia/kopia/internal/contentlog" "github.com/kopia/kopia/repo/content/indexblob" + "github.com/kopia/kopia/repo/maintenancestats" ) // runTaskIndexCompactionQuick rewrites index blobs to reduce their count but does not drop any contents. func runTaskIndexCompactionQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { - return ReportRun(ctx, runParams.rep, TaskIndexCompaction, s, func() error { - log(ctx).Info("Compacting indexes...") + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskIndexCompaction, s, func() (maintenancestats.Kind, error) { + log := runParams.rep.LogManager().NewLogger("maintenance-index-compaction") + + contentlog.Log(ctx, log, "Compacting indexes...") const maxSmallBlobsForIndexCompaction = 8 diff --git a/repo/maintenance/maintenance_run.go b/repo/maintenance/maintenance_run.go index 0a3168c67fc..ac942b67a30 100644 --- a/repo/maintenance/maintenance_run.go +++ b/repo/maintenance/maintenance_run.go @@ -10,14 +10,18 @@ import ( "github.com/pkg/errors" "github.com/kopia/kopia/internal/clock" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/epoch" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/content" "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/logging" + "github.com/kopia/kopia/repo/maintenancestats" ) -var log = logging.Module("maintenance") +// User-visible log output. +var userLog = logging.Module("maintenance") const maxClockSkew = 5 * time.Minute @@ -56,7 +60,7 @@ const ( // shouldRun returns Mode if repository is due for periodic maintenance. 
func shouldRun(ctx context.Context, rep repo.DirectRepository, p *Params) (Mode, error) { if myUsername := rep.ClientOptions().UsernameAtHost(); p.Owner != myUsername { - log(ctx).Debugf("maintenance owned by another user '%v'", p.Owner) + userLog(ctx).Debugf("maintenance owned by another user '%v'", p.Owner) return ModeNone, nil } @@ -68,25 +72,25 @@ func shouldRun(ctx context.Context, rep repo.DirectRepository, p *Params) (Mode, // check full cycle first, as it does more than the quick cycle if p.FullCycle.Enabled { if !rep.Time().Before(s.NextFullMaintenanceTime) { - log(ctx).Debug("due for full maintenance cycle") + userLog(ctx).Debug("due for full maintenance cycle") return ModeFull, nil } - log(ctx).Debugf("not due for full maintenance cycle until %v", s.NextFullMaintenanceTime) + userLog(ctx).Debugf("not due for full maintenance cycle until %v", s.NextFullMaintenanceTime) } else { - log(ctx).Debug("full maintenance cycle not enabled") + userLog(ctx).Debug("full maintenance cycle not enabled") } // no time for full cycle, check quick cycle if p.QuickCycle.Enabled { if !rep.Time().Before(s.NextQuickMaintenanceTime) { - log(ctx).Debug("due for quick maintenance cycle") + userLog(ctx).Debug("due for quick maintenance cycle") return ModeQuick, nil } - log(ctx).Debugf("not due for quick maintenance cycle until %v", s.NextQuickMaintenanceTime) + userLog(ctx).Debugf("not due for quick maintenance cycle until %v", s.NextQuickMaintenanceTime) } else { - log(ctx).Debug("quick maintenance cycle not enabled") + userLog(ctx).Debug("quick maintenance cycle not enabled") } return ModeNone, nil @@ -106,13 +110,13 @@ func updateSchedule(ctx context.Context, runParams RunParameters) error { // on full cycle, also update the quick cycle s.NextFullMaintenanceTime = rep.Time().Add(p.FullCycle.Interval) s.NextQuickMaintenanceTime = rep.Time().Add(p.QuickCycle.Interval) - log(ctx).Debugf("scheduling next full cycle at %v", s.NextFullMaintenanceTime) - log(ctx).Debugf("scheduling 
next quick cycle at %v", s.NextQuickMaintenanceTime) + userLog(ctx).Debugf("scheduling next full cycle at %v", s.NextFullMaintenanceTime) + userLog(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime) return SetSchedule(ctx, rep, s) case ModeQuick: - log(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime) + userLog(ctx).Debugf("scheduling next quick cycle at %v", s.NextQuickMaintenanceTime) s.NextQuickMaintenanceTime = rep.Time().Add(p.QuickCycle.Interval) return SetSchedule(ctx, rep, s) @@ -149,9 +153,10 @@ func (e NotOwnedError) Error() string { // lock can be acquired. Lock is passed to the function, which ensures that every call to Run() // is within the exclusive context. func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mode, force bool, cb func(ctx context.Context, runParams RunParameters) error) error { - rep.DisableIndexRefresh() + ctx = contentlog.WithParams(ctx, + logparam.String("span:maintenance", contentlog.RandomSpanID())) - ctx = rep.AlsoLogToContentLog(ctx) + rep.DisableIndexRefresh() p, err := GetParams(ctx, rep) if err != nil { @@ -170,12 +175,12 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod } if mode == ModeNone { - log(ctx).Debug("not due for maintenance") + userLog(ctx).Debug("not due for maintenance") return nil } lockFile := rep.ConfigFilename() + ".mlock" - log(ctx).Debugf("Acquiring maintenance lock in file %v", lockFile) + userLog(ctx).Debugf("Acquiring maintenance lock in file %v", lockFile) // acquire local lock on a config file l := flock.New(lockFile) @@ -186,7 +191,7 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod } if !ok { - log(ctx).Debug("maintenance is already in progress locally") + userLog(ctx).Debug("maintenance is already in progress locally") return nil } @@ -211,8 +216,8 @@ func RunExclusive(ctx context.Context, rep repo.DirectRepositoryWriter, mode Mod return 
errors.Wrap(err, "error checking for clock skew") } - log(ctx).Infof("Running %v maintenance...", runParams.Mode) - defer log(ctx).Infof("Finished %v maintenance.", runParams.Mode) + userLog(ctx).Infof("Running %v maintenance...", runParams.Mode) + defer userLog(ctx).Infof("Finished %v maintenance.", runParams.Mode) if err := runParams.rep.Refresh(ctx); err != nil { return errors.Wrap(err, "error refreshing indexes before maintenance") @@ -252,6 +257,8 @@ func Run(ctx context.Context, runParams RunParameters, safety SafetyParameters) } func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety SafetyParameters) error { + log := runParams.rep.LogManager().NewLogger("maintenance-quick") + s, err := GetSchedule(ctx, runParams.rep) if err != nil { return errors.Wrap(err, "unable to get schedule") @@ -259,7 +266,7 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa em, ok, emerr := runParams.rep.ContentManager().EpochManager(ctx) if ok { - log(ctx).Debug("running quick epoch maintenance only") + userLog(ctx).Debug("running quick epoch maintenance only") return runTaskEpochMaintenanceQuick(ctx, em, runParams, s) } @@ -275,7 +282,7 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa return errors.Wrap(err, "error rewriting metadata contents") } } else { - notRewritingContents(ctx) + notRewritingContents(ctx, log) } if shouldDeleteOrphanedPacks(runParams.rep.Time(), s, safety) { @@ -286,64 +293,75 @@ func runQuickMaintenance(ctx context.Context, runParams RunParameters, safety Sa // running full orphaned blob deletion, otherwise next quick maintenance will start a quick rewrite // and we'd never delete blobs orphaned by full rewrite. 
if hadRecentFullRewrite(s) { - log(ctx).Debug("Had recent full rewrite - performing full blob deletion.") - err = runTaskDeleteOrphanedBlobsFull(ctx, runParams, s, safety) + userLog(ctx).Debug("Had recent full rewrite - performing full pack deletion.") + err = runTaskDeleteOrphanedPacksFull(ctx, runParams, s, safety) } else { - log(ctx).Debug("Performing quick blob deletion.") - err = runTaskDeleteOrphanedBlobsQuick(ctx, runParams, s, safety) + userLog(ctx).Debug("Performing quick pack deletion.") + err = runTaskDeleteOrphanedPacksQuick(ctx, runParams, s, safety) } if err != nil { - return errors.Wrap(err, "error deleting unreferenced metadata blobs") + return errors.Wrap(err, "error deleting unreferenced metadata packs") } } else { - notDeletingOrphanedBlobs(ctx, s, safety) + notDeletingOrphanedPacks(ctx, log, s, safety) } // consolidate many smaller indexes into fewer larger ones. - if err := runTaskIndexCompactionQuick(ctx, runParams, s, safety); err != nil { + if err := runTaskIndexCompactionQuick(contentlog.WithParams(ctx, logparam.String("span:index-compaction", contentlog.RandomSpanID())), runParams, s, safety); err != nil { return errors.Wrap(err, "error performing index compaction") } // clean up logs last - if err := runTaskCleanupLogs(ctx, runParams, s); err != nil { + if err := runTaskCleanupLogs(contentlog.WithParams(ctx, logparam.String("span:cleanup-logs", contentlog.RandomSpanID())), runParams, s); err != nil { return errors.Wrap(err, "error cleaning up logs") } return nil } -func notRewritingContents(ctx context.Context) { - log(ctx).Info("Previous content rewrite has not been finalized yet, waiting until the next blob deletion.") +func notRewritingContents(ctx context.Context, log *contentlog.Logger) { + contentlog.Log(ctx, log, "Previous content rewrite has not been finalized yet, waiting until the next blob deletion.") } -func notDeletingOrphanedBlobs(ctx context.Context, s *Schedule, safety SafetyParameters) { - left := nextBlobDeleteTime(s, 
safety).Sub(clock.Now()).Truncate(time.Second) +func notDeletingOrphanedPacks(ctx context.Context, log *contentlog.Logger, s *Schedule, safety SafetyParameters) { + left := nextPackDeleteTime(s, safety).Sub(clock.Now()).Truncate(time.Second) - log(ctx).Infof("Skipping blob deletion because not enough time has passed yet (%v left).", left) + contentlog.Log1(ctx, log, "Skipping pack deletion because not enough time has passed yet", logparam.Duration("left", left)) } func runTaskCleanupLogs(ctx context.Context, runParams RunParameters, s *Schedule) error { - return ReportRun(ctx, runParams.rep, TaskCleanupLogs, s, func() error { - deleted, err := CleanupLogs(ctx, runParams.rep, runParams.Params.LogRetention.OrDefault()) + return ReportRun(ctx, runParams.rep, TaskCleanupLogs, s, func() (maintenancestats.Kind, error) { + stats, err := CleanupLogs(ctx, runParams.rep, runParams.Params.LogRetention.OrDefault()) - log(ctx).Infof("Cleaned up %v logs.", len(deleted)) + var deletedLogCount int + if stats != nil { + deletedLogCount = stats.DeletedBlobCount + } - return err + userLog(ctx).Infof("Cleaned up %v logs.", deletedLogCount) + + return stats, err }) } func runTaskEpochAdvance(ctx context.Context, em *epoch.Manager, runParams RunParameters, s *Schedule) error { - return ReportRun(ctx, runParams.rep, TaskEpochAdvance, s, func() error { - log(ctx).Info("Cleaning up no-longer-needed epoch markers...") - return errors.Wrap(em.MaybeAdvanceWriteEpoch(ctx), "error advancing epoch marker") + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochAdvance, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Advancing epoch markers...") + + stats, err := em.MaybeAdvanceWriteEpoch(ctx) + + return stats, errors.Wrap(err, "error advancing epoch marker") }) } func runTaskEpochMaintenanceQuick(ctx context.Context, em *epoch.Manager, runParams RunParameters, s *Schedule) error { - err := ReportRun(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error { - 
log(ctx).Info("Compacting an eligible uncompacted epoch...") - return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch") + err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochCompactSingle, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Compacting an eligible uncompacted epoch...") + + stats, err := em.MaybeCompactSingleEpoch(ctx) + + return stats, errors.Wrap(err, "error compacting single epoch") }) if err != nil { return err @@ -365,9 +383,12 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s } // compact a single epoch - if err := ReportRun(ctx, runParams.rep, TaskEpochCompactSingle, s, func() error { - log(ctx).Info("Compacting an eligible uncompacted epoch...") - return errors.Wrap(em.MaybeCompactSingleEpoch(ctx), "error compacting single epoch") + if err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochCompactSingle, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Compacting an eligible uncompacted epoch...") + + stats, err := em.MaybeCompactSingleEpoch(ctx) + + return stats, errors.Wrap(err, "error compacting single epoch") }); err != nil { return err } @@ -377,33 +398,42 @@ func runTaskEpochMaintenanceFull(ctx context.Context, runParams RunParameters, s } // compact range - if err := ReportRun(ctx, runParams.rep, TaskEpochGenerateRange, s, func() error { - log(ctx).Info("Attempting to compact a range of epoch indexes ...") + if err := reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochGenerateRange, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Attempting to compact a range of epoch indexes ...") - return errors.Wrap(em.MaybeGenerateRangeCheckpoint(ctx), "error creating epoch range indexes") + stats, err := em.MaybeGenerateRangeCheckpoint(ctx) + + return stats, errors.Wrap(err, "error creating epoch range indexes") }); err != nil { return err } // clean up epoch markers - err := ReportRun(ctx, 
runParams.rep, TaskEpochCleanupMarkers, s, func() error { - log(ctx).Info("Cleaning up unneeded epoch markers...") + err := ReportRun(ctx, runParams.rep, TaskEpochCleanupMarkers, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Cleaning up unneeded epoch markers...") + + stats, err := em.CleanupMarkers(ctx) - return errors.Wrap(em.CleanupMarkers(ctx), "error removing epoch markers") + return stats, errors.Wrap(err, "error removing epoch markers") }) if err != nil { return err } - return ReportRun(ctx, runParams.rep, TaskEpochDeleteSupersededIndexes, s, func() error { - log(ctx).Info("Cleaning up old index blobs which have already been compacted...") - return errors.Wrap(em.CleanupSupersededIndexes(ctx), "error removing superseded epoch index blobs") + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskEpochDeleteSupersededIndexes, s, func() (maintenancestats.Kind, error) { + userLog(ctx).Info("Cleaning up old index blobs which have already been compacted...") + + stats, err := em.CleanupSupersededIndexes(ctx) + + return stats, errors.Wrap(err, "error removing superseded epoch index blobs") }) } func runTaskDropDeletedContentsFull(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { var safeDropTime time.Time + log := runParams.rep.LogManager().NewLogger("maintenance-drop-deleted-contents") + if safety.RequireTwoGCCycles { safeDropTime = findSafeDropTime(s.Runs[TaskSnapshotGarbageCollection], safety) } else { @@ -411,19 +441,21 @@ func runTaskDropDeletedContentsFull(ctx context.Context, runParams RunParameters } if safeDropTime.IsZero() { - log(ctx).Info("Not enough time has passed since previous successful Snapshot GC. Will try again next time.") + contentlog.Log(ctx, log, + "Not forgetting deleted contents yet since not enough time has passed since previous successful Snapshot GC. 
Will try again next time.") + return nil } - log(ctx).Infof("Found safe time to drop indexes: %v", safeDropTime) + contentlog.Log1(ctx, log, "Found safe time to drop indexes", logparam.Time("safeDropTime", safeDropTime)) - return ReportRun(ctx, runParams.rep, TaskDropDeletedContentsFull, s, func() error { + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskDropDeletedContentsFull, s, func() (maintenancestats.Kind, error) { return dropDeletedContents(ctx, runParams.rep, safeDropTime, safety) }) } func runTaskRewriteContentsQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { - return ReportRun(ctx, runParams.rep, TaskRewriteContentsQuick, s, func() error { + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskRewriteContentsQuick, s, func() (maintenancestats.Kind, error) { return RewriteContents(ctx, runParams.rep, &RewriteContentsOptions{ ContentIDRange: index.AllPrefixedIDs, PackPrefix: content.PackBlobIDPrefixSpecial, @@ -433,7 +465,7 @@ func runTaskRewriteContentsQuick(ctx context.Context, runParams RunParameters, s } func runTaskRewriteContentsFull(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { - return ReportRun(ctx, runParams.rep, TaskRewriteContentsFull, s, func() error { + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskRewriteContentsFull, s, func() (maintenancestats.Kind, error) { return RewriteContents(ctx, runParams.rep, &RewriteContentsOptions{ ContentIDRange: index.AllIDs, ShortPacks: true, @@ -441,37 +473,34 @@ func runTaskRewriteContentsFull(ctx context.Context, runParams RunParameters, s }) } -func runTaskDeleteOrphanedBlobsFull(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { - return ReportRun(ctx, runParams.rep, TaskDeleteOrphanedBlobsFull, s, func() error { - _, err := DeleteUnreferencedBlobs(ctx, runParams.rep, DeleteUnreferencedBlobsOptions{ +func 
runTaskDeleteOrphanedPacksFull(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskDeleteOrphanedBlobsFull, s, func() (maintenancestats.Kind, error) { + return DeleteUnreferencedPacks(ctx, runParams.rep, DeleteUnreferencedPacksOptions{ NotAfterTime: runParams.MaintenanceStartTime, Parallel: runParams.Params.ListParallelism, }, safety) - - return err }) } -func runTaskDeleteOrphanedBlobsQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { - return ReportRun(ctx, runParams.rep, TaskDeleteOrphanedBlobsQuick, s, func() error { - _, err := DeleteUnreferencedBlobs(ctx, runParams.rep, DeleteUnreferencedBlobsOptions{ +func runTaskDeleteOrphanedPacksQuick(ctx context.Context, runParams RunParameters, s *Schedule, safety SafetyParameters) error { + return reportRunAndMaybeCheckContentIndex(ctx, runParams.rep, TaskDeleteOrphanedBlobsQuick, s, func() (maintenancestats.Kind, error) { + return DeleteUnreferencedPacks(ctx, runParams.rep, DeleteUnreferencedPacksOptions{ NotAfterTime: runParams.MaintenanceStartTime, Prefix: content.PackBlobIDPrefixSpecial, Parallel: runParams.Params.ListParallelism, }, safety) - - return err }) } func runTaskExtendBlobRetentionTimeFull(ctx context.Context, runParams RunParameters, s *Schedule) error { - return ReportRun(ctx, runParams.rep, TaskExtendBlobRetentionTimeFull, s, func() error { - _, err := ExtendBlobRetentionTime(ctx, runParams.rep, ExtendBlobRetentionTimeOptions{}) - return err + return ReportRun(ctx, runParams.rep, TaskExtendBlobRetentionTimeFull, s, func() (maintenancestats.Kind, error) { + return extendBlobRetentionTime(ctx, runParams.rep, ExtendBlobRetentionTimeOptions{}) }) } func runFullMaintenance(ctx context.Context, runParams RunParameters, safety SafetyParameters) error { + log := runParams.rep.LogManager().NewLogger("maintenance-full") + s, err := GetSchedule(ctx, 
runParams.rep) if err != nil { return errors.Wrap(err, "unable to get schedule") @@ -484,7 +513,7 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf return errors.Wrap(err, "error rewriting contents in short packs") } } else { - notRewritingContents(ctx) + notRewritingContents(ctx, log) } // rewrite indexes by dropping content entries that have been marked @@ -495,11 +524,11 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf if shouldDeleteOrphanedPacks(runParams.rep.Time(), s, safety) { // delete orphaned packs after some time. - if err := runTaskDeleteOrphanedBlobsFull(ctx, runParams, s, safety); err != nil { + if err := runTaskDeleteOrphanedPacksFull(ctx, runParams, s, safety); err != nil { return errors.Wrap(err, "error deleting unreferenced blobs") } } else { - notDeletingOrphanedBlobs(ctx, s, safety) + notDeletingOrphanedPacks(ctx, log, s, safety) } // extend retention-time on supported storage. @@ -508,7 +537,7 @@ func runFullMaintenance(ctx context.Context, runParams RunParameters, safety Saf return errors.Wrap(err, "error extending object lock retention time") } } else { - log(ctx).Debug("Extending object lock retention-period is disabled.") + userLog(ctx).Debug("Extending object lock retention-period is disabled.") } if err := runTaskEpochMaintenanceFull(ctx, runParams, s); err != nil { @@ -561,10 +590,10 @@ func shouldFullRewriteContents(s *Schedule, safety SafetyParameters) bool { // rewritten packs become orphaned immediately but if we don't wait before their deletion // clients who have old indexes cached may be trying to read pre-rewrite blobs. 
func shouldDeleteOrphanedPacks(now time.Time, s *Schedule, safety SafetyParameters) bool { - return !now.Before(nextBlobDeleteTime(s, safety)) + return !now.Before(nextPackDeleteTime(s, safety)) } -func nextBlobDeleteTime(s *Schedule, safety SafetyParameters) time.Time { +func nextPackDeleteTime(s *Schedule, safety SafetyParameters) time.Time { latestContentRewriteEndTime := maxEndTime(s.Runs[TaskRewriteContentsFull], s.Runs[TaskRewriteContentsQuick]) if latestContentRewriteEndTime.IsZero() { return time.Time{} diff --git a/repo/maintenance/maintenance_safety.go b/repo/maintenance/maintenance_safety.go index 7cd5516b2de..a2bafa3e19b 100644 --- a/repo/maintenance/maintenance_safety.go +++ b/repo/maintenance/maintenance_safety.go @@ -26,7 +26,7 @@ type SafetyParameters struct { DropContentFromIndexExtraMargin time.Duration // Blob GC: Delete unused blobs above this age. - BlobDeleteMinAge time.Duration + PackDeleteMinAge time.Duration // Blob GC: Drop incomplete session blobs above this age. SessionExpirationAge time.Duration @@ -43,7 +43,7 @@ var ( // delays, but it is safe only if no other kopia clients are running and storage backend is // strongly consistent. SafetyNone = SafetyParameters{ - BlobDeleteMinAge: 0, + PackDeleteMinAge: 0, DropContentFromIndexExtraMargin: 0, MarginBetweenSnapshotGC: 0, MinContentAgeSubjectToGC: 0, @@ -56,7 +56,7 @@ var ( // SafetyFull has default safety parameters which allow safe GC concurrent with snapshotting // by other Kopia clients. 
SafetyFull = SafetyParameters{ - BlobDeleteMinAge: 24 * time.Hour, //nolint:mnd + PackDeleteMinAge: 24 * time.Hour, //nolint:mnd DropContentFromIndexExtraMargin: time.Hour, MarginBetweenSnapshotGC: 4 * time.Hour, //nolint:mnd MinContentAgeSubjectToGC: 24 * time.Hour, //nolint:mnd diff --git a/repo/maintenance/maintenance_safety_test.go b/repo/maintenance/maintenance_safety_test.go index 05256c7f22d..31929dcead9 100644 --- a/repo/maintenance/maintenance_safety_test.go +++ b/repo/maintenance/maintenance_safety_test.go @@ -38,6 +38,7 @@ func (s *formatSpecificTestSuite) TestMaintenanceSafety(t *testing.T) { fmt.Fprintf(ow, "hello world") var err error + objectID, err = ow.Result() return err diff --git a/repo/maintenance/maintenance_schedule.go b/repo/maintenance/maintenance_schedule.go index abcfc2ffeb1..545190fc021 100644 --- a/repo/maintenance/maintenance_schedule.go +++ b/repo/maintenance/maintenance_schedule.go @@ -14,28 +14,30 @@ import ( "github.com/kopia/kopia/internal/gather" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/maintenancestats" ) const ( - maintenanceScheduleKeySize = 32 - maintenanceScheduleBlobID = "kopia.maintenance" + maintenanceScheduleKeySize = 32 + maintenanceScheduleBlobID = "kopia.maintenance" + maintenanceScheduleKeyPurpose = "maintenance schedule" ) //nolint:gochecknoglobals var ( - maintenanceScheduleKeyPurpose = []byte("maintenance schedule") maintenanceScheduleAEADExtraData = []byte("maintenance") ) // maxRetainedRunInfoPerRunType the maximum number of retained RunInfo entries per run type. -const maxRetainedRunInfoPerRunType = 5 +const maxRetainedRunInfoPerRunType = 50 // RunInfo represents information about a single run of a maintenance task. 
type RunInfo struct { - Start time.Time `json:"start"` - End time.Time `json:"end"` - Success bool `json:"success,omitempty"` - Error string `json:"error,omitempty"` + Start time.Time `json:"start"` + End time.Time `json:"end"` + Success bool `json:"success,omitempty"` + Error string `json:"error,omitempty"` + Extra []maintenancestats.Extra `json:"extra,omitempty"` } // Schedule keeps track of scheduled maintenance times. @@ -185,7 +187,7 @@ func SetSchedule(ctx context.Context, rep repo.DirectRepositoryWriter, s *Schedu } // ReportRun reports timing of a maintenance run and persists it in repository. -func ReportRun(ctx context.Context, rep repo.DirectRepositoryWriter, taskType TaskType, s *Schedule, run func() error) error { +func ReportRun(ctx context.Context, rep repo.DirectRepositoryWriter, taskType TaskType, s *Schedule, run func() (maintenancestats.Kind, error)) error { if s == nil { var err error @@ -199,7 +201,7 @@ func ReportRun(ctx context.Context, rep repo.DirectRepositoryWriter, taskType Ta Start: rep.Time(), } - runErr := run() + stats, runErr := run() ri.End = rep.Time() @@ -207,13 +209,28 @@ func ReportRun(ctx context.Context, rep repo.DirectRepositoryWriter, taskType Ta ri.Error = runErr.Error() } else { ri.Success = true + ri.Extra = buildRunStats(ctx, stats) } s.ReportRun(taskType, ri) if err := SetSchedule(ctx, rep, s); err != nil { - log(ctx).Errorf("unable to report run: %v", err) + userLog(ctx).Errorf("unable to report run: %v", err) } return runErr } + +func buildRunStats(ctx context.Context, stats maintenancestats.Kind) []maintenancestats.Extra { + if stats == nil { + return nil + } + + extra, err := maintenancestats.BuildExtra(stats) + if err != nil { + userLog(ctx).Warnf("error building raw data from stats %v, err %v", stats, err) + return nil + } + + return []maintenancestats.Extra{extra} +} diff --git a/repo/maintenance/pack_gc.go b/repo/maintenance/pack_gc.go new file mode 100644 index 00000000000..f2e13ed52c3 --- /dev/null +++ 
b/repo/maintenance/pack_gc.go @@ -0,0 +1,176 @@ +package maintenance + +import ( + "context" + "time" + + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/kopia/kopia/internal/blobparam" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/stats" + "github.com/kopia/kopia/repo" + "github.com/kopia/kopia/repo/blob" + "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/maintenancestats" +) + +// DeleteUnreferencedPacksOptions provides option for pack garbage collection algorithm. +type DeleteUnreferencedPacksOptions struct { + Parallel int + Prefix blob.ID + DryRun bool + NotAfterTime time.Time +} + +// DeleteUnreferencedPacks deletes pack blobs that are unreferenced by index entries. +// +//nolint:gocyclo,funlen +func DeleteUnreferencedPacks(ctx context.Context, rep repo.DirectRepositoryWriter, opt DeleteUnreferencedPacksOptions, safety SafetyParameters) (*maintenancestats.DeleteUnreferencedPacksStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:pack-gc", contentlog.RandomSpanID())) + + log := rep.LogManager().NewLogger("maintenance-pack-gc") + + if opt.Parallel == 0 { + opt.Parallel = 16 + } + + const deleteQueueSize = 100 + + var unreferenced, deleted, retained stats.CountSum + + var eg errgroup.Group + + unused := make(chan blob.Metadata, deleteQueueSize) + + if !opt.DryRun { + // start goroutines to delete packs as they come. 
+ for range opt.Parallel { + eg.Go(func() error { + for bm := range unused { + if err := rep.BlobStorage().DeleteBlob(ctx, bm.BlobID); err != nil { + return errors.Wrapf(err, "unable to delete pack blob %q", bm.BlobID) + } + + cnt, del := deleted.Add(bm.Length) + if cnt%100 == 0 { + contentlog.Log2(ctx, log, "deleted unreferenced pack blobs", logparam.UInt32("count", cnt), logparam.Int64("bytes", del)) + } + } + + return nil + }) + } + } + + // iterate unreferenced packs and count them + optionally send to the channel to be deleted + contentlog.Log(ctx, log, "Looking for unreferenced pack blobs...") + + var prefixes []blob.ID + if p := opt.Prefix; p != "" { + prefixes = append(prefixes, p) + } else { + prefixes = append(prefixes, content.PackBlobIDPrefixRegular, content.PackBlobIDPrefixSpecial, content.BlobIDPrefixSession) + } + + activeSessions, err := rep.ContentManager().ListActiveSessions(ctx) + if err != nil { + return nil, errors.Wrap(err, "unable to load active sessions") + } + + cutoffTime := opt.NotAfterTime + if cutoffTime.IsZero() { + cutoffTime = rep.Time() + } + + // move the cutoff time a bit forward, because on Windows clock does not reliably move forward so we may end + // up not deleting some blobs - this only really affects tests, since PackDeleteMinAge provides real + // protection here. + const cutoffTimeSlack = 1 * time.Second + + cutoffTime = cutoffTime.Add(cutoffTimeSlack) + + // iterate all pack blobs + session blobs and keep ones that are too young or + // belong to alive sessions.
+ if err := rep.ContentManager().IterateUnreferencedPacks(ctx, prefixes, opt.Parallel, func(bm blob.Metadata) error { + if bm.Timestamp.After(cutoffTime) { + retained.Add(bm.Length) + + contentlog.Log3(ctx, log, + "preserving pack - after cutoff time", + blobparam.BlobID("blobID", bm.BlobID), + logparam.Time("cutoffTime", cutoffTime), + logparam.Time("timestamp", bm.Timestamp)) + return nil + } + + if age := cutoffTime.Sub(bm.Timestamp); age < safety.PackDeleteMinAge { + retained.Add(bm.Length) + + contentlog.Log2(ctx, log, + "preserving pack - below min age", + blobparam.BlobID("blobID", bm.BlobID), + logparam.Duration("age", age)) + return nil + } + + sid := content.SessionIDFromBlobID(bm.BlobID) + if s, ok := activeSessions[sid]; ok { + if age := cutoffTime.Sub(s.CheckpointTime); age < safety.SessionExpirationAge { + retained.Add(bm.Length) + + contentlog.Log2(ctx, log, + "preserving pack - part of active session", + blobparam.BlobID("blobID", bm.BlobID), + logparam.String("sessionID", string(sid))) + return nil + } + } + + unreferenced.Add(bm.Length) + + if !opt.DryRun { + unused <- bm + } + + return nil + }); err != nil { + return nil, errors.Wrap(err, "error looking for unreferenced pack blobs") + } + + close(unused) + + unreferencedCount, unreferencedSize := unreferenced.Approximate() + retainedCount, retainedSize := retained.Approximate() + + result := &maintenancestats.DeleteUnreferencedPacksStats{ + UnreferencedPackCount: unreferencedCount, + UnreferencedTotalSize: unreferencedSize, + RetainedPackCount: retainedCount, + RetainedTotalSize: retainedSize, + DeletedPackCount: 0, + DeletedTotalSize: 0, + } + + contentlog.Log1(ctx, log, "Found unreferenced pack blobs to delete", result) + + // wait for all delete workers to finish. 
+ if err := eg.Wait(); err != nil { + return nil, errors.Wrap(err, "worker error") + } + + if opt.DryRun { + return result, nil + } + + deletedCount, deletedSize := deleted.Approximate() + result.DeletedPackCount = deletedCount + result.DeletedTotalSize = deletedSize + + contentlog.Log1(ctx, log, "Completed deleting unreferenced pack blobs", result) + + return result, nil +} diff --git a/repo/maintenance/blob_gc_test.go b/repo/maintenance/pack_gc_test.go similarity index 90% rename from repo/maintenance/blob_gc_test.go rename to repo/maintenance/pack_gc_test.go index 542f7746e88..78186e9a378 100644 --- a/repo/maintenance/blob_gc_test.go +++ b/repo/maintenance/pack_gc_test.go @@ -30,7 +30,7 @@ var testHMACSecret = []byte{1, 2, 3} var testMasterKey = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} -func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t *testing.T) { +func (s *formatSpecificTestSuite) TestDeleteUnreferencedPacks(t *testing.T) { // set up fake clock which is initially synchronized to wall clock time // and moved at the same speed but which can be moved forward. 
ta := faketime.NewClockTimeWithOffset(0) @@ -70,7 +70,7 @@ func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t *testing.T) { verifyBlobExists(t, env.RepositoryWriter.BlobStorage(), extraBlobID2) // new blobs not will be deleted because of minimum age requirement - _, err = maintenance.DeleteUnreferencedBlobs(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedBlobsOptions{}, maintenance.SafetyFull) + _, err = maintenance.DeleteUnreferencedPacks(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedPacksOptions{}, maintenance.SafetyFull) require.NoError(t, err) verifyBlobExists(t, env.RepositoryWriter.BlobStorage(), extraBlobID1) @@ -78,12 +78,12 @@ func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t *testing.T) { // mixed safety parameters safetyFastDeleteLongSessionExpiration := maintenance.SafetyParameters{ - BlobDeleteMinAge: 1, + PackDeleteMinAge: 1, SessionExpirationAge: 4 * 24 * time.Hour, } // new blobs will be deleted - _, err = maintenance.DeleteUnreferencedBlobs(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedBlobsOptions{}, maintenance.SafetyNone) + _, err = maintenance.DeleteUnreferencedPacks(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedPacksOptions{}, maintenance.SafetyNone) require.NoError(t, err) verifyBlobNotFound(t, env.RepositoryWriter.BlobStorage(), extraBlobID1) @@ -107,7 +107,7 @@ func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t *testing.T) { CheckpointTime: ta.NowFunc()(), }) - _, err = maintenance.DeleteUnreferencedBlobs(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedBlobsOptions{}, safetyFastDeleteLongSessionExpiration) + _, err = maintenance.DeleteUnreferencedPacks(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedPacksOptions{}, safetyFastDeleteLongSessionExpiration) require.NoError(t, err) verifyBlobExists(t, env.RepositoryWriter.BlobStorage(), extraBlobIDWithSession1) @@ -119,7 +119,7 @@ func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t 
*testing.T) { // now finish session 2 env.RepositoryWriter.BlobStorage().DeleteBlob(ctx, session2Marker) - _, err = maintenance.DeleteUnreferencedBlobs(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedBlobsOptions{}, safetyFastDeleteLongSessionExpiration) + _, err = maintenance.DeleteUnreferencedPacks(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedPacksOptions{}, safetyFastDeleteLongSessionExpiration) require.NoError(t, err) verifyBlobExists(t, env.RepositoryWriter.BlobStorage(), extraBlobIDWithSession1) @@ -131,7 +131,7 @@ func (s *formatSpecificTestSuite) TestDeleteUnreferencedBlobs(t *testing.T) { // now move time into the future making session 1 timed out ta.Advance(7 * 24 * time.Hour) - _, err = maintenance.DeleteUnreferencedBlobs(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedBlobsOptions{}, maintenance.SafetyFull) + _, err = maintenance.DeleteUnreferencedPacks(ctx, env.RepositoryWriter, maintenance.DeleteUnreferencedPacksOptions{}, maintenance.SafetyFull) require.NoError(t, err) verifyBlobNotFound(t, env.RepositoryWriter.BlobStorage(), extraBlobIDWithSession1) diff --git a/repo/maintenancestats/builder.go b/repo/maintenancestats/builder.go new file mode 100644 index 00000000000..ce67fcdcb5d --- /dev/null +++ b/repo/maintenancestats/builder.go @@ -0,0 +1,82 @@ +// Package maintenancestats manages statistics for maintenance tasks. +package maintenancestats + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Extra holds the data for a maintenance statistics. +type Extra struct { + Kind string `json:"kind,omitempty"` + Data json.RawMessage `json:"data,omitempty"` +} + +// Summarizer defines the methods for summarizing a maintenance statistics. +type Summarizer interface { + Summary() string +} + +// Kind defines the methods for detecting kind of a maintenance statistics. +type Kind interface { + Kind() string +} + +// ErrUnSupportedStatKindError is reported for unsupported stats kind. 
+var ErrUnSupportedStatKindError = errors.New("unsupported stats kind") + +// BuildExtra builds an Extra from maintenance statistics. +func BuildExtra(stats Kind) (Extra, error) { + if stats == nil { + return Extra{}, errors.New("invalid stats") + } + + bytes, err := json.Marshal(stats) + if err != nil { + return Extra{}, errors.Wrapf(err, "error marshaling stats %v", stats) + } + + return Extra{ + Kind: stats.Kind(), + Data: bytes, + }, nil +} + +// BuildFromExtra builds maintenance statistics from an Extra and returns a Summarizer. +func BuildFromExtra(stats Extra) (Summarizer, error) { + var result Summarizer + + switch stats.Kind { + case cleanupMarkersStatsKind: + result = &CleanupMarkersStats{} + case cleanupSupersededIndexesStatsKind: + result = &CleanupSupersededIndexesStats{} + case generateRangeCheckpointStatsKind: + result = &GenerateRangeCheckpointStats{} + case advanceEpochStatsKind: + result = &AdvanceEpochStats{} + case compactSingleEpochStatsKind: + result = &CompactSingleEpochStats{} + case compactIndexesStatsKind: + result = &CompactIndexesStats{} + case deleteUnreferencedPacksStatsKind: + result = &DeleteUnreferencedPacksStats{} + case extendBlobRetentionStatsKind: + result = &ExtendBlobRetentionStats{} + case cleanupLogsStatsKind: + result = &CleanupLogsStats{} + case rewriteContentsStatsKind: + result = &RewriteContentsStats{} + case snapshotGCStatsKind: + result = &SnapshotGCStats{} + default: + return nil, errors.Wrapf(ErrUnSupportedStatKindError, "invalid kind for stats %v", stats) + } + + if err := json.Unmarshal(stats.Data, result); err != nil { + return nil, errors.Wrapf(err, "error unmarshaling raw stats %v of kind %s to %T", stats.Data, stats.Kind, result) + } + + return result, nil +} diff --git a/repo/maintenancestats/builder_test.go b/repo/maintenancestats/builder_test.go new file mode 100644 index 00000000000..3969fad6a1b --- /dev/null +++ b/repo/maintenancestats/builder_test.go @@ -0,0 +1,393 @@ +package maintenancestats + +import 
( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type unmarshalable struct { + Data string + Channel chan int +} + +func (u *unmarshalable) Kind() string { + return u.Data +} + +func TestBuildExtraSuccess(t *testing.T) { + cases := []struct { + name string + stats Kind + expected Extra + }{ + { + name: "CleanupMarkersStats", + stats: &CleanupMarkersStats{ + DeletedEpochMarkerBlobCount: 10, + DeletedWatermarkBlobCount: 20, + }, + expected: Extra{ + Kind: "cleanupMarkersStats", + Data: []byte(`{"deletedEpochMarkerBlobCount":10,"deletedWatermarkBlobCount":20}`), + }, + }, + { + name: "GenerateRangeCheckpointStats", + stats: &GenerateRangeCheckpointStats{ + RangeMinEpoch: 3, + RangeMaxEpoch: 5, + }, + expected: Extra{ + Kind: generateRangeCheckpointStatsKind, + Data: []byte(`{"rangeMinEpoch":3,"rangeMaxEpoch":5}`), + }, + }, + { + name: "advanceEpochStats", + stats: &AdvanceEpochStats{ + CurrentEpoch: 3, + WasAdvanced: true, + }, + expected: Extra{ + Kind: advanceEpochStatsKind, + Data: []byte(`{"currentEpoch":3,"wasAdvanced":true}`), + }, + }, + { + name: "CompactSingleEpochStats", + stats: &CompactSingleEpochStats{ + SupersededIndexBlobCount: 3, + SupersededIndexTotalSize: 4096, + Epoch: 1, + }, + expected: Extra{ + Kind: compactSingleEpochStatsKind, + Data: []byte(`{"supersededIndexBlobCount":3,"supersededIndexTotalSize":4096,"epoch":1}`), + }, + }, + { + name: "CompactIndexesStats", + stats: &CompactIndexesStats{ + DroppedContentsDeletedBefore: time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + expected: Extra{ + Kind: compactIndexesStatsKind, + Data: []byte(`{"droppedContentsDeletedBefore":"2025-01-01T00:00:00Z"}`), + }, + }, + { + name: "DeleteUnreferencedPacksStats", + stats: &DeleteUnreferencedPacksStats{ + UnreferencedPackCount: 50, + UnreferencedTotalSize: 4096, + DeletedPackCount: 20, + DeletedTotalSize: 2048, + RetainedPackCount: 30, + RetainedTotalSize: 2048, + }, + expected: Extra{ + Kind: 
deleteUnreferencedPacksStatsKind, + Data: []byte(`{"unreferencedPackCount":50,"unreferencedTotalSize":4096,"deletedPackCount":20,"deletedTotalSize":2048,"retainedPackCount":30,"retainedTotalSize":2048}`), + }, + }, + { + name: "ExtendBlobRetentionStats", + stats: &ExtendBlobRetentionStats{ + ToExtendBlobCount: 10, + ExtendedBlobCount: 10, + RetentionPeriod: (time.Hour * 24 * 15).String(), + }, + expected: Extra{ + Kind: extendBlobRetentionStatsKind, + Data: []byte(`{"toExtendBlobCount":10,"extendedBlobCount":10,"retentionPeriod":"360h0m0s"}`), + }, + }, + { + name: "CleanupLogsStats", + stats: &CleanupLogsStats{ + ToDeleteBlobCount: 10, + ToDeleteBlobSize: 1024, + DeletedBlobCount: 5, + DeletedBlobSize: 512, + RetainedBlobCount: 20, + RetainedBlobSize: 2048, + }, + expected: Extra{ + Kind: cleanupLogsStatsKind, + Data: []byte(`{"toDeleteBlobCount":10,"toDeleteBlobSize":1024,"deletedBlobCount":5,"deletedBlobSize":512,"retainedBlobCount":20,"retainedBlobSize":2048}`), + }, + }, + { + name: "RewriteContentsStats", + stats: &RewriteContentsStats{ + ToRewriteContentCount: 30, + ToRewriteContentSize: 3092, + RewrittenContentCount: 10, + RewrittenContentSize: 1024, + RetainedContentCount: 20, + RetainedContentSize: 2048, + }, + expected: Extra{ + Kind: rewriteContentsStatsKind, + Data: []byte(`{"toRewriteContentCount":30,"toRewriteContentSize":3092,"rewrittenContentCount":10,"rewrittenContentSize":1024,"retainedContentCount":20,"retainedContentSize":2048}`), + }, + }, + { + name: "SnapshotGCStats", + stats: &SnapshotGCStats{ + UnreferencedContentCount: 10, + UnreferencedContentSize: 1024, + DeletedContentCount: 5, + DeletedContentSize: 512, + InUseContentCount: 20, + InUseContentSize: 2048, + InUseSystemContentCount: 1, + InUseSystemContentSize: 128, + UnreferencedRecentContentCount: 30, + UnreferencedRecentContentSize: 3072, + RecoveredContentCount: 40, + RecoveredContentSize: 4096, + }, + expected: Extra{ + Kind: snapshotGCStatsKind, + Data: 
[]byte(`{"unreferencedContentCount":10,"unreferencedContentSize":1024,` + + `"deletedContentCount":5,"deletedContentSize":512,` + + `"unreferencedRecentContentCount":30,"unreferencedRecentContentSize":3072,` + + `"inUseContentCount":20,"inUseContentSize":2048,` + + `"inUseSystemContentCount":1,"inUseSystemContentSize":128,` + + `"recoveredContentCount":40,"recoveredContentSize":4096}`), + }, + }, + } + + for _, tc := range cases { + result, err := BuildExtra(tc.stats) + + require.NoError(t, err) + require.Equal(t, tc.expected, result) + } +} + +func TestBuildExtraError(t *testing.T) { + um := unmarshalable{ + Data: "fake", + } + + cases := []struct { + name string + stats Kind + expectedErr string + }{ + { + name: "nil stats", + expectedErr: "invalid stats", + }, + { + name: "marshal fails", + stats: &um, + expectedErr: "error marshaling stats &{fake }: json: unsupported type: chan int", + }, + } + + for _, tc := range cases { + result, err := BuildExtra(tc.stats) + + require.EqualError(t, err, tc.expectedErr) + require.Equal(t, Extra{}, result) + } +} + +func TestBuildFromExtraSuccess(t *testing.T) { + cases := []struct { + name string + stats Extra + expected Summarizer + }{ + { + name: "cleanupMarkersStats", + stats: Extra{ + Kind: cleanupMarkersStatsKind, + Data: []byte(`{"deletedEpochMarkerBlobCount":10,"deletedWatermarkBlobCount":20}`), + }, + expected: &CleanupMarkersStats{ + DeletedEpochMarkerBlobCount: 10, + DeletedWatermarkBlobCount: 20, + }, + }, + { + name: "cleanupSupersededIndexesStats", + stats: Extra{ + Kind: cleanupSupersededIndexesStatsKind, + Data: []byte(`{"maxReplacementTime":"2025-01-01T00:00:00Z","deletedBlobCount":10,"deletedTotalSize":1024}`), + }, + expected: &CleanupSupersededIndexesStats{ + MaxReplacementTime: time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC), + DeletedBlobCount: 10, + DeletedTotalSize: 1024, + }, + }, + { + name: "generateRangeCheckpointStats", + stats: Extra{ + Kind: generateRangeCheckpointStatsKind, + Data: 
[]byte(`{"rangeMinEpoch":3,"rangeMaxEpoch":5}`), + }, + expected: &GenerateRangeCheckpointStats{ + RangeMinEpoch: 3, + RangeMaxEpoch: 5, + }, + }, + { + name: "advanceEpochStats", + stats: Extra{ + Kind: advanceEpochStatsKind, + Data: []byte(`{"currentEpoch":3,"wasAdvanced":true}`), + }, + expected: &AdvanceEpochStats{ + CurrentEpoch: 3, + WasAdvanced: true, + }, + }, + { + name: "CompactSingleEpochStats", + stats: Extra{ + Kind: compactSingleEpochStatsKind, + Data: []byte(`{"supersededIndexBlobCount":3,"supersededIndexTotalSize":4096,"epoch":1}`), + }, + expected: &CompactSingleEpochStats{ + SupersededIndexBlobCount: 3, + SupersededIndexTotalSize: 4096, + Epoch: 1, + }, + }, + { + name: "CompactIndexesStats", + stats: Extra{ + Kind: compactIndexesStatsKind, + Data: []byte(`{"droppedContentsDeletedBefore":"2025-01-01T00:00:00Z"}`), + }, + expected: &CompactIndexesStats{ + DroppedContentsDeletedBefore: time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + }, + { + name: "DeleteUnreferencedPacksStats", + stats: Extra{ + Kind: deleteUnreferencedPacksStatsKind, + Data: []byte(`{"unreferencedPackCount":50,"unreferencedTotalSize":4096,"deletedPackCount":20,"deletedTotalSize":2048,"retainedPackCount":30,"retainedTotalSize":2048}`), + }, + expected: &DeleteUnreferencedPacksStats{ + UnreferencedPackCount: 50, + UnreferencedTotalSize: 4096, + DeletedPackCount: 20, + DeletedTotalSize: 2048, + RetainedPackCount: 30, + RetainedTotalSize: 2048, + }, + }, + { + name: "ExtendBlobRetentionStats", + stats: Extra{ + Kind: extendBlobRetentionStatsKind, + Data: []byte(`{"toExtendBlobCount":10,"extendedBlobCount":10,"retentionPeriod":"360h0m0s"}`), + }, + expected: &ExtendBlobRetentionStats{ + ToExtendBlobCount: 10, + ExtendedBlobCount: 10, + RetentionPeriod: (time.Hour * 24 * 15).String(), + }, + }, + { + name: "CleanupLogsStats", + stats: Extra{ + Kind: cleanupLogsStatsKind, + Data: 
[]byte(`{"toDeleteBlobCount":10,"toDeleteBlobSize":1024,"retainedBlobCount":20,"retainedBlobSize":2048,"deletedBlobCount":5,"deletedBlobSize":512}`), + }, + expected: &CleanupLogsStats{ + ToDeleteBlobCount: 10, + ToDeleteBlobSize: 1024, + RetainedBlobCount: 20, + RetainedBlobSize: 2048, + DeletedBlobCount: 5, + DeletedBlobSize: 512, + }, + }, + { + name: "RewriteContentsStats", + stats: Extra{ + Kind: rewriteContentsStatsKind, + Data: []byte(`{"toRewriteContentCount":30,"toRewriteContentSize":3092,"rewrittenContentCount":10,"rewrittenContentSize":1024,"retainedContentCount":20,"retainedContentSize":2048}`), + }, + expected: &RewriteContentsStats{ + ToRewriteContentCount: 30, + ToRewriteContentSize: 3092, + RewrittenContentCount: 10, + RewrittenContentSize: 1024, + RetainedContentCount: 20, + RetainedContentSize: 2048, + }, + }, + { + name: "SnapshotGCStats", + stats: Extra{ + Kind: snapshotGCStatsKind, + Data: []byte(`{"unreferencedContentCount":10,"unreferencedContentSize":1024,` + + `"deletedContentCount":5,"deletedContentSize":512,` + + `"unreferencedRecentContentCount":30,"unreferencedRecentContentSize":3072,` + + `"inUseContentCount":20,"inUseContentSize":2048,` + + `"inUseSystemContentCount":1,"inUseSystemContentSize":128,` + + `"recoveredContentCount":40,"recoveredContentSize":4096}`), + }, + expected: &SnapshotGCStats{ + UnreferencedContentCount: 10, + UnreferencedContentSize: 1024, + DeletedContentCount: 5, + DeletedContentSize: 512, + InUseContentCount: 20, + InUseContentSize: 2048, + InUseSystemContentCount: 1, + InUseSystemContentSize: 128, + UnreferencedRecentContentCount: 30, + UnreferencedRecentContentSize: 3072, + RecoveredContentCount: 40, + RecoveredContentSize: 4096, + }, + }, + } + + for _, tc := range cases { + result, err := BuildFromExtra(tc.stats) + + require.NoError(t, err) + require.Equal(t, tc.expected, result) + } +} + +func TestBuildFromExtraError(t *testing.T) { + cases := []struct { + name string + stats Extra + expectedErr string + 
}{ + { + name: "unsupported kind", + expectedErr: "invalid kind for stats { []}: unsupported stats kind", + }, + { + name: "unmarshal fails", + stats: Extra{ + Kind: cleanupMarkersStatsKind, + }, + expectedErr: "error unmarshaling raw stats [] of kind cleanupMarkersStats to *maintenancestats.CleanupMarkersStats: unexpected end of JSON input", + }, + } + + for _, tc := range cases { + result, err := BuildFromExtra(tc.stats) + + require.EqualError(t, err, tc.expectedErr) + require.Nil(t, result) + } +} diff --git a/repo/maintenancestats/stats_advance_epoch.go b/repo/maintenancestats/stats_advance_epoch.go new file mode 100644 index 00000000000..6f35a7dfe8d --- /dev/null +++ b/repo/maintenancestats/stats_advance_epoch.go @@ -0,0 +1,40 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const advanceEpochStatsKind = "advanceEpochStats" + +// AdvanceEpochStats are the stats for advancing write epoch. +type AdvanceEpochStats struct { + CurrentEpoch int `json:"currentEpoch"` + WasAdvanced bool `json:"wasAdvanced"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (as *AdvanceEpochStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(as.Kind()) + jw.IntField("currentEpoch", as.CurrentEpoch) + jw.BoolField("wasAdvanced", as.WasAdvanced) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (as *AdvanceEpochStats) Summary() string { + var message string + if as.WasAdvanced { + message = fmt.Sprintf("Advanced epoch to %v", as.CurrentEpoch) + } else { + message = fmt.Sprintf("Stayed at epoch %v", as.CurrentEpoch) + } + + return message +} + +// Kind returns the kind name for the stats. 
+func (as *AdvanceEpochStats) Kind() string { + return advanceEpochStatsKind +} diff --git a/repo/maintenancestats/stats_clean_up_log.go b/repo/maintenancestats/stats_clean_up_log.go new file mode 100644 index 00000000000..eca79270bc0 --- /dev/null +++ b/repo/maintenancestats/stats_clean_up_log.go @@ -0,0 +1,41 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const cleanupLogsStatsKind = "cleanupLogsStats" + +// CleanupLogsStats are the stats for cleaning up logs. +type CleanupLogsStats struct { + ToDeleteBlobCount int `json:"toDeleteBlobCount"` + ToDeleteBlobSize int64 `json:"toDeleteBlobSize"` + DeletedBlobCount int `json:"deletedBlobCount"` + DeletedBlobSize int64 `json:"deletedBlobSize"` + RetainedBlobCount int `json:"retainedBlobCount"` + RetainedBlobSize int64 `json:"retainedBlobSize"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (cs *CleanupLogsStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(cs.Kind()) + jw.IntField("toDeleteBlobCount", cs.ToDeleteBlobCount) + jw.Int64Field("toDeleteBlobSize", cs.ToDeleteBlobSize) + jw.IntField("deletedBlobCount", cs.DeletedBlobCount) + jw.Int64Field("deletedBlobSize", cs.DeletedBlobSize) + jw.IntField("retainedBlobCount", cs.RetainedBlobCount) + jw.Int64Field("retainedBlobSize", cs.RetainedBlobSize) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (cs *CleanupLogsStats) Summary() string { + return fmt.Sprintf("Found %v(%v) logs blobs for deletion and deleted %v(%v) of them. Retained %v(%v) log blobs.", cs.ToDeleteBlobCount, cs.ToDeleteBlobSize, cs.DeletedBlobCount, cs.DeletedBlobSize, cs.RetainedBlobCount, cs.RetainedBlobSize) +} + +// Kind returns the kind name for the stats.
+func (cs *CleanupLogsStats) Kind() string { + return cleanupLogsStatsKind +} diff --git a/repo/maintenancestats/stats_cleanup_markers.go b/repo/maintenancestats/stats_cleanup_markers.go new file mode 100644 index 00000000000..f212f719330 --- /dev/null +++ b/repo/maintenancestats/stats_cleanup_markers.go @@ -0,0 +1,33 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const cleanupMarkersStatsKind = "cleanupMarkersStats" + +// CleanupMarkersStats are the stats for cleaning up markers. +type CleanupMarkersStats struct { + DeletedEpochMarkerBlobCount int `json:"deletedEpochMarkerBlobCount"` + DeletedWatermarkBlobCount int `json:"deletedWatermarkBlobCount"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (cs *CleanupMarkersStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(cs.Kind()) + jw.IntField("deletedEpochMarkerBlobCount", cs.DeletedEpochMarkerBlobCount) + jw.IntField("deletedWatermarkBlobCount", cs.DeletedWatermarkBlobCount) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (cs *CleanupMarkersStats) Summary() string { + return fmt.Sprintf("Cleaned up %v epoch markers and %v deletion watermarks", cs.DeletedEpochMarkerBlobCount, cs.DeletedWatermarkBlobCount) +} + +// Kind returns the kind name for the stats. +func (cs *CleanupMarkersStats) Kind() string { + return cleanupMarkersStatsKind +} diff --git a/repo/maintenancestats/stats_cleanup_superseded_indexes.go b/repo/maintenancestats/stats_cleanup_superseded_indexes.go new file mode 100644 index 00000000000..dff8bc788e0 --- /dev/null +++ b/repo/maintenancestats/stats_cleanup_superseded_indexes.go @@ -0,0 +1,36 @@ +package maintenancestats + +import ( + "fmt" + "time" + + "github.com/kopia/kopia/internal/contentlog" +) + +const cleanupSupersededIndexesStatsKind = "cleanupSupersededIndexesStats" + +// CleanupSupersededIndexesStats are the stats for cleaning up superseded indexes. 
+type CleanupSupersededIndexesStats struct { + MaxReplacementTime time.Time `json:"maxReplacementTime"` + DeletedBlobCount int `json:"deletedBlobCount"` + DeletedTotalSize int64 `json:"deletedTotalSize"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (cs *CleanupSupersededIndexesStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(cs.Kind()) + jw.TimeField("maxReplacementTime", cs.MaxReplacementTime) + jw.IntField("deletedBlobCount", cs.DeletedBlobCount) + jw.Int64Field("deletedTotalSize", cs.DeletedTotalSize) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (cs *CleanupSupersededIndexesStats) Summary() string { + return fmt.Sprintf("Cleaned up %v(%v) superseded index blobs, max replacement time %v", cs.DeletedBlobCount, cs.DeletedTotalSize, cs.MaxReplacementTime) +} + +// Kind returns the kind name for the stats. +func (cs *CleanupSupersededIndexesStats) Kind() string { + return cleanupSupersededIndexesStatsKind +} diff --git a/repo/maintenancestats/stats_compact_indexes.go b/repo/maintenancestats/stats_compact_indexes.go new file mode 100644 index 00000000000..4b3b0ce320e --- /dev/null +++ b/repo/maintenancestats/stats_compact_indexes.go @@ -0,0 +1,32 @@ +package maintenancestats + +import ( + "fmt" + "time" + + "github.com/kopia/kopia/internal/contentlog" +) + +const compactIndexesStatsKind = "compactIndexesStats" + +// CompactIndexesStats are the stats for dropping deleted contents. +type CompactIndexesStats struct { + DroppedContentsDeletedBefore time.Time `json:"droppedContentsDeletedBefore"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (cs *CompactIndexesStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(cs.Kind()) + jw.TimeField("droppedContentsDeletedBefore", cs.DroppedContentsDeletedBefore) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. 
+func (cs *CompactIndexesStats) Summary() string { + return fmt.Sprintf("Dropped contents deleted before %v", cs.DroppedContentsDeletedBefore) +} + +// Kind returns the kind name for the stats. +func (cs *CompactIndexesStats) Kind() string { + return compactIndexesStatsKind +} diff --git a/repo/maintenancestats/stats_compact_single_epoch.go b/repo/maintenancestats/stats_compact_single_epoch.go new file mode 100644 index 00000000000..1dfc930648c --- /dev/null +++ b/repo/maintenancestats/stats_compact_single_epoch.go @@ -0,0 +1,35 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const compactSingleEpochStatsKind = "compactSingleEpochStats" + +// CompactSingleEpochStats are the stats for compacting an index epoch. +type CompactSingleEpochStats struct { + SupersededIndexBlobCount int `json:"supersededIndexBlobCount"` + SupersededIndexTotalSize int64 `json:"supersededIndexTotalSize"` + Epoch int `json:"epoch"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (cs *CompactSingleEpochStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(cs.Kind()) + jw.IntField("supersededIndexBlobCount", cs.SupersededIndexBlobCount) + jw.Int64Field("supersededIndexTotalSize", cs.SupersededIndexTotalSize) + jw.IntField("epoch", cs.Epoch) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (cs *CompactSingleEpochStats) Summary() string { + return fmt.Sprintf("Compacted %v(%v) index blobs for epoch %v", cs.SupersededIndexBlobCount, cs.SupersededIndexTotalSize, cs.Epoch) +} + +// Kind returns the kind name for the stats. 
+func (cs *CompactSingleEpochStats) Kind() string { + return compactSingleEpochStatsKind +} diff --git a/repo/maintenancestats/stats_delete_unreferenced_packs.go b/repo/maintenancestats/stats_delete_unreferenced_packs.go new file mode 100644 index 00000000000..d20a1a2f061 --- /dev/null +++ b/repo/maintenancestats/stats_delete_unreferenced_packs.go @@ -0,0 +1,42 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const deleteUnreferencedPacksStatsKind = "deleteUnreferencedPacksStats" + +// DeleteUnreferencedPacksStats are the stats for deleting unreferenced packs. +type DeleteUnreferencedPacksStats struct { + UnreferencedPackCount uint32 `json:"unreferencedPackCount"` + UnreferencedTotalSize int64 `json:"unreferencedTotalSize"` + DeletedPackCount uint32 `json:"deletedPackCount"` + DeletedTotalSize int64 `json:"deletedTotalSize"` + RetainedPackCount uint32 `json:"retainedPackCount"` + RetainedTotalSize int64 `json:"retainedTotalSize"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (ds *DeleteUnreferencedPacksStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(ds.Kind()) + jw.UInt32Field("unreferencedPackCount", ds.UnreferencedPackCount) + jw.Int64Field("unreferencedTotalSize", ds.UnreferencedTotalSize) + jw.UInt32Field("deletedPackCount", ds.DeletedPackCount) + jw.Int64Field("deletedTotalSize", ds.DeletedTotalSize) + jw.UInt32Field("retainedPackCount", ds.RetainedPackCount) + jw.Int64Field("retainedTotalSize", ds.RetainedTotalSize) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (ds *DeleteUnreferencedPacksStats) Summary() string { + return fmt.Sprintf("Found %v(%v) unreferenced pack blobs to delete and deleted %v(%v). 
Retained %v(%v) unreferenced pack blobs.", + ds.UnreferencedPackCount, ds.UnreferencedTotalSize, ds.DeletedPackCount, ds.DeletedTotalSize, ds.RetainedPackCount, ds.RetainedTotalSize) +} + +// Kind returns the kind name for the stats. +func (ds *DeleteUnreferencedPacksStats) Kind() string { + return deleteUnreferencedPacksStatsKind +} diff --git a/repo/maintenancestats/stats_extend_blob_retention.go b/repo/maintenancestats/stats_extend_blob_retention.go new file mode 100644 index 00000000000..0663aa94f19 --- /dev/null +++ b/repo/maintenancestats/stats_extend_blob_retention.go @@ -0,0 +1,35 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const extendBlobRetentionStatsKind = "extendBlobRetentionStats" + +// ExtendBlobRetentionStats are the stats for extending blob retention time. +type ExtendBlobRetentionStats struct { + ToExtendBlobCount uint32 `json:"toExtendBlobCount"` + ExtendedBlobCount uint32 `json:"extendedBlobCount"` + RetentionPeriod string `json:"retentionPeriod"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (es *ExtendBlobRetentionStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(es.Kind()) + jw.UInt32Field("toExtendBlobCount", es.ToExtendBlobCount) + jw.UInt32Field("extendedBlobCount", es.ExtendedBlobCount) + jw.StringField("retentionPeriod", es.RetentionPeriod) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (es *ExtendBlobRetentionStats) Summary() string { + return fmt.Sprintf("Blob retention extension found %v blobs and extended for %v blobs, retention period %v", es.ToExtendBlobCount, es.ExtendedBlobCount, es.RetentionPeriod) +} + +// Kind returns the kind name for the stats. 
+func (es *ExtendBlobRetentionStats) Kind() string { + return extendBlobRetentionStatsKind +} diff --git a/repo/maintenancestats/stats_generate_range_checkpoint.go b/repo/maintenancestats/stats_generate_range_checkpoint.go new file mode 100644 index 00000000000..d8b6c7fa890 --- /dev/null +++ b/repo/maintenancestats/stats_generate_range_checkpoint.go @@ -0,0 +1,33 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const generateRangeCheckpointStatsKind = "generateRangeCheckpointStats" + +// GenerateRangeCheckpointStats are the stats for generating range checkpoints. +type GenerateRangeCheckpointStats struct { + RangeMinEpoch int `json:"rangeMinEpoch"` + RangeMaxEpoch int `json:"rangeMaxEpoch"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (gs *GenerateRangeCheckpointStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(gs.Kind()) + jw.IntField("rangeMinEpoch", gs.RangeMinEpoch) + jw.IntField("rangeMaxEpoch", gs.RangeMaxEpoch) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (gs *GenerateRangeCheckpointStats) Summary() string { + return fmt.Sprintf("Generated a range checkpoint from epoch %v to %v inclusive", gs.RangeMinEpoch, gs.RangeMaxEpoch) +} + +// Kind returns the kind name for the stats. +func (gs *GenerateRangeCheckpointStats) Kind() string { + return generateRangeCheckpointStatsKind +} diff --git a/repo/maintenancestats/stats_rewrite_contents.go b/repo/maintenancestats/stats_rewrite_contents.go new file mode 100644 index 00000000000..f86681b6c86 --- /dev/null +++ b/repo/maintenancestats/stats_rewrite_contents.go @@ -0,0 +1,42 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const rewriteContentsStatsKind = "rewriteContentsStats" + +// RewriteContentsStats are the stats for rewriting contents. 
+type RewriteContentsStats struct { + ToRewriteContentCount int `json:"toRewriteContentCount"` + ToRewriteContentSize int64 `json:"toRewriteContentSize"` + RewrittenContentCount int `json:"rewrittenContentCount"` + RewrittenContentSize int64 `json:"rewrittenContentSize"` + RetainedContentCount int `json:"retainedContentCount"` + RetainedContentSize int64 `json:"retainedContentSize"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (rs *RewriteContentsStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(rs.Kind()) + jw.IntField("toRewriteContentCount", rs.ToRewriteContentCount) + jw.Int64Field("toRewriteContentSize", rs.ToRewriteContentSize) + jw.IntField("rewrittenContentCount", rs.RewrittenContentCount) + jw.Int64Field("rewrittenContentSize", rs.RewrittenContentSize) + jw.IntField("retainedContentCount", rs.RetainedContentCount) + jw.Int64Field("retainedContentSize", rs.RetainedContentSize) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (rs *RewriteContentsStats) Summary() string { + return fmt.Sprintf("Found %v(%v) contents to rewrite and rewrote %v(%v). Retained %v(%v) contents from rewrite", + rs.ToRewriteContentCount, rs.ToRewriteContentSize, rs.RewrittenContentCount, rs.RewrittenContentSize, rs.RetainedContentCount, rs.RetainedContentSize) +} + +// Kind returns the kind name for the stats. +func (rs *RewriteContentsStats) Kind() string { + return rewriteContentsStatsKind +} diff --git a/repo/maintenancestats/stats_snapshot_gc.go b/repo/maintenancestats/stats_snapshot_gc.go new file mode 100644 index 00000000000..47ccb169442 --- /dev/null +++ b/repo/maintenancestats/stats_snapshot_gc.go @@ -0,0 +1,55 @@ +package maintenancestats + +import ( + "fmt" + + "github.com/kopia/kopia/internal/contentlog" +) + +const snapshotGCStatsKind = "snapshotGCStats" + +// SnapshotGCStats are the stats for snapshot GC. 
+type SnapshotGCStats struct { + UnreferencedContentCount uint32 `json:"unreferencedContentCount"` + UnreferencedContentSize int64 `json:"unreferencedContentSize"` + DeletedContentCount uint32 `json:"deletedContentCount"` + DeletedContentSize int64 `json:"deletedContentSize"` + UnreferencedRecentContentCount uint32 `json:"unreferencedRecentContentCount"` + UnreferencedRecentContentSize int64 `json:"unreferencedRecentContentSize"` + InUseContentCount uint32 `json:"inUseContentCount"` + InUseContentSize int64 `json:"inUseContentSize"` + InUseSystemContentCount uint32 `json:"inUseSystemContentCount"` + InUseSystemContentSize int64 `json:"inUseSystemContentSize"` + RecoveredContentCount uint32 `json:"recoveredContentCount"` + RecoveredContentSize int64 `json:"recoveredContentSize"` +} + +// WriteValueTo writes the stats to JSONWriter. +func (ss *SnapshotGCStats) WriteValueTo(jw *contentlog.JSONWriter) { + jw.BeginObjectField(ss.Kind()) + jw.UInt32Field("unreferencedContentCount", ss.UnreferencedContentCount) + jw.Int64Field("unreferencedContentSize", ss.UnreferencedContentSize) + jw.UInt32Field("deletedContentCount", ss.DeletedContentCount) + jw.Int64Field("deletedContentSize", ss.DeletedContentSize) + jw.UInt32Field("unreferencedRecentContentCount", ss.UnreferencedRecentContentCount) + jw.Int64Field("unreferencedRecentContentSize", ss.UnreferencedRecentContentSize) + jw.UInt32Field("inUseContentCount", ss.InUseContentCount) + jw.Int64Field("inUseContentSize", ss.InUseContentSize) + jw.UInt32Field("inUseSystemContentCount", ss.InUseSystemContentCount) + jw.Int64Field("inUseSystemContentSize", ss.InUseSystemContentSize) + jw.UInt32Field("recoveredContentCount", ss.RecoveredContentCount) + jw.Int64Field("recoveredContentSize", ss.RecoveredContentSize) + jw.EndObject() +} + +// Summary generates a human readable summary for the stats. +func (ss *SnapshotGCStats) Summary() string { + return fmt.Sprintf("Found %v(%v) unreferenced contents and marked %v(%v) for deletion. 
Found %v(%v) in-use contents and %v(%v) in-use system contents. Retained %v(%v) recent contents. Recovered %v(%v) contents", + ss.UnreferencedContentCount, ss.UnreferencedContentSize, ss.DeletedContentCount, ss.DeletedContentSize, ss.InUseContentCount, ss.InUseContentSize, + ss.InUseSystemContentCount, ss.InUseSystemContentSize, ss.UnreferencedRecentContentCount, ss.UnreferencedRecentContentSize, ss.RecoveredContentCount, ss.RecoveredContentSize) +} + +// Kind returns the kind name for the stats. +func (ss *SnapshotGCStats) Kind() string { + return snapshotGCStatsKind +} diff --git a/repo/manifest/committed_manifest_manager.go b/repo/manifest/committed_manifest_manager.go index b8eab50dfce..662de6a2cfa 100644 --- a/repo/manifest/committed_manifest_manager.go +++ b/repo/manifest/committed_manifest_manager.go @@ -164,7 +164,9 @@ func (m *committedManifestManager) loadCommittedContentsLocked(ctx context.Conte } mu.Lock() + manifests[ci.ContentID] = man + mu.Unlock() return nil diff --git a/repo/manifest/manifest_manager_test.go b/repo/manifest/manifest_manager_test.go index 4bd55f638f5..69dbac11403 100644 --- a/repo/manifest/manifest_manager_test.go +++ b/repo/manifest/manifest_manager_test.go @@ -5,7 +5,7 @@ import ( "encoding/json" "fmt" "reflect" - "sort" + "slices" "strings" "testing" "time" @@ -208,7 +208,9 @@ func TestManifestInitCorruptedBlock(t *testing.T) { }{ {"GetRaw", func() error { var raw json.RawMessage + _, err := mgr.Get(ctx, "anything", &raw) + return err }}, {"GetMetadata", func() error { _, err := mgr.GetMetadata(ctx, "anything"); return err }}, @@ -302,9 +304,7 @@ func verifyMatches(ctx context.Context, t *testing.T, mgr *Manager, labels map[s } func sortIDs(s []ID) { - sort.Slice(s, func(i, j int) bool { - return s[i] < s[j] - }) + slices.Sort(s) } type contentManagerOpts struct { @@ -498,7 +498,7 @@ func BenchmarkLargeCompaction(b *testing.B) { for _, numItems := range table { b.Run(fmt.Sprintf("%dItems", numItems), func(b *testing.B) { - for 
range b.N { + for b.Loop() { b.StopTimer() // Use default context to avoid lots of log output during benchmark. ctx := context.Background() diff --git a/repo/manifest/serialized_test.go b/repo/manifest/serialized_test.go index 5f5971acaeb..a82ed042c23 100644 --- a/repo/manifest/serialized_test.go +++ b/repo/manifest/serialized_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "reflect" + "slices" "strings" "testing" "time" @@ -37,10 +38,8 @@ func checkPopulated( ) } - for _, typ := range ignoreTypeSubfields { - if typ == v.Type() { - return - } + if slices.Contains(ignoreTypeSubfields, v.Type()) { + return } switch v.Kind() { diff --git a/repo/object/indirect.go b/repo/object/indirect.go index 252e6c1811a..48e499ab708 100644 --- a/repo/object/indirect.go +++ b/repo/object/indirect.go @@ -4,7 +4,7 @@ package object type IndirectObjectEntry struct { Start int64 `json:"s,omitempty"` Length int64 `json:"l,omitempty"` - Object ID `json:"o,omitempty"` + Object ID `json:"o"` } func (i *IndirectObjectEntry) endOffset() int64 { diff --git a/repo/object/object_manager_test.go b/repo/object/object_manager_test.go index 414b790fe11..4913b315dbf 100644 --- a/repo/object/object_manager_test.go +++ b/repo/object/object_manager_test.go @@ -11,6 +11,7 @@ import ( "math/rand" "runtime" "runtime/debug" + "slices" "sync" "testing" @@ -393,10 +394,8 @@ func TestObjectWriterRaceBetweenCheckpointAndResult(t *testing.T) { return errors.Wrapf(err, "Checkpoint() returned invalid object %v", cpID) } - for _, id := range ids { - if id == content.EmptyID { - return errors.New("checkpoint returned empty id") - } + if slices.Contains(ids, content.EmptyID) { + return errors.New("checkpoint returned empty id") } } diff --git a/repo/object/object_writer.go b/repo/object/object_writer.go index 1d60009fce5..52f6dc879e8 100644 --- a/repo/object/object_writer.go +++ b/repo/object/object_writer.go @@ -159,6 +159,7 @@ func (w *objectWriter) flushBufferLocked() error { // acquire write semaphore 
w.asyncWritesSemaphore <- struct{}{} + w.asyncWritesWG.Add(1) asyncBuf := gather.NewWriteBuffer() diff --git a/repo/open.go b/repo/open.go index 00f272068b5..260f6dc3143 100644 --- a/repo/open.go +++ b/repo/open.go @@ -3,6 +3,7 @@ package repo import ( "context" "encoding/json" + "io" "os" "path/filepath" "strings" @@ -14,6 +15,8 @@ import ( "github.com/kopia/kopia/internal/cache" "github.com/kopia/kopia/internal/cacheprot" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/crypto" "github.com/kopia/kopia/internal/feature" "github.com/kopia/kopia/internal/metrics" @@ -60,18 +63,19 @@ const throttleBucketInitialFill = 0.1 const localCacheIntegrityHMACSecretLength = 16 //nolint:gochecknoglobals -var localCacheIntegrityPurpose = []byte("local-cache-integrity") +const localCacheIntegrityPurpose = "local-cache-integrity" var log = logging.Module("kopia/repo") // Options provides configuration parameters for connection to a repository. type Options struct { - TraceStorage bool // Logs all storage access using provided Printf-style function - TimeNowFunc func() time.Time // Time provider - DisableInternalLog bool // Disable internal log - UpgradeOwnerID string // Owner-ID of any upgrade in progress, when this is not set the access may be restricted - DoNotWaitForUpgrade bool // Disable the exponential forever backoff on an upgrade lock. 
- BeforeFlush []RepositoryWriterCallback // list of callbacks to invoke before every flush + TraceStorage bool // Logs all storage access using provided Printf-style function + ContentLogWriter io.Writer // Writer to which the content log is also written + TimeNowFunc func() time.Time // Time provider + DisableRepositoryLog bool // Disable repository log manager + UpgradeOwnerID string // Owner-ID of any upgrade in progress, when this is not set the access may be restricted + DoNotWaitForUpgrade bool // Disable the exponential forever backoff on an upgrade lock. + BeforeFlush []RepositoryWriterCallback // list of callbacks to invoke before every flush OnFatalError func(err error) // function to invoke when repository encounters a fatal error, usually invokes os.Exit @@ -217,10 +221,6 @@ func openDirect(ctx context.Context, configFile string, lc *LocalConfig, passwor return nil, errors.Wrap(err, "cannot open storage") } - if options.TraceStorage { - st = loggingwrapper.NewWrapper(st, log(ctx), "[STORAGE] ") - } - if lc.ReadOnly { st = readonly.NewWrapper(st) } @@ -243,7 +243,6 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions, cacheOpts = cacheOpts.CloneOrDefault() cmOpts := &content.ManagerOptions{ TimeNow: defaultTime(options.TimeNowFunc), - DisableInternalLog: options.DisableInternalLog, PermissiveCacheLoading: cliOpts.PermissiveCacheLoading, } @@ -323,7 +322,14 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions, } dw := repodiag.NewWriter(st, fmgr) - logManager := repodiag.NewLogManager(ctx, dw) + + logManager := repodiag.NewLogManager(ctx, dw, options.DisableRepositoryLog, options.ContentLogWriter, + logparam.String("span:client", contentlog.HashSpanID(cliOpts.UsernameAtHost())), + logparam.String("span:repo", contentlog.RandomSpanID())) + + if options.TraceStorage { + st = loggingwrapper.NewWrapper(st, log(ctx), logManager.NewLogger("storage"), "[STORAGE] ") + } scm, ferr := 
content.NewSharedManager(ctx, st, fmgr, cacheOpts, cmOpts, logManager, mr) if ferr != nil { @@ -369,6 +375,7 @@ func openWithConfig(ctx context.Context, st blob.Storage, cliOpts ClientOptions, metricsRegistry: mr, refCountedCloser: closer, beforeFlush: options.BeforeFlush, + logManager: logManager, }, } @@ -419,7 +426,7 @@ func wrapLockingStorage(st blob.Storage, r format.BlobStorageConfiguration) blob return beforeop.NewWrapper(st, nil, nil, nil, func(_ context.Context, id blob.ID, opts *blob.PutOptions) error { for _, prefix := range prefixes { - if strings.HasPrefix(string(id), prefix) { + if strings.HasPrefix(string(id), string(prefix)) { opts.RetentionMode = r.RetentionMode opts.RetentionPeriod = r.RetentionPeriod @@ -460,6 +467,7 @@ func upgradeLockMonitor( m.RUnlock() return nil } + m.RUnlock() // upgrade the lock and verify again in-case someone else won the race to refresh diff --git a/repo/repo_benchmarks_test.go b/repo/repo_benchmarks_test.go index abfa90b2a8f..f2e18aa3044 100644 --- a/repo/repo_benchmarks_test.go +++ b/repo/repo_benchmarks_test.go @@ -21,9 +21,7 @@ func BenchmarkWriterDedup1M(b *testing.B) { require.NoError(b, err) writer.Close() - b.ResetTimer() - - for range b.N { + for b.Loop() { // write exactly the same data writer := env.RepositoryWriter.NewObjectWriter(ctx, object.WriterOptions{MetadataCompressor: "zstd-fastest"}) writer.Write(dataBuf) diff --git a/repo/repository.go b/repo/repository.go index e19054808a9..07408ddf5eb 100644 --- a/repo/repository.go +++ b/repo/repository.go @@ -13,6 +13,7 @@ import ( "github.com/kopia/kopia/internal/crypto" "github.com/kopia/kopia/internal/grpcapi" "github.com/kopia/kopia/internal/metrics" + "github.com/kopia/kopia/internal/repodiag" "github.com/kopia/kopia/repo/blob" "github.com/kopia/kopia/repo/blob/throttling" "github.com/kopia/kopia/repo/compression" @@ -81,13 +82,13 @@ type DirectRepository interface { ContentReader() content.Reader IndexBlobs(ctx context.Context, includeInactive bool) 
([]indexblob.Metadata, error) NewDirectWriter(ctx context.Context, opt WriteSessionOptions) (context.Context, DirectRepositoryWriter, error) - AlsoLogToContentLog(ctx context.Context) context.Context UniqueID() []byte ConfigFilename() string - DeriveKey(purpose []byte, keyLength int) ([]byte, error) + DeriveKey(purpose string, keyLength int) ([]byte, error) Token(password string) (string, error) Throttler() throttling.SettableThrottler DisableIndexRefresh() + LogManager() *repodiag.LogManager } // DirectRepositoryWriter provides low-level write access to the repository. @@ -108,6 +109,7 @@ type immutableDirectRepositoryParameters struct { throttler throttling.SettableThrottler metricsRegistry *metrics.Registry beforeFlush []RepositoryWriterCallback + logManager *repodiag.LogManager *refCountedCloser } @@ -139,7 +141,7 @@ type directRepository struct { } // DeriveKey derives encryption key of the provided length from the master key. -func (r *directRepository) DeriveKey(purpose []byte, keyLength int) (derivedKey []byte, err error) { +func (r *directRepository) DeriveKey(purpose string, keyLength int) (derivedKey []byte, err error) { if r.cmgr.ContentFormat().SupportsPasswordChange() { derivedKey, err = crypto.DeriveKeyFromMasterKey(r.cmgr.ContentFormat().GetMasterKey(), r.UniqueID(), purpose, keyLength) if err != nil { @@ -206,6 +208,11 @@ func (r *directRepository) DisableIndexRefresh() { r.cmgr.DisableIndexRefresh() } +// LogManager returns the log manager. +func (r *directRepository) LogManager() *repodiag.LogManager { + return r.logManager +} + // OpenObject opens the reader for a given object, returns object.ErrNotFound. func (r *directRepository) OpenObject(ctx context.Context, id object.ID) (object.Reader, error) { //nolint:wrapcheck @@ -275,11 +282,6 @@ func (r *directRepository) UpdateDescription(d string) { r.cliOpts.Description = d } -// AlsoLogToContentLog returns a context that causes all logs to also be sent to content log. 
-func (r *directRepository) AlsoLogToContentLog(ctx context.Context) context.Context { - return r.sm.AlsoLogToContentLog(ctx) -} - // NewWriter returns new RepositoryWriter session for repository. func (r *directRepository) NewWriter(ctx context.Context, opt WriteSessionOptions) (context.Context, RepositoryWriter, error) { return r.NewDirectWriter(ctx, opt) diff --git a/repo/repository_test.go b/repo/repository_test.go index f3218cc8b84..43aa56294f7 100644 --- a/repo/repository_test.go +++ b/repo/repository_test.go @@ -133,7 +133,7 @@ func (s *formatSpecificTestSuite) TestPackingSimple(t *testing.T) { verify(ctx, t, env.RepositoryWriter, oid2a, []byte(content2), "packed-object-2") verify(ctx, t, env.RepositoryWriter, oid3a, []byte(content3), "packed-object-3") - if err := env.RepositoryWriter.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { + if _, err := env.RepositoryWriter.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { t.Errorf("optimize error: %v", err) } @@ -143,7 +143,7 @@ func (s *formatSpecificTestSuite) TestPackingSimple(t *testing.T) { verify(ctx, t, env.RepositoryWriter, oid2a, []byte(content2), "packed-object-2") verify(ctx, t, env.RepositoryWriter, oid3a, []byte(content3), "packed-object-3") - if err := env.RepositoryWriter.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { + if _, err := env.RepositoryWriter.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}); err != nil { t.Errorf("optimize error: %v", err) } @@ -550,6 +550,7 @@ func TestInitializeWithNoRetention(t *testing.T) { // are not supplied. 
var b gather.WriteBuffer defer b.Close() + require.NoError(t, env.RepositoryWriter.BlobStorage().GetBlob(ctx, format.KopiaBlobCfgBlobID, 0, -1, &b)) } @@ -611,6 +612,7 @@ func TestWriteSessionFlushOnSuccess(t *testing.T) { afterFlushCount.Add(1) return nil }) + return nil }) }, @@ -672,6 +674,7 @@ func TestWriteSessionFlushOnSuccessClient(t *testing.T) { afterFlushCount.Add(1) return nil }) + return nil }, }, @@ -854,7 +857,8 @@ func TestAllRegistryMetricsAreMapped(t *testing.T) { } func TestDeriveKey(t *testing.T) { - testPurpose := []byte{0, 0, 0, 0} + const testPurpose = "test purpose" + testKeyLength := 8 masterKey := []byte("01234567890123456789012345678901") uniqueID := []byte("a5ba5d2da4b14b518b9501b64b5d87ca") diff --git a/repo/splitter/splitter_buzhash32.go b/repo/splitter/splitter_buzhash32.go index c6fe666eac5..3304375c9be 100644 --- a/repo/splitter/splitter_buzhash32.go +++ b/repo/splitter/splitter_buzhash32.go @@ -28,17 +28,8 @@ func (rs *buzhash32Splitter) NextSplitPoint(b []byte) int { // until minSize, only hash the last splitterSlidingWindowSize bytes if left := rs.minSize - rs.count - 1; left > 0 { - fastPathBytes = left - if fastPathBytes > len(b) { - fastPathBytes = len(b) - } - - var i int - - i = fastPathBytes - splitterSlidingWindowSize - if i < 0 { - i = 0 - } + fastPathBytes = min(left, len(b)) + i := max(fastPathBytes-splitterSlidingWindowSize, 0) for ; i < fastPathBytes; i++ { rs.rh.Roll(b[i]) @@ -50,10 +41,7 @@ func (rs *buzhash32Splitter) NextSplitPoint(b []byte) int { // until the max size, check if we have any splitting point if left := rs.maxSize - rs.count; left > 0 { - fp := left - if fp >= len(b) { - fp = len(b) - } + fp := min(left, len(b)) for i, b := range b[0:fp] { rs.rh.Roll(b) diff --git a/repo/splitter/splitter_rabinkarp64.go b/repo/splitter/splitter_rabinkarp64.go index 31899df50eb..9553d37f009 100644 --- a/repo/splitter/splitter_rabinkarp64.go +++ b/repo/splitter/splitter_rabinkarp64.go @@ -28,17 +28,8 @@ func (rs 
*rabinKarp64Splitter) NextSplitPoint(b []byte) int { // until minSize, only hash the last splitterSlidingWindowSize bytes if left := rs.minSize - rs.count - 1; left > 0 { - fastPathBytes = left - if fastPathBytes > len(b) { - fastPathBytes = len(b) - } - - var i int - - i = fastPathBytes - splitterSlidingWindowSize - if i < 0 { - i = 0 - } + fastPathBytes = min(left, len(b)) + i := max(fastPathBytes-splitterSlidingWindowSize, 0) for ; i < fastPathBytes; i++ { rs.rh.Roll(b[i]) @@ -50,10 +41,7 @@ func (rs *rabinKarp64Splitter) NextSplitPoint(b []byte) int { // until the max size, check if we have any splitting point if left := rs.maxSize - rs.count; left > 0 { - fp := left - if fp >= len(b) { - fp = len(b) - } + fp := min(left, len(b)) for i, b := range b[0:fp] { rs.rh.Roll(b) diff --git a/site/.go-version b/site/.go-version index 4ceb49d6153..fc4bf71be76 100644 --- a/site/.go-version +++ b/site/.go-version @@ -1 +1 @@ -1.23.x +1.25.x diff --git a/site/content/docs/Advanced/Logging/_index.md b/site/content/docs/Advanced/Logging/_index.md index 5cece55c564..9824b54cd9d 100644 --- a/site/content/docs/Advanced/Logging/_index.md +++ b/site/content/docs/Advanced/Logging/_index.md @@ -54,3 +54,7 @@ You can control how much data is written to console and log files by using flags By default, console output will be colored to indicate different log levels, this can be disabled (useful when redirecting output to a file) with `--disable-color`. To force color colorized output when redirecting to a file use `--force-color`. +### Progress Output + +Kopia displays progress information during operations such as snapshots and synchronization. This output can be controlled separately from log levels using `--progress` (default) or `--no-progress` flags. This is particularly useful when running Kopia in scripts or scheduled tasks where clean output is preferred. 
+ diff --git a/site/content/docs/Advanced/Synchronization/_index.md b/site/content/docs/Advanced/Synchronization/_index.md index 060ee7efc17..3f84a3cf7b7 100644 --- a/site/content/docs/Advanced/Synchronization/_index.md +++ b/site/content/docs/Advanced/Synchronization/_index.md @@ -30,3 +30,7 @@ When synchronizing to a filesystem location, it is important to check that the f ``` $ kopia repository sync-to filesystem --path /dest/repository --must-exist ``` + +### Automation + +For automated synchronization tasks, progress output can be suppressed using the `--no-progress` flag to provide clean output suitable for cron jobs and scripts. diff --git a/site/content/docs/Contribution guidelines/_index.md b/site/content/docs/Contribution guidelines/_index.md index fcfb3782510..85dbe80aaff 100644 --- a/site/content/docs/Contribution guidelines/_index.md +++ b/site/content/docs/Contribution guidelines/_index.md @@ -6,9 +6,10 @@ toc_hide: true > NOTE: Those guidelines are preliminary and will change as the project grows and expands in scope. -## Contacting Developers +## Code Documentation -* Using [Slack](https://slack.kopia.io) is the quickest way to get in touch with developers. +Kopia is primarily written using [The Go Programming Language](https://go.dev/doc/). +The source code documentation can be found on [godoc.org](https://godoc.org/github.com/kopia/kopia/repo). ## Submitting issues @@ -35,3 +36,7 @@ toc_hide: true - `` is a clear description of a PR. - Follow the pattern precisely, as the title-checker cares about capitalization parentheses, and spaces. - For example: `feat(cli): Add new policy rule --new-feature-x to enable using feature x`. + +## Contacting Community + +* Use [Slack](https://slack.kopia.io) to reach out to other developers. 
diff --git a/site/content/docs/FAQs/_index.md b/site/content/docs/FAQs/_index.md index 0a7d33f2eaa..fbf8599a6d3 100644 --- a/site/content/docs/FAQs/_index.md +++ b/site/content/docs/FAQs/_index.md @@ -32,7 +32,7 @@ A `snapshot` is a [point-in-time backup](../features#backup-files-and-directorie #### What is a Repository? -A `repository` is the storage location where your snapshots are saved; Kopia supports [cloud/remote, network, and local storage locations](../features#save-snapshots-to-cloud-network-or-local-storage) and all repositories are [encrypted](../features/#end-to-end-zero-knowledge-encryption) with a password that you designate. +A `repository` is the storage location where your snapshots are saved; Kopia supports [cloud/remote, network, and local storage locations](../features#save-snapshots-to-cloud-network-or-local-storage) and all repositories are [encrypted](../features/#user-controlled-end-to-end-encryption) with a password that you designate. See the [repository help docs](../repositories) for more information. @@ -95,7 +95,7 @@ You must use Kopia CLI if you want to change your `repository` password; changin Before changing your password, you must be [connected to your `repository`](../getting-started/#connecting-to-repository). This means that you **can** reset your password if you forget your password AND you are still connected to your `repository`. But this also means that you **cannot** reset your password if you forget your password and you are NOT still connected to your `repository`, because you will need your current password to connect to the `repository`. -Remember to select a secure _repository password_. The password is used to [decrypt](../features/#end-to-end-zero-knowledge-encryption) and access the data in your snapshots. +Remember to select a secure _repository password_. The password is used to [decrypt](../features/#user-controlled-end-to-end-encryption) and access the data in your snapshots. 
#### Does Kopia Support Storage Classes, Like Amazon Glacier? diff --git a/site/content/docs/Features/_index.md b/site/content/docs/Features/_index.md index 91fdbeb0b3a..97c5412863d 100644 --- a/site/content/docs/Features/_index.md +++ b/site/content/docs/Features/_index.md @@ -8,7 +8,7 @@ weight: 10 * [Policies Control What and How Files/Directories are Saved in Snapshots](#policies-control-what-and-how-filesdirectories-are-saved-in-snapshots) * [Save Snapshots to Cloud, Network, or Local Storage](#save-snapshots-to-cloud-network-or-local-storage) * [Restore Snapshots Using Multiple Methods](#restore-snapshots-using-multiple-methods) -* [End-to-End 'Zero Knowledge' Encryption](#end-to-end-zero-knowledge-encryption) +* [User-controlled End-to-End Encryption](#user-controlled-end-to-end-encryption) * [Compression](#compression) * [Error Correction](#error-correction) * [Verifying Backup Validity and Consistency](#verifying-backup-validity-and-consistency) @@ -21,7 +21,7 @@ weight: 10 ### Backup Files and Directories Using Snapshots -Kopia creates snapshots of the files and directories you designate, then [encrypts](#end-to-end-zero-knowledge-encryption) these snapshots before they leave your computer, and finally uploads these encrypted snapshots to cloud/network/local storage called a [repository](../repositories/). Snapshots are maintained as a set of historical point-in-time records based on [policies](#policies-control-what-and-how-filesdirectories-are-saved-in-snapshots) that you define. +Kopia creates snapshots of the files and directories you designate, then [encrypts](#user-controlled-end-to-end-encryption) these snapshots before they leave your computer, and finally uploads these encrypted snapshots to cloud/network/local storage called a [repository](../repositories/). Snapshots are maintained as a set of historical point-in-time records based on [policies](#policies-control-what-and-how-filesdirectories-are-saved-in-snapshots) that you define. 
Kopia uses [content-addressable storage](https://en.wikipedia.org/wiki/Content-addressable%20storage) for snapshots, which has many benefits: @@ -76,29 +76,48 @@ With Kopia you’re in full control of where to store your snapshots; you pick t To restore data, Kopia gives you three options: -* mount the contents of a snapshot as a local disk so that you can browse and copy files/directories from the snapshot as if the snapshot is a local directory on your machine - -* restore all files/directories contained in a snapshot to any local or network location that you designate - -* selectively restore individual files from a snapshot - -### End-to-End 'Zero Knowledge' Encryption - -All data is encrypted before it leaves your machine. Encryption is baked into the DNA of Kopia, and you cannot create a backup without using encryption. Kopia allows you to pick from two state-of-the-art encryption algorithms, [AES-256](https://en.wikipedia.org/wiki/AES256) and [ChaCha20](https://en.wikipedia.org/wiki/ChaCha20). - -Kopia encrypts both the content and the names of your backed up files/directories. - -The data is encrypted using per-content keys which are derived from the 256-bit master key that is stored in the repository. The master key is encrypted with a password you provide. This means that anyone that does not know the password cannot access your backed up files and will not know what files/directories are contained in the snapshots that are saved in the repository. Importantly, the password you provide is never sent to any server or anywhere outside your machine, and only you know your password. In other words, Kopia provides your backups with end-to-end 'zero knowledge' encryption. However, this also means that you cannot restore your files if you forget your password: there is no way to recover a forgotten password because only you know it. 
(But you can [change your password](../faqs/#how-do-i-change-my-repository-password) if you are still connected to the repository that stores your snapshots.) +- mount the contents of a snapshot as a local disk so that you can browse and copy files/directories from the snapshot as if the snapshot is a local directory on your machine +- restore all files/directories contained in a snapshot to any local or network location that you designate +- selectively restore individual files from a snapshot + +### User-Controlled End-to-End Encryption + +Kopia uses end-to-end encryption, with user-controlled encryption keys,[^1] to +encrypt all data before it leaves your machine; this includes the content and the +names of the backed up files/directories. Kopia does not allow creating +unencrypted backups. + +[^1]: This encryption approach is often imprecisely named 'Zero-knowledge +encryption' in marketing materials. The name can be easily confused with the +term ['Zero-knowledge Proof'](https://en.wikipedia.org/wiki/Zero-knowledge_proof), +which refers to a completely different concept. + +When creating or accessing a backup repository, you provide the repository +password, which is not sent to any server or anywhere outside your machine, and +only you know the password. +Instead, the repository password is used to encrypt and decrypt the repository's +primary encryption key. The primary key is securely generated at repository +creation time in the computer that is creating the repository, such as your laptop. +This means that the repository password is required to read any data stored in +that repository. Without the password, you will not be able to read the contents +of files stored in backups, nor see what files/directories are contained in +the backups, nor list what backups are in the repository. It also means that +you cannot restore your files if you forget your password; there is no way to +recover a forgotten password because only you know it. 
You can + [change your password](../faqs/#how-do-i-change-my-repository-password) if +you are still connected to the repository. + +You can pick from two standard encryption algorithms, +[AES-256](https://en.wikipedia.org/wiki/AES256) and +[ChaCha20](https://en.wikipedia.org/wiki/ChaCha20), for encrypting your backups. ### Compression Kopia can [compress your data](../advanced/compression/) to save storage and bandwidth. Several compression methods are supported, including: -* [pgzip](https://github.com/klauspost/pgzip) - -* [s2](https://github.com/klauspost/compress/tree/master/s2) - -* [zstd](https://github.com/klauspost/compress/tree/master/zstd) +- [pgzip](https://github.com/klauspost/pgzip) +- [s2](https://github.com/klauspost/compress/tree/master/s2) +- [zstd](https://github.com/klauspost/compress/tree/master/zstd) ### Error Correction diff --git a/site/content/docs/Getting started/_index.md b/site/content/docs/Getting started/_index.md index 3368bb5bac0..a25f5374043 100755 --- a/site/content/docs/Getting started/_index.md +++ b/site/content/docs/Getting started/_index.md @@ -7,7 +7,7 @@ weight: 15 This guide will walk you through installing Kopia and setting up Kopia to backup/restore your data. Make sure to familiarize yourself with Kopia [features](../features/) before following this guide, so that you understand the appropriate terminology. As a reminder: * A `snapshot` is a [point-in-time backup](../features#backup-files-and-directories-using-snapshots) of your files/directories; each snapshot contains the files/directories that you can [restore when you need to](../features#restore-snapshots-using-multiple-methods). -* A `repository` is the storage location where your snapshots are saved; Kopia supports [cloud/remote, network, and local storage locations](../features#save-snapshots-to-cloud-network-or-local-storage) and all repositories are [encrypted](../features/#end-to-end-zero-knowledge-encryption) with a password that you designate. 
+* A `repository` is the storage location where your snapshots are saved; Kopia supports [cloud/remote, network, and local storage locations](../features#save-snapshots-to-cloud-network-or-local-storage) and all repositories are [encrypted](../features/#user-controlled-end-to-end-encryption) with a password that you designate. * A `policy` is a set of rules that tells Kopia how to create/manage snapshots; this includes features such as [compression, snapshot retention, and scheduling when to take automatically snapshots](../features#policies-control-what-and-how-filesdirectories-are-saved-in-snapshots). ## Download and Installation @@ -20,19 +20,21 @@ Once you have installed Kopia, setting up Kopia is quite easy but varies dependi ### Kopia GUI (`KopiaUI`) -Setting up Kopia via the GUI is very easy. +Setting up Kopia via the GUI is very easy. #### Creating and Connecting to a Repository -When you run `KopiaUI` for the first time, you will need to create a `repository`. You will see all supported [repository types](../repositories/) on-screen within the program interface. Pick the one you want and follow the on-screen directions to get it setup; you will need to enter various different details about the storage location that you selected, and you will pick a password that will be used to encrypt all the snapshots that you store in the repository. (As a reminder, Kopia uses [end-to-end zero knowledge encryption](../features#end-to-end-zero-knowledge-encryption), so your password is never sent anywhere and it never leaves your machine!) You can also name the repository whatever you want. +When you run `KopiaUI` for the first time, you will need to create a `repository`. You will see all supported [repository types](../repositories/) on-screen within the program interface. Pick the one you want and follow the on-screen directions to get it setup. 
You will need to enter details about the storage location that you selected, and you will pick a password that will be used to encrypt all the snapshots that you store in the repository. Kopia uses [end-to-end encryption](../features#user-controlled-end-to-end-encryption), so your password never leaves your machine and +is never sent elsewhere. You can name your repository in a way that is useful for you. -**There is absolutely no way to restore snapshots (i.e., your backed up files/directories) from a repository if you forget your password, so do not forget it and keep it secure!** +**Remember your repository password and keep it secure. There is absolutely no way to restore any of your backed up files from a repository if you forget your password!** -> NOTE: Remember, before you use Kopia, you need to provision, setup, and pay (the storage provider) for whatever storage location you want to use; Kopia will not do that for you. After you have done that, you can create a `repository` for that storage location in Kopia. For example, if you want to use `Backblaze B2`, you need to create a Backblaze account, create a B2 bucket, and get the access keys for the bucket; then you can use the `Backblaze B2` repository option in `KopiaUI` to create a repository. +> NOTE: Remember, before you use Kopia, you need to provision, setup, and pay (the storage provider) for whatever storage location you want to use; Kopia will not do that for you. After you have done that, you can create a `repository` for that storage location in Kopia. For example, if you want to use a cloud storage provider, then you need to: (1) create an account with your storage provider; (2) create an object storage bucket; and (3) get the credentials to access the bucket. +After you have set up your storage provider, you can select the appropriate storage provider type in `KopiaUI` to create a repository. 
#### Defining Snapshot Policy and Creating New Snapshot -Once you have created a repository, you can start backing up your files/directories by creating a new `policy` in `KopiaUI`. You can do this from the `Policies` tab and the process, again, is quite straightforward: enter the `directory` which contains the files you want to backup (you can either manually type in the `directory path` or browse for the `directory`), hit the `Set Policy` button, choose your policy settings from the on-screen options (all policy options are fairly self-explanatory), and hit the `Save Policy` button. Kopia will then automatically begin taking the snapshot following the settings you set for the policy. +Once you have created a repository, you can start backing up your files/directories by creating a new `policy` in `KopiaUI`. You can do this from the `Policies` tab and the process, again, is quite straightforward: enter the `directory` which contains the files you want to backup (you can either manually type in the `directory path` or browse for the `directory`), hit the `Set Policy` button, choose your policy settings from the on-screen options (all policy options are fairly self-explanatory), and hit the `Save Policy` button. Kopia will then automatically begin taking the snapshot following the settings you set for the policy. After the initial snapshot, for every snapshot afterwards Kopia will rescan the file/directories and [only upload file content that has changed](../features/#backup-files-and-directories-using-snapshots). All snapshots in Kopia are [always incremental](../features/#backup-files-and-directories-using-snapshots); a snapshot will only upload files/file contents that are not in the repository yet, which saves storage space and upload time. This even applies to files that were moved or renamed. In fact, if two computers have exactly the same file and both computers are backing up to the same `repository`, the file will still be stored only once. 
@@ -52,7 +54,7 @@ For example, say you have a path policy for `foo@bar:/path` which doesn't define #### Restoring Files/Directories from Snapshots -When you want to restore your files/directories from a snapshot, you can do so from the `Snapshots` tab in `KopiaUI`. Just click the `Path` for the files/directories you want to restore and then find the specific `snapshot` you want to restore from. You will then be given the option to either +When you want to restore your files/directories from a snapshot, you can do so from the `Snapshots` tab in `KopiaUI`. Just click the `Path` for the files/directories you want to restore and then find the specific `snapshot` you want to restore from. You will then be given the option to either * `Mount` the snapshot as a local drive so that you can browse, open, and copy any files/directories from the snapshot to your local machine; * `Restore` all the contents of the snapshot to a local or network location; @@ -76,9 +78,9 @@ Setting up Kopia via the CLI follows similar steps as the GUI, but obviously req The first thing you need to do is create a `repository`. For a full list of supported types of repositories that you can create, see the [repositories page](../repositories). -To create a repository, use one of the [subcommands](../reference/command-line/common/#commands-to-manipulate-repository) of `kopia repository create` and follow the on-screen instructions. When creating the repository, you must provide a password that will be used to encrypt all the snapshots and their contents in the repository. (As a reminder, Kopia uses [end-to-end zero knowledge encryption](../features#end-to-end-zero-knowledge-encryption), so your password is never sent anywhere and it never leaves your machine!) +To create a repository, use one of the [subcommands](../reference/command-line/common/#commands-to-manipulate-repository) of `kopia repository create` and follow the on-screen instructions. 
When creating the repository, you must provide a password that will be used to encrypt all the snapshots and their contents in the repository. Kopia uses [end-to-end encryption](../features#user-controlled-end-to-end-encryption), so your password remains in your computer and is not sent anywhere else. -**There is absolutely no way to restore snapshots (i.e., your backed up files/directories) from a repository if you forget your password, so do not forget it and keep it secure!** +**Remember your repository password and keep it secure. There is absolutely no way to restore any of your backed up files from a repository if you forget your password!** As an example, if you want to create a repository in a locally-mounted or network-attached filesystem, you would run the following command: @@ -87,7 +89,9 @@ $ kopia repository create filesystem --path /tmp/my-repository ``` You can read more about all the supported `kopia repository create` commands for different repositories from the [repositories page](../repositories). -> NOTE: Remember, before you use Kopia, you need to provision, setup, and pay (the storage provider) for whatever storage location you want to use; Kopia will not do that for you. After you have done that, you can create a `repository` for that storage location in Kopia. For example, if you want to use `Backblaze B2`, you need to create a Backblaze account, create a B2 bucket, and get the access keys for the bucket; then you can use the [`kopia repository create b2` command](../reference/command-line/common/repository-create-b2/) to create a repository. +> NOTE: Remember, before you use Kopia, you need to provision, setup, and pay (the storage provider) for whatever storage location you want to use; Kopia will not do that for you. After you have done that, you can create a `repository` for that storage location in Kopia. 
For example, if you want to use a cloud storage provider, then you need to: (1) create an account with your storage provider; (2) create an object storage bucket; and (3) get the credentials to access the bucket. +After you have set up your storage provider, you can create a repository using the [`kopia repository create` command](../reference/command-line/common/#commands-to-manipulate-repository) with the appropriate storage provider options. + + #### Connecting to Repository @@ -402,7 +406,7 @@ Kopia CLI provides low-level commands to examine the contents of repository, per ##### BLOBs -We can list the files in a repository using `kopia blob ls`, which shows how Kopia manages snapshots. We can see that repository contents are grouped into pack files (starting with `p`) and indexed using index files (starting with `n`). Both index and pack files are [encrypted](../features/#end-to-end-zero-knowledge-encryption): +We can list the files in a repository using `kopia blob ls`, which shows how Kopia manages snapshots. We can see that repository contents are grouped into pack files (starting with `p`) and indexed using index files (starting with `n`). Both index and pack files are [encrypted](../features/#user-controlled-end-to-end-encryption): ``` $ kopia blob ls diff --git a/site/content/docs/Installation/_index.md b/site/content/docs/Installation/_index.md index 6587520aceb..8edad2be29a 100644 --- a/site/content/docs/Installation/_index.md +++ b/site/content/docs/Installation/_index.md @@ -52,7 +52,7 @@ CLI and GUI packages are available for: * macOS 10.11 or later, 64-bit (CLI binary, GUI installer {`KopiaUI`}, and Homebrew package) * Linux - `amd64`, `armhf` or `arm64` (CLI binary and `KopiaUI` available via RPM and DEB repositories) -### Windows CLI installation using Scoop +### Windows CLI installation On Windows, Kopia CLI is available as a [Scoop](https://scoop.sh) package, which automates installation and upgrades. 
@@ -71,10 +71,22 @@ Alternatively, to install the latest unreleased version of Kopia use the followi > scoop bucket add kopia https://github.com/kopia/scoop-test-builds.git ``` +You can also install Kopia CLI with `winget`. `winget` comes preinstalled with Windows 11 (21H2 and later), and is also available on Windows 10 via [the Microsoft Store](https://apps.microsoft.com/detail/9nblggh4nns1). + +```shell +> winget install Kopia.KopiaCLI +``` + ### Windows GUI installation The installer of `KopiaUI` is available on the [releases page](https://github.com/kopia/kopia/releases/latest). Simply download the file named `KopiaUI-Setup-X.Y.Z.exe` (where `X.Y.Z` is the version number), double click the file, and follow on-screen prompts. +You can also install `KopiaUI` with `winget`. See the CLI installation above for `winget`. + +```shell +> winget install Kopia.KopiaUI +``` + ### macOS CLI using Homebrew On macOS, you can use [Homebrew](https://brew.sh) to install and keep Kopia up-to-date. diff --git a/site/content/docs/Reference/Command-Line/_index.md b/site/content/docs/Reference/Command-Line/_index.md index 22be47b6343..36fb26f8144 100644 --- a/site/content/docs/Reference/Command-Line/_index.md +++ b/site/content/docs/Reference/Command-Line/_index.md @@ -1,6 +1,6 @@ --- -title: "Command Line" -linkTitle: "Command Line" +title: "Command-Line Reference" +linkTitle: "Command-Line Reference" weight: 10 --- @@ -16,6 +16,16 @@ The following environment variables can be used to configure how Kopia runs: | --------------------------- | ------- | -------------------------------------------------------------------------------------------------------- | | `KOPIA_BYTES_STRING_BASE_2` | `false` | If set to `true`, Kopia will output storage values in binary (base-2). The default is decimal (base-10). 
| +### Global Flags + +The following flags apply to all Kopia commands: + +| Flag | Description | +| ------------------- | -------------------------------------------------------------------------------------------- | +| `--[no-]progress` | Enable or disable progress output (default: enabled). | +| `--log-level` | Set console log level: `debug`, `info`, `warning`, or `error` (default: `info`). | +| `--config-file` | Override the default configuration file location. | + ### Connecting to Repository Most commands require a [Repository](../../advanced/architecture/) to be connected first. The first time you use Kopia, repository must be created, later on it can be connected to from one or more machines. diff --git a/site/content/docs/Reference/Go API/_index.md b/site/content/docs/Reference/Go API/_index.md deleted file mode 100644 index fa15a6a4bb5..00000000000 --- a/site/content/docs/Reference/Go API/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: "Go API" -linkTitle: "Go API" -weight: 1 ---- - -Kopia provides programmatic API for accessing the repository. - -The documentation can be found on [godoc.org](https://godoc.org/github.com/kopia/kopia/repo). 
diff --git a/site/content/docs/Reference/_index.md b/site/content/docs/Reference/_index.md index 4f66f11a10d..0fc5359ff1d 100644 --- a/site/content/docs/Reference/_index.md +++ b/site/content/docs/Reference/_index.md @@ -1,10 +1,9 @@ --- -title: "Command-Line References" -linkTitle: "Command-Line References" +title: "Reference" +linkTitle: "Reference" weight: 40 hide_summary: true no_list: true --- -* [Go API Reference](go-api/) * [Command-Line Reference](command-line/) diff --git a/site/content/docs/Repositories/_index.md b/site/content/docs/Repositories/_index.md index 5a8fdb048c2..190e9f0086d 100644 --- a/site/content/docs/Repositories/_index.md +++ b/site/content/docs/Repositories/_index.md @@ -4,7 +4,7 @@ linkTitle: "Supported Storage Locations" weight: 25 --- -Kopia allows you to save your [encrypted](../features/#end-to-end-zero-knowledge-encryption) backups (which are called [`snapshots`](../faqs/#what-is-a-snapshot) in Kopia) to a variety of storage locations, and in Kopia a storage location is called a `repository`. Kopia supports all of the following storage locations: +Kopia allows you to save your [encrypted](../features/#user-controlled-end-to-end-encryption) backups (which are called [`snapshots`](../faqs/#what-is-a-snapshot) in Kopia) to a variety of storage locations, and in Kopia a storage location is called a `repository`. Kopia supports all of the following storage locations: > PRO TIP: You pick the storage locations you want to use. Kopia plays no role in selecting your storage locations. This means you must provision, setup, and pay (the storage provider) for whatever storage locations you want to use **before** you create a `repository` for that storage location in Kopia. diff --git a/site/content/docs/_index.md b/site/content/docs/_index.md index 2142de71cd1..5408ed5c2d3 100755 --- a/site/content/docs/_index.md +++ b/site/content/docs/_index.md @@ -5,15 +5,15 @@ linkTitle: "What is Kopia?" 
weight: 20 --- -Kopia is a fast and secure open-source backup/restore tool that allows you to create [encrypted](features/#end-to-end-zero-knowledge-encryption) snapshots of your data and save the snapshots to [remote or cloud storage](features/#save-snapshots-to-cloud-network-or-local-storage) of your choice, [to network-attached storage or server](features/#save-snapshots-to-cloud-network-or-local-storage), or [locally on your machine](features/#save-snapshots-to-cloud-network-or-local-storage). Kopia does not 'image' your whole machine. Rather, Kopia allows you to backup/restore any and all files/directories that you deem are important or critical. +Kopia is a fast and secure open-source backup/restore tool that allows you to create [encrypted](features/#user-controlled-end-to-end-encryption) snapshots of your data and save the snapshots to [remote or cloud storage](features/#save-snapshots-to-cloud-network-or-local-storage) of your choice, [to network-attached storage or server](features/#save-snapshots-to-cloud-network-or-local-storage), or [locally on your machine](features/#save-snapshots-to-cloud-network-or-local-storage). Kopia does not 'image' your whole machine. Rather, Kopia allows you to backup/restore any and all files/directories that you deem are important or critical. -Kopia has both [CLI (command-line interface)](features/#both-command-line-and-graphical-user-interfaces) and [GUI (graphical user interface)](features/#both-command-line-and-graphical-user-interfaces) versions, making it the perfect tool for both advanced and regular users. You can read more about Kopia's unique [features](features/) -- which include [compression](features/#compression), [deduplication](features/#backup-files-and-directories-using-snapshots), [end-to-end 'zero knowledge' encryption](features/#end-to-end-zero-knowledge-encryption), and [error correction](features/#error-correction) -- to get a better understanding of how Kopia works. 
+Kopia has both [CLI (command-line interface)](features/#both-command-line-and-graphical-user-interfaces) and [GUI (graphical user interface)](features/#both-command-line-and-graphical-user-interfaces) versions, making it the perfect tool for both advanced and regular users. You can read more about Kopia's unique [features](features/) -- which include [compression](features/#compression), [deduplication](features/#backup-files-and-directories-using-snapshots), [user-controlled end-to-end encryption](features/#user-controlled-end-to-end-encryption), and [error correction](features/#error-correction) -- to get a better understanding of how Kopia works. When ready, head to the [installation](installation/) page to download and install Kopia, and make sure to read the [Getting Started Guide](getting-started/) for a step-by-step walkthrough of how to use Kopia. ### Pick the Cloud Storage Provider You Want -Kopia supports saving your [encrypted](features/#end-to-end-zero-knowledge-encryption) and [compressed](features/#compression) snapshots to all of the following [storage locations](features/#save-snapshots-to-cloud-network-or-local-storage): +Kopia supports saving your [encrypted](features/#user-controlled-end-to-end-encryption) and [compressed](features/#compression) snapshots to all of the following [storage locations](features/#save-snapshots-to-cloud-network-or-local-storage): * **Amazon S3** and any **cloud storage that is compatible with S3** * **Azure Blob Storage** diff --git a/site/go.mod b/site/go.mod index fa4f761770e..ff7a94a609d 100644 --- a/site/go.mod +++ b/site/go.mod @@ -1,6 +1,6 @@ module github.com/kopia/kopia/site -go 1.21 +go 1.25 require ( github.com/google/docsy v0.7.0 // indirect diff --git a/snapshot/manager.go b/snapshot/manager.go index a35828d2816..99408d4a0c8 100644 --- a/snapshot/manager.go +++ b/snapshot/manager.go @@ -3,6 +3,7 @@ package snapshot import ( "context" + "maps" "github.com/pkg/errors" @@ -195,9 +196,7 @@ func 
ListSnapshotManifests(ctx context.Context, rep repo.Repository, src *Source labels = sourceInfoToLabels(*src) } - for key, value := range tags { - labels[key] = value - } + maps.Copy(labels, tags) entries, err := rep.FindManifests(ctx, labels) if err != nil { diff --git a/snapshot/manifest.go b/snapshot/manifest.go index 8f3dcf68186..87228ecd435 100644 --- a/snapshot/manifest.go +++ b/snapshot/manifest.go @@ -23,7 +23,7 @@ type Manifest struct { StartTime fs.UTCTimestamp `json:"startTime"` EndTime fs.UTCTimestamp `json:"endTime"` - Stats Stats `json:"stats,omitempty"` + Stats Stats `json:"stats"` IncompleteReason string `json:"incomplete,omitempty"` RootEntry *DirEntry `json:"rootEntry"` @@ -126,7 +126,7 @@ type DirEntry struct { ModTime fs.UTCTimestamp `json:"mtime,omitempty"` UserID uint32 `json:"uid,omitempty"` GroupID uint32 `json:"gid,omitempty"` - ObjectID object.ID `json:"obj,omitempty"` + ObjectID object.ID `json:"obj"` DirSummary *fs.DirectorySummary `json:"summ,omitempty"` } @@ -187,8 +187,8 @@ func (m *Manifest) Clone() *Manifest { type StorageStats struct { // amount of new unique data in this snapshot that wasn't there before. // note that this depends on ordering of snapshots. - NewData StorageUsageDetails `json:"newData,omitempty"` - RunningTotal StorageUsageDetails `json:"runningTotal,omitempty"` + NewData StorageUsageDetails `json:"newData"` + RunningTotal StorageUsageDetails `json:"runningTotal"` } // StorageUsageDetails provides details about snapshot storage usage. diff --git a/snapshot/policy/policy_merge_test.go b/snapshot/policy/policy_merge_test.go index 5cbe2a1b144..3fd150caa1e 100644 --- a/snapshot/policy/policy_merge_test.go +++ b/snapshot/policy/policy_merge_test.go @@ -25,37 +25,39 @@ var omittedDefinitionFields = map[string]bool{ func TestPolicyDefinition(t *testing.T) { // verify that each field in the policy struct recursively matches a corresponding field // from the policy.Definition() struct. 
- ensureTypesMatch(t, reflect.TypeOf(policy.Policy{}), reflect.TypeOf(policy.Definition{})) + ensureTypesMatch(t, reflect.TypeFor[policy.Policy](), reflect.TypeFor[policy.Definition]()) } func ensureTypesMatch(t *testing.T, policyType, definitionType reflect.Type) { t.Helper() - sourceInfoType := reflect.TypeOf(snapshot.SourceInfo{}) + sourceInfoType := reflect.TypeFor[snapshot.SourceInfo]() for i := range policyType.NumField() { f := policyType.Field(i) - dt, ok := definitionType.FieldByName(f.Name) - if !ok { - require.True(t, omittedDefinitionFields[definitionType.Name()+"."+f.Name], "definition field %q not found in %q", f.Name, definitionType.Name()) - continue - } + t.Run(definitionType.Name()+"_"+f.Name, func(t *testing.T) { + t.Logf("f: %v %v", definitionType.Name(), f.Name) - t.Logf("f: %v %v", definitionType.Name(), f.Name) + dt, ok := definitionType.FieldByName(f.Name) + if !ok { + require.True(t, omittedDefinitionFields[definitionType.Name()+"."+f.Name], "definition field %q not found in %q", f.Name, definitionType.Name()) + return + } - if f.Type.Kind() == reflect.Struct { - ensureTypesMatch(t, f.Type, dt.Type) - } else { - require.True(t, sourceInfoType.AssignableTo(dt.Type), "invalid type of %v.%v - %v", definitionType.Name(), dt.Name, dt.Type) - } + if f.Type.Kind() == reflect.Struct { + ensureTypesMatch(t, f.Type, dt.Type) + } else { + require.True(t, sourceInfoType.AssignableTo(dt.Type), "invalid type of %v.%v - %v", definitionType.Name(), dt.Name, dt.Type) + } - require.Equal(t, f.Tag.Get("json"), dt.Tag.Get("json"), dt.Name) + require.Equal(t, f.Tag.Get("json"), dt.Tag.Get("json"), dt.Name) + }) } } func TestPolicyMerge(t *testing.T) { - testPolicyMerge(t, reflect.TypeOf(policy.Policy{}), reflect.TypeOf(policy.Definition{}), "") + testPolicyMerge(t, reflect.TypeFor[policy.Policy](), reflect.TypeFor[policy.Definition](), "") } //nolint:thelper diff --git a/snapshot/policy/retention_policy.go b/snapshot/policy/retention_policy.go index 
8b5b7635fa8..18c59437a2c 100644 --- a/snapshot/policy/retention_policy.go +++ b/snapshot/policy/retention_policy.go @@ -261,7 +261,7 @@ func prefixSuffix(s string) (prefix, suffix string) { suffix = s[p+1:] } - return + return prefix, suffix } func appendRLE(out []string, prefix string, numbers []int) []string { diff --git a/snapshot/restore/local_fs_output.go b/snapshot/restore/local_fs_output.go index 9b9c576a09e..1d43ba03b0a 100644 --- a/snapshot/restore/local_fs_output.go +++ b/snapshot/restore/local_fs_output.go @@ -1,8 +1,8 @@ -// Package restore manages restoring filesystem snapshots. package restore import ( "context" + stderrors "errors" "io" "os" "path/filepath" @@ -110,6 +110,12 @@ type FilesystemOutput struct { // copier is the StreamCopier to use for copying the actual bit stream to output. // It is assigned at runtime based on the target filesystem and restore options. copier streamCopier `json:"-"` + + // Indicate whether or not flush files after restore. + // Varying from OS, the copier may write the file data to the system cache, + // so the data may not be written to disk when the restore to the file completes. + // This flag guarantees the file data is flushed to disk. + FlushFiles bool `json:"flushFiles"` } // Init initializes the internal members of the filesystem writer output. @@ -374,26 +380,29 @@ func (o *FilesystemOutput) createDirectory(ctx context.Context, path string) err } } -func write(targetPath string, r fs.Reader, size int64, c streamCopier) error { +func write(targetPath string, r fs.Reader, size int64, flush bool, c streamCopier) (err error) { f, err := os.OpenFile(targetPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600) //nolint:gosec,mnd if err != nil { return err //nolint:wrapcheck } + defer func() { + // always close f and report close error + err = stderrors.Join(err, f.Close()) + }() + if err := f.Truncate(size); err != nil { return err //nolint:wrapcheck } - // ensure we always close f. 
Note that this does not conflict with the - // close below, as close is idempotent. - defer f.Close() //nolint:errcheck - if _, err := c(f, r); err != nil { return errors.Wrapf(err, "cannot write data to file %q", f.Name()) } - if err := f.Close(); err != nil { - return err //nolint:wrapcheck + if flush { + if err := f.Sync(); err != nil { + return errors.Wrapf(err, "cannot flush file %q", f.Name()) + } } return nil @@ -431,7 +440,7 @@ func (o *FilesystemOutput) copyFileContent(ctx context.Context, targetPath strin return atomicfile.Write(targetPath, rr) } - return write(targetPath, rr, f.Size(), o.copier) + return write(targetPath, rr, f.Size(), o.FlushFiles, o.copier) } func isEmptyDirectory(name string) (bool, error) { diff --git a/snapshot/restore/local_fs_output_unix.go b/snapshot/restore/local_fs_output_unix.go index 852c607f51e..8e3d5e96524 100644 --- a/snapshot/restore/local_fs_output_unix.go +++ b/snapshot/restore/local_fs_output_unix.go @@ -1,5 +1,4 @@ //go:build linux || freebsd || openbsd -// +build linux freebsd openbsd package restore diff --git a/snapshot/restore/long_paths_unix.go b/snapshot/restore/long_paths_unix.go index e6584dd2f7a..7f25dfa5c9e 100644 --- a/snapshot/restore/long_paths_unix.go +++ b/snapshot/restore/long_paths_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 -// +build !windows,!plan9 package restore diff --git a/snapshot/restore/restore.go b/snapshot/restore/restore.go index a6a4df40cf7..a8a8d511975 100644 --- a/snapshot/restore/restore.go +++ b/snapshot/restore/restore.go @@ -1,3 +1,4 @@ +// Package restore manages restoring filesystem snapshots. 
package restore import ( @@ -312,7 +313,7 @@ func (c *copier) deleteExtraFilesInDir(ctx context.Context, o *FilesystemOutput, } if err != nil { - return errors.Wrap(err, "read existing dir entries ('"+path.Join(o.TargetPath, targetPath)+"')") + return errors.Wrapf(err, "read existing dir entries (%q)", path.Join(o.TargetPath, targetPath)) } snapshotEntries, err := fs.GetAllEntries(ctx, d) @@ -346,7 +347,7 @@ func (c *copier) deleteExtraFilesInDir(ctx context.Context, o *FilesystemOutput, log(ctx).Debugf("deleting directory %v since it does not exist in snapshot", entryPath) if err := os.RemoveAll(entryPath); err != nil { - return errors.Wrap(err, "delete directory "+path.Join(o.TargetPath, targetPath, existingEntry.Name())) + return errors.Wrapf(err, "delete directory %q", entryPath) } c.stats.DeletedDirCount.Add(1) @@ -361,7 +362,7 @@ func (c *copier) deleteExtraFilesInDir(ctx context.Context, o *FilesystemOutput, log(ctx).Debugf("deleting file %v since it does not exist in snapshot", entryPath) if err := os.Remove(entryPath); err != nil { - return errors.Wrap(err, "delete file "+path.Join(o.TargetPath, targetPath, existingEntry.Name())) + return errors.Wrapf(err, "delete file %q", entryPath) } if existingEntry.Type() == os.ModeSymlink { diff --git a/snapshot/restore/shallow_fs_output.go b/snapshot/restore/shallow_fs_output.go index 509012e39eb..5b657bf560d 100644 --- a/snapshot/restore/shallow_fs_output.go +++ b/snapshot/restore/shallow_fs_output.go @@ -1,4 +1,3 @@ -// Package restore manages restoring filesystem snapshots. 
package restore import ( diff --git a/snapshot/snapshotfs/snapshot_verifier.go b/snapshot/snapshotfs/snapshot_verifier.go index 4a6a9e171f9..ab9c9968b77 100644 --- a/snapshot/snapshotfs/snapshot_verifier.go +++ b/snapshot/snapshotfs/snapshot_verifier.go @@ -275,11 +275,7 @@ func (v *Verifier) InParallel(ctx context.Context, enqueue func(tw *TreeWalker) v.fileWorkQueue = make(chan verifyFileWorkItem, v.opts.FileQueueLength) for range v.opts.Parallelism { - v.workersWG.Add(1) - - go func() { - defer v.workersWG.Done() - + v.workersWG.Go(func() { for wi := range v.fileWorkQueue { if tw.TooManyErrors() { continue @@ -289,7 +285,7 @@ func (v *Verifier) InParallel(ctx context.Context, enqueue func(tw *TreeWalker) tw.ReportError(ctx, wi.entryPath, err) } } - }() + }) } err := enqueue(tw) diff --git a/snapshot/snapshotgc/gc.go b/snapshot/snapshotgc/gc.go index 8e47877160a..8c97dc2cb00 100644 --- a/snapshot/snapshotgc/gc.go +++ b/snapshot/snapshotgc/gc.go @@ -9,21 +9,26 @@ import ( "github.com/kopia/kopia/fs" "github.com/kopia/kopia/internal/bigmap" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" + "github.com/kopia/kopia/internal/contentparam" "github.com/kopia/kopia/internal/stats" "github.com/kopia/kopia/internal/units" "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/content" "github.com/kopia/kopia/repo/logging" "github.com/kopia/kopia/repo/maintenance" + "github.com/kopia/kopia/repo/maintenancestats" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/object" "github.com/kopia/kopia/snapshot" "github.com/kopia/kopia/snapshot/snapshotfs" ) -var log = logging.Module("snapshotgc") +// User-visible log output. 
+var userLog = logging.Module("snapshotgc") -func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap.Set) error { +func findInUseContentIDs(ctx context.Context, log *contentlog.Logger, rep repo.Repository, used *bigmap.Set) error { ids, err := snapshot.ListSnapshotManifests(ctx, rep, nil, nil) if err != nil { return errors.Wrap(err, "unable to list snapshot manifest IDs") @@ -56,7 +61,7 @@ func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap. defer w.Close(ctx) - log(ctx).Info("Looking for active contents...") + contentlog.Log(ctx, log, "Looking for active contents...") for _, m := range manifests { root, err := snapshotfs.SnapshotRoot(rep, m) @@ -74,38 +79,44 @@ func findInUseContentIDs(ctx context.Context, rep repo.Repository, used *bigmap. // Run performs garbage collection on all the snapshots in the repository. func Run(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, safety maintenance.SafetyParameters, maintenanceStartTime time.Time) error { - err := maintenance.ReportRun(ctx, rep, maintenance.TaskSnapshotGarbageCollection, nil, func() error { + err := maintenance.ReportRun(ctx, rep, maintenance.TaskSnapshotGarbageCollection, nil, func() (maintenancestats.Kind, error) { return runInternal(ctx, rep, gcDelete, safety, maintenanceStartTime) }) return errors.Wrap(err, "error running snapshot gc") } -func runInternal(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, safety maintenance.SafetyParameters, maintenanceStartTime time.Time) error { +func runInternal(ctx context.Context, rep repo.DirectRepositoryWriter, gcDelete bool, safety maintenance.SafetyParameters, maintenanceStartTime time.Time) (*maintenancestats.SnapshotGCStats, error) { + ctx = contentlog.WithParams(ctx, + logparam.String("span:snapshot-gc", contentlog.RandomSpanID())) + + log := rep.LogManager().NewLogger("maintenance-snapshot-gc") + used, serr := bigmap.NewSet(ctx) if serr != nil { - return 
errors.Wrap(serr, "unable to create new set") + return nil, errors.Wrap(serr, "unable to create new set") } defer used.Close(ctx) - if err := findInUseContentIDs(ctx, rep, used); err != nil { - return errors.Wrap(err, "unable to find in-use content ID") + if err := findInUseContentIDs(ctx, log, rep, used); err != nil { + return nil, errors.Wrap(err, "unable to find in-use content ID") } - return findUnreferencedAndRepairRereferenced(ctx, rep, gcDelete, safety, maintenanceStartTime, used) + return findUnreferencedAndRepairRereferenced(ctx, log, rep, gcDelete, safety, maintenanceStartTime, used) } func findUnreferencedAndRepairRereferenced( ctx context.Context, + log *contentlog.Logger, rep repo.DirectRepositoryWriter, gcDelete bool, safety maintenance.SafetyParameters, maintenanceStartTime time.Time, used *bigmap.Set, -) error { - var unused, inUse, system, tooRecent, undeleted stats.CountSum +) (*maintenancestats.SnapshotGCStats, error) { + var unused, inUse, system, tooRecent, undeleted, deleted stats.CountSum - log(ctx).Info("Looking for unreferenced contents...") + contentlog.Log(ctx, log, "Looking for unreferenced contents...") // Ensure that the iteration includes deleted contents, so those can be // undeleted (recovered). 
@@ -132,23 +143,37 @@ func findUnreferencedAndRepairRereferenced( } if maintenanceStartTime.Sub(ci.Timestamp()) < safety.MinContentAgeSubjectToGC { - log(ctx).Debugf("recent unreferenced content %v (%v bytes, modified %v)", ci.ContentID, ci.PackedLength, ci.Timestamp()) + contentlog.Log3(ctx, log, + "recent unreferenced content", + contentparam.ContentID("contentID", ci.ContentID), + logparam.Int64("bytes", int64(ci.PackedLength)), + logparam.Time("modified", ci.Timestamp())) tooRecent.Add(int64(ci.PackedLength)) return nil } - log(ctx).Debugf("unreferenced %v (%v bytes, modified %v)", ci.ContentID, ci.PackedLength, ci.Timestamp()) + contentlog.Log3(ctx, log, + "unreferenced content", + contentparam.ContentID("contentID", ci.ContentID), + logparam.Int64("bytes", int64(ci.PackedLength)), + logparam.Time("modified", ci.Timestamp())) + cnt, totalSize := unused.Add(int64(ci.PackedLength)) if gcDelete { if err := rep.ContentManager().DeleteContent(ctx, ci.ContentID); err != nil { return errors.Wrap(err, "error deleting content") } + + deleted.Add(int64(ci.PackedLength)) } if cnt%100000 == 0 { - log(ctx).Infof("... 
found %v unused contents so far (%v bytes)", cnt, units.BytesString(totalSize)) + contentlog.Log2(ctx, log, + "found unused contents so far", + logparam.UInt32("count", cnt), + logparam.Int64("bytes", totalSize)) if gcDelete { if err := rep.Flush(ctx); err != nil { @@ -160,35 +185,57 @@ func findUnreferencedAndRepairRereferenced( return nil }) - unusedCount, unusedBytes := toCountAndBytesString(&unused) - inUseCount, inUseBytes := toCountAndBytesString(&inUse) - systemCount, systemBytes := toCountAndBytesString(&system) - tooRecentCount, tooRecentBytes := toCountAndBytesString(&tooRecent) - undeletedCount, undeletedBytes := toCountAndBytesString(&undeleted) + result := buildGCResult(&unused, &inUse, &system, &tooRecent, &undeleted, &deleted) + + userLog(ctx).Infof("GC found %v unused contents (%v)", result.UnreferencedContentCount, units.BytesString(result.UnreferencedContentSize)) + userLog(ctx).Infof("GC found %v unused contents that are too recent to delete (%v)", result.UnreferencedRecentContentCount, units.BytesString(result.UnreferencedRecentContentSize)) + userLog(ctx).Infof("GC found %v in-use contents (%v)", result.InUseContentCount, units.BytesString(result.InUseContentSize)) + userLog(ctx).Infof("GC found %v in-use system-contents (%v)", result.InUseSystemContentCount, units.BytesString(result.InUseSystemContentSize)) + userLog(ctx).Infof("GC undeleted %v contents (%v)", result.RecoveredContentCount, units.BytesString(result.RecoveredContentSize)) - log(ctx).Infof("GC found %v unused contents (%v)", unusedCount, unusedBytes) - log(ctx).Infof("GC found %v unused contents that are too recent to delete (%v)", tooRecentCount, tooRecentBytes) - log(ctx).Infof("GC found %v in-use contents (%v)", inUseCount, inUseBytes) - log(ctx).Infof("GC found %v in-use system-contents (%v)", systemCount, systemBytes) - log(ctx).Infof("GC undeleted %v contents (%v)", undeletedCount, undeletedBytes) + contentlog.Log1(ctx, log, "Snapshot GC", result) if err != nil { - return 
errors.Wrap(err, "error iterating contents") + return nil, errors.Wrap(err, "error iterating contents") } if err := rep.Flush(ctx); err != nil { - return errors.Wrap(err, "flush error") + return nil, errors.Wrap(err, "flush error") } - if unusedCount > 0 && !gcDelete { - return errors.New("Not deleting because 'gcDelete' was not set") + if result.UnreferencedContentCount > 0 && !gcDelete { + return result, errors.New("Not deleting because 'gcDelete' was not set") } - return nil + return result, nil } -func toCountAndBytesString(cs *stats.CountSum) (uint32, string) { - count, sumBytes := cs.Approximate() +func buildGCResult(unused, inUse, system, tooRecent, undeleted, deleted *stats.CountSum) *maintenancestats.SnapshotGCStats { + result := &maintenancestats.SnapshotGCStats{} + + cnt, size := unused.Approximate() + result.UnreferencedContentCount = cnt + result.UnreferencedContentSize = size + + cnt, size = tooRecent.Approximate() + result.UnreferencedRecentContentCount = cnt + result.UnreferencedRecentContentSize = size + + cnt, size = inUse.Approximate() + result.InUseContentCount = cnt + result.InUseContentSize = size + + cnt, size = system.Approximate() + result.InUseSystemContentCount = cnt + result.InUseSystemContentSize = size + + cnt, size = undeleted.Approximate() + result.RecoveredContentCount = cnt + result.RecoveredContentSize = size + + cnt, size = deleted.Approximate() + result.DeletedContentCount = cnt + result.DeletedContentSize = size - return count, units.BytesString(sumBytes) + return result } diff --git a/snapshot/snapshotmaintenance/snapshotmaintenance.go b/snapshot/snapshotmaintenance/snapshotmaintenance.go index dd051c6afdc..b027875a538 100644 --- a/snapshot/snapshotmaintenance/snapshotmaintenance.go +++ b/snapshot/snapshotmaintenance/snapshotmaintenance.go @@ -20,6 +20,8 @@ func Run(ctx context.Context, dr repo.DirectRepositoryWriter, mode maintenance.M return ErrReadonly } + dr.LogManager().Enable() + //nolint:wrapcheck return 
maintenance.RunExclusive(ctx, dr, mode, force, func(ctx context.Context, runParams maintenance.RunParameters) error { diff --git a/snapshot/upload/upload.go b/snapshot/upload/upload.go index 593957218db..63b8d03152e 100644 --- a/snapshot/upload/upload.go +++ b/snapshot/upload/upload.go @@ -22,6 +22,8 @@ import ( "github.com/kopia/kopia/fs" "github.com/kopia/kopia/fs/ignorefs" + "github.com/kopia/kopia/internal/contentlog" + "github.com/kopia/kopia/internal/contentlog/logparam" "github.com/kopia/kopia/internal/iocopy" "github.com/kopia/kopia/internal/timetrack" "github.com/kopia/kopia/internal/workshare" @@ -509,6 +511,8 @@ func (u *Uploader) uploadFileWithCheckpointing(ctx context.Context, relativePath // checkpointRoot invokes checkpoints on the provided registry and if a checkpoint entry was generated, // saves it in an incomplete snapshot manifest. func (u *Uploader) checkpointRoot(ctx context.Context, cp *checkpointRegistry, prototypeManifest *snapshot.Manifest) error { + ctx = contentlog.WithParams(ctx, logparam.String("span:checkpoint", contentlog.RandomSpanID())) + var dmbCheckpoint snapshotfs.DirManifestBuilder if err := cp.runCheckpoints(&dmbCheckpoint); err != nil { return errors.Wrap(err, "running checkpointers") @@ -1260,6 +1264,15 @@ func (u *Uploader) Upload( ctx, span := uploadTracer.Start(ctx, "Upload") defer span.End() + ctx = contentlog.WithParams(ctx, logparam.String("span:upload", contentlog.HashSpanID(sourceInfo.String()))) + + if dr, ok := u.repo.(repo.DirectRepository); ok { + log := dr.LogManager().NewLogger("uploader") + + contentlog.Log(ctx, log, "uploading started") + defer contentlog.Log(ctx, log, "uploading finished") + } + u.traceEnabled = span.IsRecording() u.Progress.UploadStarted() diff --git a/snapshot/upload/upload_estimator.go b/snapshot/upload/upload_estimator.go index 1ebcfd6d736..e8985be14b0 100644 --- a/snapshot/upload/upload_estimator.go +++ b/snapshot/upload/upload_estimator.go @@ -105,11 +105,8 @@ func (e *estimator) 
StartEstimation(ctx context.Context, cb EstimationDoneFn) { scanCtx, cancelScan := context.WithCancel(ctx) e.cancelCtx = cancelScan - e.scanWG.Add(1) - - go func() { - defer e.scanWG.Done() + e.scanWG.Go(func() { logger := estimateLog(ctx) var filesCount, totalFileSize int64 @@ -147,7 +144,7 @@ func (e *estimator) StartEstimation(ctx context.Context, cb EstimationDoneFn) { } cb(filesCount, totalFileSize) - }() + }) } func (e *estimator) Wait() { diff --git a/snapshot/upload/upload_estimator_test.go b/snapshot/upload/upload_estimator_test.go index 5eb31decc3c..b723edffb52 100644 --- a/snapshot/upload/upload_estimator_test.go +++ b/snapshot/upload/upload_estimator_test.go @@ -88,6 +88,7 @@ func expectSuccessfulEstimation( done := make(chan struct{}) go func() { defer close(done) + estimator.StartEstimation(ctx, func(fc, ts int64) { filesCount = fc totalFileSize = ts diff --git a/snapshot/upload/upload_os_snapshot_nonwindows.go b/snapshot/upload/upload_os_snapshot_nonwindows.go index 0216762c75a..ec969cdee17 100644 --- a/snapshot/upload/upload_os_snapshot_nonwindows.go +++ b/snapshot/upload/upload_os_snapshot_nonwindows.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package upload diff --git a/snapshot/upload/upload_progress.go b/snapshot/upload/upload_progress.go index 770627b7f72..8a0ba5e6410 100644 --- a/snapshot/upload/upload_progress.go +++ b/snapshot/upload/upload_progress.go @@ -79,7 +79,7 @@ type Progress interface { EstimationParameters() EstimationParameters // EstimatedDataSize is emitted whenever the size of upload is estimated. - EstimatedDataSize(fileCount int64, totalBytes int64) + EstimatedDataSize(fileCount, totalBytes int64) } // NullUploadProgress is an implementation of UploadProgress that does not produce any output. 
diff --git a/snapshot/upload/upload_test.go b/snapshot/upload/upload_test.go index 7519b03d851..5e1491b3b49 100644 --- a/snapshot/upload/upload_test.go +++ b/snapshot/upload/upload_test.go @@ -78,7 +78,7 @@ func newUploadTestHarness(ctx context.Context, t *testing.T) *uploadTestHarness require.NoError(t, err, "cannot create storage directory") faulty := blobtesting.NewFaultyStorage(storage) - logged := bloblogging.NewWrapper(faulty, testlogging.Printf(t.Logf, "{STORAGE} "), "") + logged := bloblogging.NewWrapper(faulty, testlogging.Printf(t.Logf, "{STORAGE} "), nil, "") rec := repotesting.NewReconnectableStorage(t, logged) err = repo.Initialize(ctx, rec, &repo.NewRepositoryOptions{}, masterPassword) @@ -1109,6 +1109,7 @@ func (w *mockLogger) Write(p []byte) (int, error) { parts := strings.SplitN(strings.TrimSpace(string(p)), "\t", 2) var la loggedAction + la.msg = parts[0] if len(parts) == 2 { diff --git a/tests/clitestutil/clitestutil.go b/tests/clitestutil/clitestutil.go index dc13072ed1b..6b99d8b4f5c 100644 --- a/tests/clitestutil/clitestutil.go +++ b/tests/clitestutil/clitestutil.go @@ -25,8 +25,8 @@ type SnapshotInfo struct { } // MustParseSnapshots parsers the output of 'snapshot list'. 
-func MustParseSnapshots(t *testing.T, lines []string) []SourceInfo { - t.Helper() +func MustParseSnapshots(tb testing.TB, lines []string) []SourceInfo { + tb.Helper() var ( result []SourceInfo @@ -40,16 +40,16 @@ func MustParseSnapshots(t *testing.T, lines []string) []SourceInfo { if strings.HasPrefix(l, " ") { if currentSource == nil { - t.Errorf("snapshot without a source: %q", l) + tb.Errorf("snapshot without a source: %q", l) return nil } - currentSource.Snapshots = append(currentSource.Snapshots, mustParseSnapshotInfo(t, l[2:])) + currentSource.Snapshots = append(currentSource.Snapshots, mustParseSnapshotInfo(tb, l[2:])) continue } - s := mustParseSourceInfo(t, l) + s := mustParseSourceInfo(tb, l) result = append(result, s) currentSource = &result[len(result)-1] } @@ -57,8 +57,8 @@ func MustParseSnapshots(t *testing.T, lines []string) []SourceInfo { return result } -func mustParseSnapshotInfo(t *testing.T, l string) SnapshotInfo { - t.Helper() +func mustParseSnapshotInfo(tb testing.TB, l string) SnapshotInfo { + tb.Helper() incomplete := strings.Contains(l, "incomplete") @@ -66,7 +66,7 @@ func mustParseSnapshotInfo(t *testing.T, l string) SnapshotInfo { ts, err := time.Parse("2006-01-02 15:04:05 MST", strings.Join(parts[0:3], " ")) if err != nil { - t.Fatalf("err: %v", err) + tb.Fatalf("err: %v", err) } var manifestField string @@ -93,18 +93,17 @@ func mustParseSnapshotInfo(t *testing.T, l string) SnapshotInfo { } } -func mustParseSourceInfo(t *testing.T, l string) SourceInfo { - t.Helper() +func mustParseSourceInfo(tb testing.TB, l string) SourceInfo { + tb.Helper() p1 := strings.Index(l, "@") - p2 := strings.Index(l, ":") if p1 >= 0 && p2 > p1 { return SourceInfo{User: l[0:p1], Host: l[p1+1 : p2], Path: l[p2+1:]} } - t.Fatalf("can't parse source info: %q", l) + tb.Fatalf("can't parse source info: %q", l) return SourceInfo{} } @@ -131,32 +130,32 @@ func mustParseDirectoryEntries(lines []string) []DirEntry { } type testEnv interface { - RunAndExpectSuccess(t 
*testing.T, args ...string) []string + RunAndExpectSuccess(t testing.TB, args ...string) []string } // ListSnapshotsAndExpectSuccess lists given snapshots and parses the output. -func ListSnapshotsAndExpectSuccess(t *testing.T, e testEnv, targets ...string) []SourceInfo { - t.Helper() +func ListSnapshotsAndExpectSuccess(tb testing.TB, e testEnv, targets ...string) []SourceInfo { + tb.Helper() - lines := e.RunAndExpectSuccess(t, append([]string{"snapshot", "list", "-l", "--manifest-id"}, targets...)...) + lines := e.RunAndExpectSuccess(tb, append([]string{"snapshot", "list", "-l", "--manifest-id"}, targets...)...) - return MustParseSnapshots(t, lines) + return MustParseSnapshots(tb, lines) } // ListDirectory lists a given directory and returns directory entries. -func ListDirectory(t *testing.T, e testEnv, targets ...string) []DirEntry { - t.Helper() +func ListDirectory(tb testing.TB, e testEnv, targets ...string) []DirEntry { + tb.Helper() - lines := e.RunAndExpectSuccess(t, append([]string{"ls", "-l"}, targets...)...) + lines := e.RunAndExpectSuccess(tb, append([]string{"ls", "-l"}, targets...)...) return mustParseDirectoryEntries(lines) } // ListDirectoryRecursive lists a given directory recursively and returns directory entries. -func ListDirectoryRecursive(t *testing.T, e testEnv, targets ...string) []DirEntry { - t.Helper() +func ListDirectoryRecursive(tb testing.TB, e testEnv, targets ...string) []DirEntry { + tb.Helper() - lines := e.RunAndExpectSuccess(t, append([]string{"ls", "-lr"}, targets...)...) + lines := e.RunAndExpectSuccess(tb, append([]string{"ls", "-lr"}, targets...)...) 
return mustParseDirectoryEntries(lines) } diff --git a/tests/end_to_end_test/api_server_repository_test.go b/tests/end_to_end_test/api_server_repository_test.go index 1b20a553922..cc6b458b88b 100644 --- a/tests/end_to_end_test/api_server_repository_test.go +++ b/tests/end_to_end_test/api_server_repository_test.go @@ -2,6 +2,7 @@ package endtoend_test import ( "context" + "encoding/json" "os" "path/filepath" "strings" @@ -10,6 +11,7 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/kopia/kopia/internal/apiclient" @@ -252,6 +254,9 @@ func testAPIServerRepository(t *testing.T, allowRepositoryUsers bool) { wait2() + // wait for the logs to be uploaded + verifyServerJSONLogs(t, e.RunAndExpectSuccess(t, "logs", "show", "--younger-than=2h")) + // open repository client to a dead server, this should fail quickly instead of retrying forever. timer := timetrack.StartTimer() @@ -267,6 +272,30 @@ func testAPIServerRepository(t *testing.T, allowRepositoryUsers bool) { require.Less(t, timer.Elapsed(), 15*time.Second) } +func verifyServerJSONLogs(t require.TestingT, s []string) { + clientSpans := make(map[string]int) + remoteSessionSpans := make(map[string]int) + + for _, l := range s { + var logLine map[string]any + + json.Unmarshal([]byte(l), &logLine) + + if s, ok := logLine["span:client"].(string); ok { + clientSpans[s]++ + } + + if s, ok := logLine["span:server-session"].(string); ok { + remoteSessionSpans[s]++ + } + } + + // there should be 2 client session (initial setup + server), + // at least 3 remote sessions (GRPC clients) - may be more due to transparent retries + assert.Len(t, clientSpans, 2) + assert.GreaterOrEqual(t, len(remoteSessionSpans), 3) +} + func verifyFindManifestCount(ctx context.Context, t *testing.T, rep repo.Repository, pageSize int32, labels map[string]string, wantCount int) { t.Helper() diff --git a/tests/end_to_end_test/auto_update_test.go 
b/tests/end_to_end_test/auto_update_test.go index 481eb16555e..0f38f830374 100644 --- a/tests/end_to_end_test/auto_update_test.go +++ b/tests/end_to_end_test/auto_update_test.go @@ -2,6 +2,7 @@ package endtoend_test import ( "encoding/json" + "maps" "os" "path/filepath" "testing" @@ -46,9 +47,7 @@ func TestAutoUpdateEnableTest(t *testing.T) { "repo", "create", "filesystem", "--path", e.RepoDir, }, tc.extraArgs...) - for k, v := range tc.extraEnv { - e.Environment[k] = v - } + maps.Copy(e.Environment, tc.extraEnv) e.RunAndExpectSuccess(t, args...) diff --git a/tests/end_to_end_test/index_recover_test.go b/tests/end_to_end_test/index_recover_test.go index 4a838731367..f2c520454df 100644 --- a/tests/end_to_end_test/index_recover_test.go +++ b/tests/end_to_end_test/index_recover_test.go @@ -44,7 +44,7 @@ func (s *formatSpecificTestSuite) TestIndexRecover(t *testing.T) { e.RunAndVerifyOutputLineCount(t, 0, "cache", "clear") // there should be no index files at this point - e.RunAndVerifyOutputLineCount(t, 0, "index", "ls", "--no-list-caching") + e.RunAndVerifyOutputLineCount(t, 0, "index", "ls") // there should be no contents, since there are no indexes to find them e.RunAndVerifyOutputLineCount(t, 0, "content", "ls") diff --git a/tests/end_to_end_test/maintenance_test.go b/tests/end_to_end_test/maintenance_test.go index 127fa4cad2b..4cd5a2b7d86 100644 --- a/tests/end_to_end_test/maintenance_test.go +++ b/tests/end_to_end_test/maintenance_test.go @@ -16,7 +16,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) { runner := testenv.NewInProcRunner(t) e := testenv.NewCLITest(t, s.formatFlags, runner) - e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir, "--disable-internal-log") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir, "--disable-repository-log") defer e.RunAndExpectSuccess(t, "repo", "disconnect") var ( @@ -35,11 +35,11 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t 
*testing.T) { beforeSnapshotBlobs := e.RunAndExpectSuccess(t, "blob", "list", "--data-only") _ = beforeSnapshotBlobs - testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir1, "--json", "--disable-internal-log"), &snap) + testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir1, "--json", "--disable-repository-log"), &snap) // avoid create and delete in the same second. time.Sleep(2 * time.Second) - e.RunAndExpectSuccess(t, "snapshot", "delete", string(snap.ID), "--delete", "--disable-internal-log") + e.RunAndExpectSuccess(t, "snapshot", "delete", string(snap.ID), "--delete", "--disable-repository-log") e.RunAndVerifyOutputLineCount(t, 0, "snapshot", "list") @@ -47,7 +47,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) { e.RunAndExpectSuccess(t, "maintenance", "info") testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi) - e.RunAndVerifyOutputLineCount(t, 0, "maintenance", "run", "--full", "--disable-internal-log") + e.RunAndVerifyOutputLineCount(t, 0, "maintenance", "run", "--full", "--disable-repository-log") e.RunAndExpectSuccess(t, "maintenance", "info") testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi) @@ -56,7 +56,7 @@ func (s *formatSpecificTestSuite) TestFullMaintenance(t *testing.T) { } // now rerun with --safety=none - e.RunAndExpectSuccess(t, "maintenance", "run", "--full", "--safety=none", "--disable-internal-log") + e.RunAndExpectSuccess(t, "maintenance", "run", "--full", "--safety=none", "--disable-repository-log") e.RunAndExpectSuccess(t, "maintenance", "info") testutil.MustParseJSONLines(t, e.RunAndExpectSuccess(t, "maintenance", "info", "--json"), &mi) diff --git a/tests/end_to_end_test/norace_test.go b/tests/end_to_end_test/norace_test.go index 1673ed9c97e..1b1ea4e0b78 100644 --- a/tests/end_to_end_test/norace_test.go +++ b/tests/end_to_end_test/norace_test.go 
@@ -1,5 +1,4 @@ //go:build !race -// +build !race package endtoend_test diff --git a/tests/end_to_end_test/profile_flags_test.go b/tests/end_to_end_test/profile_flags_test.go new file mode 100644 index 00000000000..c41b3db5769 --- /dev/null +++ b/tests/end_to_end_test/profile_flags_test.go @@ -0,0 +1,47 @@ +package endtoend_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/kopia/kopia/tests/testenv" +) + +func TestProfileFlags(t *testing.T) { + env := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, testenv.NewExeRunner(t)) + + // contents not needed on test failure + diagsDir := t.TempDir() + + env.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", env.RepoDir) + env.RunAndExpectSuccess(t, "repo", "status", + "--diagnostics-output-directory", diagsDir, + "--profile-store-on-exit", + "--profile-cpu", + "--profile-blocking-rate=1", + "--profile-mutex-fraction=1", + "--profile-memory-rate=1", + ) + + // get per-execution directory + entries, err := os.ReadDir(diagsDir) + require.NoError(t, err) + require.NotEmpty(t, entries) + + pprofDir := filepath.Join(diagsDir, entries[0].Name(), "profiles") + + for _, name := range []string{"cpu.pprof", "allocs.pprof", "block.pprof", "goroutine.pprof", "mutex.pprof", "heap.pprof", "threadcreate.pprof"} { + f := filepath.Join(pprofDir, name) + t.Run(f, func(t *testing.T) { + require.FileExists(t, f, "expected profile file") + + info, err := os.Stat(f) + + require.NoError(t, err) + require.NotZero(t, info.Size(), "profile file %s should not be empty", f) + }) + } +} diff --git a/tests/end_to_end_test/repository_connect_test.go b/tests/end_to_end_test/repository_connect_test.go index fd54fff933f..0294347974f 100644 --- a/tests/end_to_end_test/repository_connect_test.go +++ b/tests/end_to_end_test/repository_connect_test.go @@ -93,8 +93,8 @@ func TestReconnectUsingToken(t *testing.T) { // look for output line containing the prefix - this will be our 
reconnect command for _, l := range lines { - if strings.HasPrefix(l, prefix) { - reconnectArgs = strings.Split(strings.TrimPrefix(l, prefix), " ") + if after, ok := strings.CutPrefix(l, prefix); ok { + reconnectArgs = strings.Split(after, " ") } } diff --git a/tests/end_to_end_test/repository_set_client_test.go b/tests/end_to_end_test/repository_set_client_test.go index 6465199ed8d..b9e2e709a75 100644 --- a/tests/end_to_end_test/repository_set_client_test.go +++ b/tests/end_to_end_test/repository_set_client_test.go @@ -1,6 +1,7 @@ package endtoend_test import ( + "slices" "strings" "testing" @@ -75,10 +76,8 @@ func (s *formatSpecificTestSuite) TestRepositorySetClient(t *testing.T) { func verifyHasLine(t *testing.T, lines []string, ok func(s string) bool) { t.Helper() - for _, l := range lines { - if ok(l) { - return - } + if slices.ContainsFunc(lines, ok) { + return } t.Errorf("output line meeting given condition was not found") diff --git a/tests/end_to_end_test/restore_test.go b/tests/end_to_end_test/restore_test.go index 8e6d4dc9f0e..b5a58c91dfa 100644 --- a/tests/end_to_end_test/restore_test.go +++ b/tests/end_to_end_test/restore_test.go @@ -944,6 +944,7 @@ func TestRestoreByPathWithoutTarget(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) srcdir := testutil.TempDirectory(t) diff --git a/tests/end_to_end_test/server_start_test.go b/tests/end_to_end_test/server_start_test.go index 61cdf0bd087..1cdb7fe02d3 100644 --- a/tests/end_to_end_test/server_start_test.go +++ b/tests/end_to_end_test/server_start_test.go @@ -147,6 +147,7 @@ func TestServerStart(t *testing.T) { // make sure root payload is valid JSON for the directory. 
var dummy map[string]any + err = json.Unmarshal(rootPayload, &dummy) require.NoError(t, err) @@ -256,10 +257,7 @@ func TestServerStartAsyncRepoConnect(t *testing.T) { } func TestServerCreateAndConnectViaAPI(t *testing.T) { - t.Parallel() - - //nolint:tenv - os.Setenv("KOPIA_UPGRADE_LOCK_ENABLED", "true") + t.Setenv("KOPIA_UPGRADE_LOCK_ENABLED", "true") ctx := testlogging.Context(t) @@ -445,6 +443,7 @@ func TestServerScheduling(t *testing.T) { emptyDir2 := testutil.TempDirectory(t) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir, "--override-hostname=fake-hostname", "--override-username=fake-username") e.RunAndExpectSuccess(t, "snapshot", "create", emptyDir1) @@ -492,7 +491,7 @@ func TestServerScheduling(t *testing.T) { // make sure we got some maintenance runs numRuns := len(miAfter.Runs["cleanup-logs"]) - len(miBefore.Runs["cleanup-logs"]) require.Greater(t, numRuns, 2) - require.Less(t, numRuns, 5) + require.Less(t, numRuns, 50) } func TestServerStartInsecure(t *testing.T) { diff --git a/tests/end_to_end_test/shallowrestore_test.go b/tests/end_to_end_test/shallowrestore_test.go index c663b3ddbc3..516b76bbbe5 100644 --- a/tests/end_to_end_test/shallowrestore_test.go +++ b/tests/end_to_end_test/shallowrestore_test.go @@ -786,6 +786,7 @@ func getShallowInfo(t *testing.T, srp string) (string, os.FileInfo) { t.Helper() const ENTRYTYPES = 3 + shallowinfos := make([]os.FileInfo, ENTRYTYPES) errors := make([]error, ENTRYTYPES) paths := make([]string, ENTRYTYPES) diff --git a/tests/end_to_end_test/snapshot_actions_test.go b/tests/end_to_end_test/snapshot_actions_test.go index 2b69a8915fc..2420cada283 100644 --- a/tests/end_to_end_test/snapshot_actions_test.go +++ b/tests/end_to_end_test/snapshot_actions_test.go @@ -383,6 +383,7 @@ func mustReadEnvFile(t *testing.T, fname string) map[string]string { verifyNoError(t, err) defer f.Close() + s := bufio.NewScanner(f) m := map[string]string{} 
diff --git a/tests/end_to_end_test/snapshot_create_test.go b/tests/end_to_end_test/snapshot_create_test.go index 3530bcad09a..968325dfdd1 100644 --- a/tests/end_to_end_test/snapshot_create_test.go +++ b/tests/end_to_end_test/snapshot_create_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "sort" "strings" "testing" @@ -514,6 +515,7 @@ func TestSnapshotCreateWithIgnore(t *testing.T) { } defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) e.RunAndExpectSuccess(t, "snapshot", "create", baseDir) sources := clitestutil.ListSnapshotsAndExpectSuccess(t, e) @@ -556,8 +558,8 @@ func TestSnapshotCreateAllWithManualSnapshot(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") - e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir1) e.RunAndExpectSuccess(t, "snapshot", "create", sharedTestDataDir2) @@ -584,6 +586,7 @@ func TestSnapshotCreateWithStdinStream(t *testing.T) { e := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) defer e.RunAndExpectSuccess(t, "repo", "disconnect") + e.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", e.RepoDir) // Create a temporary pipe file with test data @@ -643,10 +646,8 @@ func TestSnapshotCreateWithStdinStream(t *testing.T) { } func appendIfMissing(slice []string, i string) []string { - for _, ele := range slice { - if ele == i { - return slice - } + if slices.Contains(slice, i) { + return slice } return append(slice, i) diff --git a/tests/end_to_end_test/snapshot_fail_test.go b/tests/end_to_end_test/snapshot_fail_test.go index 3e40ce0ccdf..33b734eba72 100644 --- a/tests/end_to_end_test/snapshot_fail_test.go +++ 
b/tests/end_to_end_test/snapshot_fail_test.go @@ -2,6 +2,7 @@ package endtoend_test import ( "fmt" + "maps" "math/rand" "os" "path/filepath" @@ -351,13 +352,9 @@ func testPermissions( oldEnv := e.Environment e.Environment = map[string]string{} - for k, v := range oldEnv { - e.Environment[k] = v - } - for k, v := range snapshotCreateEnv { - e.Environment[k] = v - } + maps.Copy(e.Environment, oldEnv) + maps.Copy(e.Environment, snapshotCreateEnv) defer func() { e.Environment = oldEnv }() diff --git a/tests/endurance_test/endurance_test.go b/tests/endurance_test/endurance_test.go index 5c3c47e6b38..d3fe9cbb6d2 100644 --- a/tests/endurance_test/endurance_test.go +++ b/tests/endurance_test/endurance_test.go @@ -145,7 +145,7 @@ func actionSnapshotExisting(t *testing.T, e *testenv.CLITest, s *runnerState) { t.Helper() randomPath := s.dirs[rand.Intn(len(s.dirs))] - e.RunAndExpectSuccess(t, "snapshot", "create", randomPath) + e.RunAndExpectSuccess(t, "--no-auto-maintenance", "snapshot", "create", randomPath) s.snapshottedAnything = true } @@ -157,7 +157,7 @@ func actionSnapshotAll(t *testing.T, e *testenv.CLITest, s *runnerState) { return } - e.RunAndExpectSuccess(t, "snapshot", "create", "--all") + e.RunAndExpectSuccess(t, "--no-auto-maintenance", "snapshot", "create", "--all") } func actionSnapshotVerify(t *testing.T, e *testenv.CLITest, s *runnerState) { diff --git a/tests/os_snapshot_test/os_snapshot_nonwindows_test.go b/tests/os_snapshot_test/os_snapshot_nonwindows_test.go index 37cf10ed317..88ebf900d5b 100644 --- a/tests/os_snapshot_test/os_snapshot_nonwindows_test.go +++ b/tests/os_snapshot_test/os_snapshot_nonwindows_test.go @@ -1,4 +1,3 @@ //go:build !windows -// +build !windows package os_snapshot_test diff --git a/tests/recovery/blobmanipulator/blobmanipulator.go b/tests/recovery/blobmanipulator/blobmanipulator.go index e46802970dd..883ad706b1c 100644 --- a/tests/recovery/blobmanipulator/blobmanipulator.go +++ b/tests/recovery/blobmanipulator/blobmanipulator.go @@ 
-1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package blobmanipulator provides the framework for snapshot fix testing. package blobmanipulator @@ -100,7 +99,7 @@ func (bm *BlobManipulator) DeleteBlob(blobID string) error { log.Printf("Deleting BLOB %s", blobID) - _, _, err := bm.KopiaCommandRunner.Run("blob", "delete", blobID, "--advanced-commands=enabled") + _, _, err := bm.KopiaCommandRunner.Run("blob", "delete", blobID, "--dangerous-commands=enabled") if err != nil { return err } diff --git a/tests/recovery/recovery_test/main_test.go b/tests/recovery/recovery_test/main_test.go index 625cdbcee55..38ccc8cce59 100644 --- a/tests/recovery/recovery_test/main_test.go +++ b/tests/recovery/recovery_test/main_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package recovery diff --git a/tests/recovery/recovery_test/recovery_test.go b/tests/recovery/recovery_test/recovery_test.go index d0b5479b8ff..3820e2992ca 100644 --- a/tests/recovery/recovery_test/recovery_test.go +++ b/tests/recovery/recovery_test/recovery_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package recovery @@ -21,6 +20,7 @@ import ( "github.com/kopia/kopia/fs/localfs" "github.com/kopia/kopia/internal/diff" + "github.com/kopia/kopia/internal/testlogging" "github.com/kopia/kopia/tests/recovery/blobmanipulator" "github.com/kopia/kopia/tests/testenv" "github.com/kopia/kopia/tests/tools/kopiarunner" @@ -54,8 +54,9 @@ func TestSnapshotFix(t *testing.T) { t.FailNow() } + ctx := testlogging.Context(t) kopiaExe := os.Getenv("KOPIA_EXE") - cmd := exec.Command(kopiaExe, "maintenance", "run", "--full", "--force", "--safety", "none") + cmd := exec.CommandContext(ctx, kopiaExe, "maintenance", "run", "--full", "--force", "--safety", "none") err = cmd.Start() if err != nil { @@ -132,8 +133,9 @@ func TestSnapshotFixInvalidFiles(t *testing.T) { t.FailNow() } + ctx := testlogging.Context(t) kopiaExe 
:= os.Getenv("KOPIA_EXE") - cmd := exec.Command(kopiaExe, "maintenance", "run", "--full", "--force", "--safety", "none") + cmd := exec.CommandContext(ctx, kopiaExe, "maintenance", "run", "--full", "--force", "--safety", "none") err = cmd.Start() if err != nil { @@ -208,11 +210,12 @@ func TestConsistencyWhenKill9AfterModify(t *testing.T) { require.NoError(t, err) newDir := bm.PathToTakeSnapshot + ctx := testlogging.Context(t) // connect with repository with the environment configuration, otherwise it will display "ERROR open repository: repository is not connected.kopia connect repo". kopiaExe := os.Getenv("KOPIA_EXE") - cmd := exec.Command(kopiaExe, "repo", "connect", "filesystem", "--path="+dataRepoPath, "--content-cache-size-mb", "500", "--metadata-cache-size-mb", "500", "--no-check-for-updates") + cmd := exec.CommandContext(ctx, kopiaExe, "repo", "connect", "filesystem", "--path="+dataRepoPath, "--content-cache-size-mb", "500", "--metadata-cache-size-mb", "500", "--no-check-for-updates") env := []string{"KOPIA_PASSWORD=" + testenv.TestRepoPassword} cmd.Env = append(os.Environ(), env...) 
@@ -221,7 +224,7 @@ func TestConsistencyWhenKill9AfterModify(t *testing.T) { t.Log(string(o)) // create snapshot with StderrPipe - cmd = exec.Command(kopiaExe, "snap", "create", newDir, "--json", "--parallel=1") + cmd = exec.CommandContext(ctx, kopiaExe, "snap", "create", newDir, "--json", "--parallel=1") // kill the kopia command before it exits t.Logf("Kill the kopia command before it exits:") @@ -255,11 +258,8 @@ func killOnCondition(t *testing.T, cmd *exec.Cmd) { var wg sync.WaitGroup // Add a WaitGroup counter for the first goroutine - wg.Add(1) - - go func() { - defer wg.Done() + wg.Go(func() { // Create a scanner to read from stderrPipe scanner := bufio.NewScanner(stderrPipe) scanner.Split(bufio.ScanLines) @@ -276,7 +276,7 @@ func killOnCondition(t *testing.T, cmd *exec.Cmd) { break } } - }() + }) // Start the command err = cmd.Start() diff --git a/tests/repository_stress_test/repository_stress_test.go b/tests/repository_stress_test/repository_stress_test.go index 1ac35d59b4b..d52d9ad570e 100644 --- a/tests/repository_stress_test/repository_stress_test.go +++ b/tests/repository_stress_test/repository_stress_test.go @@ -439,9 +439,9 @@ func compact(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.R log.Debug("compact()") - return errors.Wrapf( - r.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}), - "compact()") + _, err := r.ContentManager().CompactIndexes(ctx, indexblob.CompactOptions{MaxSmallBlobs: 1}) + + return errors.Wrapf(err, "compact()") } func flush(ctx context.Context, r repo.DirectRepositoryWriter, rs *repomodel.RepositorySession, log logging.Logger) error { diff --git a/tests/robustness/checker/checker.go b/tests/robustness/checker/checker.go index 08bc2ccbb45..4174432214e 100644 --- a/tests/robustness/checker/checker.go +++ b/tests/robustness/checker/checker.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package checker defines the framework for creating 
and restoring snapshots // with a data integrity check diff --git a/tests/robustness/engine/action.go b/tests/robustness/engine/action.go index 7f4e3dd3d00..afaf59b3da5 100644 --- a/tests/robustness/engine/action.go +++ b/tests/robustness/engine/action.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package engine @@ -238,21 +237,21 @@ func writeRandomFilesAction(ctx context.Context, e *Engine, opts map[string]stri out, err = e.FileWriter.WriteRandomFiles(ctx, opts) setLogEntryCmdOpts(l, out) - return + return out, err } func deleteRandomSubdirectoryAction(ctx context.Context, e *Engine, opts map[string]string, l *LogEntry) (out map[string]string, err error) { out, err = e.FileWriter.DeleteRandomSubdirectory(ctx, opts) setLogEntryCmdOpts(l, out) - return + return out, err } func deleteDirectoryContentsAction(ctx context.Context, e *Engine, opts map[string]string, l *LogEntry) (out map[string]string, err error) { out, err = e.FileWriter.DeleteDirectoryContents(ctx, opts) setLogEntryCmdOpts(l, out) - return + return out, err } func restoreIntoDataDirectoryAction(ctx context.Context, e *Engine, opts map[string]string, l *LogEntry) (out map[string]string, err error) { diff --git a/tests/robustness/engine/engine.go b/tests/robustness/engine/engine.go index 70e3e2f98b6..8c1e4ff64c8 100644 --- a/tests/robustness/engine/engine.go +++ b/tests/robustness/engine/engine.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package engine provides the framework for a snapshot repository testing engine package engine diff --git a/tests/robustness/engine/engine_test.go b/tests/robustness/engine/engine_test.go index 59dbf3d3453..d50db604f15 100644 --- a/tests/robustness/engine/engine_test.go +++ b/tests/robustness/engine/engine_test.go @@ -1,7 +1,5 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 -// Package engine provides the framework for a snapshot repository testing engine package 
engine import ( diff --git a/tests/robustness/engine/log.go b/tests/robustness/engine/log.go index 1545af09615..5f7709b7ffd 100644 --- a/tests/robustness/engine/log.go +++ b/tests/robustness/engine/log.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package engine diff --git a/tests/robustness/engine/metadata.go b/tests/robustness/engine/metadata.go index b1bb772324e..1b3163239fe 100644 --- a/tests/robustness/engine/metadata.go +++ b/tests/robustness/engine/metadata.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package engine diff --git a/tests/robustness/engine/stats.go b/tests/robustness/engine/stats.go index 6e694a3c463..79e0b77beb3 100644 --- a/tests/robustness/engine/stats.go +++ b/tests/robustness/engine/stats.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package engine diff --git a/tests/robustness/errors.go b/tests/robustness/errors.go index 198856e5db9..9fed1a363d5 100644 --- a/tests/robustness/errors.go +++ b/tests/robustness/errors.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness diff --git a/tests/robustness/filewriter.go b/tests/robustness/filewriter.go index 15b31aff5c2..c9d516f6905 100644 --- a/tests/robustness/filewriter.go +++ b/tests/robustness/filewriter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness diff --git a/tests/robustness/fiofilewriter/fio_filewriter.go b/tests/robustness/fiofilewriter/fio_filewriter.go index b57122a6a33..00ad223c03a 100644 --- a/tests/robustness/fiofilewriter/fio_filewriter.go +++ b/tests/robustness/fiofilewriter/fio_filewriter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package fiofilewriter provides a FileWriter based on FIO. 
package fiofilewriter @@ -8,6 +7,7 @@ import ( "context" "errors" "log" + "maps" "math/rand" "strconv" "syscall" @@ -147,13 +147,9 @@ func (fw *FileWriter) WriteRandomFiles(ctx context.Context, opts map[string]stri log.Printf("Writing files at depth %v (fileSize: %v-%v, numFiles: %v, blockSize: %v, dedupPcnt: %v, ioLimit: %v)\n", dirDepth, minFileSizeB, maxFileSizeB, numFiles, blockSize, dedupPcnt, ioLimit) retOpts := make(map[string]string, len(opts)) - for k, v := range opts { - retOpts[k] = v - } - for k, v := range fioOpts { - retOpts[k] = v - } + maps.Copy(retOpts, opts) + maps.Copy(retOpts, fioOpts) retOpts["dirDepth"] = strconv.Itoa(dirDepth) retOpts["relBasePath"] = relBasePath @@ -179,11 +175,7 @@ func (fw *FileWriter) DeleteRandomSubdirectory(ctx context.Context, opts map[str log.Printf("Deleting directory at depth %v\n", dirDepth) - retOpts := make(map[string]string, len(opts)) - for k, v := range opts { - retOpts[k] = v - } - + retOpts := maps.Clone(opts) retOpts["dirDepth"] = strconv.Itoa(dirDepth) err := fw.Runner.DeleteDirAtDepth("", dirDepth) @@ -212,11 +204,7 @@ func (fw *FileWriter) DeleteDirectoryContents(ctx context.Context, opts map[stri log.Printf("Deleting %d%% of directory contents at depth %v\n", pcnt, dirDepth) - retOpts := make(map[string]string, len(opts)) - for k, v := range opts { - retOpts[k] = v - } - + retOpts := maps.Clone(opts) retOpts["dirDepth"] = strconv.Itoa(dirDepth) retOpts["percent"] = strconv.Itoa(pcnt) diff --git a/tests/robustness/multiclient_test/framework/client.go b/tests/robustness/multiclient_test/framework/client.go index 7704bcdb368..266b3c9cc7d 100644 --- a/tests/robustness/multiclient_test/framework/client.go +++ b/tests/robustness/multiclient_test/framework/client.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package framework diff --git a/tests/robustness/multiclient_test/framework/filewriter.go b/tests/robustness/multiclient_test/framework/filewriter.go index 
8ae55492454..fbdafeedbf3 100644 --- a/tests/robustness/multiclient_test/framework/filewriter.go +++ b/tests/robustness/multiclient_test/framework/filewriter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package framework diff --git a/tests/robustness/multiclient_test/framework/framework.go b/tests/robustness/multiclient_test/framework/framework.go index f05ebcd6c0d..e7610d25d30 100644 --- a/tests/robustness/multiclient_test/framework/framework.go +++ b/tests/robustness/multiclient_test/framework/framework.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package framework contains tools to enable multiple clients to connect to a // central repository server and run robustness tests concurrently. diff --git a/tests/robustness/multiclient_test/framework/harness.go b/tests/robustness/multiclient_test/framework/harness.go index 0c7d7662e1f..605a8d8b8ca 100644 --- a/tests/robustness/multiclient_test/framework/harness.go +++ b/tests/robustness/multiclient_test/framework/harness.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package framework @@ -303,6 +302,7 @@ func (th *TestHarness) GetDirsToLog(ctx context.Context) []string { } var dirList []string + dirList = append(dirList, th.dataRepoPath, // repo under test base dir th.metaRepoPath, // metadata repository base dir diff --git a/tests/robustness/multiclient_test/framework/snapshotter.go b/tests/robustness/multiclient_test/framework/snapshotter.go index 39b690d4ea9..c212ce19440 100644 --- a/tests/robustness/multiclient_test/framework/snapshotter.go +++ b/tests/robustness/multiclient_test/framework/snapshotter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package framework diff --git a/tests/robustness/multiclient_test/main_test.go b/tests/robustness/multiclient_test/main_test.go index a0b59e76228..5502512a7b7 100644 --- 
a/tests/robustness/multiclient_test/main_test.go +++ b/tests/robustness/multiclient_test/main_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package multiclienttest diff --git a/tests/robustness/multiclient_test/multiclient_test.go b/tests/robustness/multiclient_test/multiclient_test.go index 08304caac49..a1f1c477e37 100644 --- a/tests/robustness/multiclient_test/multiclient_test.go +++ b/tests/robustness/multiclient_test/multiclient_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package multiclienttest diff --git a/tests/robustness/multiclient_test/storagestats/storage_stats.go b/tests/robustness/multiclient_test/storagestats/storage_stats.go index f1faf605bbc..b6863e279b3 100644 --- a/tests/robustness/multiclient_test/storagestats/storage_stats.go +++ b/tests/robustness/multiclient_test/storagestats/storage_stats.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package storagestats contains logging mechanism // log disk space consumed by directories created by diff --git a/tests/robustness/options.go b/tests/robustness/options.go index 7af37e1ed56..2a3fda91afd 100644 --- a/tests/robustness/options.go +++ b/tests/robustness/options.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness diff --git a/tests/robustness/pathlock/path_lock_test.go b/tests/robustness/pathlock/path_lock_test.go index 214c2b5057c..9c810408f39 100644 --- a/tests/robustness/pathlock/path_lock_test.go +++ b/tests/robustness/pathlock/path_lock_test.go @@ -116,11 +116,8 @@ func TestPathLockBasic(t *testing.T) { var path2Err error wg := new(sync.WaitGroup) - wg.Add(1) - - go func() { - defer wg.Done() + wg.Go(func() { lock2, err := pl.Lock(tc.path2) if err != nil { path2Err = err @@ -128,7 +125,7 @@ func TestPathLockBasic(t *testing.T) { } lock2.Unlock() - }() + }) // Wait until the internal atomic counter 
increments. // That will only happen once the Lock call to path2 executes @@ -281,11 +278,7 @@ func TestPathLockRace(t *testing.T) { numGoroutines := 100 for range numGoroutines { - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { // Pick from three different path values that should all be // covered by the same lock. path := "/some/path/a/b/c" @@ -303,8 +296,9 @@ func TestPathLockRace(t *testing.T) { } counter++ + lock.Unlock() - }() + }) } wg.Wait() diff --git a/tests/robustness/persister.go b/tests/robustness/persister.go index f66382fa715..76fe91b5494 100644 --- a/tests/robustness/persister.go +++ b/tests/robustness/persister.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness diff --git a/tests/robustness/robustness_test/main_test.go b/tests/robustness/robustness_test/main_test.go index 3a799245aed..4c84ba98a09 100644 --- a/tests/robustness/robustness_test/main_test.go +++ b/tests/robustness/robustness_test/main_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness @@ -247,7 +246,7 @@ func (th *kopiaRobustnessTestHarness) cleanup(ctx context.Context) (retErr error os.RemoveAll(th.baseDirPath) } - return + return retErr } func (th *kopiaRobustnessTestHarness) getUpgrader() bool { diff --git a/tests/robustness/robustness_test/robustness_test.go b/tests/robustness/robustness_test/robustness_test.go index c1d2bd340d9..c0cbe9121f5 100644 --- a/tests/robustness/robustness_test/robustness_test.go +++ b/tests/robustness/robustness_test/robustness_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package robustness diff --git a/tests/robustness/snapmeta/index.go b/tests/robustness/snapmeta/index.go index a4debcf93f0..84f59dcbad1 100644 --- a/tests/robustness/snapmeta/index.go +++ b/tests/robustness/snapmeta/index.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 
package snapmeta diff --git a/tests/robustness/snapmeta/index_test.go b/tests/robustness/snapmeta/index_test.go index 9805bab9f6b..c5a242d6349 100644 --- a/tests/robustness/snapmeta/index_test.go +++ b/tests/robustness/snapmeta/index_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/kopia_connector.go b/tests/robustness/snapmeta/kopia_connector.go index 460786bef59..2afa40e61d2 100644 --- a/tests/robustness/snapmeta/kopia_connector.go +++ b/tests/robustness/snapmeta/kopia_connector.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/kopia_connector_test.go b/tests/robustness/snapmeta/kopia_connector_test.go index 84e6b5eba3c..4815133e202 100644 --- a/tests/robustness/snapmeta/kopia_connector_test.go +++ b/tests/robustness/snapmeta/kopia_connector_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/kopia_persister.go b/tests/robustness/snapmeta/kopia_persister.go index 39146cfdc55..103b10dd255 100644 --- a/tests/robustness/snapmeta/kopia_persister.go +++ b/tests/robustness/snapmeta/kopia_persister.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package snapmeta provides Kopia implementations of Persister and Snapshotter. 
package snapmeta diff --git a/tests/robustness/snapmeta/kopia_persister_light.go b/tests/robustness/snapmeta/kopia_persister_light.go index f223d6ab07f..ad97df8a9a6 100644 --- a/tests/robustness/snapmeta/kopia_persister_light.go +++ b/tests/robustness/snapmeta/kopia_persister_light.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta @@ -112,6 +111,7 @@ func (kpl *KopiaPersisterLight) Cleanup() { func (kpl *KopiaPersisterLight) waitFor(key string) { kpl.c.L.Lock() + for kpl.keysInProcess[key] { kpl.c.Wait() } diff --git a/tests/robustness/snapmeta/kopia_persister_light_test.go b/tests/robustness/snapmeta/kopia_persister_light_test.go index 5000430a0fc..509bff06519 100644 --- a/tests/robustness/snapmeta/kopia_persister_light_test.go +++ b/tests/robustness/snapmeta/kopia_persister_light_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/kopia_snapshotter.go b/tests/robustness/snapmeta/kopia_snapshotter.go index eefd791adc4..6d6b4823778 100644 --- a/tests/robustness/snapmeta/kopia_snapshotter.go +++ b/tests/robustness/snapmeta/kopia_snapshotter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta @@ -93,14 +92,14 @@ func (ks *KopiaSnapshotter) ServerFingerprint() string { func (ks *KopiaSnapshotter) CreateSnapshot(ctx context.Context, sourceDir string, opts map[string]string) (snapID string, fingerprint []byte, snapStats *robustness.CreateSnapshotStats, err error) { fingerprint, err = ks.comparer.Gather(ctx, sourceDir, opts) if err != nil { - return + return snapID, fingerprint, snapStats, err } ssStart := clock.Now() snapID, err = ks.snap.CreateSnapshot(sourceDir) if err != nil { - return + return snapID, fingerprint, snapStats, err } ssEnd := clock.Now() @@ -110,7 +109,7 @@ func (ks *KopiaSnapshotter) CreateSnapshot(ctx context.Context, sourceDir string SnapEndTime: 
ssEnd, } - return + return snapID, fingerprint, snapStats, err } // RestoreSnapshot restores the snapshot with the given ID to the provided restore directory. It returns @@ -118,7 +117,7 @@ func (ks *KopiaSnapshotter) CreateSnapshot(ctx context.Context, sourceDir string func (ks *KopiaSnapshotter) RestoreSnapshot(ctx context.Context, snapID, restoreDir string, opts map[string]string) (fingerprint []byte, err error) { err = ks.snap.RestoreSnapshot(snapID, restoreDir) if err != nil { - return + return fingerprint, err } return ks.comparer.Gather(ctx, restoreDir, opts) diff --git a/tests/robustness/snapmeta/kopia_snapshotter_upgrade_test.go b/tests/robustness/snapmeta/kopia_snapshotter_upgrade_test.go index f068a4127d5..e893f4251ca 100644 --- a/tests/robustness/snapmeta/kopia_snapshotter_upgrade_test.go +++ b/tests/robustness/snapmeta/kopia_snapshotter_upgrade_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/simple.go b/tests/robustness/snapmeta/simple.go index c6ecbc870e0..c3c0cac3b88 100644 --- a/tests/robustness/snapmeta/simple.go +++ b/tests/robustness/snapmeta/simple.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapmeta/simple_test.go b/tests/robustness/snapmeta/simple_test.go index cd36869cd62..5ed2cefcd6b 100644 --- a/tests/robustness/snapmeta/simple_test.go +++ b/tests/robustness/snapmeta/simple_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package snapmeta diff --git a/tests/robustness/snapshotter.go b/tests/robustness/snapshotter.go index e2df9c26d17..74cf3d9fa84 100644 --- a/tests/robustness/snapshotter.go +++ b/tests/robustness/snapshotter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package robustness contains tests that validate data stability over time. 
// The package, while designed for Kopia, is written with abstractions that diff --git a/tests/socketactivation_test/socketactivation_test.go b/tests/socketactivation_test/socketactivation_test.go index 90f9bece267..b662453f546 100644 --- a/tests/socketactivation_test/socketactivation_test.go +++ b/tests/socketactivation_test/socketactivation_test.go @@ -1,5 +1,4 @@ -//go:build linux -// +build linux +//go:build linux || darwin package socketactivation_test @@ -8,11 +7,14 @@ import ( "os" "strconv" "strings" + "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/kopia/kopia/internal/testlogging" "github.com/kopia/kopia/internal/testutil" "github.com/kopia/kopia/tests/testenv" ) @@ -36,31 +38,26 @@ func TestServerControlSocketActivated(t *testing.T) { // The KOPIA_EXE wrapper will set the LISTEN_PID variable for us env.Environment["LISTEN_FDS"] = "1" - l1, err := net.Listen("tcp", ":0") + ctx := testlogging.Context(t) + l1, err := (&net.ListenConfig{}).Listen(ctx, "tcp", ":0") + require.NoError(t, err, "Failed to open Listener") - defer func() { - l1.Close() - }() + t.Cleanup(func() { l1.Close() }) port = testutil.EnsureType[*net.TCPAddr](t, l1.Addr()).Port t.Logf("Activating socket on port %v", port) + l1File, err := testutil.EnsureType[*net.TCPListener](t, l1).File() + require.NoError(t, err, "failed to get filehandle for socket") + serverStarted := make(chan struct{}) serverStopped := make(chan struct{}) var sp testutil.ServerParameters go func() { - l1File, err := testutil.EnsureType[*net.TCPListener](t, l1).File() - if err != nil { - t.Log("ERROR: Failed to get filehandle for socket") - close(serverStarted) - - return - } - runner.ExtraFiles = append(runner.ExtraFiles, l1File) wait, _ := env.RunAndProcessStderr(t, sp.ProcessOutput, "server", "start", "--insecure", "--random-server-control-password", "--address=127.0.0.1:0") @@ -78,15 +75,19 @@ func TestServerControlSocketActivated(t 
*testing.T) { require.NotEmpty(t, sp.BaseURL, "Failed to start server") t.Logf("server started on %v", sp.BaseURL) - case <-time.After(5 * time.Second): + case <-time.After(15 * time.Second): t.Fatal("server did not start in time") } require.Contains(t, sp.BaseURL, ":"+strconv.Itoa(port)) - lines := env.RunAndExpectSuccess(t, "server", "status", "--address", "http://127.0.0.1:"+strconv.Itoa(port), "--server-control-password", sp.ServerControlPassword, "--remote") - require.Len(t, lines, 1) - require.Contains(t, lines, "IDLE: another-user@another-host:"+dir0) + checkServerStatusFn := func(collect *assert.CollectT) { + lines := env.RunAndExpectSuccess(t, "server", "status", "--address", "http://127.0.0.1:"+strconv.Itoa(port), "--server-control-password", sp.ServerControlPassword, "--remote") + require.Len(collect, lines, 1) + require.Contains(collect, lines, "IDLE: another-user@another-host:"+dir0) + } + + require.EventuallyWithT(t, checkServerStatusFn, 30*time.Second, 2*time.Second, "could not get server status, perhaps it was not listening on the control endpoint yet?") env.RunAndExpectSuccess(t, "server", "shutdown", "--address", sp.BaseURL, "--server-control-password", sp.ServerControlPassword) @@ -100,8 +101,6 @@ func TestServerControlSocketActivated(t *testing.T) { } func TestServerControlSocketActivatedTooManyFDs(t *testing.T) { - var port int - serverExe := os.Getenv("KOPIA_SERVER_EXE") if serverExe == "" { t.Skip("skipping socket-activation test") @@ -111,57 +110,64 @@ func TestServerControlSocketActivatedTooManyFDs(t *testing.T) { env := testenv.NewCLITest(t, testenv.RepoFormatNotImportant, runner) env.RunAndExpectSuccess(t, "repo", "create", "filesystem", "--path", env.RepoDir, "--override-username=another-user", "--override-hostname=another-host") - // The KOPIA_EXE wrapper will set the LISTEN_PID variable for us - env.Environment["LISTEN_FDS"] = "2" - l1, err := net.Listen("tcp", ":0") + // create 2 file descriptor for a single socket and pass the 
descriptors to the server + ctx := testlogging.Context(t) + l1, err := (&net.ListenConfig{}).Listen(ctx, "tcp", ":0") + require.NoError(t, err, "Failed to open Listener") - defer func() { - l1.Close() - }() + t.Cleanup(func() { l1.Close() }) - port = testutil.EnsureType[*net.TCPAddr](t, l1.Addr()).Port + port := testutil.EnsureType[*net.TCPAddr](t, l1.Addr()).Port - t.Logf("Activating socket on port %v", port) + t.Logf("activation socket port %v", port) - serverStarted := make(chan []string) + listener := testutil.EnsureType[*net.TCPListener](t, l1) - go func() { - listener := testutil.EnsureType[*net.TCPListener](t, l1) + l1File, err := listener.File() + require.NoError(t, err, "failed to get 1st filehandle for socket") - l1File, err := listener.File() - if err != nil { - t.Log("Failed to get filehandle for socket") - close(serverStarted) + t.Cleanup(func() { l1File.Close() }) - return - } + l2File, err := listener.File() + require.NoError(t, err, "failed to get 2nd filehandle for socket") + + t.Cleanup(func() { l2File.Close() }) + + runner.ExtraFiles = append(runner.ExtraFiles, l1File, l2File) + // The KOPIA_EXE wrapper will set the LISTEN_PID variable for us + env.Environment["LISTEN_FDS"] = "2" - l2File, err := listener.File() - if err != nil { - t.Log("Failed to get 2nd filehandle for socket") - close(serverStarted) + var gotExpectedErrorMessage atomic.Bool - return + stderrAsyncCallback := func(line string) { + if strings.Contains(line, "Too many activated sockets found. Expected 1, got 2") { + gotExpectedErrorMessage.Store(true) } + } - runner.ExtraFiles = append(runner.ExtraFiles, l1File, l2File) + // although the server is expected to stop quickly with an error, the server's + // stderr is processed async to avoid test deadlocks if the server continues + // to run and does not exit. 
+ wait, kill := env.RunAndProcessStderrAsync(t, func(string) bool { return false }, stderrAsyncCallback, "server", "start", "--insecure", "--random-server-control-password", "--address=127.0.0.1:0") - _, stderr := env.RunAndExpectFailure(t, "server", "start", "--insecure", "--random-server-control-password", "--address=127.0.0.1:0") + t.Cleanup(kill) - l1File.Close() - l2File.Close() - serverStarted <- stderr - close(serverStarted) + serverStopped := make(chan error) + go func() { + defer close(serverStopped) + + serverStopped <- wait() }() select { - case stderr := <-serverStarted: - require.Contains(t, strings.Join(stderr, ""), "Too many activated sockets found. Expected 1, got 2") + case err := <-serverStopped: + require.Error(t, err, "server did not exit with an error") t.Log("Done") - - case <-time.After(5 * time.Second): + case <-time.After(30 * time.Second): t.Fatal("server did not exit in time") } + + require.True(t, gotExpectedErrorMessage.Load(), "expected server's stderr to contain a line along the lines of 'Too many activated sockets ...'") } diff --git a/tests/testdirtree/testdirtree.go b/tests/testdirtree/testdirtree.go index 6d0ba8f2372..bf9f9a92e84 100644 --- a/tests/testdirtree/testdirtree.go +++ b/tests/testdirtree/testdirtree.go @@ -9,6 +9,7 @@ import ( "math/rand" "os" "path/filepath" + "strings" "sync/atomic" "testing" "unicode" @@ -43,7 +44,7 @@ func generateHexString(l int) string { func generateUnicodeString(rangeMin, rangeMax, l int) string { // generate a random unicode string within a defined range - s := "" + var s strings.Builder for i := 0; i < l; { c := rand.Intn(rangeMax-rangeMin+1) + rangeMin @@ -51,12 +52,13 @@ func generateUnicodeString(rangeMin, rangeMax, l int) string { // IsLetter & IsDigit function as a sanity check to prevent writing punctuation/control characters // ValidRune is a sanity check for macOS since APFS can't handle invalid utf-8 and will error out if (unicode.IsLetter(r) || unicode.IsDigit(r)) && 
utf8.ValidRune(r) { - s += string(r) + s.WriteRune(r) + i++ } } - return s + return s.String() } func randomUnicodeName(l int) string { diff --git a/tests/testenv/cli_exe_runner.go b/tests/testenv/cli_exe_runner.go index f1895455278..b1cc73ea8b5 100644 --- a/tests/testenv/cli_exe_runner.go +++ b/tests/testenv/cli_exe_runner.go @@ -21,10 +21,10 @@ type CLIExeRunner struct { } // Start implements CLIRunner. -func (e *CLIExeRunner) Start(t *testing.T, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) { - t.Helper() +func (e *CLIExeRunner) Start(tb testing.TB, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) { + tb.Helper() - c := exec.Command(e.Exe, append([]string{ + c := exec.CommandContext(ctx, e.Exe, append([]string{ "--log-dir", e.LogsDir, }, args...)...) @@ -36,12 +36,12 @@ func (e *CLIExeRunner) Start(t *testing.T, ctx context.Context, args []string, e stdoutPipe, err := c.StdoutPipe() if err != nil { - t.Fatalf("can't set up stdout pipe reader: %v", err) + tb.Fatalf("can't set up stdout pipe reader: %v", err) } stderrPipe, err := c.StderrPipe() if err != nil { - t.Fatalf("can't set up stderr pipe reader: %v", err) + tb.Fatalf("can't set up stderr pipe reader: %v", err) } c.Stdin = e.NextCommandStdin @@ -49,7 +49,7 @@ func (e *CLIExeRunner) Start(t *testing.T, ctx context.Context, args []string, e c.ExtraFiles = e.ExtraFiles if err := c.Start(); err != nil { - t.Fatalf("unable to start: %v", err) + tb.Fatalf("unable to start: %v", err) } return stdoutPipe, stderrPipe, c.Wait, func(sig os.Signal) { @@ -67,8 +67,8 @@ func (e *CLIExeRunner) Start(t *testing.T, ctx context.Context, args []string, e // for each. The kopia executable must be passed via KOPIA_EXE environment variable. 
The test // will be skipped if it's not provided (unless running inside an IDE in which case system-wide // `kopia` will be used by default). -func NewExeRunner(t *testing.T) *CLIExeRunner { - t.Helper() +func NewExeRunner(tb testing.TB) *CLIExeRunner { + tb.Helper() exe := os.Getenv("KOPIA_EXE") if exe == "" { @@ -76,22 +76,22 @@ func NewExeRunner(t *testing.T) *CLIExeRunner { // we're launched from VSCode, use system-installed kopia executable. exe = "kopia" } else { - t.Skip() + tb.Skip() } } - return NewExeRunnerWithBinary(t, exe) + return NewExeRunnerWithBinary(tb, exe) } // NewExeRunnerWithBinary returns a CLIRunner that will execute kopia commands by launching subprocesses // for each. -func NewExeRunnerWithBinary(t *testing.T, exe string) *CLIExeRunner { - t.Helper() +func NewExeRunnerWithBinary(tb testing.TB, exe string) *CLIExeRunner { + tb.Helper() // unset environment variables that disrupt tests when passed to subprocesses. os.Unsetenv("KOPIA_PASSWORD") - logsDir := testutil.TempLogDirectory(t) + logsDir := testutil.TempLogDirectory(tb) return &CLIExeRunner{ Exe: filepath.FromSlash(exe), diff --git a/tests/testenv/cli_inproc_runner.go b/tests/testenv/cli_inproc_runner.go index a2696aebcbd..e7042a97d44 100644 --- a/tests/testenv/cli_inproc_runner.go +++ b/tests/testenv/cli_inproc_runner.go @@ -27,11 +27,11 @@ type CLIInProcRunner struct { } // Start implements CLIRunner. 
-func (e *CLIInProcRunner) Start(t *testing.T, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) { - t.Helper() +func (e *CLIInProcRunner) Start(tb testing.TB, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) { + tb.Helper() a := cli.NewApp() - a.AdvancedCommands = "enabled" + a.DangerousCommands = "enabled" envPrefix := fmt.Sprintf("T%v_", atomic.AddInt32(envPrefixCounter, 1)) a.SetEnvNamePrefixForTesting(envPrefix) diff --git a/tests/testenv/cli_test_env.go b/tests/testenv/cli_test_env.go index e729adacd24..0d4c19664cb 100644 --- a/tests/testenv/cli_test_env.go +++ b/tests/testenv/cli_test_env.go @@ -34,7 +34,7 @@ const ( // CLIRunner encapsulates running kopia subcommands for testing purposes. // It supports implementations that use subprocesses or in-process invocations. type CLIRunner interface { - Start(t *testing.T, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) + Start(tb testing.TB, ctx context.Context, args []string, env map[string]string) (stdout, stderr io.Reader, wait func() error, interrupt func(os.Signal)) } // CLITest encapsulates state for a CLI-based test. @@ -66,9 +66,9 @@ type CLITest struct { var RepoFormatNotImportant []string // NewCLITest creates a new instance of *CLITest. 
-func NewCLITest(t *testing.T, repoCreateFlags []string, runner CLIRunner) *CLITest { - t.Helper() - configDir := testutil.TempDirectory(t) +func NewCLITest(tb testing.TB, repoCreateFlags []string, runner CLIRunner) *CLITest { + tb.Helper() + configDir := testutil.TempDirectory(tb) // unset global environment variable that may interfere with the test os.Unsetenv("KOPIA_METRICS_PUSH_ADDR") @@ -99,9 +99,9 @@ func NewCLITest(t *testing.T, repoCreateFlags []string, runner CLIRunner) *CLITe } return &CLITest{ - RunContext: testsender.CaptureMessages(testlogging.Context(t)), + RunContext: testsender.CaptureMessages(testlogging.Context(tb)), startTime: clock.Now(), - RepoDir: testutil.TempDirectory(t), + RepoDir: testutil.TempDirectory(tb), ConfigDir: configDir, fixedArgs: fixedArgs, DefaultRepositoryCreateFlags: formatFlags, @@ -113,44 +113,44 @@ func NewCLITest(t *testing.T, repoCreateFlags []string, runner CLIRunner) *CLITe } // RunAndExpectSuccess runs the given command, expects it to succeed and returns its output lines. -func (e *CLITest) RunAndExpectSuccess(t *testing.T, args ...string) []string { - t.Helper() +func (e *CLITest) RunAndExpectSuccess(tb testing.TB, args ...string) []string { + tb.Helper() - stdout, _, err := e.Run(t, false, args...) - require.NoError(t, err, "'kopia %v' failed", strings.Join(args, " ")) + stdout, _, err := e.Run(tb, false, args...) + require.NoError(tb, err, "'kopia %v' failed", strings.Join(args, " ")) return stdout } // TweakFile writes a xor-ed byte at a random point in a file. Used to simulate file corruption. 
-func (e *CLITest) TweakFile(t *testing.T, dirn, fglob string) { - t.Helper() +func (e *CLITest) TweakFile(tb testing.TB, dirn, fglob string) { + tb.Helper() const RwUserGroupOther = 0o666 // find a file within the repository to corrupt mch, err := fs.Glob(os.DirFS(dirn), fglob) - require.NoError(t, err) - require.NotEmpty(t, mch) + require.NoError(tb, err) + require.NotEmpty(tb, mch) // grab a random file in the directory dirn fn := mch[rand.Intn(len(mch))] f, err := os.OpenFile(path.Join(dirn, fn), os.O_RDWR, os.FileMode(RwUserGroupOther)) - require.NoError(t, err) + require.NoError(tb, err) // find the length of the file, then seek to a random location l, err := f.Seek(0, io.SeekEnd) - require.NoError(t, err) + require.NoError(tb, err) i := rand.Int63n(l) bs := [1]byte{} _, err = f.ReadAt(bs[:], i) - require.NoError(t, err) + require.NoError(tb, err) // write the byte _, err = f.WriteAt([]byte{^bs[0]}, i) - require.NoError(t, err) + require.NoError(tb, err) } func (e *CLITest) SetLogOutput(enable bool, prefix string) { @@ -172,11 +172,11 @@ func (e *CLITest) getLogOutputPrefix() (string, bool) { return e.logOutputPrefix, os.Getenv("KOPIA_TEST_LOG_OUTPUT") != "" || e.logOutputEnabled } -// RunAndProcessStderr runs the given command, and streams its output line-by-line to a given function until it returns false. -func (e *CLITest) RunAndProcessStderr(t *testing.T, callback func(line string) bool, args ...string) (wait func() error, kill func()) { - t.Helper() +// RunAndProcessStderr runs the given command, and streams its stderr line-by-line to stderrCallback until it returns false. +func (e *CLITest) RunAndProcessStderr(tb testing.TB, stderrCallback func(line string) bool, args ...string) (wait func() error, kill func()) { + tb.Helper() - wait, interrupt := e.RunAndProcessStderrInt(t, callback, nil, args...) + wait, interrupt := e.RunAndProcessStderrInt(tb, stderrCallback, nil, args...) 
kill = func() { interrupt(os.Kill) } @@ -184,11 +184,11 @@ func (e *CLITest) RunAndProcessStderr(t *testing.T, callback func(line string) b return wait, kill } -// RunAndProcessStderrAsync runs the given command, and streams its output line-by-line to a given function until it returns false. -func (e *CLITest) RunAndProcessStderrAsync(t *testing.T, callback func(line string) bool, asyncCallback func(line string), args ...string) (wait func() error, kill func()) { - t.Helper() +// RunAndProcessStderrAsync runs the given command, and streams its stderr line-by-line stderrCallback until it returns false. +func (e *CLITest) RunAndProcessStderrAsync(tb testing.TB, stderrCallback func(line string) bool, stderrAsyncCallback func(line string), args ...string) (wait func() error, kill func()) { + tb.Helper() - wait, interrupt := e.RunAndProcessStderrInt(t, callback, asyncCallback, args...) + wait, interrupt := e.RunAndProcessStderrInt(tb, stderrCallback, stderrAsyncCallback, args...) kill = func() { interrupt(os.Kill) } @@ -196,12 +196,14 @@ func (e *CLITest) RunAndProcessStderrAsync(t *testing.T, callback func(line stri return wait, kill } -// RunAndProcessStderrInt runs the given command, and streams its output -// line-by-line to outputCallback until it returns false. -func (e *CLITest) RunAndProcessStderrInt(t *testing.T, outputCallback func(line string) bool, asyncCallback func(line string), args ...string) (wait func() error, interrupt func(os.Signal)) { - t.Helper() +// RunAndProcessStderrInt runs the given command, and streams its stderr +// line-by-line to stderrCallback until it returns false. The remaining lines +// from stderr, if any, are asynchronously sent line-by-line to +// stderrAsyncCallback. 
+func (e *CLITest) RunAndProcessStderrInt(tb testing.TB, stderrCallback func(line string) bool, stderrAsyncCallback func(line string), args ...string) (wait func() error, interrupt func(os.Signal)) { + tb.Helper() - stdout, stderr, wait, interrupt := e.Runner.Start(t, e.RunContext, e.cmdArgs(args), e.Environment) + stdout, stderr, wait, interrupt := e.Runner.Start(tb, e.RunContext, e.cmdArgs(args), e.Environment) prefix, logOutput := e.getLogOutputPrefix() @@ -209,18 +211,18 @@ func (e *CLITest) RunAndProcessStderrInt(t *testing.T, outputCallback func(line scanner := bufio.NewScanner(stdout) for scanner.Scan() { if logOutput { - t.Logf("[%vstdout] %v", prefix, scanner.Text()) + tb.Logf("[%vstdout] %v", prefix, scanner.Text()) } } if logOutput { - t.Logf("[%vstdout] EOF", prefix) + tb.Logf("[%vstdout] EOF", prefix) } }() scanner := bufio.NewScanner(stderr) for scanner.Scan() { - if !outputCallback(scanner.Text()) { + if !stderrCallback(scanner.Text()) { break } } @@ -228,17 +230,17 @@ func (e *CLITest) RunAndProcessStderrInt(t *testing.T, outputCallback func(line // complete stderr scanning in the background without processing lines. go func() { for scanner.Scan() { - if asyncCallback != nil { - asyncCallback(scanner.Text()) + if stderrAsyncCallback != nil { + stderrAsyncCallback(scanner.Text()) } if logOutput { - t.Logf("[%vstderr] %v", prefix, scanner.Text()) + tb.Logf("[%vstderr] %v", prefix, scanner.Text()) } } if logOutput { - t.Logf("[%vstderr] EOF", prefix) + tb.Logf("[%vstderr] EOF", prefix) } }() @@ -246,33 +248,33 @@ func (e *CLITest) RunAndProcessStderrInt(t *testing.T, outputCallback func(line } // RunAndExpectSuccessWithErrOut runs the given command, expects it to succeed and returns its stdout and stderr lines. 
-func (e *CLITest) RunAndExpectSuccessWithErrOut(t *testing.T, args ...string) (stdout, stderr []string) { - t.Helper() +func (e *CLITest) RunAndExpectSuccessWithErrOut(tb testing.TB, args ...string) (stdout, stderr []string) { + tb.Helper() - stdout, stderr, err := e.Run(t, false, args...) - require.NoError(t, err, "'kopia %v' failed", strings.Join(args, " ")) + stdout, stderr, err := e.Run(tb, false, args...) + require.NoError(tb, err, "'kopia %v' failed", strings.Join(args, " ")) return stdout, stderr } // RunAndExpectFailure runs the given command, expects it to fail and returns its output lines. -func (e *CLITest) RunAndExpectFailure(t *testing.T, args ...string) (stdout, stderr []string) { - t.Helper() +func (e *CLITest) RunAndExpectFailure(tb testing.TB, args ...string) (stdout, stderr []string) { + tb.Helper() var err error - stdout, stderr, err = e.Run(t, true, args...) - require.Error(t, err, "'kopia %v' succeeded, but expected failure", strings.Join(args, " ")) + stdout, stderr, err = e.Run(tb, true, args...) + require.Error(tb, err, "'kopia %v' succeeded, but expected failure", strings.Join(args, " ")) return stdout, stderr } // RunAndVerifyOutputLineCount runs the given command and asserts it returns the given number of output lines, then returns them. -func (e *CLITest) RunAndVerifyOutputLineCount(t *testing.T, wantLines int, args ...string) []string { - t.Helper() +func (e *CLITest) RunAndVerifyOutputLineCount(tb testing.TB, wantLines int, args ...string) []string { + tb.Helper() - lines := e.RunAndExpectSuccess(t, args...) - require.Len(t, lines, wantLines, "unexpected output lines for 'kopia %v', lines:\n %s", strings.Join(args, " "), strings.Join(lines, "\n ")) + lines := e.RunAndExpectSuccess(tb, args...) 
+ require.Len(tb, lines, wantLines, "unexpected output lines for 'kopia %v', lines:\n %s", strings.Join(args, " "), strings.Join(lines, "\n ")) return lines } @@ -290,61 +292,53 @@ func (e *CLITest) cmdArgs(args []string) []string { } // Run executes kopia with given arguments and returns the output lines. -func (e *CLITest) Run(t *testing.T, expectedError bool, args ...string) (stdout, stderr []string, err error) { - t.Helper() +func (e *CLITest) Run(tb testing.TB, expectedError bool, args ...string) (stdout, stderr []string, err error) { + tb.Helper() args = e.cmdArgs(args) outputPrefix, logOutput := e.getLogOutputPrefix() - t.Logf("%vrunning 'kopia %v' with %v", outputPrefix, strings.Join(args, " "), e.Environment) + tb.Logf("%vrunning 'kopia %v' with %v", outputPrefix, strings.Join(args, " "), e.Environment) timer := timetrack.StartTimer() - stdoutReader, stderrReader, wait, _ := e.Runner.Start(t, e.RunContext, args, e.Environment) + stdoutReader, stderrReader, wait, _ := e.Runner.Start(tb, e.RunContext, args, e.Environment) var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() - + wg.Go(func() { scanner := bufio.NewScanner(stdoutReader) for scanner.Scan() { if logOutput { - t.Logf("[%vstdout] %v", outputPrefix, scanner.Text()) + tb.Logf("[%vstdout] %v", outputPrefix, scanner.Text()) } stdout = append(stdout, scanner.Text()) } - }() - - wg.Add(1) - - go func() { - defer wg.Done() + }) + wg.Go(func() { scanner := bufio.NewScanner(stderrReader) for scanner.Scan() { if logOutput { - t.Logf("[%vstderr] %v", outputPrefix, scanner.Text()) + tb.Logf("[%vstderr] %v", outputPrefix, scanner.Text()) } stderr = append(stderr, scanner.Text()) } - }() + }) wg.Wait() gotErr := wait() if expectedError { - require.Error(t, gotErr, "unexpected success when running 'kopia %v' (stdout:\n%v\nstderr:\n%v", strings.Join(args, " "), strings.Join(stdout, "\n"), strings.Join(stderr, "\n")) + require.Error(tb, gotErr, "unexpected success when running 'kopia %v' 
(stdout:\n%v\nstderr:\n%v", strings.Join(args, " "), strings.Join(stdout, "\n"), strings.Join(stderr, "\n")) } else { - require.NoError(t, gotErr, "unexpected error when running 'kopia %v' (stdout:\n%v\nstderr:\n%v", strings.Join(args, " "), strings.Join(stdout, "\n"), strings.Join(stderr, "\n")) + require.NoError(tb, gotErr, "unexpected error when running 'kopia %v' (stdout:\n%v\nstderr:\n%v", strings.Join(args, " "), strings.Join(stdout, "\n"), strings.Join(stderr, "\n")) } //nolint:forbidigo - t.Logf("%vfinished in %v: 'kopia %v'", outputPrefix, timer.Elapsed().Milliseconds(), strings.Join(args, " ")) + tb.Logf("%vfinished in %v: 'kopia %v'", outputPrefix, timer.Elapsed().Milliseconds(), strings.Join(args, " ")) return stdout, stderr, gotErr } diff --git a/tests/tools/fio/fio.go b/tests/tools/fio/fio.go index 4e8cdc12cda..7df348a7d95 100644 --- a/tests/tools/fio/fio.go +++ b/tests/tools/fio/fio.go @@ -6,6 +6,7 @@ package fio import ( "bytes" + "context" "fmt" "log" "math/rand" @@ -260,7 +261,7 @@ func (fr *Runner) Run(args ...string) (stdout, stderr string, err error) { log.Printf("running '%s %v'", fr.Exe, argsStr) } - c := exec.Command(fr.Exe, args...) + c := exec.CommandContext(context.Background(), fr.Exe, args...) 
errOut := &bytes.Buffer{} c.Stderr = errOut diff --git a/tests/tools/fio/options.go b/tests/tools/fio/options.go index 7e987c2e055..0379b69ad43 100644 --- a/tests/tools/fio/options.go +++ b/tests/tools/fio/options.go @@ -2,6 +2,7 @@ package fio import ( "fmt" + "maps" "path/filepath" "strconv" ) @@ -35,13 +36,8 @@ const ( func (o Options) Merge(other Options) Options { out := make(map[string]string, len(o)+len(other)) - for k, v := range o { - out[k] = v - } - - for k, v := range other { - out[k] = v - } + maps.Copy(out, o) + maps.Copy(out, other) return out } diff --git a/tests/tools/fswalker/fswalker.go b/tests/tools/fswalker/fswalker.go index bc1f1cd6cb3..d618f9cdb12 100644 --- a/tests/tools/fswalker/fswalker.go +++ b/tests/tools/fswalker/fswalker.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package fswalker provides the checker.Comparer interface using FSWalker // walker and reporter. diff --git a/tests/tools/fswalker/fswalker_test.go b/tests/tools/fswalker/fswalker_test.go index 6837f51cef9..b279a801803 100644 --- a/tests/tools/fswalker/fswalker_test.go +++ b/tests/tools/fswalker/fswalker_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package fswalker @@ -194,6 +193,7 @@ func TestWalkChecker_GatherCompare(t *testing.T) { return true } } + return false }, }, @@ -345,6 +345,7 @@ func TestWalkChecker_filterReportDiffs(t *testing.T) { return true } } + return false }, }, diff --git a/tests/tools/fswalker/reporter/reporter.go b/tests/tools/fswalker/reporter/reporter.go index befaf0364d9..9f3f4e91b5a 100644 --- a/tests/tools/fswalker/reporter/reporter.go +++ b/tests/tools/fswalker/reporter/reporter.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package reporter wraps calls to the fswalker Reporter package reporter diff --git a/tests/tools/fswalker/reporter/reporter_test.go b/tests/tools/fswalker/reporter/reporter_test.go index 
8319d4d6118..459729c8f60 100644 --- a/tests/tools/fswalker/reporter/reporter_test.go +++ b/tests/tools/fswalker/reporter/reporter_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package reporter diff --git a/tests/tools/fswalker/walker/walker.go b/tests/tools/fswalker/walker/walker.go index 0c391be6496..ee98fc35479 100644 --- a/tests/tools/fswalker/walker/walker.go +++ b/tests/tools/fswalker/walker/walker.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package walker wraps calls to the fswalker Walker package walker @@ -21,7 +20,7 @@ const ( // Walk performs a walk governed by the contents of the provided // Policy, and returns the pointer to the Walk. -func Walk(ctx context.Context, policy *fspb.Policy) (*fspb.Walk, error) { //nolint:interfacer +func Walk(ctx context.Context, policy *fspb.Policy) (*fspb.Walk, error) { f, err := os.CreateTemp("", "fswalker-policy-") if err != nil { return nil, err diff --git a/tests/tools/fswalker/walker/walker_test.go b/tests/tools/fswalker/walker/walker_test.go index 5ce3dfc5d39..6c59a8ad943 100644 --- a/tests/tools/fswalker/walker/walker_test.go +++ b/tests/tools/fswalker/walker/walker_test.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 package walker diff --git a/tests/tools/kopiaclient/kopiaclient.go b/tests/tools/kopiaclient/kopiaclient.go index 184edcf0540..a30bba2f5bd 100644 --- a/tests/tools/kopiaclient/kopiaclient.go +++ b/tests/tools/kopiaclient/kopiaclient.go @@ -1,5 +1,4 @@ //go:build darwin || (linux && amd64) -// +build darwin linux,amd64 // Package kopiaclient provides a client to interact with a Kopia repo. 
package kopiaclient @@ -72,7 +71,7 @@ func (kc *KopiaClient) CreateOrConnectRepo(ctx context.Context, repoDir, bucketN return errors.Wrap(iErr, "error connecting to repository") } - return errors.Wrap(err, "unable to open repository") + return nil } // SetCacheLimits sets cache size limits to the already connected repository. diff --git a/tests/tools/kopiarunner/kopia_snapshotter.go b/tests/tools/kopiarunner/kopia_snapshotter.go index 6ec3de2a99d..2294556b3b2 100644 --- a/tests/tools/kopiarunner/kopia_snapshotter.go +++ b/tests/tools/kopiarunner/kopia_snapshotter.go @@ -317,11 +317,11 @@ func parseSnapID(lines []string) (string, error) { func parseSnapshotListForSnapshotIDs(output string) []string { var ret []string - lines := strings.Split(output, "\n") - for _, l := range lines { - fields := strings.Fields(l) + lines := strings.SplitSeq(output, "\n") + for l := range lines { + fields := strings.FieldsSeq(l) - for _, f := range fields { + for f := range fields { spl := strings.Split(f, "manifest:") if len(spl) == 2 { //nolint:mnd ret = append(ret, spl[1]) @@ -335,8 +335,8 @@ func parseSnapshotListForSnapshotIDs(output string) []string { func parseManifestListForSnapshotIDs(output string) []string { var ret []string - lines := strings.Split(output, "\n") - for _, l := range lines { + lines := strings.SplitSeq(output, "\n") + for l := range lines { fields := strings.Fields(l) typeFieldIdx := 5 diff --git a/tests/tools/kopiarunner/kopiarun.go b/tests/tools/kopiarunner/kopiarun.go index 9299cbf4872..0068d1030d7 100644 --- a/tests/tools/kopiarunner/kopiarun.go +++ b/tests/tools/kopiarunner/kopiarun.go @@ -3,6 +3,7 @@ package kopiarunner import ( "bytes" + "context" "log" "os" "os/exec" @@ -63,8 +64,10 @@ func (kr *Runner) Cleanup() { func (kr *Runner) Run(args ...string) (stdout, stderr string, err error) { argsStr := strings.Join(args, " ") log.Printf("running '%s %v'", kr.Exe, argsStr) + cmdArgs := append(append([]string(nil), kr.fixedArgs...), args...) 
- c := exec.Command(kr.Exe, cmdArgs...) + ctx := context.Background() + c := exec.CommandContext(ctx, kr.Exe, cmdArgs...) c.Env = append(os.Environ(), kr.environment...) errOut := &bytes.Buffer{} @@ -79,9 +82,11 @@ func (kr *Runner) Run(args ...string) (stdout, stderr string, err error) { // RunAsync will execute the kopia command with the given args in background. func (kr *Runner) RunAsync(args ...string) (*exec.Cmd, error) { log.Printf("running async '%s %v'", kr.Exe, strings.Join(args, " ")) + cmdArgs := append(append([]string(nil), kr.fixedArgs...), args...) + ctx := context.Background() //nolint:gosec //G204 - c := exec.Command(kr.Exe, cmdArgs...) + c := exec.CommandContext(ctx, kr.Exe, cmdArgs...) c.Env = append(os.Environ(), kr.environment...) c.Stderr = &bytes.Buffer{} diff --git a/tests/tools/kopiarunner/kopiarun_test.go b/tests/tools/kopiarunner/kopiarun_test.go index 8336e125d0c..9c29d122f7a 100644 --- a/tests/tools/kopiarunner/kopiarun_test.go +++ b/tests/tools/kopiarunner/kopiarun_test.go @@ -3,6 +3,8 @@ package kopiarunner import ( "os" "testing" + + "github.com/stretchr/testify/require" ) func TestKopiaRunner(t *testing.T) { @@ -11,13 +13,6 @@ func TestKopiaRunner(t *testing.T) { t.Skip("Skipping kopia runner test: 'KOPIA_EXE' is unset") } - defer func() { - envErr := os.Setenv("KOPIA_EXE", origEnv) - if envErr != nil { - t.Fatal("Unable to reset env KOPIA_EXE to original value") - } - }() - for _, tt := range []struct { name string exe string @@ -58,20 +53,24 @@ func TestKopiaRunner(t *testing.T) { t.Setenv("KOPIA_EXE", tt.exe) runner, err := NewRunner("") - if (err != nil) != tt.expNewRunnerErr { - t.Fatalf("Expected NewRunner error: %v, got %v", tt.expNewRunnerErr, err) - } + if tt.expNewRunnerErr { + require.Error(t, err, "expected NewRunner error") - if err != nil { return } - defer runner.Cleanup() + require.NoError(t, err) + + t.Cleanup(runner.Cleanup) _, _, err = runner.Run(tt.args...) 
- if (err != nil) != tt.expRunErr { - t.Fatalf("Expected Run error: %v, got %v", tt.expRunErr, err) + if tt.expRunErr { + require.Error(t, err, "expected Run error") + + return } + + require.NoError(t, err) }) } } diff --git a/tests/tools/kopiarunner/setpdeath.go b/tests/tools/kopiarunner/setpdeath.go index 28716e1504d..d792b5002a0 100644 --- a/tests/tools/kopiarunner/setpdeath.go +++ b/tests/tools/kopiarunner/setpdeath.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux package kopiarunner diff --git a/tools/cli2md/cli2md.go b/tools/cli2md/cli2md.go index eda32e7aa3f..a6e63c34053 100644 --- a/tools/cli2md/cli2md.go +++ b/tools/cli2md/cli2md.go @@ -288,25 +288,31 @@ hide_summary: true `, title, title) - flagSummary := "" - argSummary := "" + var ( + argSummary strings.Builder + flagSummary strings.Builder + ) for _, a := range cmd.Args { if a.Required { - argSummary += " <" + a.Name + ">" + argSummary.WriteString(" <") + argSummary.WriteString(a.Name) + argSummary.WriteRune('>') } else { - argSummary += " [" + a.Name + "]" + argSummary.WriteString(" [") + argSummary.WriteString(a.Name) + argSummary.WriteRune(']') } } for _, fl := range cmd.Flags { if fl.Required { - flagSummary += " \\\n --" + fl.Name + "=..." 
+ flagSummary.WriteString(" \\\n --" + fl.Name + "=...") } } - fmt.Fprintf(f, "```shell\n$ kopia %v%v%v\n```\n\n", cmd.FullCommand, flagSummary, argSummary) //nolint:errcheck - fmt.Fprintf(f, "%v\n\n", escapeFlags(cmd.Help)) //nolint:errcheck + fmt.Fprintf(f, "```shell\n$ kopia %v%v%v\n```\n\n", cmd.FullCommand, flagSummary.String(), argSummary.String()) //nolint:errcheck + fmt.Fprintf(f, "%v\n\n", escapeFlags(cmd.Help)) //nolint:errcheck emitFlags(f, cmd.Flags) emitArgs(f, cmd.Args) diff --git a/tools/gettool/autodownload/autodownload.go b/tools/gettool/autodownload/autodownload.go index 6fc7d4c4638..9f6a51ed8bd 100644 --- a/tools/gettool/autodownload/autodownload.go +++ b/tools/gettool/autodownload/autodownload.go @@ -9,6 +9,7 @@ import ( "compress/gzip" "crypto/sha256" "encoding/hex" + stderrors "errors" "fmt" "io" "log" @@ -23,15 +24,17 @@ import ( const dirMode = 0o750 -func createFile(target string, mode os.FileMode, modTime time.Time, src io.Reader) error { - f, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) //nolint:gosec +func createFile(outDir *os.Root, target string, mode os.FileMode, modTime time.Time, src io.Reader) error { + f, err := outDir.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) if err != nil { return errors.Wrap(err, "error creating file") } - defer os.Chtimes(target, modTime, modTime) //nolint:errcheck + defer outDir.Chtimes(target, modTime, modTime) //nolint:errcheck - defer f.Close() //nolint:errcheck + defer func() { + err = stderrors.Join(err, f.Close()) + }() if _, err := io.Copy(f, src); err != nil { return errors.Wrap(err, "error copying contents") @@ -40,21 +43,23 @@ func createFile(target string, mode os.FileMode, modTime time.Time, src io.Reade return nil } -func createSymlink(linkPath, linkTarget string) error { - os.Remove(linkPath) //nolint:errcheck +func createSymlink(outDir *os.Root, linkPath, linkTarget string) error { + outDir.Remove(linkPath) //nolint:errcheck - return 
errors.Wrap(os.Symlink(linkTarget, linkPath), "error creating symlink") + return errors.Wrap(outDir.Symlink(linkTarget, linkPath), "error creating symlink") } -func joinAndStripPath(dir, fname string, stripPathComponents int) (string, bool) { - parts := strings.Split(filepath.ToSlash(fname), "/") +func stripLeadingPath(fname string, stripPathComponents int) (string, bool) { + if stripPathComponents == 0 { + return fname, true + } + + parts := strings.Split(filepath.ToSlash(filepath.Clean(fname)), "/") if len(parts) <= stripPathComponents { return "", false } - parts = parts[stripPathComponents:] - - return filepath.Join(append([]string{dir}, parts...)...), true + return filepath.Join(parts[stripPathComponents:]...), true } func untar(dir string, r io.Reader, stripPathComponents int) error { @@ -63,6 +68,17 @@ func untar(dir string, r io.Reader, stripPathComponents int) error { header *tar.Header ) + if err := os.MkdirAll(dir, dirMode); err != nil { + return errors.Wrapf(err, "error creating output directory %q", dir) + } + + outDir, err := os.OpenRoot(dir) + if err != nil { + return errors.Wrapf(err, "could not open output directory root %q", dir) + } + + defer outDir.Close() //nolint:errcheck + tr := tar.NewReader(r) for header, err = tr.Next(); err == nil; header, err = tr.Next() { @@ -70,29 +86,29 @@ func untar(dir string, r io.Reader, stripPathComponents int) error { continue } - target, ok := joinAndStripPath(dir, header.Name, stripPathComponents) + target, ok := stripLeadingPath(header.Name, stripPathComponents) if !ok { continue } - if derr := os.MkdirAll(filepath.Dir(target), dirMode); derr != nil { + if derr := outDir.MkdirAll(filepath.Dir(target), dirMode); derr != nil { return errors.Wrap(derr, "error creating parent directory") } switch header.Typeflag { case tar.TypeDir: - if derr := os.MkdirAll(target, dirMode); derr != nil { + if derr := outDir.MkdirAll(target, dirMode); derr != nil { return errors.Wrap(derr, "error creating directory") } case 
tar.TypeReg: //nolint:gosec - if ferr := createFile(target, os.FileMode(header.Mode), header.ModTime, tr); ferr != nil { + if ferr := createFile(outDir, target, os.FileMode(header.Mode), header.ModTime, tr); ferr != nil { return errors.Wrapf(ferr, "error creating file %v", target) } case tar.TypeSymlink: - if ferr := createSymlink(target, header.Linkname); ferr != nil { + if ferr := createSymlink(outDir, target, header.Linkname); ferr != nil { return errors.Wrapf(ferr, "error creating file %v", target) } @@ -109,6 +125,17 @@ func untar(dir string, r io.Reader, stripPathComponents int) error { } func unzip(dir string, r io.Reader, stripPathComponents int) error { + if err := os.MkdirAll(dir, dirMode); err != nil { + return errors.Wrapf(err, "error creating output directory %q", dir) + } + + outDir, err := os.OpenRoot(dir) + if err != nil { + return errors.Wrapf(err, "could not open output directory root %q", dir) + } + + defer outDir.Close() //nolint:errcheck + // zips require ReaderAt, most installers are quite small so we'll just buffer them in memory var buf bytes.Buffer if _, err := io.Copy(&buf, r); err != nil { @@ -123,18 +150,18 @@ func unzip(dir string, r io.Reader, stripPathComponents int) error { } for _, f := range zf.File { - fpath, ok := joinAndStripPath(dir, f.Name, stripPathComponents) + fpath, ok := stripLeadingPath(f.Name, stripPathComponents) if !ok { continue } - if err := os.MkdirAll(filepath.Dir(fpath), dirMode); err != nil { + if err := outDir.MkdirAll(filepath.Dir(fpath), dirMode); err != nil { return errors.Wrap(err, "error creating parent directory") } switch f.FileInfo().Mode() & os.ModeType { case os.ModeDir: - if err := os.MkdirAll(fpath, dirMode); err != nil { + if err := outDir.MkdirAll(fpath, dirMode); err != nil { return errors.Wrap(err, "error creating directory") } @@ -146,7 +173,7 @@ func unzip(dir string, r io.Reader, stripPathComponents int) error { return errors.Wrap(err, "error opening zip entry") } - if ferr := 
createFile(fpath, f.FileInfo().Mode(), f.FileInfo().ModTime(), fc); ferr != nil { + if ferr := createFile(outDir, fpath, f.FileInfo().Mode(), f.FileInfo().ModTime(), fc); ferr != nil { return errors.Wrapf(ferr, "error creating file %v", f.Name) } diff --git a/tools/gettool/checksums.txt b/tools/gettool/checksums.txt index a0659270625..9d5e4d438d5 100644 --- a/tools/gettool/checksums.txt +++ b/tools/gettool/checksums.txt @@ -7,12 +7,12 @@ https://github.com/git-chglog/git-chglog/releases/download/v0.15.1/git-chglog_0. https://github.com/gohugoio/hugo/releases/download/v0.113.0/hugo_extended_0.113.0_darwin-universal.tar.gz: 1557f896f34743d241e1aecab588be273dde59692b362a9f4488231a2595b2ae https://github.com/gohugoio/hugo/releases/download/v0.113.0/hugo_extended_0.113.0_linux-amd64.tar.gz: e04bccfa81df6c727f1c03bc858eb21d6f95123d311cafe245f4485d289123f3 https://github.com/gohugoio/hugo/releases/download/v0.113.0/hugo_extended_0.113.0_windows-amd64.zip: 3eabfbfad1431939058e6f7e76573c6bac1fee92f3a7b1ac5739c555940f0e0e -https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-darwin-amd64.tar.gz: ed02ba3ad28466d61d2ae2b80cc95671713fa842c353da37842b1b89e36cb3ce -https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-darwin-arm64.tar.gz: 1cff60651d7c95a4248fa72f0dd020bffed1d2dc4dd8c2c77aee89a0731fa615 -https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-linux-amd64.tar.gz: bc16fd1ef25bce2c600de0122600100ab26d6d75388cc5369c5bb916cb2b82e3 -https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-linux-arm64.tar.gz: 46e86f1c4a94236e4d0bb35252c72939bed9f749897aaad54b576d430b1bb6d4 -https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-linux-armv6.tar.gz: a0ddb93965d25d11f973ef3ff226bae3486bb12000a6b1a5ed18657aa13ef8a9 
-https://github.com/golangci/golangci-lint/releases/download/v2.1.2/golangci-lint-2.1.2-windows-amd64.zip: 1f920e8af6d596deeb9295153270ad2ced9cfa2085c79e68e09a5d96721b48dd +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-darwin-amd64.tar.gz: aee6e16af4dfa60dd3c4e39536edc905f28369fda3c138090db00c8233cfe450 +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-darwin-arm64.tar.gz: 402e903029391f1b6383cc63c8d0fcd38e879a4dfe3a0aff258a1817d7a296ec +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-linux-amd64.tar.gz: c22e188e46aff9b140588abe6828ba271b600ae82b2d6a4f452196a639c17ec0 +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-linux-arm64.tar.gz: 1c22b899f2dd84f9638e0e0352a319a2867b0bb082c5323ad50d8713b65bb793 +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-linux-armv6.tar.gz: b52331fb224cdc987f8f703120d546a98114c400a453c61a2b51a86d0d669dbe +https://github.com/golangci/golangci-lint/releases/download/v2.6.1/golangci-lint-2.6.1-windows-amd64.zip: b6edeea3d1d52331e98dc6378f710cfe2d752ca1ba09032fe60e62a87a27a25f https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Darwin_arm64.tar.gz: 1f95e6561974f4766d8833438b646b06930563ca9867447ea03edb623d876c75 https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Darwin_x86_64.tar.gz: 17ecad881a50e32f033da5a200c8417d37cae70f09e925645452937998aca506 https://github.com/goreleaser/goreleaser/releases/download/v0.176.0/goreleaser_Linux_arm64.tar.gz: 8bf2a9b9e84498bfa239f2fe91b2d555642c87ab9d3f5d37f29e6e97116910a3 diff --git a/tools/gettool/gettool.go b/tools/gettool/gettool.go index 8e7f7635e09..545f49e3ca1 100644 --- a/tools/gettool/gettool.go +++ b/tools/gettool/gettool.go @@ -184,7 +184,7 @@ func main() { var errorCount int - for _, toolNameVersion := range strings.Split(*tool, ",") { + for 
toolNameVersion := range strings.SplitSeq(*tool, ",") { parts := strings.Split(toolNameVersion, ":") //nolint:mnd diff --git a/tools/tools.mk b/tools/tools.mk index c73527b6d79..d91f479a9c1 100644 --- a/tools/tools.mk +++ b/tools/tools.mk @@ -104,7 +104,7 @@ retry:= endif # tool versions -GOLANGCI_LINT_VERSION=2.1.2 +GOLANGCI_LINT_VERSION=2.6.1 CHECKLOCKS_VERSION=release-20241104.0 NODE_VERSION=22.15.1 HUGO_VERSION=0.113.0 @@ -261,9 +261,6 @@ export KOPIA_VERSION_NO_PREFIX=$(KOPIA_VERSION:v%=%) export REACT_APP_SHORT_VERSION_INFO:=$(KOPIA_VERSION) export REACT_APP_FULL_VERSION_INFO:=$(KOPIA_VERSION) built on $(date_full) $(hostname) -KOPIA_BUILD_TAGS= -KOPIA_BUILD_FLAGS=-ldflags "-s -w -X github.com/kopia/kopia/repo.BuildVersion=$(KOPIA_VERSION_NO_PREFIX) -X github.com/kopia/kopia/repo.BuildInfo=$(shell git rev-parse HEAD) -X github.com/kopia/kopia/repo.BuildGitHubRepo=$(GITHUB_REPOSITORY)" - clean-tools: rm -rf $(TOOLS_DIR)