diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml new file mode 100644 index 00000000..d7197cb1 --- /dev/null +++ b/.github/workflows/build-cli.yml @@ -0,0 +1,222 @@ +name: Build CLI Binaries + +on: + workflow_call: + inputs: + version: + description: 'Version string (without leading v), e.g. 0.3.0 or dev-' + type: string + required: true + commit: + description: 'Short commit SHA' + type: string + required: true + upload-to-release: + description: 'Whether to attach archives to a GitHub release' + type: boolean + required: false + default: false + release-tag: + description: 'Release tag to attach archives to (when upload-to-release is true)' + type: string + required: false + default: '' + prerelease: + description: 'Mark the release as a prerelease' + type: boolean + required: false + default: false + +jobs: + build-cli: + runs-on: ${{ matrix.runner }} + permissions: + contents: write + strategy: + matrix: + include: + # Linux builds + - goos: linux + goarch: amd64 + runner: ubuntu-latest + cc: zig cc -target x86_64-linux-musl + - goos: linux + goarch: arm64 + runner: ubuntu-latest + cc: zig cc -target aarch64-linux-musl + # macOS builds + - goos: darwin + goarch: amd64 + runner: macos-latest + cc: '' + - goos: darwin + goarch: arm64 + runner: macos-latest + cc: '' + # Windows builds + - goos: windows + goarch: amd64 + runner: ubuntu-latest + cc: zig cc -target x86_64-windows-gnu + steps: + - uses: mlugg/setup-zig@v2 + if: matrix.cc != '' + + - uses: actions/setup-go@v6 + with: + go-version: 1.26.0 + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download frontend build + uses: actions/download-artifact@v8 + with: + name: frontend-build + path: frontend/dist/ + + - name: Install macFUSE + if: matrix.goos == 'darwin' + run: brew install --cask macfuse + + - name: Compute timestamp + id: ts + run: echo "timestamp=$(date -u '+%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT + + - name: Build ${{ matrix.goos 
}}-${{ matrix.goarch }} binary + env: + GOOS: ${{ matrix.goos }} + GOARCH: ${{ matrix.goarch }} + CGO_ENABLED: 1 + CC: ${{ matrix.cc }} + run: | + EXT="" + if [ "${{ matrix.goos }}" = "windows" ]; then + EXT=".exe" + fi + + go build \ + -trimpath \ + -tags=cli \ + -ldflags="-s -w -X 'github.com/javi11/altmount/internal/version.Version=${{ inputs.version }}' -X 'github.com/javi11/altmount/internal/version.GitCommit=${{ inputs.commit }}' -X 'github.com/javi11/altmount/internal/version.Timestamp=${{ steps.ts.outputs.timestamp }}'" \ + -o "altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" \ + ./cmd/altmount/main.go + + - name: Create archive + run: | + EXT="" + if [ "${{ matrix.goos }}" = "windows" ]; then + EXT=".exe" + fi + + BINARY_NAME="altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" + ARCHIVE_NAME="altmount-cli_v${{ inputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}" + + if [ "${{ matrix.goos }}" = "windows" ]; then + zip "${ARCHIVE_NAME}.zip" "$BINARY_NAME" + else + tar -czf "${ARCHIVE_NAME}.tar.gz" "$BINARY_NAME" + fi + + - name: Upload ${{ matrix.goos }}-${{ matrix.goarch }} artifacts + uses: actions/upload-artifact@v6 + with: + name: cli-${{ matrix.goos }}-${{ matrix.goarch }} + path: | + altmount-cli_v${{ inputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}.* + retention-days: 1 + + create-universal-darwin: + runs-on: macos-latest + needs: build-cli + permissions: + contents: write + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download darwin-amd64 binary + uses: actions/download-artifact@v8 + with: + name: cli-darwin-amd64 + path: ./artifacts/ + + - name: Download darwin-arm64 binary + uses: actions/download-artifact@v8 + with: + name: cli-darwin-arm64 + path: ./artifacts/ + + - name: Extract binaries + run: | + cd artifacts + tar -xzf altmount-cli_v${{ inputs.version }}_darwin_amd64.tar.gz + tar -xzf altmount-cli_v${{ inputs.version }}_darwin_arm64.tar.gz + + - name: Create 
universal binary + run: | + lipo -create \ + artifacts/altmount-cli-darwin-amd64 \ + artifacts/altmount-cli-darwin-arm64 \ + -output altmount-cli-darwin-universal + + tar -czf "altmount-cli_v${{ inputs.version }}_darwin_universal.tar.gz" altmount-cli-darwin-universal + + - name: Upload universal darwin artifact + uses: actions/upload-artifact@v6 + with: + name: cli-darwin-universal + path: altmount-cli_v${{ inputs.version }}_darwin_universal.tar.gz + retention-days: 1 + + publish: + runs-on: ubuntu-latest + needs: [build-cli, create-universal-darwin] + permissions: + contents: write + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download all CLI artifacts + uses: actions/download-artifact@v8 + with: + pattern: cli-* + path: ./artifacts/ + merge-multiple: true + + - name: Create checksums + run: | + cd artifacts + sha512sum altmount-cli_v${{ inputs.version }}_*.* > checksums-cli.txt + cat checksums-cli.txt + + - name: Upload combined artifacts (when not publishing to release) + if: ${{ !inputs.upload-to-release }} + uses: actions/upload-artifact@v6 + with: + name: cli-release-bundle + path: | + artifacts/altmount-cli_v${{ inputs.version }}_*.* + artifacts/checksums-cli.txt + retention-days: 7 + + - name: Attach assets to GitHub Release + if: ${{ inputs.upload-to-release }} + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ inputs.release-tag }} + files: | + artifacts/altmount-cli_v${{ inputs.version }}_*.* + artifacts/checksums-cli.txt + draft: false + prerelease: ${{ inputs.prerelease }} + make_latest: ${{ !inputs.prerelease }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/dev-image.yml b/.github/workflows/dev-image.yml index abbd25d5..b11777fc 100644 --- a/.github/workflows/dev-image.yml +++ b/.github/workflows/dev-image.yml @@ -59,6 +59,67 @@ jobs: path: frontend/dist retention-days: 1 + prepare-dev-release: + name: Ensure Dev Prerelease Exists + runs-on: ubuntu-latest + 
needs: build-frontend + permissions: + contents: write + outputs: + short_sha: ${{ steps.sha.outputs.short_sha }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Compute short SHA + id: sha + run: echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT + + - name: Ensure rolling dev prerelease + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release create dev \ + --prerelease \ + --title "Dev Build" \ + --notes "Rolling development build from main branch" \ + --target main 2>/dev/null \ + || gh release edit dev \ + --prerelease \ + --target main \ + --title "Dev Build" \ + --notes "Rolling development build from main branch (commit ${{ github.sha }})" + + - name: Delete previous dev assets + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + assets=$(gh release view dev --json assets -q '.assets[].name' || true) + if [ -n "$assets" ]; then + echo "$assets" | while read -r asset; do + [ -z "$asset" ] && continue + echo "Deleting asset: $asset" + gh release delete-asset dev "$asset" --yes || true + done + else + echo "No existing assets on dev release" + fi + + publish-dev-binaries: + name: Publish Dev CLI Binaries + needs: prepare-dev-release + permissions: + contents: write + uses: ./.github/workflows/build-cli.yml + with: + version: dev-${{ github.sha }} + commit: ${{ needs.prepare-dev-release.outputs.short_sha }} + upload-to-release: true + release-tag: dev + prerelease: true + build-dev-amd64: name: Build Dev Image (AMD64) runs-on: ubuntu-latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 444e53cc..879e482c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -90,66 +90,21 @@ jobs: path: frontend/dist/ retention-days: 1 - build-cli: - runs-on: ${{ matrix.runner }} + prepare-release: + runs-on: ubuntu-latest needs: [test, build-frontend] permissions: contents: write - strategy: - matrix: - include: - # Linux builds - - goos: 
linux - goarch: amd64 - runner: ubuntu-latest - cc: zig cc -target x86_64-linux-musl - - goos: linux - goarch: arm64 - runner: ubuntu-latest - cc: zig cc -target aarch64-linux-musl - # macOS builds - - goos: darwin - goarch: amd64 - runner: macos-latest - cc: '' - - goos: darwin - goarch: arm64 - runner: macos-latest - cc: '' - # Windows builds - - goos: windows - goarch: amd64 - runner: ubuntu-latest - cc: zig cc -target x86_64-windows-gnu + outputs: + version: ${{ steps.version.outputs.version }} + commit: ${{ steps.version.outputs.commit }} + release_tag: ${{ steps.version.outputs.release_tag }} steps: - # dependencies - - uses: mlugg/setup-zig@v2 - if: matrix.cc != '' - - - uses: actions/setup-go@v6 - with: - go-version: 1.26.0 - - # checkout - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - # Download frontend build - - name: Download frontend build - uses: actions/download-artifact@v8 - with: - name: frontend-build - path: frontend/dist/ - - # Install macFUSE headers (required by cgofuse CGO dependency) - - name: Install macFUSE - if: matrix.goos == 'darwin' - run: brew install --cask macfuse - - - # Extract version info - name: Extract version info id: version run: | @@ -157,180 +112,41 @@ jobs: TAG="${{ inputs.tag }}" VERSION="${TAG#v}" else - VERSION="${GITHUB_REF#refs/tags/v}" + TAG="${GITHUB_REF#refs/tags/}" + VERSION="${TAG#v}" fi COMMIT=$(git rev-parse --short HEAD) - TIMESTAMP=$(date -u '+%Y-%m-%dT%H:%M:%SZ') echo "version=$VERSION" >> $GITHUB_OUTPUT echo "commit=$COMMIT" >> $GITHUB_OUTPUT - echo "timestamp=$TIMESTAMP" >> $GITHUB_OUTPUT + echo "release_tag=$TAG" >> $GITHUB_OUTPUT - # Build binary - - name: Build ${{ matrix.goos }}-${{ matrix.goarch }} binary + - name: Ensure GitHub Release exists with generated notes env: - GOOS: ${{ matrix.goos }} - GOARCH: ${{ matrix.goarch }} - CGO_ENABLED: 1 - CC: ${{ matrix.cc }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG: ${{ steps.version.outputs.release_tag }} + GENERATE_NOTES: ${{ 
github.event_name != 'workflow_dispatch' }} run: | - # Set binary extension for Windows - EXT="" - if [ "${{ matrix.goos }}" = "windows" ]; then - EXT=".exe" - fi - - # Build the binary - go build \ - -trimpath \ - -tags=cli \ - -ldflags="-s -w -X 'github.com/javi11/altmount/internal/version.Version=${{ steps.version.outputs.version }}' -X 'github.com/javi11/altmount/internal/version.GitCommit=${{ steps.version.outputs.commit }}' -X 'github.com/javi11/altmount/internal/version.Timestamp=${{ steps.version.outputs.timestamp }}'" \ - -o "altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" \ - ./cmd/altmount/main.go - - # Create archive - - name: Create archive - run: | - # Set binary extension and archive format - EXT="" - ARCHIVE_EXT="tar.gz" - if [ "${{ matrix.goos }}" = "windows" ]; then - EXT=".exe" - ARCHIVE_EXT="zip" - fi - - BINARY_NAME="altmount-cli-${{ matrix.goos }}-${{ matrix.goarch }}${EXT}" - ARCHIVE_NAME="altmount-cli_v${{ steps.version.outputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}" - - if [ "${{ matrix.goos }}" = "windows" ]; then - zip "${ARCHIVE_NAME}.zip" "$BINARY_NAME" - else - tar -czf "${ARCHIVE_NAME}.tar.gz" "$BINARY_NAME" - fi - - # Upload individual build artifacts - - name: Upload ${{ matrix.goos }}-${{ matrix.goarch }} artifacts - uses: actions/upload-artifact@v6 - with: - name: cli-${{ matrix.goos }}-${{ matrix.goarch }} - path: | - altmount-cli_v${{ steps.version.outputs.version }}_${{ matrix.goos }}_${{ matrix.goarch }}.* - retention-days: 1 - - create-universal-darwin: - runs-on: macos-latest - needs: build-cli - permissions: - contents: write - steps: - # checkout - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Extract version info - - name: Extract version info - id: version - run: | - if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then - TAG="${{ inputs.tag }}" - VERSION="${TAG#v}" + if gh release view "$TAG" >/dev/null 2>&1; then + echo "Release $TAG already exists" else - 
VERSION="${GITHUB_REF#refs/tags/v}" + if [ "$GENERATE_NOTES" = "true" ]; then + gh release create "$TAG" --title "$TAG" --generate-notes + else + gh release create "$TAG" --title "$TAG" + fi fi - echo "version=$VERSION" >> $GITHUB_OUTPUT - - # Download darwin binaries - - name: Download darwin-amd64 binary - uses: actions/download-artifact@v8 - with: - name: cli-darwin-amd64 - path: ./artifacts/ - - name: Download darwin-arm64 binary - uses: actions/download-artifact@v8 - with: - name: cli-darwin-arm64 - path: ./artifacts/ - - # Extract binaries from archives - - name: Extract binaries - run: | - cd artifacts - tar -xzf altmount-cli_v${{ steps.version.outputs.version }}_darwin_amd64.tar.gz - tar -xzf altmount-cli_v${{ steps.version.outputs.version }}_darwin_arm64.tar.gz - - # Create universal binary - - name: Create universal binary - run: | - lipo -create \ - artifacts/altmount-cli-darwin-amd64 \ - artifacts/altmount-cli-darwin-arm64 \ - -output altmount-cli-darwin-universal - - # Create universal archive - tar -czf "altmount-cli_v${{ steps.version.outputs.version }}_darwin_universal.tar.gz" altmount-cli-darwin-universal - - # Upload universal binary - - name: Upload universal darwin artifact - uses: actions/upload-artifact@v6 - with: - name: cli-darwin-universal - path: altmount-cli_v${{ steps.version.outputs.version }}_darwin_universal.tar.gz - retention-days: 1 - - create-release: - runs-on: ubuntu-latest - needs: [build-cli, create-universal-darwin] + build-cli: + needs: [prepare-release, build-frontend] permissions: contents: write - steps: - # checkout - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Extract version info - - name: Extract version info - id: version - run: | - if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then - TAG="${{ inputs.tag }}" - VERSION="${TAG#v}" - else - VERSION="${GITHUB_REF#refs/tags/v}" - fi - echo "version=$VERSION" >> $GITHUB_OUTPUT - - # Download all artifacts - - name: Download all CLI 
artifacts - uses: actions/download-artifact@v8 - with: - pattern: cli-* - path: ./artifacts/ - merge-multiple: true - - # Create checksums - - name: Create checksums - run: | - cd artifacts - sha512sum altmount-cli_v${{ steps.version.outputs.version }}_*.* > checksums-cli.txt - cat checksums-cli.txt - - # Create GitHub release - - name: Create GitHub Release - uses: softprops/action-gh-release@v2 - with: - tag_name: ${{ github.event_name == 'workflow_dispatch' && inputs.tag || github.ref_name }} - files: | - artifacts/altmount-cli_v${{ steps.version.outputs.version }}_*.* - artifacts/checksums-cli.txt - draft: false - prerelease: false - generate_release_notes: ${{ github.event_name != 'workflow_dispatch' }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: ./.github/workflows/build-cli.yml + with: + version: ${{ needs.prepare-release.outputs.version }} + commit: ${{ needs.prepare-release.outputs.commit }} + upload-to-release: true + release-tag: ${{ needs.prepare-release.outputs.release_tag }} + prerelease: false build-image-amd64: runs-on: ubuntu-latest diff --git a/frontend/src/components/config/UpdateSection.tsx b/frontend/src/components/config/UpdateSection.tsx index 16e290bb..e0e8a87d 100644 --- a/frontend/src/components/config/UpdateSection.tsx +++ b/frontend/src/components/config/UpdateSection.tsx @@ -33,11 +33,18 @@ export function UpdateSection() { refetch(); }; + const dockerMode = updateStatus?.docker_available ?? false; + const binaryMode = !dockerMode && (updateStatus?.binary_update_available ?? false); + const updateUnavailable = updateStatus !== undefined && !dockerMode && !binaryMode; + const handleApplyUpdate = async (force = false) => { const actionTitle = force ? "Force Reinstall" : "Apply Update"; + const baseAction = dockerMode + ? `pull the ${channel} image and restart the container` + : `download the ${channel} binary and restart`; const actionMessage = force - ? 
`This will force-pull the ${channel} image and restart the container, even if the version hasn't changed. Continue?` - : `This will pull the latest ${channel} image and restart the container. The service will be briefly unavailable. Continue?`; + ? `This will force-${baseAction}, even if the version hasn't changed. Continue?` + : `This will ${baseAction}. The service will be briefly unavailable. Continue?`; const confirmed = await confirmAction(actionTitle, actionMessage, { type: force ? "error" : "warning", @@ -51,7 +58,9 @@ export function UpdateSection() { showToast({ type: "success", title: force ? "Reinstall started" : "Update started", - message: "Pulling image. The container will restart automatically.", + message: dockerMode + ? "Pulling image. The container will restart automatically." + : "Downloading binary. The service will restart automatically.", }); } catch (err) { showToast({ @@ -62,7 +71,6 @@ export function UpdateSection() { } }; - const dockerUnavailable = updateStatus && !updateStatus.docker_available; const updateAvailable = updateStatus?.update_available ?? false; /** Taller tap targets below md (touch-friendly ~48px min height) */ @@ -157,7 +165,7 @@ export function UpdateSection() { type="button" className={`btn btn-sm btn-warning min-w-0 ${updateActionBtnLayout}`} onClick={() => handleApplyUpdate(false)} - disabled={applyUpdate.isPending || dockerUnavailable} + disabled={applyUpdate.isPending || updateUnavailable} > {applyUpdate.isPending ? ( @@ -171,7 +179,7 @@ export function UpdateSection() { type="button" className={`btn btn-sm btn-ghost min-w-0 border-base-300 bg-base-100 hover:bg-base-200 ${updateActionBtnLayout}`} onClick={() => handleApplyUpdate(true)} - disabled={applyUpdate.isPending || dockerUnavailable || isChecking} + disabled={applyUpdate.isPending || updateUnavailable || isChecking} > {applyUpdate.isPending ? ( @@ -214,18 +222,27 @@ export function UpdateSection() { ) : null} - {dockerUnavailable && ( + {updateUnavailable && (
Auto-update unavailable
- Mount /var/run/docker.sock into the container - to enable one-click updates. + For Docker installs, mount /var/run/docker.sock{" "} + into the container to enable one-click updates. For standalone binaries, ensure + the executable file is writable by this process.
)} + {binaryMode && ( +
+ +
+ Running as standalone binary — updates download the new binary and restart. +
+
+ )} )} diff --git a/frontend/src/types/update.ts b/frontend/src/types/update.ts index b665a4ac..20ce5226 100644 --- a/frontend/src/types/update.ts +++ b/frontend/src/types/update.ts @@ -8,4 +8,5 @@ export interface UpdateStatusResponse { update_available: boolean; release_url?: string; docker_available?: boolean; + binary_update_available?: boolean; } diff --git a/go.mod b/go.mod index bc2624d7..ee2ed2e8 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/klauspost/compress v1.18.0 github.com/mattn/go-sqlite3 v1.14.22 github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4 + github.com/minio/selfupdate v0.6.0 github.com/natefinch/lumberjack v2.0.0+incompatible github.com/pressly/goose/v3 v3.24.3 github.com/rfjakob/eme v1.1.2 @@ -63,6 +64,7 @@ require ( require ( 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 4d63.com/gochecknoglobals v0.2.2 // indirect + aead.dev/minisign v0.2.0 // indirect cloud.google.com/go/compute/metadata v0.8.0 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect dev.gaijin.team/go/exhaustruct/v4 v4.0.0 // indirect diff --git a/go.sum b/go.sum index 1ee2a53e..9ecce448 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ 4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= 4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= +aead.dev/minisign v0.2.0 h1:kAWrq/hBRu4AARY6AlciO83xhNnW9UaC8YipS2uhLPk= +aead.dev/minisign v0.2.0/go.mod h1:zdq6LdSd9TbuSxchxwhpA9zEb9YXcVGoE8JakuiGaIQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -476,6 +478,8 @@ github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM 
github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4 h1:C/VViMMbR/4Ti2aXrWpKy34S05cRaVd6EvV9BFR3qJ8= github.com/middelink/go-parse-torrent-name v0.0.0-20190301154245-3ff4efacd4c4/go.mod h1:H66QhXPJpUSdWschhL6u//v3ge96/qMnQ9mWp3efbxA= +github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU= +github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mnightingale/rapidyenc v0.0.0-20251128204712-7aafef1eaf1c h1:UFEKx2AsNb8Tx80rlOwUCCz4lDxSsZ1tjq2+QDBNOUA= @@ -789,7 +793,9 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= @@ -905,6 +911,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -916,6 +923,7 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -937,6 +945,7 @@ golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= 
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/internal/api/server.go b/internal/api/server.go index d923ed51..e71f0951 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -23,6 +23,7 @@ import ( "github.com/javi11/altmount/internal/pool" "github.com/javi11/altmount/internal/progress" "github.com/javi11/altmount/internal/rclone" + "github.com/javi11/altmount/internal/updater" "github.com/javi11/altmount/internal/version" "github.com/javi11/altmount/pkg/rclonecli" ) @@ -64,6 +65,7 @@ type Server struct { cacheSource *segcache.Source logFilePath string migrationRepo *database.ImportMigrationRepository + updater updater.Updater ready atomic.Bool } @@ -109,6 +111,7 @@ func NewServer( streamTracker: streamTracker, cacheSource: cacheSource, fuseManager: NewFuseManager(newMountFactory(nzbFilesystem, configManager, streamTracker)), + updater: updater.Default(), } return server @@ -119,6 +122,12 @@ func (s *Server) SetHealthWorker(healthWorker *health.HealthWorker) { s.healthWorker = healthWorker } +// SetUpdater overrides the binary updater used for self-update operations. +// Primarily intended for tests that need to substitute a fake implementation. 
+func (s *Server) SetUpdater(u updater.Updater) { + s.updater = u +} + // SetLibrarySyncWorker sets the library sync worker reference for the server func (s *Server) SetLibrarySyncWorker(librarySyncWorker *health.LibrarySyncWorker) { s.librarySyncWorker = librarySyncWorker diff --git a/internal/api/types.go b/internal/api/types.go index 3aace5c0..759aae72 100644 --- a/internal/api/types.go +++ b/internal/api/types.go @@ -542,13 +542,14 @@ const ( // UpdateStatusResponse represents the current update status. type UpdateStatusResponse struct { - CurrentVersion string `json:"current_version"` - GitCommit string `json:"git_commit,omitempty"` - Channel UpdateChannel `json:"channel"` - LatestVersion string `json:"latest_version,omitempty"` - UpdateAvailable bool `json:"update_available"` - ReleaseURL string `json:"release_url,omitempty"` - DockerAvailable bool `json:"docker_available"` + CurrentVersion string `json:"current_version"` + GitCommit string `json:"git_commit,omitempty"` + Channel UpdateChannel `json:"channel"` + LatestVersion string `json:"latest_version,omitempty"` + UpdateAvailable bool `json:"update_available"` + ReleaseURL string `json:"release_url,omitempty"` + DockerAvailable bool `json:"docker_available"` + BinaryUpdateAvailable bool `json:"binary_update_available"` } // SystemHealthResponse represents system health check result diff --git a/internal/api/update_handlers.go b/internal/api/update_handlers.go index 108b57ca..a4110fa3 100644 --- a/internal/api/update_handlers.go +++ b/internal/api/update_handlers.go @@ -16,6 +16,19 @@ import ( "github.com/javi11/altmount/internal/version" ) +// insideContainer reports whether the current process is running inside a +// Docker or Kubernetes container. When true, the Docker-based update path is +// preferred over the binary self-update path. 
+func insideContainer() bool { + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return true + } + return false +} + const ( ghAPIBase = "https://api.github.com" ghRepoOwner = "javi11" @@ -115,10 +128,11 @@ func (s *Server) handleGetUpdateStatus(c *fiber.Ctx) error { } resp := UpdateStatusResponse{ - CurrentVersion: version.Version, - GitCommit: version.GitCommit, - Channel: channel, - DockerAvailable: isDockerAvailable(), + CurrentVersion: version.Version, + GitCommit: version.GitCommit, + Channel: channel, + DockerAvailable: isDockerAvailable(), + BinaryUpdateAvailable: s.updater != nil && s.updater.CanSelfUpdate(), } ctx, cancel := context.WithTimeout(c.Context(), 10*time.Second) @@ -175,10 +189,6 @@ func (s *Server) handleApplyUpdate(c *fiber.Ctx) error { return RespondForbidden(c, "Admin privileges required", "Only administrators can perform system updates.") } - if !isDockerAvailable() { - return RespondBadRequest(c, "Auto-update is not available. Mount docker.sock into the container and ensure docker CLI is installed.", "") - } - var req struct { Channel UpdateChannel `json:"channel"` Force bool `json:"force"` @@ -196,33 +206,68 @@ func (s *Server) handleApplyUpdate(c *fiber.Ctx) error { return RespondBadRequest(c, "Invalid channel. Use 'latest' or 'dev'", "") } - // Use goroutine to avoid blocking the API response - go func() { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() + // Prefer the Docker-based update path when running inside a container + // with docker.sock mounted. Fall back to in-place binary self-update for + // standalone installs. 
+ dockerPath := insideContainer() && isDockerAvailable() + binaryPath := !dockerPath && s.updater != nil && s.updater.CanSelfUpdate() - image := fmt.Sprintf("ghcr.io/%s/%s:%s", ghRepoOwner, ghRepoName, channel) - slog.InfoContext(ctx, "Starting auto-update", "channel", channel, "image", image, "force", req.Force) + switch { + case dockerPath: + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() - // 1. Pull the new image - cmd := exec.CommandContext(ctx, "docker", "pull", image) - cmd.Env = append(os.Environ(), "HOME=/config") - output, err := cmd.CombinedOutput() - if err != nil { - slog.ErrorContext(ctx, "Failed to pull latest image", "error", err, "output", string(output)) - return - } - slog.InfoContext(ctx, "Successfully pulled latest image", "output", string(output)) - - // 2. Trigger restart - // Note: performRestart only restarts the process. To pick up the new image, - // the container needs to be recreated. However, if the user has a setup - // that handles image updates on restart (like Watchtower or similar), this will work. - // For many users, a simple process restart is the first step. - s.performRestart(ctx) - }() - - return RespondSuccess(c, fiber.Map{ - "message": "Update initiated. 
The image is being pulled and the server will restart automatically.", - }) + image := fmt.Sprintf("ghcr.io/%s/%s:%s", ghRepoOwner, ghRepoName, channel) + slog.InfoContext(ctx, "Starting docker auto-update", + "channel", channel, + "image", image, + "force", req.Force) + + cmd := exec.CommandContext(ctx, "docker", "pull", image) + cmd.Env = append(os.Environ(), "HOME=/config") + output, err := cmd.CombinedOutput() + if err != nil { + slog.ErrorContext(ctx, "Failed to pull latest image", + "error", err, + "output", string(output)) + return + } + slog.InfoContext(ctx, "Successfully pulled latest image", + "output", string(output)) + + s.performRestart(ctx) + }() + + return RespondSuccess(c, fiber.Map{ + "message": "Update initiated. The image is being pulled and the server will restart automatically.", + }) + + case binaryPath: + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + slog.InfoContext(ctx, "Starting binary auto-update", + "channel", channel, + "force", req.Force) + + if err := s.updater.ApplyBinaryUpdate(ctx, string(channel)); err != nil { + slog.ErrorContext(ctx, "Failed to apply binary update", "error", err) + return + } + slog.InfoContext(ctx, "Binary update applied, restarting") + s.performRestart(ctx) + }() + + return RespondSuccess(c, fiber.Map{ + "message": "Update initiated. Downloading the new binary and restarting automatically.", + }) + + default: + return RespondBadRequest(c, + "Auto-update is not available. For Docker installs, mount /var/run/docker.sock and install the docker CLI. 
For standalone binaries, ensure the executable file is writable by this process.", + "") + } } + diff --git a/internal/api/update_handlers_test.go b/internal/api/update_handlers_test.go index 0ea3bfde..1c0a921a 100644 --- a/internal/api/update_handlers_test.go +++ b/internal/api/update_handlers_test.go @@ -1,33 +1,179 @@ package api import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http/httptest" "os" + "sync/atomic" "testing" + "time" + "github.com/gofiber/fiber/v2" + "github.com/javi11/altmount/internal/config" + "github.com/javi11/altmount/internal/updater" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIsDockerAvailable(t *testing.T) { - // By default, it should be false in a normal environment unless /var/run/docker.sock exists - // and docker CLI is in PATH. - - // We can't easily mock /var/run/docker.sock without root, but we can check the logic. + // Ensure isDockerAvailable does not panic and returns a bool. available := isDockerAvailable() - - // In most CI environments this will be false. - // If it's true, it means the environment has docker. t.Logf("Docker available in this environment: %v", available) - - // Ensure it doesn't panic assert.NotPanics(t, func() { isDockerAvailable() }) } -func TestIsDockerAvailable_Mock(t *testing.T) { - // Create a dummy file for docker.sock in a temp dir - tmpDir, err := os.MkdirTemp("", "docker-test") - assert.NoError(t, err) - defer os.RemoveAll(tmpDir) +// fakeUpdater is a test double implementing updater.Updater. 
+type fakeUpdater struct { + canSelfUpdate bool + applyErr error + applyCalls atomic.Int32 + lastChannel atomic.Value // string +} + +func (f *fakeUpdater) CanSelfUpdate() bool { return f.canSelfUpdate } + +func (f *fakeUpdater) ApplyBinaryUpdate(_ context.Context, channel string) error { + f.applyCalls.Add(1) + f.lastChannel.Store(channel) + return f.applyErr +} + +func TestHandleGetUpdateStatus_PopulatesBinaryField(t *testing.T) { + app := fiber.New() + loginRequired := false + s := &Server{ + configManager: &mockConfigManager{cfg: &config.Config{ + Auth: config.AuthConfig{LoginRequired: &loginRequired}, + }}, + updater: &fakeUpdater{canSelfUpdate: true}, + } + app.Get("/status", s.handleGetUpdateStatus) + + req := httptest.NewRequest("GET", "/status?channel=latest", nil) + resp, err := app.Test(req, -1) + require.NoError(t, err) + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + var parsed struct { + Data UpdateStatusResponse `json:"data"` + } + require.NoError(t, json.Unmarshal(body, &parsed)) + assert.True(t, parsed.Data.BinaryUpdateAvailable, "binary_update_available should reflect updater.CanSelfUpdate") +} + +// postApplyUpdate posts a body to the apply handler and returns the response. +func postApplyUpdate(t *testing.T, s *Server, body any) (status int, decoded map[string]any) { + t.Helper() + app := fiber.New() + app.Post("/apply", s.handleApplyUpdate) + + var buf bytes.Buffer + require.NoError(t, json.NewEncoder(&buf).Encode(body)) + req := httptest.NewRequest("POST", "/apply", &buf) + req.Header.Set("Content-Type", "application/json") + + resp, err := app.Test(req, -1) + require.NoError(t, err) + defer resp.Body.Close() + + raw, _ := io.ReadAll(resp.Body) + _ = json.Unmarshal(raw, &decoded) + return resp.StatusCode, decoded +} + +func TestHandleApplyUpdate_NoPathAvailable(t *testing.T) { + // Skip if running inside a container (the Docker branch would be + // evaluated instead of the "no path" branch). 
+ if _, err := os.Stat("/.dockerenv"); err == nil { + t.Skip("running inside docker; /.dockerenv present") + } + + loginRequired := false + s := &Server{ + configManager: &mockConfigManager{cfg: &config.Config{ + Auth: config.AuthConfig{LoginRequired: &loginRequired}, + }}, + updater: &fakeUpdater{canSelfUpdate: false}, + } + + status, decoded := postApplyUpdate(t, s, map[string]string{"channel": "latest"}) + assert.Equal(t, 400, status) + assert.Equal(t, false, decoded["success"]) +} + +func TestHandleApplyUpdate_BinaryBranch(t *testing.T) { + if _, err := os.Stat("/.dockerenv"); err == nil { + t.Skip("running inside docker; /.dockerenv present") + } + + loginRequired := false + // Make ApplyBinaryUpdate return an error so performRestart is not + // invoked in the background goroutine — performRestart would syscall.Exec + // the test binary and kill the entire test run. + fake := &fakeUpdater{canSelfUpdate: true, applyErr: errTestApply} + s := &Server{ + configManager: &mockConfigManager{cfg: &config.Config{ + Auth: config.AuthConfig{LoginRequired: &loginRequired}, + }}, + updater: fake, + } + + status, decoded := postApplyUpdate(t, s, map[string]string{"channel": "dev"}) + assert.Equal(t, 200, status) + assert.Equal(t, true, decoded["success"]) - // We can't mock /var/run/docker.sock easily because isDockerAvailable has it hardcoded. - // This shows that isDockerAvailable might be hard to test if we don't allow path injection. + // Wait for the background goroutine to observe the fake updater call. 
+ deadline := time.Now().Add(2 * time.Second) + for time.Now().Before(deadline) { + if fake.applyCalls.Load() >= 1 { + break + } + time.Sleep(10 * time.Millisecond) + } + assert.GreaterOrEqual(t, int(fake.applyCalls.Load()), 1, "ApplyBinaryUpdate should have been invoked") + if v := fake.lastChannel.Load(); v != nil { + assert.Equal(t, "dev", v.(string)) + } } + +var errTestApply = errors.New("test apply failure") + +func TestHandleApplyUpdate_InvalidChannel(t *testing.T) { + loginRequired := false + s := &Server{ + configManager: &mockConfigManager{cfg: &config.Config{ + Auth: config.AuthConfig{LoginRequired: &loginRequired}, + }}, + updater: &fakeUpdater{canSelfUpdate: true}, + } + + status, _ := postApplyUpdate(t, s, map[string]string{"channel": "banana"}) + assert.Equal(t, 400, status) +} + +func TestHandleApplyUpdate_DockerBranchRespected(t *testing.T) { + // This test verifies the decision logic: when the fake updater says it + // cannot self-update AND we're not in a container, we must return 400. + // It is a sibling to TestHandleApplyUpdate_NoPathAvailable to make + // explicit that s.updater is consulted. + if _, err := os.Stat("/.dockerenv"); err == nil { + t.Skip("running inside docker; /.dockerenv present") + } + loginRequired := false + s := &Server{ + configManager: &mockConfigManager{cfg: &config.Config{ + Auth: config.AuthConfig{LoginRequired: &loginRequired}, + }}, + updater: nil, // No updater configured at all. + } + status, _ := postApplyUpdate(t, s, map[string]string{"channel": "latest"}) + assert.Equal(t, 400, status) +} + +// Ensure the fake satisfies the Updater interface. +var _ updater.Updater = (*fakeUpdater)(nil) diff --git a/internal/updater/binary.go b/internal/updater/binary.go new file mode 100644 index 00000000..de089d62 --- /dev/null +++ b/internal/updater/binary.go @@ -0,0 +1,416 @@ +// Package updater provides binary self-update capabilities for standalone +// (non-Docker) installs of altmount. 
It fetches release assets from GitHub, +// verifies their SHA-512 checksum, extracts the binary, and applies the +// update in-place using github.com/minio/selfupdate. +package updater + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "context" + "crypto/sha512" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "os" + "path" + "runtime" + "slices" + "strings" + "time" + + "github.com/minio/selfupdate" +) + +const ( + defaultGitHubAPIBase = "https://api.github.com" + repoOwner = "javi11" + repoName = "altmount" + + // Channel identifiers. + ChannelLatest = "latest" + ChannelDev = "dev" + + downloadTimeout = 10 * time.Minute +) + +// githubAsset mirrors the subset of the GitHub release asset schema used here. +type githubAsset struct { + Name string `json:"name"` + BrowserDownloadURL string `json:"browser_download_url"` +} + +// githubRelease mirrors the subset of the GitHub release schema used here. +type githubRelease struct { + TagName string `json:"tag_name"` + Assets []githubAsset `json:"assets"` +} + +// Updater applies binary updates in-place using assets from GitHub releases. +// It exists as an interface so callers (and tests) can swap in a fake +// implementation. +type Updater interface { + CanSelfUpdate() bool + ApplyBinaryUpdate(ctx context.Context, channel string) error +} + +// Default returns a Updater backed by the real GitHub API and minio/selfupdate. +func Default() Updater { + return &binaryUpdater{ + apiBase: defaultGitHubAPIBase, + httpClient: &http.Client{Timeout: downloadTimeout}, + } +} + +// binaryUpdater is the production implementation of Updater. +type binaryUpdater struct { + apiBase string + httpClient *http.Client +} + +// CanSelfUpdate reports whether a binary self-update is feasible in the +// current runtime. 
It returns false when running inside a Docker container +// (the Docker path is preferred in that case), when os.Executable cannot be +// resolved, or when the current executable path is not writable. +func (u *binaryUpdater) CanSelfUpdate() bool { + if insideContainer() { + return false + } + exe, err := os.Executable() + if err != nil { + return false + } + return isWritable(exe) +} + +// ApplyBinaryUpdate downloads the release asset for the current platform, +// verifies its checksum, extracts the binary and applies the update. The +// channel must be either "latest" or "dev". +func (u *binaryUpdater) ApplyBinaryUpdate(ctx context.Context, channel string) error { + slog.InfoContext(ctx, "Starting binary self-update", + "channel", channel, + "goos", runtime.GOOS, + "goarch", runtime.GOARCH) + + reader, cleanup, err := u.downloadAndExtract(ctx, channel) + if err != nil { + return fmt.Errorf("prepare binary update: %w", err) + } + defer cleanup() + + if err := selfupdate.Apply(reader, selfupdate.Options{}); err != nil { + slog.ErrorContext(ctx, "selfupdate.Apply failed", "error", err) + if rerr := selfupdate.RollbackError(err); rerr != nil { + slog.ErrorContext(ctx, "selfupdate rollback failed", "error", rerr) + } + return fmt.Errorf("apply binary update: %w", err) + } + + slog.InfoContext(ctx, "Binary self-update applied successfully") + return nil +} + +// downloadAndExtract resolves the release for the given channel, downloads +// the matching archive and checksums, verifies the SHA-512 hash, and extracts +// the binary. It returns an io.Reader positioned at the start of the binary +// and a cleanup function the caller must invoke when done. 
+func (u *binaryUpdater) downloadAndExtract(ctx context.Context, channel string) (io.Reader, func(), error) { + release, err := u.fetchRelease(ctx, channel) + if err != nil { + return nil, nil, fmt.Errorf("fetch release: %w", err) + } + + archiveAsset, checksumAsset, err := pickAssets(release.Assets, runtime.GOOS, runtime.GOARCH) + if err != nil { + return nil, nil, err + } + + slog.InfoContext(ctx, "Selected release assets", + "archive", archiveAsset.Name, + "checksum", checksumAsset.Name, + "tag", release.TagName) + + archiveBytes, err := u.downloadBytes(ctx, archiveAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download archive: %w", err) + } + + checksumBytes, err := u.downloadBytes(ctx, checksumAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download checksum file: %w", err) + } + + if err := verifyChecksum(archiveAsset.Name, archiveBytes, checksumBytes); err != nil { + return nil, nil, fmt.Errorf("verify checksum: %w", err) + } + + binaryName := expectedBinaryName(runtime.GOOS, runtime.GOARCH) + reader, err := extractBinary(archiveAsset.Name, archiveBytes, binaryName) + if err != nil { + return nil, nil, fmt.Errorf("extract binary %q: %w", binaryName, err) + } + + return reader, func() {}, nil +} + +// fetchRelease retrieves the release metadata for the requested channel. 
+func (u *binaryUpdater) fetchRelease(ctx context.Context, channel string) (*githubRelease, error) { + var url string + switch channel { + case ChannelLatest: + url = fmt.Sprintf("%s/repos/%s/%s/releases/latest", u.apiBase, repoOwner, repoName) + case ChannelDev: + url = fmt.Sprintf("%s/repos/%s/%s/releases/tags/dev", u.apiBase, repoOwner, repoName) + default: + return nil, fmt.Errorf("unknown channel %q", channel) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + req.Header.Set("Accept", "application/vnd.github+json") + + resp, err := u.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("perform request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("github api returned status %d", resp.StatusCode) + } + + var rel githubRelease + if err := json.NewDecoder(resp.Body).Decode(&rel); err != nil { + return nil, fmt.Errorf("decode release: %w", err) + } + return &rel, nil +} + +// downloadBytes fetches a URL and returns the full response body. +func (u *binaryUpdater) downloadBytes(ctx context.Context, url string) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("build request: %w", err) + } + resp, err := u.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("perform request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("download %q returned status %d", url, resp.StatusCode) + } + data, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read body: %w", err) + } + return data, nil +} + +// pickAssets selects the archive + checksum assets that match the given +// GOOS/GOARCH. For darwin, a universal binary asset is preferred when +// available. 
+func pickAssets(assets []githubAsset, goos, goarch string) (archive githubAsset, checksum githubAsset, err error) { + var checksumAsset *githubAsset + for i, a := range assets { + if a.Name == "checksums-cli.txt" { + checksumAsset = &assets[i] + break + } + } + if checksumAsset == nil { + return githubAsset{}, githubAsset{}, errors.New("release does not include checksums-cli.txt") + } + + candidateNames := candidateArchiveNames(goos, goarch) + for _, want := range candidateNames { + for _, a := range assets { + if strings.HasSuffix(a.Name, want) { + return a, *checksumAsset, nil + } + } + } + return githubAsset{}, githubAsset{}, fmt.Errorf("no release asset matches %s/%s", goos, goarch) +} + +// candidateArchiveNames returns the suffixes we accept for a given +// GOOS/GOARCH, in priority order. Asset names follow the release.yml pattern +// `altmount-cli_v__.`. +func candidateArchiveNames(goos, goarch string) []string { + ext := ".tar.gz" + if goos == "windows" { + ext = ".zip" + } + if goos == "darwin" { + // Prefer the universal binary, fall back to arch-specific archives. + return []string{ + fmt.Sprintf("_darwin_universal%s", ext), + fmt.Sprintf("_%s_%s%s", goos, goarch, ext), + } + } + return []string{fmt.Sprintf("_%s_%s%s", goos, goarch, ext)} +} + +// expectedBinaryName returns the binary file name that is embedded in the +// archive for the given GOOS/GOARCH. +func expectedBinaryName(goos, goarch string) string { + if goos == "darwin" { + // If the universal binary is present, it uses this name; otherwise the + // arch-specific binary name is the fallback. extractBinary tries both. + return "altmount-cli-darwin-universal" + } + name := fmt.Sprintf("altmount-cli-%s-%s", goos, goarch) + if goos == "windows" { + name += ".exe" + } + return name +} + +// verifyChecksum validates archiveBytes against the sha512 digest listed for +// archiveName in the provided checksum file contents. 
func verifyChecksum(archiveName string, archiveBytes, checksumFile []byte) error {
	want, err := findChecksum(archiveName, checksumFile)
	if err != nil {
		return err
	}
	digest := sha512.Sum512(archiveBytes)
	got := hex.EncodeToString(digest[:])
	// Case-insensitive compare: checksum tools may emit either hex case.
	if !strings.EqualFold(got, want) {
		return fmt.Errorf("checksum mismatch for %s: got %s want %s", archiveName, got, want)
	}
	return nil
}

// findChecksum parses a `sha512sum` style checksum file and returns the digest
// for the named file.
func findChecksum(name string, checksumFile []byte) (string, error) {
	for _, line := range strings.Split(string(checksumFile), "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		// Each entry is "<digest>  <file>" (or "<digest> *<file>" in binary
		// mode); the file part may carry a directory prefix.
		parts := strings.Fields(line)
		if len(parts) < 2 {
			continue
		}
		candidate := strings.TrimPrefix(parts[len(parts)-1], "*")
		if candidate == name || path.Base(candidate) == name {
			return parts[0], nil
		}
	}
	return "", fmt.Errorf("checksum for %q not found", name)
}

// extractBinary pulls the expected binary out of the archive. For tar.gz it
// iterates entries; for zip it looks up by file name. If the expected name is
// not present, a secondary lookup by arch-specific name is attempted (this
// covers darwin_universal archives whose binary name differs).
+func extractBinary(archiveName string, archiveBytes []byte, expectedName string) (io.Reader, error) { + alt := fmt.Sprintf("altmount-cli-%s-%s", runtime.GOOS, runtime.GOARCH) + if runtime.GOOS == "windows" { + alt += ".exe" + } + candidates := []string{expectedName, alt} + + switch { + case strings.HasSuffix(archiveName, ".tar.gz"): + return extractFromTarGz(archiveBytes, candidates) + case strings.HasSuffix(archiveName, ".zip"): + return extractFromZip(archiveBytes, candidates) + default: + return nil, fmt.Errorf("unsupported archive format: %s", archiveName) + } +} + +func extractFromTarGz(data []byte, candidates []string) (io.Reader, error) { + gz, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("open gzip: %w", err) + } + defer gz.Close() + + tr := tar.NewReader(gz) + for { + hdr, err := tr.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("read tar: %w", err) + } + if hdr.Typeflag != tar.TypeReg { + continue + } + base := path.Base(hdr.Name) + if matchesAny(base, candidates) { + buf, err := io.ReadAll(tr) + if err != nil { + return nil, fmt.Errorf("read entry %q: %w", hdr.Name, err) + } + return bytes.NewReader(buf), nil + } + } + return nil, fmt.Errorf("binary not found in tar.gz (looked for %v)", candidates) +} + +func extractFromZip(data []byte, candidates []string) (io.Reader, error) { + zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + return nil, fmt.Errorf("open zip: %w", err) + } + for _, f := range zr.File { + base := path.Base(f.Name) + if !matchesAny(base, candidates) { + continue + } + rc, err := f.Open() + if err != nil { + return nil, fmt.Errorf("open zip entry %q: %w", f.Name, err) + } + buf, err := io.ReadAll(rc) + _ = rc.Close() + if err != nil { + return nil, fmt.Errorf("read zip entry %q: %w", f.Name, err) + } + return bytes.NewReader(buf), nil + } + return nil, fmt.Errorf("binary not found in zip (looked for %v)", 
candidates) +} + +func matchesAny(name string, candidates []string) bool { + return slices.Contains(candidates, name) +} + +// insideContainer reports whether the current process appears to be running +// inside a Docker/Kubernetes container. It checks for the /.dockerenv marker +// file and the KUBERNETES_SERVICE_HOST env var. +func insideContainer() bool { + if _, err := os.Stat("/.dockerenv"); err == nil { + return true + } + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return true + } + return false +} + +// isWritable returns true if the current process can open the given file for +// writing without truncating it. +func isWritable(path string) bool { + f, err := os.OpenFile(path, os.O_WRONLY, 0) + if err != nil { + return false + } + _ = f.Close() + return true +} diff --git a/internal/updater/binary_test.go b/internal/updater/binary_test.go new file mode 100644 index 00000000..d102c6d3 --- /dev/null +++ b/internal/updater/binary_test.go @@ -0,0 +1,318 @@ +package updater + +import ( + "archive/tar" + "archive/zip" + "bytes" + "compress/gzip" + "context" + "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPickAssets(t *testing.T) { + t.Parallel() + + assets := []githubAsset{ + {Name: "altmount-cli_v1.2.3_linux_amd64.tar.gz", BrowserDownloadURL: "https://e/linux-amd64"}, + {Name: "altmount-cli_v1.2.3_linux_arm64.tar.gz", BrowserDownloadURL: "https://e/linux-arm64"}, + {Name: "altmount-cli_v1.2.3_windows_amd64.zip", BrowserDownloadURL: "https://e/win"}, + {Name: "altmount-cli_v1.2.3_darwin_amd64.tar.gz", BrowserDownloadURL: "https://e/darwin-amd64"}, + {Name: "altmount-cli_v1.2.3_darwin_universal.tar.gz", BrowserDownloadURL: "https://e/darwin-universal"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: "https://e/checksums"}, + } + + tests := []struct { + name 
string + goos string + goarch string + wantArchive string + wantErr bool + }{ + {name: "linux amd64", goos: "linux", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_linux_amd64.tar.gz"}, + {name: "linux arm64", goos: "linux", goarch: "arm64", wantArchive: "altmount-cli_v1.2.3_linux_arm64.tar.gz"}, + {name: "windows amd64", goos: "windows", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_windows_amd64.zip"}, + {name: "darwin prefers universal", goos: "darwin", goarch: "amd64", wantArchive: "altmount-cli_v1.2.3_darwin_universal.tar.gz"}, + {name: "unsupported os", goos: "plan9", goarch: "amd64", wantErr: true}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + archive, checksum, err := pickAssets(assets, tc.goos, tc.goarch) + if tc.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tc.wantArchive, archive.Name) + assert.Equal(t, "checksums-cli.txt", checksum.Name) + }) + } +} + +func TestPickAssets_MissingChecksum(t *testing.T) { + t.Parallel() + assets := []githubAsset{ + {Name: "altmount-cli_v1.2.3_linux_amd64.tar.gz"}, + } + _, _, err := pickAssets(assets, "linux", "amd64") + require.Error(t, err) + assert.Contains(t, err.Error(), "checksums-cli.txt") +} + +func TestVerifyChecksum(t *testing.T) { + t.Parallel() + + data := []byte("hello world") + sum := sha512.Sum512(data) + digest := hex.EncodeToString(sum[:]) + checksums := fmt.Sprintf("%s altmount-cli_v1_linux_amd64.tar.gz\nbaddigest other.tar.gz\n", digest) + + t.Run("happy path", func(t *testing.T) { + t.Parallel() + err := verifyChecksum("altmount-cli_v1_linux_amd64.tar.gz", data, []byte(checksums)) + require.NoError(t, err) + }) + + t.Run("mismatch", func(t *testing.T) { + t.Parallel() + err := verifyChecksum("altmount-cli_v1_linux_amd64.tar.gz", []byte("tampered"), []byte(checksums)) + require.Error(t, err) + assert.Contains(t, err.Error(), "mismatch") + }) + + t.Run("unknown file", func(t *testing.T) { + t.Parallel() + 
err := verifyChecksum("other.tar.gz.missing", data, []byte(checksums)) + require.Error(t, err) + }) +} + +func TestExtractBinary_TarGz(t *testing.T) { + t.Parallel() + + payload := []byte("fake-binary-contents") + archive := buildTarGz(t, "altmount-cli-linux-amd64", payload) + + r, err := extractBinary("altmount-cli_v1_linux_amd64.tar.gz", archive, "altmount-cli-linux-amd64") + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestExtractBinary_Zip(t *testing.T) { + t.Parallel() + + payload := []byte("fake-windows-binary") + archive := buildZip(t, "altmount-cli-windows-amd64.exe", payload) + + r, err := extractBinary("altmount-cli_v1_windows_amd64.zip", archive, "altmount-cli-windows-amd64.exe") + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestExtractBinary_BinaryMissing(t *testing.T) { + t.Parallel() + archive := buildTarGz(t, "unrelated-file", []byte("x")) + _, err := extractBinary("something_linux_amd64.tar.gz", archive, "altmount-cli-linux-amd64") + require.Error(t, err) +} + +func TestDownloadAndExtract_HappyPath(t *testing.T) { + t.Parallel() + + payload := []byte("fake-binary-contents-for-download") + archive := buildTarGz(t, "altmount-cli-linux-amd64", payload) + sum := sha512.Sum512(archive) + digestLine := fmt.Sprintf("%s altmount-cli_v1.0.0_linux_amd64.tar.gz\n", hex.EncodeToString(sum[:])) + + var baseURL string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "/releases/latest"): + rel := githubRelease{ + TagName: "v1.0.0", + Assets: []githubAsset{ + {Name: "altmount-cli_v1.0.0_linux_amd64.tar.gz", BrowserDownloadURL: baseURL + "/archive"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: baseURL + "/checksums"}, + }, + } + _ = json.NewEncoder(w).Encode(rel) + case r.URL.Path == "/archive": + _, _ = w.Write(archive) 
+ case r.URL.Path == "/checksums": + _, _ = w.Write([]byte(digestLine)) + default: + http.NotFound(w, r) + } + })) + defer srv.Close() + baseURL = srv.URL + + u := &binaryUpdater{apiBase: srv.URL, httpClient: srv.Client()} + reader, cleanup, err := u.downloadAndExtractWith(context.Background(), ChannelLatest, "linux", "amd64") + require.NoError(t, err) + defer cleanup() + + got, err := io.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, payload, got) +} + +func TestDownloadAndExtract_ChecksumMismatch(t *testing.T) { + t.Parallel() + + archive := buildTarGz(t, "altmount-cli-linux-amd64", []byte("real contents")) + // Intentionally wrong checksum. + digestLine := fmt.Sprintf("%s altmount-cli_v1.0.0_linux_amd64.tar.gz\n", strings.Repeat("0", 128)) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + srv.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "/releases/latest"): + rel := githubRelease{ + TagName: "v1.0.0", + Assets: []githubAsset{ + {Name: "altmount-cli_v1.0.0_linux_amd64.tar.gz", BrowserDownloadURL: srv.URL + "/archive"}, + {Name: "checksums-cli.txt", BrowserDownloadURL: srv.URL + "/checksums"}, + }, + } + _ = json.NewEncoder(w).Encode(rel) + case r.URL.Path == "/archive": + _, _ = w.Write(archive) + case r.URL.Path == "/checksums": + _, _ = w.Write([]byte(digestLine)) + } + }) + defer srv.Close() + + u := &binaryUpdater{apiBase: srv.URL, httpClient: srv.Client()} + _, _, err := u.downloadAndExtractWith(context.Background(), ChannelLatest, "linux", "amd64") + require.Error(t, err) + assert.Contains(t, err.Error(), "checksum") +} + +func TestFetchRelease_UnknownChannel(t *testing.T) { + t.Parallel() + u := &binaryUpdater{apiBase: "http://127.0.0.1:1", httpClient: http.DefaultClient} + _, err := u.fetchRelease(context.Background(), "banana") + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown channel") +} + 
+func TestIsWritable(t *testing.T) { + t.Parallel() + dir := t.TempDir() + + writable := filepath.Join(dir, "writable") + require.NoError(t, os.WriteFile(writable, []byte("x"), 0o600)) + assert.True(t, isWritable(writable)) + + readOnly := filepath.Join(dir, "readonly") + require.NoError(t, os.WriteFile(readOnly, []byte("x"), 0o400)) + // On most filesystems 0o400 forbids O_WRONLY for the owner. + assert.False(t, isWritable(readOnly)) + + assert.False(t, isWritable(filepath.Join(dir, "does-not-exist"))) +} + +func TestCanSelfUpdate_RefusesWhenExecutableNotWritable(t *testing.T) { + // We can't portably make os.Executable point at a read-only file, but we + // can at least assert the function does not panic and returns a boolean. + u := &binaryUpdater{} + _ = u.CanSelfUpdate() +} + +// --- helpers ----------------------------------------------------------- + +func buildTarGz(t *testing.T, name string, payload []byte) []byte { + t.Helper() + var buf bytes.Buffer + gz := gzip.NewWriter(&buf) + tw := tar.NewWriter(gz) + require.NoError(t, tw.WriteHeader(&tar.Header{ + Name: name, + Mode: 0o755, + Size: int64(len(payload)), + Typeflag: tar.TypeReg, + })) + _, err := tw.Write(payload) + require.NoError(t, err) + require.NoError(t, tw.Close()) + require.NoError(t, gz.Close()) + return buf.Bytes() +} + +func buildZip(t *testing.T, name string, payload []byte) []byte { + t.Helper() + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + w, err := zw.Create(name) + require.NoError(t, err) + _, err = w.Write(payload) + require.NoError(t, err) + require.NoError(t, zw.Close()) + return buf.Bytes() +} + +// downloadAndExtractWith exposes downloadAndExtract with explicit goos/goarch +// for tests. It mirrors the logic of downloadAndExtract so callers do not need +// to mutate runtime.GOOS/GOARCH. 
+func (u *binaryUpdater) downloadAndExtractWith(ctx context.Context, channel, goos, goarch string) (io.Reader, func(), error) { + release, err := u.fetchRelease(ctx, channel) + if err != nil { + return nil, nil, fmt.Errorf("fetch release: %w", err) + } + archiveAsset, checksumAsset, err := pickAssets(release.Assets, goos, goarch) + if err != nil { + return nil, nil, err + } + archiveBytes, err := u.downloadBytes(ctx, archiveAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download archive: %w", err) + } + checksumBytes, err := u.downloadBytes(ctx, checksumAsset.BrowserDownloadURL) + if err != nil { + return nil, nil, fmt.Errorf("download checksum file: %w", err) + } + if err := verifyChecksum(archiveAsset.Name, archiveBytes, checksumBytes); err != nil { + return nil, nil, fmt.Errorf("verify checksum: %w", err) + } + binaryName := expectedBinaryName(goos, goarch) + // For non-darwin the alt fallback equals the expected name; for darwin we + // want to try the arch-specific binary name too. + alt := fmt.Sprintf("altmount-cli-%s-%s", goos, goarch) + if goos == "windows" { + alt += ".exe" + } + candidates := []string{binaryName, alt} + var reader io.Reader + switch { + case strings.HasSuffix(archiveAsset.Name, ".tar.gz"): + reader, err = extractFromTarGz(archiveBytes, candidates) + case strings.HasSuffix(archiveAsset.Name, ".zip"): + reader, err = extractFromZip(archiveBytes, candidates) + default: + return nil, nil, fmt.Errorf("unsupported archive: %s", archiveAsset.Name) + } + if err != nil { + return nil, nil, err + } + return reader, func() {}, nil +}