diff --git a/.box-test-subscription.env.example b/.box-test-subscription.env.example new file mode 100644 index 0000000..0db943c --- /dev/null +++ b/.box-test-subscription.env.example @@ -0,0 +1 @@ +BOX_TEST_SUBSCRIPTION_URL= diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..1fec3ea --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,125 @@ +name: CI + +on: + push: + branches: + - '**' + tags: + - 'v*' + pull_request: + +permissions: + contents: read + +jobs: + lint-and-tests: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Bash syntax checks + run: ./tests/lint_shell.sh syntax + + - name: ShellCheck (if available) + run: | + if command -v shellcheck >/dev/null 2>&1; then + ./tests/lint_shell.sh shellcheck + else + echo "shellcheck not available; skipping" + fi + + - name: Mock integration tests + run: | + ./tests/integration/test_phase2.sh + ./tests/integration/test_policy.sh + ./tests/integration/test_updater.sh + + - name: Real-kernel integration tests (skip-capable) + continue-on-error: true + run: | + set -o pipefail + : > real-kernel.log + sudo ./tests/integration/test_real_kernel.sh | tee real-kernel.log + + - name: Upload real-kernel log + if: always() + uses: actions/upload-artifact@v4 + with: + name: real-kernel-log + path: real-kernel.log + + build-arch-package: + runs-on: ubuntu-latest + needs: + - lint-and-tests + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Build Arch package in container + run: | + docker run --rm \ + -v "$PWD":/work \ + -w /work \ + archlinux:base-devel \ + bash -lc ' + set -euo pipefail + # Avoid relying on distro default unprivileged accounts (for example `nobody`) + # because some base images can mark them as expired. 
+ useradd -m -U builder + chown -R builder:builder /work + su builder -s /bin/bash -c "cd /work/packaging/arch && makepkg --nodeps --noconfirm -f" + ' + + - name: Capture package path + id: pkg + run: | + pkg_path="$(ls -1 packaging/arch/*.pkg.tar.* | head -n 1)" + echo "package_path=${pkg_path}" >> "${GITHUB_OUTPUT}" + echo "Built package: ${pkg_path}" + + - name: Upload Arch package artifact + uses: actions/upload-artifact@v4 + with: + name: box4linux-arch-pkg + path: ${{ steps.pkg.outputs.package_path }} + + smoke-package: + runs-on: ubuntu-latest + needs: + - build-arch-package + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Download Arch package artifact + uses: actions/download-artifact@v4 + with: + name: box4linux-arch-pkg + path: ./dist + + - name: Package smoke test + run: | + pkg_path="$(ls -1 ./dist/*.pkg.tar.* | head -n 1)" + ./tests/integration/test_arch_package_smoke.sh "${pkg_path}" + + release: + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/v') + needs: + - smoke-package + permissions: + contents: write + steps: + - name: Download Arch package artifact + uses: actions/download-artifact@v4 + with: + name: box4linux-arch-pkg + path: ./dist + + - name: Publish release assets + uses: softprops/action-gh-release@v2 + with: + files: ./dist/*.pkg.tar.* + generate_release_notes: true diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e1639c9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +# OS / editor +.DS_Store +Thumbs.db +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Box references +box-reference/ + +# Local Linux dev runtime state +.box-dev/ + +# Arch packaging build artifacts +packaging/arch/*.pkg.tar.* +packaging/arch/pkg/ +packaging/arch/src/ + +# Local docker/vm test secrets +.box-test-subscription.env diff --git a/README.md b/README.md new file mode 100644 index 0000000..c37c8f8 --- /dev/null +++ b/README.md @@ -0,0 +1,254 @@ +# box4linux workspace + +Linux-native control plane lives in: +- 
`cmd/boxctl` +- `lib/common.sh` +- `lib/config.sh` +- `lib/supervisor/` +- `lib/firewall/` +- `lib/updater/` +- `systemd/` +- `tests/integration/` +- `packaging/arch/` + +Android reference artifacts are kept untouched in `box-reference/`. + +## Quick run (dev) + +1. Use repo-local fallback config at `etc/box/box.toml` (auto-used when `/etc/box/box.toml` is missing). + - If `BOX_CONFIG_FILE` is explicitly set, it must exist; commands fail fast instead of falling back. +2. Run status commands: + - `./cmd/boxctl service status` + - `./cmd/boxctl service status --json` + - `./cmd/boxctl firewall status` + - `./cmd/boxctl firewall status --json` + - `./cmd/boxctl policy status` + - `./cmd/boxctl policy status --json` + - `./cmd/boxctl update status` + - `./cmd/boxctl update status --json` +3. Run privileged actions as root: + - `sudo ./cmd/boxctl service start|stop|restart|reload` + - `sudo ./cmd/boxctl firewall enable|disable|renew` + - `sudo ./cmd/boxctl policy evaluate|enable|disable` + - `sudo ./cmd/boxctl update kernel|subs|geo|dashboard|all` + - `./cmd/boxctl firewall dry-run` +4. 
Run integration checks: + - `./tests/integration/test_phase2.sh` + - `./tests/integration/test_policy.sh` + - `./tests/integration/test_updater.sh` + - `sudo ./tests/integration/test_real_kernel.sh` + - `./tests/integration/test_docker_privileged.sh` + +## Updater + +Commands: +- `boxctl update kernel` +- `boxctl update subs` +- `boxctl update geo` +- `boxctl update dashboard` +- `boxctl update all` +- `boxctl update status --json` + +Update sources are configured in `/etc/box/box.toml` under: +- `[updater]` +- `[updater.kernel]` +- `[updater.subs]` +- `[updater.geo]` +- `[updater.dashboard]` + +Supported inputs: +- `url` +- `file` +- `source = "release"` for `kernel` and `geo` +- `checksum` +- `checksum_file` +- `target` + +Release resolver fields for `kernel` and `geo`: +- `source = "release"` +- `release_api_url` or `release_repo` +- `release_channel = stable|prerelease|any` +- `release_tag` +- `asset_regex` +- `checksum_asset_regex` +- `release_os` +- `release_arch` +- `archive_member_regex` + +Failure semantics: +- downloads go to staging first +- checksum verification follows `checksum_policy = off|optional|required` +- staged payload is validated before install +- install stages beside the target and renames into place with backup/restore on failure +- runtime handoff prefers reload when supported, otherwise controlled restart +- current `mihomo` and `sing-box` updater handoff uses controlled restart; API reload hooks remain TODO +- release resolution requires `jq` +- `source = "auto"` does not implicitly enable release downloads; set `source = "release"` explicitly +- kernel release assets support raw binaries, `.gz`, `.tar`, `.tar.gz`, `.tgz`, and `.tar.xz` +- dashboard archives support `.zip`, `.tar.gz`, `.tgz`, `.tar`, `.tar.xz` +- nested dashboard archive roots are flattened automatically when a single top-level directory is present +- dashboard target and download URL can be derived from core config (`external-ui` / `external_ui`, 
`external-ui-download-url` / `external_ui_download_url`) +- if the core config omits a dashboard target, updater falls back to `./dashboard` relative to the core config path +- `geo` updates do not restart the running core +- `kernel` updates restart only when the updated target matches the active core binary + +Timer units shipped in `systemd/`: +- `box-update-kernel.service` + `.timer` +- `box-update-subs.service` + `.timer` +- `box-update-geo.service` + `.timer` +- `box-update-dashboard.service` + `.timer` +- `box-update-all.service` + `.timer` + +Enable only the timers you actually want. Do not enable both the per-component timers and `box-update-all.timer` unless duplicate update attempts are acceptable in your environment. + +## Policy Watcher + +Commands: +- `boxctl policy evaluate` +- `boxctl policy enable` +- `boxctl policy disable` +- `boxctl policy status --json` + +Config is under `[policy]` in `/etc/box/box.toml`: +- `enabled = true|false` +- `proxy_mode = core|whitelist|blacklist` +- `debounce_seconds` +- `use_module_on_wifi_disconnect` +- `disable_marker` +- `allow_ifaces`, `ignore_ifaces` +- `allow_ssids`, `ignore_ssids` +- `allow_bssids`, `ignore_bssids` + +Behavior: +- policy watcher uses `ip monitor link route address` for Linux-native event intake +- active Wi-Fi identity prefers `nmcli`, then falls back to `iw` +- `wlan+`-style patterns are treated as prefix wildcards +- address-change refresh is decoupled from policy evaluation and triggers a background `firewall renew` +- `box-policy.service` is optional and should only be enabled when `[policy].enabled = true` + +## Docker Test Harness + +- Local docker-backed privileged validation: + - `./tests/integration/test_docker_privileged.sh` +- Optional secret/env input is loaded from ignored file: + - `.box-test-subscription.env` +- Tracked template: + - `.box-test-subscription.env.example` +- The harness runs: + - `./tests/integration/test_phase2.sh` + - `./tests/integration/test_real_kernel.sh` 
+- It uses a privileged Arch container and still shares the host kernel, so nftables kernel/runtime gaps will reproduce there too. + +## Arch Package Build/Install + +Build package from repo root: +- `cd packaging/arch && makepkg --noconfirm -f` + +Install package: +- `sudo pacman -U ./box4linux-*.pkg.tar.zst` + +Installed layout: +- `/usr/bin/boxctl` +- `/usr/lib/box4linux/cmd/boxctl` +- `/usr/lib/box4linux/lib/...` +- `/etc/box/box.toml` +- `/usr/lib/systemd/system/box.service` +- `/usr/lib/systemd/system/box-firewall.service` +- `/usr/lib/systemd/system/box-policy.service` +- `/usr/lib/systemd/system/box-update-*.service` +- `/usr/lib/systemd/system/box-update-*.timer` +- `/usr/share/doc/box4linux/` + +Config upgrade behavior: +- Package marks `/etc/box/box.toml` as backup config. +- Local edits are preserved across upgrades. +- New template versions land as `.pacnew` when needed. + +## Service Lifecycle (Packaged Install) + +Use helper script from package docs: +- `sudo /usr/share/doc/box4linux/systemd-lifecycle.sh enable` +- `sudo /usr/share/doc/box4linux/systemd-lifecycle.sh status` +- `sudo /usr/share/doc/box4linux/systemd-lifecycle.sh disable` +- The lifecycle helper enables `box-update-all.timer` by default and only enables `box-policy.service` when the loaded config has `[policy].enabled = true`. + +Manual equivalent: +- `sudo systemctl daemon-reload` +- `sudo systemctl enable --now box.service box-firewall.service` +- `sudo systemctl enable --now box-policy.service` only when `[policy].enabled = true` +- `sudo systemctl enable --now box-update-all.timer` for the default scheduled updater path +- `sudo systemctl disable --now box-policy.service box-firewall.service box.service box-update-all.timer` + +## Packaged Operational Quickstart + +1. Verify command and units: + - `boxctl service status --json` + - `boxctl firewall status --json` +2. Preview firewall operations: + - `boxctl firewall dry-run` +3. 
Start runtime: + - `sudo systemctl start box.service` +4. Renew firewall policy safely: + - `sudo systemctl reload box-firewall.service` +5. Run or schedule updates: + - `sudo systemctl start box-update-all.service` + - `sudo systemctl enable --now box-update-subs.timer` + +## CI/Release Flow + +Workflow file: `.github/workflows/ci.yml` + +On push/PR: +- `./tests/lint_shell.sh` +- mock integration: `./tests/integration/test_phase2.sh` +- privileged integration: `sudo ./tests/integration/test_real_kernel.sh` (suite prints `SKIP` when capabilities/tooling are unavailable) +- Arch package build in Arch container +- package smoke test: `./tests/integration/test_arch_package_smoke.sh <package-path>` + +On tags (`v*`): +- built package artifact is published to GitHub Releases + +## Phase 3 Notes + +- Supported cores: `mihomo`, `sing-box` +- Runtime overlays rendered under `/run/box/rendered` (or dev fallback) +- Updater components: `kernel`, `subs`, `geo`, `dashboard` +- Policy watcher commands: `evaluate`, `enable`, `disable`, `status` +- `kernel` and `geo` can resolve release assets by channel/tag/arch when explicitly configured with `source = "release"` +- Firewall backends: + - `iptables` (mature path) + - `nftables` (MVP parity) +- Supported modes on both backends: `tun`, `tproxy`, `redirect`, `mixed`, `enhance` +- DNS strategies: `tproxy`, `redirect`, `disable` +- Coexistence modes: + - `preserve_tailnet` (default): apply tailscale and MagicDNS bypasses + - `strict_box`: skip tailscale/MagicDNS bypass insertion +- Route convergence: renew/reapply prunes stale BOX fwmark rules and enforces one current `route_pref` rule +- Idempotent + lock-protected: `enable|renew|disable` +- `BOX_TRACE_COMMANDS=1` logs external command executions with component/action context +- `boxctl firewall status --json` exposes stable diagnostics (backend, capabilities, coexist fields, errors) +- `boxctl policy status --json` exposes watcher/runtime intent, active interfaces, Wi-Fi identity, and last 
refresh metadata + +## Backend Capability Notes + +- `iptables`: + - `cap_ipv4=true` + - `cap_ipv6=false` (full ip6tables graph pending) +- `nftables`: + - `cap_ipv4=true` + - `cap_ipv6=false` (full IPv6 interception/hijack graph pending) + +## Rollback/Uninstall + +Safe uninstall (keeps config backups/data unless manually removed): +- `sudo pacman -R box4linux` + +Optional manual purge of local state: +- `sudo rm -rf /etc/box /var/lib/box /run/box /var/log/box` + +## Remaining TODO + +- Full UID/GID/interface/MAC policy graph in firewall. +- Full IPv6 interception/hijack parity. +- API-based reload hooks for `mihomo` and `sing-box`. +- Broader kernel-capability probing across distro variants. diff --git a/cmd/boxctl b/cmd/boxctl new file mode 100755 index 0000000..b9d29cf --- /dev/null +++ b/cmd/boxctl @@ -0,0 +1,93 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +CMD_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +BOXCTL_SELF_PATH="$(cd "${CMD_DIR}" && pwd)/$(basename "${BASH_SOURCE[0]}")" +export BOXCTL_SELF_PATH +LIB_DIR_CANDIDATES=( + "${BOX_LIB_DIR:-}" + "${CMD_DIR}/../lib" + "/usr/lib/box4linux/lib" +) +LIB_DIR="" + +find_lib_dir() { + local candidate + for candidate in "${LIB_DIR_CANDIDATES[@]}"; do + [[ -n "${candidate}" ]] || continue + if [[ -f "${candidate}/common.sh" ]]; then + LIB_DIR="${candidate}" + export BOX_LIB_DIR="${LIB_DIR}" + return 0 + fi + done + return 1 +} + +if ! 
find_lib_dir; then + printf 'failed to locate box4linux lib directory; checked:\n' >&2 + local_candidate_list="$(printf ' %s\n' "${LIB_DIR_CANDIDATES[@]}")" + printf '%s' "${local_candidate_list}" >&2 + exit 1 +fi + +source "${LIB_DIR}/common.sh" +source "${LIB_DIR}/config.sh" +source "${LIB_DIR}/supervisor/supervisor.sh" +source "${LIB_DIR}/firewall/firewall.sh" +source "${LIB_DIR}/updater/updater.sh" +source "${LIB_DIR}/policy/policy.sh" + +usage() { + cat <<'EOF' +Usage: + boxctl service <action> [--json] + boxctl firewall <action> [--json] + boxctl update <action> [--json] + boxctl policy <action> [--json] +EOF +} + +main() { + local component="${1:-}" + local action="${2:-}" + shift 2 || true + + for arg in "$@"; do + if [[ "${arg}" == "--json" ]]; then + BOX_OUTPUT_FORMAT="json" + export BOX_OUTPUT_FORMAT + fi + done + + enable_command_trace "${component:-boxctl}" "${action:-none}" + trap 'disable_command_trace' EXIT + + case "${component}" in + service) + supervisor_cmd "${action}" + ;; + firewall) + firewall_cmd "${action}" + ;; + update) + updater_cmd "${action}" + ;; + policy) + policy_cmd "${action}" + ;; + -h|--help|help|"") + usage + ;; + *) + printf 'unknown component: %s\n' "${component}" >&2 + usage >&2 + return 2 + ;; + esac +} + +main "$@" diff --git a/docs/linux-port/04-component-firewall-routing.md b/docs/linux-port/04-component-firewall-routing.md index dc19b43..29782a6 100644 --- a/docs/linux-port/04-component-firewall-routing.md +++ b/docs/linux-port/04-component-firewall-routing.md @@ -72,6 +72,27 @@ If unsupported, apply controlled downgrade with explicit logs. - Do not hardcode interface names like `wlan0`. - Make route table id configurable to avoid collisions. +## Tailscale Coexistence Requirements +For hosts that run Tailscale alongside Box, firewall apply/cleanup must preserve Tailscale routing and DNS behavior. + +Hard requirements: + +- Never flush/delete non-BOX chains or global policy rules. 
+- Never touch Tailscale policy-routing entries (commonly table `52`, fwmark rules like `0x80000/0xff0000`, or rule priorities around `5210..5270`). +- Add explicit bypass for Tailscale interface traffic: +  - `-i tailscale0 -j RETURN` and `-o tailscale0 -j RETURN` in relevant chains. +- Add destination bypass CIDRs for tailnet traffic: +  - IPv4 `100.64.0.0/10` +  - IPv6 `fd7a:115c:a1e0::/48` +- Exclude Tailscale DNS endpoint from DNS hijack: +  - `100.100.100.100:53` +- Keep Box rule/table/pref IDs configurable and in a dedicated namespace to avoid collisions with existing local policy routing (for example `2022`, `2024`, `52` already in use on some hosts). + +DNS guidance: + +- When transparent DNS interception is enabled, provide `dns_exclude_servers` and `dns_exclude_domains` settings. +- Default excludes should include Tailscale resolver and tailnet domains (`*.ts.net` and local MagicDNS suffix). + ## Required Tests - each mode on IPv4-only and dual-stack - idempotent enable/renew/disable loops diff --git a/docs/linux-port/05-component-updater-and-assets.md b/docs/linux-port/05-component-updater-and-assets.md index caadce0..5808405 100644 --- a/docs/linux-port/05-component-updater-and-assets.md +++ b/docs/linux-port/05-component-updater-and-assets.md @@ -3,6 +3,15 @@ ## Objective Port `box.tool` update capabilities with stronger integrity and Linux artifact selection. +## Linux-Native Command Surface + +- `boxctl update kernel` +- `boxctl update subs` +- `boxctl update geo` +- `boxctl update dashboard` +- `boxctl update all` +- `boxctl update status [--json]` + ## Baseline Commands to Preserve - `upkernel`, `upkernels` - `subs`, `geox`, `geosub`, `upgeox_all` @@ -22,6 +31,13 @@ Port `box.tool` update capabilities with stronger integrity and Linux artifact s 4. 
`rollback` - `.bak` restore on failure +Implementation split in the Linux-native tree: +- `lib/updater/resolver.sh` +- `lib/updater/fetcher.sh` +- `lib/updater/verifier.sh` +- `lib/updater/installer.sh` +- `lib/updater/updater.sh` + ## Critical Linux Port Changes - Select Linux binaries for all architectures; remove Android-only downloads. - `upyq` must use Linux yq release assets. @@ -34,6 +50,15 @@ Port `box.tool` update capabilities with stronger integrity and Linux artifact s - Verify non-empty executable and expected file type. - Atomic move into final location. +Current Linux-native behavior: +- `checksum_policy = off|optional|required` +- verification supports literal sha256 or `checksum_file` +- file, archive, and directory payloads are staged before install +- runtime handoff happens only after validation + install succeed +- failed handoff restores the pre-update target from backup +- `kernel` and `geo` support explicit release-resolution mode via GitHub release metadata +- final install is staged in the target directory before the last rename + ## Subscription Pipeline For mihomo: 1. download each source @@ -54,12 +79,80 @@ For sing-box: - Keep external-ui URL override behavior. - Extract archive to temp dir; move only validated payload. 
+Current Linux-native install kinds: +- `kernel`: file -> executable target +- `subs`: file -> config source target +- `geo`: file -> data target +- `dashboard`: zip/tar archive or directory -> target directory + +## Config Model + +Single file for now: `/etc/box/box.toml` + +```toml +[updater] +artifact_dir = "/var/lib/box/artifacts" +staging_dir = "/var/lib/box/staging" +checksum_policy = "optional" +kernel_interval = "daily" +subs_interval = "hourly" +geo_interval = "daily" +dashboard_interval = "weekly" + +[updater.kernel] +url = "" +file = "" +checksum = "" +checksum_file = "" +target = "" +``` + +Accepted source fields per component: +- `url` or `file` +- optional `checksum` or `checksum_file` +- optional `target` + +Release-resolution fields for `kernel` and `geo`: +- `source = "release"` +- `release_api_url` or `release_repo` +- `release_channel = stable|prerelease|any` +- `release_tag` +- `asset_regex` +- `checksum_asset_regex` +- `release_os` +- `release_arch` +- `archive_member_regex` + +Resolver notes: +- `source = "auto"` does not implicitly enable release downloads +- `kernel` release mode falls back to built-in default repos for the supported cores +- `geo` release mode requires an explicit `release_repo` or `release_api_url` +- `jq` is required when resolving release metadata + +Failure rules: +- no configured source => component update fails safely +- explicit checksum mismatch => install aborted +- download failure => install aborted +- unchanged payload => status becomes `unchanged` and no runtime handoff runs + ## Timed Execution Replace embedded crond logic with systemd timers: - `box-update-subscriptions.timer` - `box-update-geodata.timer` - optional combined `box-update-all.timer` +Implemented units: +- `box-update-kernel.service` + `.timer` +- `box-update-subs.service` + `.timer` +- `box-update-geo.service` + `.timer` +- `box-update-dashboard.service` + `.timer` +- `box-update-all.service` + `.timer` + +Timer notes: +- shipped timers use 
conservative fixed schedules matching the default config intervals +- if you need different schedules, override the timer units with normal systemd drop-ins +- do not enable per-component timers together with `box-update-all.timer` unless repeated update attempts are acceptable + ## Resolver Strategy by Core 1. Query release metadata (GitHub API). 2. Select tag by policy (`stable`, `prerelease`). @@ -67,6 +160,23 @@ Replace embedded crond logic with systemd timers: 4. Fetch artifact + optional checksum. 5. Validate and install atomically. +Current resolver contract: +- `kernel` + - supports raw binaries, release-selected `.gz` payloads, and release-selected archives + - archive payloads extract the member matching `archive_member_regex` + - default repos: + - `mihomo` -> `MetaCubeX/mihomo` + - `sing-box` -> `SagerNet/sing-box` +- `geo` + - supports raw files and release-selected assets + - archive payloads extract the member matching `archive_member_regex` + - asset naming is intentionally config-driven via `asset_regex` +- `dashboard` + - supports `.zip`, `.tar`, `.tar.gz`, `.tgz`, `.tar.xz`, and pre-unpacked directories + - if an archive expands into a single top-level directory, that directory becomes the installed UI root + - if updater target/url are unset, the resolver can derive them from the current core config + - if the core config does not define a dashboard path, the install target falls back to `./dashboard` relative to that config file + ## Atomic Install Procedure ```bash download -> verify -> unpack(tmp) -> smoke-check(version) -> move(final) -> chmod/chown -> cleanup @@ -84,9 +194,34 @@ download -> verify -> unpack(tmp) -> smoke-check(version) -> move(final) -> chmo - `installed_versions` - `failed_reason` +Current Linux-native status file shape: +- top-level runtime metadata: `timestamp`, `artifact_dir`, `staging_dir`, `checksum_policy`, `core` +- per-component state: + - `configured` + - `status` + - `last_error` + - `last_attempt_ts` + - 
`last_success_ts` + - `source_ref` + - `target_path` + - `installed_sha256` + - `last_handoff` + - `interval` + ## Subscription Validation Before activation: - parse output format - ensure provider content non-empty - run core-specific config check - only then trigger reload/restart + +Current handoff behavior: +- `sing-box` subscriptions: controlled restart +- `mihomo` subscriptions: controlled restart +- `kernel`: controlled restart only when the updated target is the active running core binary +- `geo`: no forced restart +- `dashboard`: no runtime handoff + +TODO: +- replace restart fallback with real API-driven reloads once core-specific reload endpoints are implemented +- add richer release asset heuristics for common geo providers so fewer installs need explicit regex overrides diff --git a/docs/linux-port/06-component-network-policy-watchers.md b/docs/linux-port/06-component-network-policy-watchers.md index 0be7343..285473f 100644 --- a/docs/linux-port/06-component-network-policy-watchers.md +++ b/docs/linux-port/06-component-network-policy-watchers.md @@ -21,6 +21,16 @@ Use separate daemons or systemd path/network units: 3. 
optional file-triggered control - if needed for manual disable marker compatibility +Current Linux-native implementation: +- `lib/policy/context.sh` +- `lib/policy/engine.sh` +- `lib/policy/policy.sh` +- `systemd/box-policy.service` + +Implemented control surface: +- `boxctl policy evaluate|enable|disable|status` +- hidden `boxctl policy monitor` action for `systemd` + ## Policy Engine Contract Input: - network status @@ -38,10 +48,26 @@ Action: - Android `cmd wifi status` -> `nmcli`, `iw`, or NetworkManager DBus - interface/IP detection remains via `ip` tooling +## DNS Orchestrator Coexistence (resolved + Tailscale + Proxy TUN) +On Linux hosts like this one, DNS can be simultaneously influenced by: +- `systemd-resolved` link domains and per-link DNS +- Tailscale MagicDNS (`tailscale0`, `100.100.100.100`, `*.ts.net`) +- Proxy TUN DNS default route domains (for example `~.` on a proxy tunnel) + +Policy/watcher implications: +- Do not assume a single DNS authority. +- Detect and log current per-link DNS owners before applying DNS interception. +- Add a `dns_coexist_mode` policy: + - accepted values: `preserve_tailnet|strict_box` + - `preserve_tailnet` (default): do not hijack Tailscale resolver/domain path; preserve MagicDNS/system resolver flow. + - `strict_box`: explicit opt-in for full Box DNS hijack behavior. +- If `preserve_tailnet`, route `*.ts.net`/MagicDNS via system resolver path and bypass proxy DNS interception for Tailscale resolver. + ## Concurrency and Locking - Keep lock directory semantics under `/var/run/box/locks`. - one active policy evaluation at a time. - coalesce rapid network events. +- address-triggered firewall refresh runs as a separate background `firewall renew` worker so policy evaluation does not block on full firewall reapply. ## Baseline Defects to Correct - `ctr.inotify` references undefined `RUN_DIR`. @@ -77,3 +103,24 @@ on_net_event() { 3. 
fallback `unknown` If SSID data unavailable, policy should default based on `use_module_on_wifi_disconnect`. + +## Status Contract +`boxctl policy status --json` includes: +- `status` +- `policy_enabled` +- `watcher_running` +- `pid` +- `desired_state` +- `applied_state` +- `proxy_mode` +- `debounce_seconds` +- `active_ifaces` +- `wifi_connected` +- `ssid` +- `bssid` +- `disable_marker_present` +- `last_reason` +- `last_error` +- `last_event` +- `last_event_ts` +- `last_refresh_ts` diff --git a/docs/linux-port/07-component-installer-packaging.md b/docs/linux-port/07-component-installer-packaging.md index e156d5f..e74b6c4 100644 --- a/docs/linux-port/07-component-installer-packaging.md +++ b/docs/linux-port/07-component-installer-packaging.md @@ -1,73 +1,83 @@ # 07 - Component: Installer and Packaging ## Objective -Replace Magisk module installation/update/uninstall flow with Linux package/deploy logic. - -## Baseline Installer Functions -Current `box-reference/box/customize.sh` provides: -- install payload -- preserve old configs/binaries -- optional interactive downloads -- set permissions -- install boot hook - -## Linux Packaging Targets -Provide at least one of: -1. Debian package (`.deb`) -2. RPM package (`.rpm`) -3. tarball installer (`install.sh`) fallback - -## Package Contents -- `/usr/lib/box/` scripts -- `/etc/box/` default configs -- `/var/lib/box/` managed artifacts -- `/usr/bin/boxctl` CLI -- systemd unit/timer files - -## Install Script Responsibilities -1. create service user/group (or root mode with capabilities) -2. create directories and ownership -3. install config templates without clobbering local overrides -4. run migration for legacy config keys -5. 
enable/start selected units - -## Upgrade Strategy -- preserve `/etc/box/*.toml` -- preserve `/var/lib/box/bin/*` if newer than packaged versions -- write migration report at `/var/log/box/migration.log` - -## Uninstall Strategy -Equivalent of `box-reference/box/uninstall.sh` but safe: -- stop and disable all units -- remove generated firewall/routing state -- optionally keep config/data (`--purge` removes all) - -## Non-Interactive First -Do not require interactive key events (volume key flow in `box-reference/box/customize.sh` is Android-only). -Use explicit CLI flags instead: -- `boxctl update --bootstrap` -- `boxctl install --with-core sing-box` - -## Install Transaction Steps -1. precheck root + dependencies -2. create users/groups and directories -3. deploy binaries/scripts -4. install config templates if missing -5. migrate old config if present -6. daemon-reload + enable units -7. optional bootstrap update (`boxctl update kernel`) -8. health check - -On failure, rollback to previous known package state where possible. - -## Post-Install Verification -- `boxctl service status` -- `boxctl firewall status` -- `boxctl update check` (dry-run) -- ensure required dirs exist with expected permissions - -## Package Upgrade Hooks -- `preinst`: stop services safely -- `postinst`: run migration + restart -- `prerm`: disable timers/services -- `postrm`: optional purge + +Deliver Linux-native packaging/install lifecycle for repo-root implementation (`cmd/`, `lib/`, `etc/`, `systemd/`, `tests/`) with safe upgrade and uninstall semantics. + +## Arch Packaging (Implemented) + +Package assets: +- `packaging/arch/PKGBUILD` +- `packaging/arch/box4linux.install` + +Install paths: +- `/usr/bin/boxctl` +- `/usr/lib/box4linux/cmd/boxctl` +- `/usr/lib/box4linux/lib/...` +- `/etc/box/box.toml` +- `/usr/lib/systemd/system/box.service` +- `/usr/lib/systemd/system/box-firewall.service` +- `/usr/share/doc/box4linux/` + +Build/install: +1. `cd packaging/arch` +2. 
`makepkg --noconfirm -f` +3. `sudo pacman -U ./box4linux-*.pkg.tar.zst` + +## Config Upgrade Behavior + +- `/etc/box/box.toml` is registered as a backup config in PKGBUILD. +- Upgrades do not clobber local edits. +- New upstream defaults are provided as `.pacnew` when necessary. + +## Unit Lifecycle Safety + +Helper script: +- `packaging/scripts/systemd-lifecycle.sh` +- Installed to `/usr/share/doc/box4linux/systemd-lifecycle.sh` + +Safe operations: +- `enable`: daemon-reload, enable `box.service` and `box-firewall.service`, enable `box-policy.service` only when `[policy].enabled = true`, enable `box-update-all.timer` as the default scheduled updater timer, then start service +- `disable`: stop/disable service, firewall, policy, and shipped updater units/timers, then daemon-reload +- `restart`: restart service/policy units without touching config/data + +Pacman hook behavior (`box4linux.install`): +- `post_install`/`post_upgrade`: daemon-reload, operator guidance +- `pre_remove`: best-effort `boxctl policy disable`, `boxctl firewall disable`, `boxctl service stop`, disable all shipped units/timers +- `post_remove`: leave config/data unless manually purged + +## CI/Release Automation + +Workflow: +- `.github/workflows/ci.yml` + +On push/PR: +- shell syntax checks (`bash -n`) +- shellcheck (when available) +- `./tests/integration/test_phase2.sh` +- `sudo ./tests/integration/test_real_kernel.sh` (skip-capable) +- Arch package build in container +- package smoke test (`./tests/integration/test_arch_package_smoke.sh`) + +On tag (`v*`): +- publish built `.pkg.tar.*` artifact to GitHub Release + +## Smoke Validation + +Script: +- `tests/integration/test_arch_package_smoke.sh` + +What it verifies: +1. package can be extracted into a clean temp root +2. installed paths/files exist +3. `boxctl service status --json` works in installed layout +4. `boxctl firewall status --json` works in installed layout +5. `boxctl firewall dry-run` prints planned operations +6. 
`systemd-analyze verify` runs when available + +## Uninstall / Rollback Notes + +- Default uninstall: `sudo pacman -R box4linux` +- Keeps operator-managed data/config backups by default. +- Explicit purge is manual and should be deliberate: + - `sudo rm -rf /etc/box /var/lib/box /run/box /var/log/box` diff --git a/docs/linux-port/08-delivery-plan-and-risk-register.md b/docs/linux-port/08-delivery-plan-and-risk-register.md index 965d817..a292fbc 100644 --- a/docs/linux-port/08-delivery-plan-and-risk-register.md +++ b/docs/linux-port/08-delivery-plan-and-risk-register.md @@ -38,6 +38,7 @@ 3. cgroup v1/v2 behavior mismatch 4. core config mutation compatibility across core versions 5. remote release asset naming changes +6. DNS ownership conflicts with `systemd-resolved` + Tailscale + proxy-tun `~.` domains ## Known Baseline Issues (from source audit) - undefined `${settings}` writes in `box.service` and `box.iptables` @@ -52,6 +53,11 @@ - Modes: `tun`, `tproxy`, `redirect`, `mixed`, `enhance` - Cores: `mihomo`, `sing-box` (phase 1), then `xray`, `v2fly`, `hysteria` - IPv4-only / dual-stack +- Tailscale present/absent +- DNS stacks: +- plain resolved +- resolved + Tailscale MagicDNS +- resolved + Tailscale + proxy tun default DNS domain (`~.`) ## Suggested Initial Milestone Deliver MVP with: @@ -89,6 +95,7 @@ Then expand to remaining cores/modes. 
- no Android-only command/path usage - documented upgrade and rollback path - reproducible installation on at least two Linux distributions +- Tailscale reachability and MagicDNS preserved when Box firewall is `enable`, `renew`, and `disable` ## Linux-Native Implementation Checklist Use this checklist during review to keep the port \"native\": diff --git a/docs/linux-port/09-phase2-firewall-supervisor-tests.md b/docs/linux-port/09-phase2-firewall-supervisor-tests.md new file mode 100644 index 0000000..e13ca8b --- /dev/null +++ b/docs/linux-port/09-phase2-firewall-supervisor-tests.md @@ -0,0 +1,142 @@ +# 09 - Linux Firewall/Supervisor Notes + +## Scope Implemented + +- Firewall staged apply/rollback in: + - `lib/firewall/backend_iptables.sh` + - `lib/firewall/backend_nft.sh` +- Modes on both backends: `tun`, `tproxy`, `redirect`, `mixed`, `enhance` +- DNS hijack strategies: `tproxy`, `redirect`, `disable` +- Tailscale coexistence safeguards: + - preserve existing tailscale policy/routing ownership + - never flush non-BOX rules/routes + - bypass `tailscale0`, `100.64.0.0/10`, and resolver `100.100.100.100:53` + - expose tailscale preservation flags in `firewall status --json` +- Core runtime overlays for `mihomo` and `sing-box`: + - `lib/supervisor/mutator_mihomo.sh` + - `lib/supervisor/mutator_sing_box.sh` +- JSON status output: + - `boxctl service status --json` + - `boxctl firewall status --json` +- Dry-run: + - `boxctl firewall dry-run` +- Command tracing: + - `BOX_TRACE_COMMANDS=1 ./cmd/boxctl firewall enable` + - `BOX_TRACE_COMMANDS=1 ./cmd/boxctl service start` +- Integration test harness: + - `tests/integration/test_phase2.sh` + - `tests/integration/test_real_kernel.sh` (privileged netns suite) + +## Firewall Apply Order + +1. Cleanup existing BOX-owned objects. +2. Create base BOX chains and attach table jumps. +3. Apply anti-loop guards. +4. Apply tailscale bypass and DNS exclusions. +5. Apply policy placeholder stage. +6. Apply mode-specific rules. +7. 
Apply DNS strategy rules. +8. Ensure policy routing for mark-based paths. +9. On any failure: full cleanup rollback. + +## Coexistence Modes + +- `preserve_tailnet` (default): + - apply tailscale bypass for `tailscale0` + - bypass tailnet CIDR `100.64.0.0/10` + - exclude `100.100.100.100:53` from DNS hijack +- `strict_box`: + - skip tailscale bypass and MagicDNS exclusion rule insertion + - still keep non-destructive cleanup boundaries (BOX-owned objects only) + +## Route Pref Convergence + +- Renew/reapply path prunes stale BOX policy rules matching the configured Box fwmark+table regardless of previous `pref`. +- After apply there must be exactly one BOX fwmark rule at the current `route_pref`. +- Tailscale rules (e.g., `fwmark 0x80000/0xff0000` and table `52`) are never targeted. + +## Backend Matrix + +- `iptables`: + - mature path in this repo + - status includes capability and tailscale coexist flags + - `cap_ipv4=true`, `cap_ipv6=false` (no ip6tables graph yet) +- `nftables`: + - MVP parity with iptables modes/DNS/coexist behavior + - cleanup only deletes BOX-owned nft tables (`inet box_mangle`, `ip box_nat`) + - `cap_ipv4=true`, `cap_ipv6=false` (full IPv6 parity still pending) + +## Status Diagnostics Schema + +- `boxctl firewall status --json` includes stable diagnostics: + - `backend` / `backend_selected` + - `backend_available` + - `mode` + - `dns_hijack_mode` + - `dns_coexist_mode` + - `dns_coexist_mode_active` + - `cap_ipv4`, `cap_ipv6`, `cap_tproxy` + - `dry_run_supported` + - `last_error` +- Status path is read-only and must not create firewall objects. +- If `BOX_CONFIG_FILE` is explicitly set but missing, commands fail fast with `E_CONFIG_FILE` (no dev/system fallback). 
+ +## Status JSON Contract + +- `boxctl service status --json` + - required fields: `status`, `core`, `pid`, `mode`, `dns_hijack_mode`, `rendered_config`, `config` + - conditional fields: none + - healthy example: +```json +{"status":"healthy","core":"mihomo","pid":1234,"mode":"mixed","dns_hijack_mode":"redirect","rendered_config":"/run/box/rendered/mihomo/config.yaml","config":"/etc/box/box.toml"} +``` +- `boxctl firewall status --json` + - required fields: `status`, `mode`, `backend`, `backend_selected`, `dns_hijack_mode`, `dns_coexist_mode`, `dns_coexist_mode_active`, `backend_capabilities`, `last_error`, `backend_available`, `cap_tproxy`, `cap_ipv4`, `cap_ipv6`, `dry_run_supported`, `tailscale_bypass_applied`, `tailscale_mark_rule`, `tailscale_table_present`, `chain_mangle`, `chain_nat`, `chain_dns_mangle`, `chain_dns_nat`, `route_rule`, `route_table_installed` + - conditional fields: `error` is emitted only when `last_error` is non-empty + - healthy example: +```json +{"status":"enabled","mode":"mixed","backend":"iptables","backend_selected":"iptables","dns_hijack_mode":"redirect","dns_coexist_mode":"preserve_tailnet","dns_coexist_mode_active":"preserve_tailnet","backend_capabilities":"backend=iptables,available=true,ipv4=true,ipv6=false,tproxy=true,dry_run=true","last_error":"","backend_available":true,"cap_tproxy":true,"cap_ipv4":true,"cap_ipv6":false,"dry_run_supported":true,"tailscale_bypass_applied":true,"tailscale_mark_rule":true,"tailscale_table_present":true,"chain_mangle":true,"chain_nat":true,"chain_dns_mangle":true,"chain_dns_nat":true,"route_rule":true,"route_table_installed":true} +``` + - error example: +```json +{"status":"disabled","mode":"tun","backend":"iptables","backend_selected":"iptables","dns_hijack_mode":"disable","dns_coexist_mode":"preserve_tailnet","dns_coexist_mode_active":"preserve_tailnet","backend_capabilities":"backend=iptables,available=false,ipv4=false,ipv6=false,tproxy=false,dry_run=true","last_error":"iptables inspection 
unavailable (need root/CAP_NET_ADMIN or kernel support)","backend_available":false,"cap_tproxy":false,"cap_ipv4":false,"cap_ipv6":false,"dry_run_supported":true,"tailscale_bypass_applied":false,"tailscale_mark_rule":false,"tailscale_table_present":false,"chain_mangle":false,"chain_nat":false,"chain_dns_mangle":false,"chain_dns_nat":false,"route_rule":false,"route_table_installed":false,"error":"iptables inspection unavailable (need root/CAP_NET_ADMIN or kernel support)"} +``` + +## Real-Kernel Validation + +- Run with root privileges: + - `sudo ./tests/integration/test_real_kernel.sh` +- Suite behavior: + - creates an isolated network namespace for safety + - runs backend checks for `iptables` and `nftables` when each is usable + - validates `enable|renew|disable` idempotency and BOX artifact cleanup + - verifies tailscale coexistence invariants (fwmark rule + table `52` route) remain intact + - verifies coexistence differences between `preserve_tailnet` and `strict_box` +- Skip semantics: + - exits with `SKIP: ...` when root/CAP_SYS_ADMIN/CAP_NET_ADMIN or backend tools are unavailable. + +## Package Smoke Validation + +- Arch package smoke script: + - `tests/integration/test_arch_package_smoke.sh ` +- Validates packaged install layout and status/dry-run commands without mutating host state: + - `boxctl service status --json` + - `boxctl firewall status --json` + - `boxctl firewall dry-run` +- Verifies unit files exist in package root and runs `systemd-analyze verify` when available. + + +## Overlay Contract + +- Source config files are never edited in place. +- Rendered runtime config path: + - `${BOX_RUN_DIR}/rendered/mihomo/config.yaml` + - `${BOX_RUN_DIR}/rendered/sing-box/config.json` +- Default run dir is `/run/box`, with repo-local fallback via runtime path resolver. + +## Known Gaps + +- UID/GID/interface/MAC policy filters are placeholders. +- nftables IPv6-specific tailnet chain rules are pending. 
+- Kernel feature probing remains lightweight (tool-level + basic tproxy probe). +- API reload is still TODO for both cores. diff --git a/docs/linux-port/10-steering-prompt-tailscale-dns.md b/docs/linux-port/10-steering-prompt-tailscale-dns.md new file mode 100644 index 0000000..e325905 --- /dev/null +++ b/docs/linux-port/10-steering-prompt-tailscale-dns.md @@ -0,0 +1,41 @@ +# 10 - Steering Prompt (Tailscale + DNS Safe) + +Use this prompt when asking an AI to implement or modify firewall/network logic for this project. + +```text +You are implementing Linux-native Box networking in this repository. + +Hard constraints: +1. Do not modify anything under `box-reference/`; it is reference-only. +2. Native code lives at repo root (`cmd/`, `lib/`, `systemd/`, `tests/`, `docs/`). +3. Keep behavior Linux-native (no Android-only commands/paths in runtime flow). + +Coexistence requirements (must pass): +1. Preserve Tailscale routing and DNS behavior. +2. Never delete/flush non-BOX policy rules or routes. +3. Never alter Tailscale-owned policy routing entries (commonly table `52` and fwmark rules like `0x80000/0xff0000`). +4. Always bypass interception for `tailscale0` traffic. +5. Always bypass tailnet CIDRs: + - IPv4: `100.64.0.0/10` + - IPv6: `fd7a:115c:a1e0::/48` +6. Exclude Tailscale DNS resolver from DNS hijack: + - `100.100.100.100:53` +7. Keep Box rule/table IDs configurable and namespaced. + +DNS behavior constraints: +1. Host may use `systemd-resolved` with per-link DNS and route-only domains. +2. Host may have proxy-tun DNS `~.` on another interface. +3. Implement `dns_coexist_mode` with default `preserve_tailnet`. +4. In `preserve_tailnet`, MagicDNS (`*.ts.net` and local tailnet suffix) must keep working. + +Required tests: +1. Repeated `firewall enable|renew|disable` is idempotent. +2. No leaked BOX-owned rules/routes after disable. +3. Tailscale reachability works before/after firewall operations. +4. MagicDNS resolution works before/after firewall operations. 
+ +When done: +- Summarize changed files. +- Show exact commands used for verification. +- List residual risks and TODOs. +``` diff --git a/docs/linux-port/README.md b/docs/linux-port/README.md index 52f0965..d8a2546 100644 --- a/docs/linux-port/README.md +++ b/docs/linux-port/README.md @@ -14,6 +14,8 @@ All new Linux-native implementation code should live at repository root (`cmd/`, - `06-component-network-policy-watchers.md` - `07-component-installer-packaging.md` - `08-delivery-plan-and-risk-register.md` +- `09-phase2-firewall-supervisor-tests.md` +- `10-steering-prompt-tailscale-dns.md` ## Source Baseline All analysis is based on: diff --git a/etc/box/box.toml b/etc/box/box.toml new file mode 100644 index 0000000..83b1fbf --- /dev/null +++ b/etc/box/box.toml @@ -0,0 +1,95 @@ +[core] +selected = "mihomo" +bin_dir = "/usr/local/bin" +workdir = "/var/lib/box" +config_source = "/etc/box/profiles/config.yaml" + +[network] +mode = "tun" +tproxy_port = 9898 +redir_port = 9797 +dns_port = 1053 +dns_hijack_mode = "tproxy" +dns_coexist_mode = "preserve_tailnet" +tailscale_iface = "tailscale0" +tailnet_ipv4_cidr = "100.64.0.0/10" +tailnet_ipv6_cidr = "fd7a:115c:a1e0::/48" +tailscale_dns_resolver = "100.100.100.100" +tailscale_fwmark = "0x80000/0xff0000" +tailscale_route_table = 52 + +[firewall] +backend = "iptables" +route_table = 2024 +route_pref = 100 +fwmark = "16777216/16777216" + +[policy] +enabled = false +proxy_mode = "core" +debounce_seconds = 3 +use_module_on_wifi_disconnect = false +disable_marker = "/run/box/disable" +allow_ifaces = [] +ignore_ifaces = [] +allow_ssids = [] +ignore_ssids = [] +allow_bssids = [] +ignore_bssids = [] + +[updater] +artifact_dir = "/var/lib/box/artifacts" +staging_dir = "/var/lib/box/staging" +checksum_policy = "optional" +kernel_interval = "daily" +subs_interval = "hourly" +geo_interval = "daily" +dashboard_interval = "weekly" + +[updater.kernel] +source = "auto" +release_api_url = "" +release_repo = "" +release_channel = "stable" 
+release_tag = "" +asset_regex = "" +checksum_asset_regex = "" +release_os = "linux" +release_arch = "" +archive_member_regex = "" +url = "" +file = "" +checksum = "" +checksum_file = "" +target = "" + +[updater.subs] +url = "" +file = "" +checksum = "" +checksum_file = "" +target = "" + +[updater.geo] +source = "auto" +release_api_url = "" +release_repo = "" +release_channel = "stable" +release_tag = "" +asset_regex = "" +checksum_asset_regex = "" +release_os = "linux" +release_arch = "" +archive_member_regex = "" +url = "" +file = "" +checksum = "" +checksum_file = "" +target = "" + +[updater.dashboard] +url = "" +file = "" +checksum = "" +checksum_file = "" +target = "" diff --git a/lib/common.sh b/lib/common.sh new file mode 100755 index 0000000..342c853 --- /dev/null +++ b/lib/common.sh @@ -0,0 +1,273 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +BOX_LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +BOX_REPO_ROOT="$(cd "${BOX_LIB_DIR}/.." && pwd)" + +BOX_ETC_DIR_DEFAULT="/etc/box" +BOX_VAR_DIR_DEFAULT="/var/lib/box" +BOX_RUN_DIR_DEFAULT="/run/box" +BOX_LOG_DIR_DEFAULT="/var/log/box" + +BOX_DEV_ROOT="${BOX_REPO_ROOT}/.box-dev" +BOX_DEV_VAR_DIR="${BOX_DEV_ROOT}/var" +BOX_DEV_RUN_DIR="${BOX_DEV_ROOT}/run" +BOX_DEV_LOG_DIR="${BOX_DEV_ROOT}/log" + +BOX_VAR_DIR="${BOX_VAR_DIR:-${BOX_VAR_DIR_DEFAULT}}" +BOX_RUN_DIR="${BOX_RUN_DIR:-${BOX_RUN_DIR_DEFAULT}}" +BOX_LOG_DIR="${BOX_LOG_DIR:-${BOX_LOG_DIR_DEFAULT}}" +BOX_LOCK_DIR="${BOX_LOCK_DIR:-${BOX_RUN_DIR}/locks}" +BOX_OUTPUT_FORMAT="${BOX_OUTPUT_FORMAT:-text}" +BOX_LOG_TO_FILE="${BOX_LOG_TO_FILE:-1}" +BOX_TRACE_COMMANDS="${BOX_TRACE_COMMANDS:-0}" + +E_CONFIG=10 +E_CORE_START=20 +E_FIREWALL_APPLY=30 +E_POLICY=40 +E_UPDATE=50 + +resolve_runtime_path() { + local preferred="$1" + local fallback="$2" + local resolved="${preferred}" + + if [[ ! -d "${preferred}" ]]; then + mkdir -p "${preferred}" 2>/dev/null || resolved="${fallback}" + fi + + if [[ "${resolved}" == "${preferred}" && ! 
-w "${preferred}" ]]; then + resolved="${fallback}" + fi + + mkdir -p "${resolved}" 2>/dev/null || true + printf '%s\n' "${resolved}" +} + +init_runtime_paths() { + BOX_VAR_DIR="$(resolve_runtime_path "${BOX_VAR_DIR}" "${BOX_DEV_VAR_DIR}")" + BOX_RUN_DIR="$(resolve_runtime_path "${BOX_RUN_DIR}" "${BOX_DEV_RUN_DIR}")" + BOX_LOG_DIR="$(resolve_runtime_path "${BOX_LOG_DIR}" "${BOX_DEV_LOG_DIR}")" + BOX_LOCK_DIR="${BOX_RUN_DIR}/locks" + mkdir -p "${BOX_LOCK_DIR}" "${BOX_RUN_DIR}/state" 2>/dev/null || true + export BOX_VAR_DIR BOX_RUN_DIR BOX_LOG_DIR BOX_LOCK_DIR +} + +timestamp_utc() { + date -u +"%Y-%m-%dT%H:%M:%SZ" +} + +log() { + local level="${1:-INFO}" + local component="${2:-main}" + local event_id="${3:-GENERIC}" + local message="${4:-}" + local escaped_message ts log_line log_file + + escaped_message="${message//\\/\\\\}" + escaped_message="${escaped_message//\"/\\\"}" + escaped_message="${escaped_message//$'\n'/\\n}" + escaped_message="${escaped_message//$'\r'/\\r}" + escaped_message="${escaped_message//$'\t'/\\t}" + + ts="$(timestamp_utc)" + log_line="ts=${ts} level=${level} component=${component} event_id=${event_id} msg=\"${escaped_message}\"" + printf '%s\n' "${log_line}" >&2 + if [[ "${BOX_LOG_TO_FILE}" == "1" ]]; then + init_runtime_paths + log_file="${BOX_LOG_DIR}/${component}.log" + printf '%s\n' "${log_line}" >>"${log_file}" 2>/dev/null || true + fi +} + +require_cmd() { + local cmd="${1:?missing command name}" + if ! 
command -v "${cmd}" >/dev/null 2>&1; then + log "ERROR" "common" "E_MISSING_CMD" "required command not found: ${cmd}" + return 127 + fi +} + +require_root() { + if [[ "${BOX_UNSAFE_SKIP_ROOT_CHECK:-0}" == "1" ]]; then + return 0 + fi + if [[ "${EUID}" -ne 0 ]]; then + log "ERROR" "common" "E_ROOT_REQUIRED" "this action requires root privileges" + return 1 + fi +} + +json_escape() { + local raw="${1:-}" + raw="${raw//\\/\\\\}" + raw="${raw//\"/\\\"}" + raw="${raw//$'\n'/\\n}" + raw="${raw//$'\r'/\\r}" + raw="${raw//$'\t'/\\t}" + printf '%s' "${raw}" +} + +json_pair() { + local key="${1:?missing key}" + local value="${2:-}" + printf '"%s":"%s"' "$(json_escape "${key}")" "$(json_escape "${value}")" +} + +json_num_pair() { + local key="${1:?missing key}" + local value="${2:-0}" + printf '"%s":%s' "$(json_escape "${key}")" "${value}" +} + +json_bool_pair() { + local key="${1:?missing key}" + local value="${2:-false}" + if [[ "${value}" == "true" || "${value}" == "1" ]]; then + printf '"%s":true' "$(json_escape "${key}")" + else + printf '"%s":false' "$(json_escape "${key}")" + fi +} + +trace_cmd() { + local component="${1:-trace}" + shift || true + if [[ "${BOX_TRACE_COMMANDS}" == "1" ]]; then + local cmd + printf -v cmd '%q ' "$@" + log "DEBUG" "${component}" "TRACE_CMD" "action=${BOX_TRACE_ACTION:-unknown} cmd=${cmd% }" + fi +} + +trace_external_command() { + local raw="${1:-}" + local trimmed token kind unresolved_cmd=0 + + [[ -n "${raw}" ]] || return 0 + + trimmed="${raw#"${raw%%[![:space:]]*}"}" + case "${trimmed}" in + [A-Za-z_]*=*) return 0 ;; + esac + while [[ -n "${trimmed}" ]]; do + token="${trimmed%%[[:space:]]*}" + if [[ "${token}" == *=* ]]; then + trimmed="${trimmed#"${token}"}" + trimmed="${trimmed#"${trimmed%%[![:space:]]*}"}" + continue + fi + break + done + + token="${trimmed%%[[:space:];|&]*}" + [[ -n "${token}" ]] || return 0 + case "${token}" in + awk|date|mkdir) + return 0 + ;; + esac + + case "${token}" in + \$*|\"\$*|\'\$*) + unresolved_cmd=1 + 
;; + esac + if [[ "${unresolved_cmd}" == "0" ]]; then + kind="$(type -t -- "${token}" 2>/dev/null || true)" + [[ "${kind}" == "file" ]] || return 0 + fi + + log "DEBUG" "${BOX_TRACE_COMPONENT:-trace}" "TRACE_CMD" \ + "action=${BOX_TRACE_ACTION:-unknown} cmd=${raw}" +} + +enable_command_trace() { + local component="${1:-trace}" + local action="${2:-unknown}" + if [[ "${BOX_TRACE_COMMANDS}" != "1" ]]; then + return 0 + fi + BOX_TRACE_COMPONENT="${component}" + BOX_TRACE_ACTION="${action}" + export BOX_TRACE_COMPONENT BOX_TRACE_ACTION + if shopt -qo functrace; then + BOX_TRACE_FUNCTRACE_RESTORE="keep" + else + BOX_TRACE_FUNCTRACE_RESTORE="unset" + set -o functrace + fi + export BOX_TRACE_FUNCTRACE_RESTORE + trap 'if [[ "${_BOX_TRACE_GUARD:-0}" == "0" ]]; then _BOX_TRACE_GUARD=1; trace_external_command "${BASH_COMMAND:-}"; _BOX_TRACE_GUARD=0; fi' DEBUG +} + +disable_command_trace() { + if [[ "${BOX_TRACE_COMMANDS}" != "1" ]]; then + return 0 + fi + trap - DEBUG + if [[ "${BOX_TRACE_FUNCTRACE_RESTORE:-keep}" == "unset" ]]; then + set +o functrace + fi + unset BOX_TRACE_COMPONENT BOX_TRACE_ACTION + unset BOX_TRACE_FUNCTRACE_RESTORE +} + +lock_path_for() { + local name="${1:?missing lock name}" + init_runtime_paths + printf '%s/%s.lock\n' "${BOX_LOCK_DIR}" "${name}" +} + +with_lock() { + local lock_name="${1:?missing lock name}" + local timeout_sec="${2:?missing lock timeout}" + shift 2 + local lock_file fd rc + + lock_file="$(lock_path_for "${lock_name}")" + exec {fd}>"${lock_file}" + if ! flock -w "${timeout_sec}" "${fd}"; then + log "ERROR" "common" "E_LOCK_TIMEOUT" "lock timeout: ${lock_name}" + exec {fd}>&- + return 1 + fi + + "$@" + rc=$? + flock -u "${fd}" || true + exec {fd}>&- + return "${rc}" +} + +try_lock() { + local lock_name="${1:?missing lock name}" + local timeout_sec="${2:-0}" + local lock_file fd + + lock_file="$(lock_path_for "${lock_name}")" + exec {fd}>"${lock_file}" + if ! 
flock -w "${timeout_sec}" "${fd}"; then + exec {fd}>&- + return 1 + fi + flock -u "${fd}" || true + exec {fd}>&- + return 0 +} + +is_pid_alive() { + local pid="${1:-}" + [[ -n "${pid}" ]] && kill -0 "${pid}" 2>/dev/null +} + +read_pid_file() { + local pid_file="${1:?missing pid file}" + if [[ -f "${pid_file}" ]]; then + tr -d '[:space:]' <"${pid_file}" + fi +} diff --git a/lib/config.sh b/lib/config.sh new file mode 100755 index 0000000..df3a03b --- /dev/null +++ b/lib/config.sh @@ -0,0 +1,683 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +BOX_CONFIG_FILE="${BOX_CONFIG_FILE:-}" +BOX_CONFIG_SOURCE="${BOX_CONFIG_SOURCE:-}" + +# Runtime config values; initialized via config_defaults(). +BOX_CORE="" +BOX_NETWORK_MODE="" +BOX_TPROXY_PORT="" +BOX_REDIR_PORT="" +BOX_DNS_PORT="" +BOX_DNS_HIJACK_MODE="" +BOX_DNS_COEXIST_MODE="" +BOX_TAILSCALE_IFACE="" +BOX_TAILNET_IPV4_CIDR="" +BOX_TAILNET_IPV6_CIDR="" +BOX_TAILSCALE_DNS_RESOLVER="" +BOX_TAILSCALE_FWMARK="" +BOX_TAILSCALE_ROUTE_TABLE="" + +BOX_FIREWALL_BACKEND="" +BOX_ROUTE_TABLE="" +BOX_ROUTE_PREF="" +BOX_FWMARK="" + +BOX_POLICY_ENABLED="" +BOX_POLICY_PROXY_MODE="" +BOX_POLICY_DEBOUNCE_SECONDS="" +BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT="" +BOX_POLICY_DISABLE_MARKER="" +declare -a BOX_POLICY_ALLOW_IFACES=() +declare -a BOX_POLICY_IGNORE_IFACES=() +declare -a BOX_POLICY_ALLOW_SSIDS=() +declare -a BOX_POLICY_IGNORE_SSIDS=() +declare -a BOX_POLICY_ALLOW_BSSIDS=() +declare -a BOX_POLICY_IGNORE_BSSIDS=() + +BOX_CORE_BIN_DIR="" +BOX_CORE_WORKDIR="" +BOX_CORE_CONFIG_SOURCE="" + +BOX_UPDATER_ARTIFACT_DIR="" +BOX_UPDATER_STAGING_DIR="" +BOX_UPDATER_CHECKSUM_POLICY="" +BOX_UPDATER_KERNEL_INTERVAL="" +BOX_UPDATER_SUBS_INTERVAL="" +BOX_UPDATER_GEO_INTERVAL="" +BOX_UPDATER_DASHBOARD_INTERVAL="" +BOX_UPDATER_KERNEL_URL="" +BOX_UPDATER_KERNEL_FILE="" +BOX_UPDATER_KERNEL_SOURCE="" +BOX_UPDATER_KERNEL_RELEASE_API_URL="" +BOX_UPDATER_KERNEL_RELEASE_REPO="" +BOX_UPDATER_KERNEL_RELEASE_CHANNEL="" 
+BOX_UPDATER_KERNEL_RELEASE_TAG="" +BOX_UPDATER_KERNEL_ASSET_REGEX="" +BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX="" +BOX_UPDATER_KERNEL_RELEASE_OS="" +BOX_UPDATER_KERNEL_RELEASE_ARCH="" +BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX="" +BOX_UPDATER_KERNEL_CHECKSUM="" +BOX_UPDATER_KERNEL_CHECKSUM_FILE="" +BOX_UPDATER_KERNEL_TARGET="" +BOX_UPDATER_SUBS_URL="" +BOX_UPDATER_SUBS_FILE="" +BOX_UPDATER_SUBS_CHECKSUM="" +BOX_UPDATER_SUBS_CHECKSUM_FILE="" +BOX_UPDATER_SUBS_TARGET="" +BOX_UPDATER_GEO_URL="" +BOX_UPDATER_GEO_FILE="" +BOX_UPDATER_GEO_SOURCE="" +BOX_UPDATER_GEO_RELEASE_API_URL="" +BOX_UPDATER_GEO_RELEASE_REPO="" +BOX_UPDATER_GEO_RELEASE_CHANNEL="" +BOX_UPDATER_GEO_RELEASE_TAG="" +BOX_UPDATER_GEO_ASSET_REGEX="" +BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX="" +BOX_UPDATER_GEO_RELEASE_OS="" +BOX_UPDATER_GEO_RELEASE_ARCH="" +BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX="" +BOX_UPDATER_GEO_CHECKSUM="" +BOX_UPDATER_GEO_CHECKSUM_FILE="" +BOX_UPDATER_GEO_TARGET="" +BOX_UPDATER_DASHBOARD_URL="" +BOX_UPDATER_DASHBOARD_FILE="" +BOX_UPDATER_DASHBOARD_CHECKSUM="" +BOX_UPDATER_DASHBOARD_CHECKSUM_FILE="" +BOX_UPDATER_DASHBOARD_TARGET="" + +config_defaults() { + BOX_CORE="mihomo" + BOX_NETWORK_MODE="tun" + BOX_TPROXY_PORT="9898" + BOX_REDIR_PORT="9797" + BOX_DNS_PORT="1053" + BOX_DNS_HIJACK_MODE="tproxy" + BOX_DNS_COEXIST_MODE="preserve_tailnet" + BOX_TAILSCALE_IFACE="tailscale0" + BOX_TAILNET_IPV4_CIDR="100.64.0.0/10" + BOX_TAILNET_IPV6_CIDR="fd7a:115c:a1e0::/48" + BOX_TAILSCALE_DNS_RESOLVER="100.100.100.100" + BOX_TAILSCALE_FWMARK="0x80000/0xff0000" + BOX_TAILSCALE_ROUTE_TABLE="52" + BOX_FIREWALL_BACKEND="iptables" + BOX_ROUTE_TABLE="2024" + BOX_ROUTE_PREF="100" + BOX_FWMARK="16777216/16777216" + BOX_POLICY_ENABLED="false" + BOX_POLICY_PROXY_MODE="core" + BOX_POLICY_DEBOUNCE_SECONDS="3" + BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT="false" + BOX_POLICY_DISABLE_MARKER="${BOX_RUN_DIR}/disable" + BOX_POLICY_ALLOW_IFACES=() + BOX_POLICY_IGNORE_IFACES=() + BOX_POLICY_ALLOW_SSIDS=() + 
BOX_POLICY_IGNORE_SSIDS=() + BOX_POLICY_ALLOW_BSSIDS=() + BOX_POLICY_IGNORE_BSSIDS=() + BOX_CORE_BIN_DIR="/usr/local/bin" + BOX_CORE_WORKDIR="${BOX_VAR_DIR_DEFAULT}" + BOX_CORE_CONFIG_SOURCE="/etc/box/profiles/config.yaml" + BOX_UPDATER_ARTIFACT_DIR="${BOX_VAR_DIR_DEFAULT}/artifacts" + BOX_UPDATER_STAGING_DIR="${BOX_VAR_DIR_DEFAULT}/staging" + BOX_UPDATER_CHECKSUM_POLICY="optional" + BOX_UPDATER_KERNEL_INTERVAL="daily" + BOX_UPDATER_SUBS_INTERVAL="hourly" + BOX_UPDATER_GEO_INTERVAL="daily" + BOX_UPDATER_DASHBOARD_INTERVAL="weekly" + BOX_UPDATER_KERNEL_URL="" + BOX_UPDATER_KERNEL_FILE="" + BOX_UPDATER_KERNEL_SOURCE="auto" + BOX_UPDATER_KERNEL_RELEASE_API_URL="" + BOX_UPDATER_KERNEL_RELEASE_REPO="" + BOX_UPDATER_KERNEL_RELEASE_CHANNEL="stable" + BOX_UPDATER_KERNEL_RELEASE_TAG="" + BOX_UPDATER_KERNEL_ASSET_REGEX="" + BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX="" + BOX_UPDATER_KERNEL_RELEASE_OS="linux" + BOX_UPDATER_KERNEL_RELEASE_ARCH="" + BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX="" + BOX_UPDATER_KERNEL_CHECKSUM="" + BOX_UPDATER_KERNEL_CHECKSUM_FILE="" + BOX_UPDATER_KERNEL_TARGET="" + BOX_UPDATER_SUBS_URL="" + BOX_UPDATER_SUBS_FILE="" + BOX_UPDATER_SUBS_CHECKSUM="" + BOX_UPDATER_SUBS_CHECKSUM_FILE="" + BOX_UPDATER_SUBS_TARGET="" + BOX_UPDATER_GEO_URL="" + BOX_UPDATER_GEO_FILE="" + BOX_UPDATER_GEO_SOURCE="auto" + BOX_UPDATER_GEO_RELEASE_API_URL="" + BOX_UPDATER_GEO_RELEASE_REPO="" + BOX_UPDATER_GEO_RELEASE_CHANNEL="stable" + BOX_UPDATER_GEO_RELEASE_TAG="" + BOX_UPDATER_GEO_ASSET_REGEX="" + BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX="" + BOX_UPDATER_GEO_RELEASE_OS="linux" + BOX_UPDATER_GEO_RELEASE_ARCH="" + BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX="" + BOX_UPDATER_GEO_CHECKSUM="" + BOX_UPDATER_GEO_CHECKSUM_FILE="" + BOX_UPDATER_GEO_TARGET="" + BOX_UPDATER_DASHBOARD_URL="" + BOX_UPDATER_DASHBOARD_FILE="" + BOX_UPDATER_DASHBOARD_CHECKSUM="" + BOX_UPDATER_DASHBOARD_CHECKSUM_FILE="" + BOX_UPDATER_DASHBOARD_TARGET="" +} + +# Keep sourced-state deterministic even before load_config is 
called. +config_defaults + +trim_space() { + local value="${1:-}" + value="${value#"${value%%[![:space:]]*}"}" + value="${value%"${value##*[![:space:]]}"}" + printf '%s' "${value}" +} + +strip_inline_comment() { + local value="${1:-}" + local trimmed first_char + trimmed="${value#"${value%%[![:space:]]*}"}" + first_char="${trimmed:0:1}" + if [[ "${first_char}" == "\"" || "${first_char}" == "'" ]]; then + printf '%s' "${value}" + return + fi + value="${value%%#*}" + printf '%s' "${value}" +} + +toml_value() { + local file="${1:?missing toml file}" + local section="${2:?missing section}" + local key="${3:?missing key}" + awk -v target_section="${section}" -v target_key="${key}" ' + BEGIN { section = "" } + /^[[:space:]]*#/ { next } + /^[[:space:]]*$/ { next } + /^[[:space:]]*\[/ { + line = $0 + gsub(/^[[:space:]]*\[/, "", line) + gsub(/\][[:space:]]*$/, "", line) + gsub(/[[:space:]]/, "", line) + section = line + next + } + section == target_section { + line = $0 + if (line ~ "^[[:space:]]*" target_key "[[:space:]]*=") { + sub(/^[^=]*=/, "", line) + print line + exit + } + } + ' "${file}" +} + +normalize_toml_scalar() { + local value + value="$(trim_space "$(strip_inline_comment "${1:-}")")" + if [[ "${value}" =~ ^\"(.*)\"$ ]]; then + printf '%s' "${BASH_REMATCH[1]}" + return + fi + if [[ "${value}" =~ ^\'(.*)\'$ ]]; then + printf '%s' "${BASH_REMATCH[1]}" + return + fi + printf '%s' "${value}" +} + +config_read_array() { + local file="${1:?missing file}" + local section="${2:?missing section}" + local key="${3:?missing key}" + local raw inner token + local -a values=() + + raw="$(toml_value "${file}" "${section}" "${key}" || true)" + if [[ -z "${raw}" ]]; then + return 1 + fi + + raw="$(trim_space "$(strip_inline_comment "${raw}")")" + [[ "${raw}" == \[*\] ]] || return 1 + inner="${raw#[}" + inner="${inner%]}" + inner="${inner//$'\n'/ }" + inner="${inner//$'\r'/ }" + + while IFS= read -r token; do + token="$(trim_space "${token}")" + [[ -n "${token}" ]] || continue 
+ if [[ "${token}" =~ ^\"(.*)\"$ ]]; then + values+=("${BASH_REMATCH[1]}") + elif [[ "${token}" =~ ^\'(.*)\'$ ]]; then + values+=("${BASH_REMATCH[1]}") + else + values+=("${token}") + fi + done < <(printf '%s\n' "${inner}" | awk ' + BEGIN { in_quote = 0; quote = ""; token = "" } + { + line = $0 + for (i = 1; i <= length(line); i++) { + ch = substr(line, i, 1) + if ((ch == "\"" || ch == "'\''")) { + if (in_quote == 0) { + in_quote = 1 + quote = ch + } else if (quote == ch) { + in_quote = 0 + quote = "" + } + token = token ch + continue + } + if (ch == "," && in_quote == 0) { + print token + token = "" + continue + } + token = token ch + } + if (in_quote == 0 && length(token) > 0) { + print token + token = "" + } + } + END { + if (length(token) > 0) { + print token + } + } + ') + + printf '%s\n' "${values[@]}" +} + +config_read_value() { + local file="${1:?missing file}" + local section="${2:?missing section}" + local key="${3:?missing key}" + local raw + raw="$(toml_value "${file}" "${section}" "${key}" || true)" + if [[ -z "${raw}" ]]; then + return 1 + fi + normalize_toml_scalar "${raw}" +} + +config_detect_file() { + local explicit_cfg="${BOX_CONFIG_FILE:-}" + local system_cfg="${BOX_ETC_DIR_DEFAULT}/box.toml" + local dev_cfg="${BOX_REPO_ROOT}/etc/box/box.toml" + + if [[ -n "${explicit_cfg}" ]]; then + if [[ -f "${explicit_cfg}" ]]; then + BOX_CONFIG_FILE="${explicit_cfg}" + BOX_CONFIG_SOURCE="explicit" + return 0 + fi + BOX_CONFIG_SOURCE="explicit-missing" + log "ERROR" "config" "E_CONFIG_FILE" "explicit BOX_CONFIG_FILE does not exist: ${explicit_cfg}" + return "${E_CONFIG}" + fi + + if [[ -f "${system_cfg}" ]]; then + BOX_CONFIG_FILE="${system_cfg}" + BOX_CONFIG_SOURCE="system" + return 0 + fi + + if [[ -f "${dev_cfg}" ]]; then + BOX_CONFIG_FILE="${dev_cfg}" + BOX_CONFIG_SOURCE="dev-fallback" + return 0 + fi + + BOX_CONFIG_FILE="" + BOX_CONFIG_SOURCE="defaults-only" + return 1 +} + +validate_port() { + local value="${1:-}" + [[ "${value}" =~ ^[0-9]+$ ]] && (( 
# validate_uint VALUE
# Succeed when VALUE is a non-empty string of ASCII digits.
validate_uint() {
  local candidate="${1:-}"
  [[ "${candidate}" =~ ^[0-9]+$ ]]
}

# validate_bool_string VALUE
# Accept the four boolean spellings used throughout box.toml.
validate_bool_string() {
  case "${1:-}" in
    true|false|1|0) return 0 ;;
    *) return 1 ;;
  esac
}

# validate_config
# Cross-check every BOX_* setting populated by load_config (or by the
# defaults). Logs a structured error and returns E_CONFIG on the first
# invalid field; returns 0 when the configuration is coherent.
validate_config() {
  case "${BOX_CORE}" in
    mihomo|sing-box) ;;
    *)
      log "ERROR" "config" "E_CONFIG_CORE" "unsupported core: ${BOX_CORE}"
      return "${E_CONFIG}"
      ;;
  esac

  case "${BOX_NETWORK_MODE}" in
    tun|tproxy|redirect|mixed|enhance) ;;
    *)
      log "ERROR" "config" "E_CONFIG_MODE" "invalid network mode: ${BOX_NETWORK_MODE}"
      return "${E_CONFIG}"
      ;;
  esac

  # The three listener ports share one error code; validate them in a loop.
  local port_spec port_label port_value
  for port_spec in \
    "tproxy:${BOX_TPROXY_PORT}" \
    "redirect:${BOX_REDIR_PORT}" \
    "dns:${BOX_DNS_PORT}"; do
    port_label="${port_spec%%:*}"
    port_value="${port_spec#*:}"
    if ! validate_port "${port_value}"; then
      log "ERROR" "config" "E_CONFIG_PORT" "invalid ${port_label} port: ${port_value}"
      return "${E_CONFIG}"
    fi
  done

  case "${BOX_DNS_HIJACK_MODE}" in
    tproxy|redirect|disable) ;;
    *)
      log "ERROR" "config" "E_CONFIG_DNS_MODE" "invalid dns_hijack_mode: ${BOX_DNS_HIJACK_MODE}"
      return "${E_CONFIG}"
      ;;
  esac

  case "${BOX_DNS_COEXIST_MODE}" in
    preserve_tailnet|strict_box) ;;
    *)
      log "ERROR" "config" "E_CONFIG_DNS_COEXIST" "invalid dns_coexist_mode: ${BOX_DNS_COEXIST_MODE}"
      return "${E_CONFIG}"
      ;;
  esac

  if ! validate_uint "${BOX_ROUTE_TABLE}"; then
    log "ERROR" "config" "E_CONFIG_ROUTE_TABLE" "route_table must be numeric: ${BOX_ROUTE_TABLE}"
    return "${E_CONFIG}"
  fi
  if ! validate_uint "${BOX_TAILSCALE_ROUTE_TABLE}"; then
    log "ERROR" "config" "E_CONFIG_TAILSCALE_ROUTE_TABLE" \
      "tailscale_route_table must be numeric: ${BOX_TAILSCALE_ROUTE_TABLE}"
    return "${E_CONFIG}"
  fi
  if ! validate_uint "${BOX_ROUTE_PREF}"; then
    log "ERROR" "config" "E_CONFIG_ROUTE_PREF" "route_pref must be numeric: ${BOX_ROUTE_PREF}"
    return "${E_CONFIG}"
  fi

  # Box must never share routing identifiers with tailscale.
  if [[ "${BOX_ROUTE_TABLE}" == "${BOX_TAILSCALE_ROUTE_TABLE}" ]]; then
    log "ERROR" "config" "E_CONFIG_ROUTE_TABLE" \
      "box route_table (${BOX_ROUTE_TABLE}) must differ from tailscale_route_table (${BOX_TAILSCALE_ROUTE_TABLE})"
    return "${E_CONFIG}"
  fi

  if [[ "${BOX_FWMARK}" == "${BOX_TAILSCALE_FWMARK}" ]]; then
    log "ERROR" "config" "E_CONFIG_FWMARK" \
      "box fwmark (${BOX_FWMARK}) must differ from tailscale_fwmark (${BOX_TAILSCALE_FWMARK})"
    return "${E_CONFIG}"
  fi

  if [[ -z "${BOX_TAILSCALE_IFACE}" || -z "${BOX_TAILNET_IPV4_CIDR}" || -z "${BOX_TAILSCALE_DNS_RESOLVER}" ]]; then
    log "ERROR" "config" "E_CONFIG_TAILSCALE" "tailscale coexist fields must not be empty"
    return "${E_CONFIG}"
  fi

  case "${BOX_FIREWALL_BACKEND}" in
    iptables|nftables) ;;
    *)
      log "ERROR" "config" "E_CONFIG_FW_BACKEND" "unsupported firewall backend: ${BOX_FIREWALL_BACKEND}"
      return "${E_CONFIG}"
      ;;
  esac

  if ! validate_bool_string "${BOX_POLICY_ENABLED}"; then
    log "ERROR" "config" "E_CONFIG_POLICY_ENABLED" "policy.enabled must be true|false|1|0: ${BOX_POLICY_ENABLED}"
    return "${E_CONFIG}"
  fi

  case "${BOX_POLICY_PROXY_MODE}" in
    core|whitelist|blacklist) ;;
    *)
      log "ERROR" "config" "E_CONFIG_POLICY_MODE" "unsupported policy.proxy_mode: ${BOX_POLICY_PROXY_MODE}"
      return "${E_CONFIG}"
      ;;
  esac

  if ! validate_uint "${BOX_POLICY_DEBOUNCE_SECONDS}"; then
    log "ERROR" "config" "E_CONFIG_POLICY_DEBOUNCE" "policy.debounce_seconds must be numeric: ${BOX_POLICY_DEBOUNCE_SECONDS}"
    return "${E_CONFIG}"
  fi

  if ! validate_bool_string "${BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT}"; then
    log "ERROR" "config" "E_CONFIG_POLICY_DISCONNECT" \
      "policy.use_module_on_wifi_disconnect must be true|false|1|0: ${BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT}"
    return "${E_CONFIG}"
  fi

  case "${BOX_UPDATER_CHECKSUM_POLICY}" in
    off|optional|required) ;;
    *)
      log "ERROR" "config" "E_CONFIG_UPDATER_CHECKSUM" \
        "unsupported updater checksum_policy: ${BOX_UPDATER_CHECKSUM_POLICY}"
      return "${E_CONFIG}"
      ;;
  esac

  if [[ -z "${BOX_UPDATER_ARTIFACT_DIR}" || -z "${BOX_UPDATER_STAGING_DIR}" ]]; then
    log "ERROR" "config" "E_CONFIG_UPDATER_DIR" "updater artifact_dir and staging_dir must not be empty"
    return "${E_CONFIG}"
  fi

  case "${BOX_UPDATER_KERNEL_SOURCE}" in
    auto|file|url|release) ;;
    *)
      log "ERROR" "config" "E_CONFIG_UPDATER_KERNEL_SOURCE" \
        "unsupported updater.kernel.source: ${BOX_UPDATER_KERNEL_SOURCE}"
      return "${E_CONFIG}"
      ;;
  esac

  case "${BOX_UPDATER_GEO_SOURCE}" in
    auto|file|url|release) ;;
    *)
      log "ERROR" "config" "E_CONFIG_UPDATER_GEO_SOURCE" \
        "unsupported updater.geo.source: ${BOX_UPDATER_GEO_SOURCE}"
      return "${E_CONFIG}"
      ;;
  esac

  case "${BOX_UPDATER_KERNEL_RELEASE_CHANNEL}" in
    stable|prerelease|any) ;;
    *)
      log "ERROR" "config" "E_CONFIG_UPDATER_KERNEL_CHANNEL" \
        "unsupported updater.kernel.release_channel: ${BOX_UPDATER_KERNEL_RELEASE_CHANNEL}"
      return "${E_CONFIG}"
      ;;
  esac

  case "${BOX_UPDATER_GEO_RELEASE_CHANNEL}" in
    stable|prerelease|any) ;;
    *)
      log "ERROR" "config" "E_CONFIG_UPDATER_GEO_CHANNEL" \
        "unsupported updater.geo.release_channel: ${BOX_UPDATER_GEO_RELEASE_CHANNEL}"
      return "${E_CONFIG}"
      ;;
  esac

  # A release-driven geo source needs at least one release locator.
  if [[ "${BOX_UPDATER_GEO_SOURCE}" == "release" &&
        -z "${BOX_UPDATER_GEO_RELEASE_REPO}" &&
        -z "${BOX_UPDATER_GEO_RELEASE_API_URL}" ]]; then
    log "ERROR" "config" "E_CONFIG_UPDATER_GEO_RELEASE" \
      "updater.geo release source requires release_repo or release_api_url"
    return "${E_CONFIG}"
  fi
}
# config_value_or SECTION KEY FALLBACK
# Echo SECTION.KEY from the detected box.toml, or FALLBACK when the key is
# absent (i.e. config_read_value fails).
config_value_or() {
  config_read_value "${BOX_CONFIG_FILE}" "${1}" "${2}" || printf '%s' "${3}"
}

# config_export_vars
# Export every BOX_* setting consumed by the rest of the toolchain. Kept in
# one place so the defaults path and the file-backed path cannot drift apart
# (previously this list was duplicated verbatim in both branches).
config_export_vars() {
  export BOX_CONFIG_FILE BOX_CONFIG_SOURCE
  export BOX_CORE BOX_NETWORK_MODE BOX_TPROXY_PORT BOX_REDIR_PORT BOX_DNS_PORT BOX_DNS_HIJACK_MODE BOX_DNS_COEXIST_MODE
  export BOX_TAILSCALE_IFACE BOX_TAILNET_IPV4_CIDR BOX_TAILNET_IPV6_CIDR BOX_TAILSCALE_DNS_RESOLVER BOX_TAILSCALE_FWMARK BOX_TAILSCALE_ROUTE_TABLE
  export BOX_FIREWALL_BACKEND BOX_ROUTE_TABLE BOX_ROUTE_PREF BOX_FWMARK
  export BOX_POLICY_ENABLED BOX_POLICY_PROXY_MODE BOX_POLICY_DEBOUNCE_SECONDS BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT BOX_POLICY_DISABLE_MARKER
  export BOX_CORE_BIN_DIR BOX_CORE_WORKDIR BOX_CORE_CONFIG_SOURCE
  export BOX_UPDATER_ARTIFACT_DIR BOX_UPDATER_STAGING_DIR BOX_UPDATER_CHECKSUM_POLICY
  export BOX_UPDATER_KERNEL_INTERVAL BOX_UPDATER_SUBS_INTERVAL BOX_UPDATER_GEO_INTERVAL BOX_UPDATER_DASHBOARD_INTERVAL
  export BOX_UPDATER_KERNEL_URL BOX_UPDATER_KERNEL_FILE BOX_UPDATER_KERNEL_SOURCE BOX_UPDATER_KERNEL_RELEASE_API_URL
  export BOX_UPDATER_KERNEL_RELEASE_REPO BOX_UPDATER_KERNEL_RELEASE_CHANNEL BOX_UPDATER_KERNEL_RELEASE_TAG
  export BOX_UPDATER_KERNEL_ASSET_REGEX BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX BOX_UPDATER_KERNEL_RELEASE_OS BOX_UPDATER_KERNEL_RELEASE_ARCH
  export BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX BOX_UPDATER_KERNEL_CHECKSUM BOX_UPDATER_KERNEL_CHECKSUM_FILE BOX_UPDATER_KERNEL_TARGET
  export BOX_UPDATER_SUBS_URL BOX_UPDATER_SUBS_FILE BOX_UPDATER_SUBS_CHECKSUM BOX_UPDATER_SUBS_CHECKSUM_FILE BOX_UPDATER_SUBS_TARGET
  export BOX_UPDATER_GEO_URL BOX_UPDATER_GEO_FILE BOX_UPDATER_GEO_SOURCE BOX_UPDATER_GEO_RELEASE_API_URL
  export BOX_UPDATER_GEO_RELEASE_REPO BOX_UPDATER_GEO_RELEASE_CHANNEL BOX_UPDATER_GEO_RELEASE_TAG
  export BOX_UPDATER_GEO_ASSET_REGEX BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX BOX_UPDATER_GEO_RELEASE_OS BOX_UPDATER_GEO_RELEASE_ARCH
  export BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX BOX_UPDATER_GEO_CHECKSUM BOX_UPDATER_GEO_CHECKSUM_FILE BOX_UPDATER_GEO_TARGET
  export BOX_UPDATER_DASHBOARD_URL BOX_UPDATER_DASHBOARD_FILE BOX_UPDATER_DASHBOARD_CHECKSUM BOX_UPDATER_DASHBOARD_CHECKSUM_FILE BOX_UPDATER_DASHBOARD_TARGET
}

# load_config
# Populate BOX_* from defaults, overlay values from the detected box.toml,
# validate the result, and export it.
# Returns E_CONFIG when an explicitly requested config file is missing or
# when validation fails. FIX: validate_config's status was previously
# discarded at both call sites, so `if ! load_config`-style callers (where
# `set -e` is suspended) silently exported an invalid configuration.
load_config() {
  config_defaults

  if ! config_detect_file; then
    if [[ "${BOX_CONFIG_SOURCE}" == "explicit-missing" ]]; then
      return "${E_CONFIG}"
    fi
    log "WARN" "config" "W_CONFIG_DEFAULTS" "no box.toml found; using defaults"
    if ! validate_config; then
      return "${E_CONFIG}"
    fi
    config_export_vars
    return 0
  fi

  log "INFO" "config" "CONFIG_SOURCE" "loaded ${BOX_CONFIG_SOURCE} config from ${BOX_CONFIG_FILE}"

  BOX_CORE="$(config_value_or "core" "selected" "${BOX_CORE}")"
  BOX_CORE_BIN_DIR="$(config_value_or "core" "bin_dir" "${BOX_CORE_BIN_DIR}")"
  BOX_CORE_WORKDIR="$(config_value_or "core" "workdir" "${BOX_CORE_WORKDIR}")"
  BOX_CORE_CONFIG_SOURCE="$(config_value_or "core" "config_source" "${BOX_CORE_CONFIG_SOURCE}")"

  BOX_NETWORK_MODE="$(config_value_or "network" "mode" "${BOX_NETWORK_MODE}")"
  BOX_TPROXY_PORT="$(config_value_or "network" "tproxy_port" "${BOX_TPROXY_PORT}")"
  BOX_REDIR_PORT="$(config_value_or "network" "redir_port" "${BOX_REDIR_PORT}")"
  BOX_DNS_PORT="$(config_value_or "network" "dns_port" "${BOX_DNS_PORT}")"
  BOX_DNS_HIJACK_MODE="$(config_value_or "network" "dns_hijack_mode" "${BOX_DNS_HIJACK_MODE}")"
  BOX_DNS_COEXIST_MODE="$(config_value_or "network" "dns_coexist_mode" "${BOX_DNS_COEXIST_MODE}")"
  BOX_TAILSCALE_IFACE="$(config_value_or "network" "tailscale_iface" "${BOX_TAILSCALE_IFACE}")"
  BOX_TAILNET_IPV4_CIDR="$(config_value_or "network" "tailnet_ipv4_cidr" "${BOX_TAILNET_IPV4_CIDR}")"
  BOX_TAILNET_IPV6_CIDR="$(config_value_or "network" "tailnet_ipv6_cidr" "${BOX_TAILNET_IPV6_CIDR}")"
  BOX_TAILSCALE_DNS_RESOLVER="$(config_value_or "network" "tailscale_dns_resolver" "${BOX_TAILSCALE_DNS_RESOLVER}")"
  BOX_TAILSCALE_FWMARK="$(config_value_or "network" "tailscale_fwmark" "${BOX_TAILSCALE_FWMARK}")"
  BOX_TAILSCALE_ROUTE_TABLE="$(config_value_or "network" "tailscale_route_table" "${BOX_TAILSCALE_ROUTE_TABLE}")"

  BOX_FIREWALL_BACKEND="$(config_value_or "firewall" "backend" "${BOX_FIREWALL_BACKEND}")"
  BOX_ROUTE_TABLE="$(config_value_or "firewall" "route_table" "${BOX_ROUTE_TABLE}")"
  BOX_ROUTE_PREF="$(config_value_or "firewall" "route_pref" "${BOX_ROUTE_PREF}")"
  BOX_FWMARK="$(config_value_or "firewall" "fwmark" "${BOX_FWMARK}")"

  BOX_POLICY_ENABLED="$(config_value_or "policy" "enabled" "${BOX_POLICY_ENABLED}")"
  BOX_POLICY_PROXY_MODE="$(config_value_or "policy" "proxy_mode" "${BOX_POLICY_PROXY_MODE}")"
  BOX_POLICY_DEBOUNCE_SECONDS="$(config_value_or "policy" "debounce_seconds" "${BOX_POLICY_DEBOUNCE_SECONDS}")"
  BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT="$(config_value_or "policy" "use_module_on_wifi_disconnect" "${BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT}")"
  BOX_POLICY_DISABLE_MARKER="$(config_value_or "policy" "disable_marker" "${BOX_POLICY_DISABLE_MARKER}")"
  mapfile -t BOX_POLICY_ALLOW_IFACES < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "allow_ifaces" || true)
  mapfile -t BOX_POLICY_IGNORE_IFACES < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "ignore_ifaces" || true)
  mapfile -t BOX_POLICY_ALLOW_SSIDS < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "allow_ssids" || true)
  mapfile -t BOX_POLICY_IGNORE_SSIDS < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "ignore_ssids" || true)
  mapfile -t BOX_POLICY_ALLOW_BSSIDS < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "allow_bssids" || true)
  mapfile -t BOX_POLICY_IGNORE_BSSIDS < <(config_read_array "${BOX_CONFIG_FILE}" "policy" "ignore_bssids" || true)

  BOX_UPDATER_ARTIFACT_DIR="$(config_value_or "updater" "artifact_dir" "${BOX_UPDATER_ARTIFACT_DIR}")"
  BOX_UPDATER_STAGING_DIR="$(config_value_or "updater" "staging_dir" "${BOX_UPDATER_STAGING_DIR}")"
  BOX_UPDATER_CHECKSUM_POLICY="$(config_value_or "updater" "checksum_policy" "${BOX_UPDATER_CHECKSUM_POLICY}")"
  BOX_UPDATER_KERNEL_INTERVAL="$(config_value_or "updater" "kernel_interval" "${BOX_UPDATER_KERNEL_INTERVAL}")"
  BOX_UPDATER_SUBS_INTERVAL="$(config_value_or "updater" "subs_interval" "${BOX_UPDATER_SUBS_INTERVAL}")"
  BOX_UPDATER_GEO_INTERVAL="$(config_value_or "updater" "geo_interval" "${BOX_UPDATER_GEO_INTERVAL}")"
  BOX_UPDATER_DASHBOARD_INTERVAL="$(config_value_or "updater" "dashboard_interval" "${BOX_UPDATER_DASHBOARD_INTERVAL}")"

  BOX_UPDATER_KERNEL_URL="$(config_value_or "updater.kernel" "url" "${BOX_UPDATER_KERNEL_URL}")"
  BOX_UPDATER_KERNEL_FILE="$(config_value_or "updater.kernel" "file" "${BOX_UPDATER_KERNEL_FILE}")"
  BOX_UPDATER_KERNEL_SOURCE="$(config_value_or "updater.kernel" "source" "${BOX_UPDATER_KERNEL_SOURCE}")"
  BOX_UPDATER_KERNEL_RELEASE_API_URL="$(config_value_or "updater.kernel" "release_api_url" "${BOX_UPDATER_KERNEL_RELEASE_API_URL}")"
  BOX_UPDATER_KERNEL_RELEASE_REPO="$(config_value_or "updater.kernel" "release_repo" "${BOX_UPDATER_KERNEL_RELEASE_REPO}")"
  BOX_UPDATER_KERNEL_RELEASE_CHANNEL="$(config_value_or "updater.kernel" "release_channel" "${BOX_UPDATER_KERNEL_RELEASE_CHANNEL}")"
  BOX_UPDATER_KERNEL_RELEASE_TAG="$(config_value_or "updater.kernel" "release_tag" "${BOX_UPDATER_KERNEL_RELEASE_TAG}")"
  BOX_UPDATER_KERNEL_ASSET_REGEX="$(config_value_or "updater.kernel" "asset_regex" "${BOX_UPDATER_KERNEL_ASSET_REGEX}")"
  BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX="$(config_value_or "updater.kernel" "checksum_asset_regex" "${BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX}")"
  BOX_UPDATER_KERNEL_RELEASE_OS="$(config_value_or "updater.kernel" "release_os" "${BOX_UPDATER_KERNEL_RELEASE_OS}")"
  BOX_UPDATER_KERNEL_RELEASE_ARCH="$(config_value_or "updater.kernel" "release_arch" "${BOX_UPDATER_KERNEL_RELEASE_ARCH}")"
  BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX="$(config_value_or "updater.kernel" "archive_member_regex" "${BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX}")"
  BOX_UPDATER_KERNEL_CHECKSUM="$(config_value_or "updater.kernel" "checksum" "${BOX_UPDATER_KERNEL_CHECKSUM}")"
  BOX_UPDATER_KERNEL_CHECKSUM_FILE="$(config_value_or "updater.kernel" "checksum_file" "${BOX_UPDATER_KERNEL_CHECKSUM_FILE}")"
  BOX_UPDATER_KERNEL_TARGET="$(config_value_or "updater.kernel" "target" "${BOX_UPDATER_KERNEL_TARGET}")"

  BOX_UPDATER_SUBS_URL="$(config_value_or "updater.subs" "url" "${BOX_UPDATER_SUBS_URL}")"
  BOX_UPDATER_SUBS_FILE="$(config_value_or "updater.subs" "file" "${BOX_UPDATER_SUBS_FILE}")"
  BOX_UPDATER_SUBS_CHECKSUM="$(config_value_or "updater.subs" "checksum" "${BOX_UPDATER_SUBS_CHECKSUM}")"
  BOX_UPDATER_SUBS_CHECKSUM_FILE="$(config_value_or "updater.subs" "checksum_file" "${BOX_UPDATER_SUBS_CHECKSUM_FILE}")"
  BOX_UPDATER_SUBS_TARGET="$(config_value_or "updater.subs" "target" "${BOX_UPDATER_SUBS_TARGET}")"

  BOX_UPDATER_GEO_URL="$(config_value_or "updater.geo" "url" "${BOX_UPDATER_GEO_URL}")"
  BOX_UPDATER_GEO_FILE="$(config_value_or "updater.geo" "file" "${BOX_UPDATER_GEO_FILE}")"
  BOX_UPDATER_GEO_SOURCE="$(config_value_or "updater.geo" "source" "${BOX_UPDATER_GEO_SOURCE}")"
  BOX_UPDATER_GEO_RELEASE_API_URL="$(config_value_or "updater.geo" "release_api_url" "${BOX_UPDATER_GEO_RELEASE_API_URL}")"
  BOX_UPDATER_GEO_RELEASE_REPO="$(config_value_or "updater.geo" "release_repo" "${BOX_UPDATER_GEO_RELEASE_REPO}")"
  BOX_UPDATER_GEO_RELEASE_CHANNEL="$(config_value_or "updater.geo" "release_channel" "${BOX_UPDATER_GEO_RELEASE_CHANNEL}")"
  BOX_UPDATER_GEO_RELEASE_TAG="$(config_value_or "updater.geo" "release_tag" "${BOX_UPDATER_GEO_RELEASE_TAG}")"
  BOX_UPDATER_GEO_ASSET_REGEX="$(config_value_or "updater.geo" "asset_regex" "${BOX_UPDATER_GEO_ASSET_REGEX}")"
  BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX="$(config_value_or "updater.geo" "checksum_asset_regex" "${BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX}")"
  BOX_UPDATER_GEO_RELEASE_OS="$(config_value_or "updater.geo" "release_os" "${BOX_UPDATER_GEO_RELEASE_OS}")"
  BOX_UPDATER_GEO_RELEASE_ARCH="$(config_value_or "updater.geo" "release_arch" "${BOX_UPDATER_GEO_RELEASE_ARCH}")"
  BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX="$(config_value_or "updater.geo" "archive_member_regex" "${BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX}")"
  BOX_UPDATER_GEO_CHECKSUM="$(config_value_or "updater.geo" "checksum" "${BOX_UPDATER_GEO_CHECKSUM}")"
  BOX_UPDATER_GEO_CHECKSUM_FILE="$(config_value_or "updater.geo" "checksum_file" "${BOX_UPDATER_GEO_CHECKSUM_FILE}")"
  BOX_UPDATER_GEO_TARGET="$(config_value_or "updater.geo" "target" "${BOX_UPDATER_GEO_TARGET}")"

  BOX_UPDATER_DASHBOARD_URL="$(config_value_or "updater.dashboard" "url" "${BOX_UPDATER_DASHBOARD_URL}")"
  BOX_UPDATER_DASHBOARD_FILE="$(config_value_or "updater.dashboard" "file" "${BOX_UPDATER_DASHBOARD_FILE}")"
  BOX_UPDATER_DASHBOARD_CHECKSUM="$(config_value_or "updater.dashboard" "checksum" "${BOX_UPDATER_DASHBOARD_CHECKSUM}")"
  BOX_UPDATER_DASHBOARD_CHECKSUM_FILE="$(config_value_or "updater.dashboard" "checksum_file" "${BOX_UPDATER_DASHBOARD_CHECKSUM_FILE}")"
  BOX_UPDATER_DASHBOARD_TARGET="$(config_value_or "updater.dashboard" "target" "${BOX_UPDATER_DASHBOARD_TARGET}")"

  if ! validate_config; then
    return "${E_CONFIG}"
  fi

  # validate_config has constrained BOX_CORE to mihomo|sing-box at this point.
  # For sing-box, if the config source remains the default YAML profile path,
  # rewrite it to the JSON profile path before export; sing-box expects JSON
  # at runtime.
  if [[ "${BOX_CORE}" == "sing-box" && "${BOX_CORE_CONFIG_SOURCE}" == "/etc/box/profiles/config.yaml" ]]; then
    BOX_CORE_CONFIG_SOURCE="/etc/box/profiles/config.json"
  fi

  config_export_vars
}

# ---------------------------------------------------------------------------
# lib/firewall/backend_iptables.sh (new file introduced by this patch)
# ---------------------------------------------------------------------------
#!/usr/bin/env bash

# shellcheck shell=bash

set -euo pipefail

BOX_CHAIN_MANGLE="BOX_MANGLE"
BOX_CHAIN_NAT="BOX_NAT"
BOX_CHAIN_DNS_MANGLE="BOX_DNS_MANGLE"
BOX_CHAIN_DNS_NAT="BOX_DNS_NAT"

FW_BACKEND_AVAILABLE="false"
FW_CHAIN_MANGLE="false"
FW_CHAIN_NAT="false"
FW_CHAIN_DNS_MANGLE="false"
FW_CHAIN_DNS_NAT="false"
FW_ROUTE_RULE="false"
FW_ROUTE_TABLE_INSTALLED="false"
FW_CAP_TPROXY="false"
FW_TAILSCALE_MARK_RULE="false"
FW_TAILSCALE_TABLE_PRESENT="false"
FW_TAILSCALE_BYPASS_APPLIED="false"
FW_CAP_IPV4="false"
FW_CAP_IPV6="false"
FW_DRY_RUN_SUPPORTED="true"
FW_DNS_COEXIST_MODE_ACTIVE="preserve_tailnet"
FW_CAP_DETAILS=""
FW_LAST_ERROR=""

# iptables_cmd
# Print the iptables binary to use: BOX_IPTABLES_CMD override if set,
# otherwise "iptables" when present on PATH (fails silently otherwise).
iptables_cmd() {
  if [[ -n "${BOX_IPTABLES_CMD:-}" ]]; then
    printf '%s\n' "${BOX_IPTABLES_CMD}"
    return 0
  fi
  command -v iptables >/dev/null 2>&1 && printf '%s\n' "iptables"
}

# ip_cmd
# Print the ip(8) binary to use: BOX_IP_CMD override if set, otherwise "ip"
# when present on PATH (fails silently otherwise).
ip_cmd() {
  if [[ -n "${BOX_IP_CMD:-}" ]]; then
    printf '%s\n' "${BOX_IP_CMD}"
    return 0
  fi
  command -v ip >/dev/null 2>&1 && printf '%s\n' "ip"
}
[[ "${mask}" =~ ^(0[xX][0-9a-fA-F]+|[0-9]+)$ ]]; then + return 1 + fi + + printf '%u/%u\n' "$((value))" "$((mask))" +} + +fwmark_equal() { + local left="${1:?missing left fwmark}" + local right="${2:?missing right fwmark}" + local left_norm right_norm + + left_norm="$(fwmark_normalize "${left}" 2>/dev/null || true)" + right_norm="$(fwmark_normalize "${right}" 2>/dev/null || true)" + [[ -n "${left_norm}" && -n "${right_norm}" && "${left_norm}" == "${right_norm}" ]] +} + +backend_iptables_rule_line_fwmark() { + local line="${1:-}" + local i + read -r -a parts <<<"${line}" + for ((i = 0; i < ${#parts[@]}; i++)); do + if [[ "${parts[$i]}" == "fwmark" && $((i + 1)) -lt ${#parts[@]} ]]; then + printf '%s\n' "${parts[$((i + 1))]}" + return 0 + fi + done + return 1 +} + +backend_iptables_rule_line_table() { + local line="${1:-}" + local i + read -r -a parts <<<"${line}" + for ((i = 0; i < ${#parts[@]}; i++)); do + if [[ ( "${parts[$i]}" == "lookup" || "${parts[$i]}" == "table" ) && $((i + 1)) -lt ${#parts[@]} ]]; then + printf '%s\n' "${parts[$((i + 1))]}" + return 0 + fi + done + return 1 +} + +iptables_table_has_chain() { + local table="${1:?missing table}" + local chain="${2:?missing chain}" + local ipt + ipt="$(iptables_cmd)" + "${ipt}" -t "${table}" -S "${chain}" >/dev/null 2>&1 +} + +iptables_ensure_chain() { + local table="${1:?missing table}" + local chain="${2:?missing chain}" + local ipt + + ipt="$(iptables_cmd)" + if ! iptables_table_has_chain "${table}" "${chain}"; then + "${ipt}" -t "${table}" -N "${chain}" + fi +} + +iptables_add_if_missing() { + local table="${1:?missing table}" + shift + local ipt + + ipt="$(iptables_cmd)" + if ! "${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1; then + if ! "${ipt}" -t "${table}" -A "$@"; then + return 1 + fi + # iptables-nft can emit append failures on stderr in some kernels while + # still returning success; verify rule presence to enforce correctness. + if ! 
"${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1; then + return 1 + fi + fi + return 0 +} + +iptables_try_add_if_missing_quiet() { + local table="${1:?missing table}" + shift + local ipt + + ipt="$(iptables_cmd)" + if "${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1; then + return 0 + fi + if ! "${ipt}" -t "${table}" -A "$@" >/dev/null 2>&1; then + return 1 + fi + "${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1 +} + +backend_iptables_rule_desc() { + local rendered + printf -v rendered '%q ' "$@" + printf '%s\n' "${rendered% }" +} + +backend_iptables_ensure_chain_checked() { + local table="${1:?missing table}" + local chain="${2:?missing chain}" + if ! iptables_ensure_chain "${table}" "${chain}"; then + FW_LAST_ERROR="failed to ensure chain table=${table} chain=${chain}" + return "${E_FIREWALL_APPLY}" + fi +} + +backend_iptables_add_rule_checked() { + local table="${1:?missing table}" + shift + if ! iptables_add_if_missing "${table}" "$@"; then + FW_LAST_ERROR="failed to add rule table=${table} rule=$(backend_iptables_rule_desc "$@")" + return "${E_FIREWALL_APPLY}" + fi +} + +iptables_rule_exists() { + local table="${1:?missing table}" + shift + local ipt + ipt="$(iptables_cmd)" + "${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1 +} + +iptables_delete_all() { + local table="${1:?missing table}" + shift + local ipt + + ipt="$(iptables_cmd || true)" + [[ -n "${ipt}" ]] || return 0 + + while "${ipt}" -t "${table}" -C "$@" >/dev/null 2>&1; do + "${ipt}" -t "${table}" -D "$@" >/dev/null 2>&1 || break + done +} + +iptables_flush_chain_if_exists() { + local table="${1:?missing table}" + local chain="${2:?missing chain}" + local ipt + ipt="$(iptables_cmd || true)" + [[ -n "${ipt}" ]] || return 0 + if iptables_table_has_chain "${table}" "${chain}"; then + "${ipt}" -t "${table}" -F "${chain}" >/dev/null 2>&1 || true + fi +} + +iptables_delete_chain_if_exists() { + local table="${1:?missing table}" + local chain="${2:?missing chain}" + local ipt + ipt="$(iptables_cmd || true)" + [[ -n 
"${ipt}" ]] || return 0 + if iptables_table_has_chain "${table}" "${chain}"; then + "${ipt}" -t "${table}" -X "${chain}" >/dev/null 2>&1 || true + fi +} + +backend_iptables_probe_tproxy() { + if [[ -n "${BOX_CAP_TPROXY:-}" ]]; then + [[ "${BOX_CAP_TPROXY}" == "1" || "${BOX_CAP_TPROXY}" == "true" ]] && return 0 + return 1 + fi + local ipt + ipt="$(iptables_cmd || true)" + [[ -n "${ipt}" ]] || return 1 + "${ipt}" -j TPROXY --help >/dev/null 2>&1 +} + +backend_iptables_init() { + if [[ -z "$(iptables_cmd)" || -z "$(ip_cmd)" ]]; then + FW_LAST_ERROR="iptables/ip command not found" + log "ERROR" "firewall" "E_FW_PREREQ" "${FW_LAST_ERROR}" + return "${E_FIREWALL_APPLY}" + fi + return 0 +} + +backend_iptables_cleanup() { + local ipt ip_tool + + ipt="$(iptables_cmd || true)" + ip_tool="$(ip_cmd || true)" + + if [[ -n "${ipt}" ]]; then + iptables_delete_all mangle PREROUTING -j "${BOX_CHAIN_MANGLE}" + iptables_delete_all mangle OUTPUT -j "${BOX_CHAIN_MANGLE}" + iptables_delete_all nat PREROUTING -j "${BOX_CHAIN_NAT}" + iptables_delete_all nat OUTPUT -j "${BOX_CHAIN_NAT}" + + # Drop internal jumps first, then delete child chains. + iptables_flush_chain_if_exists mangle "${BOX_CHAIN_MANGLE}" + iptables_flush_chain_if_exists nat "${BOX_CHAIN_NAT}" + iptables_flush_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}" + iptables_flush_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}" + + iptables_delete_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}" + iptables_delete_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}" + iptables_delete_chain_if_exists mangle "${BOX_CHAIN_MANGLE}" + iptables_delete_chain_if_exists nat "${BOX_CHAIN_NAT}" + + # Idempotent second pass for backends that need another round after detach. 
# backend_iptables_cleanup
# Best-effort teardown of every Box chain, ip rule, and lo route this
# backend may have installed. Safe to call repeatedly; failures are ignored.
backend_iptables_cleanup() {
  local fw_bin route_bin

  fw_bin="$(iptables_cmd || true)"
  route_bin="$(ip_cmd || true)"

  if [[ -n "${fw_bin}" ]]; then
    # Detach the Box jumps from the built-in hooks first.
    local hook_spec tbl hook chain
    for hook_spec in \
      "mangle PREROUTING ${BOX_CHAIN_MANGLE}" \
      "mangle OUTPUT ${BOX_CHAIN_MANGLE}" \
      "nat PREROUTING ${BOX_CHAIN_NAT}" \
      "nat OUTPUT ${BOX_CHAIN_NAT}"; do
      read -r tbl hook chain <<<"${hook_spec}"
      iptables_delete_all "${tbl}" "${hook}" -j "${chain}"
    done

    # Drop internal jumps, then delete the child chains.
    iptables_flush_chain_if_exists mangle "${BOX_CHAIN_MANGLE}"
    iptables_flush_chain_if_exists nat "${BOX_CHAIN_NAT}"
    iptables_flush_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}"
    iptables_flush_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}"

    iptables_delete_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}"
    iptables_delete_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}"
    iptables_delete_chain_if_exists mangle "${BOX_CHAIN_MANGLE}"
    iptables_delete_chain_if_exists nat "${BOX_CHAIN_NAT}"

    # Second idempotent pass for backends that need another round after the
    # detach above.
    iptables_flush_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}"
    iptables_flush_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}"
    iptables_delete_chain_if_exists mangle "${BOX_CHAIN_DNS_MANGLE}"
    iptables_delete_chain_if_exists nat "${BOX_CHAIN_DNS_NAT}"
    iptables_delete_chain_if_exists mangle "${BOX_CHAIN_MANGLE}"
    iptables_delete_chain_if_exists nat "${BOX_CHAIN_NAT}"
  fi

  if [[ -n "${route_bin}" ]]; then
    backend_iptables_prune_box_policy_rules
    while "${route_bin}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; do
      "${route_bin}" route del local default dev lo table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1 || break
    done
  fi
}

# backend_iptables_apply_anti_loop
# Short-circuit loopback traffic so Box never intercepts its own sockets.
backend_iptables_apply_anti_loop() {
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -i lo -j RETURN
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -d 127.0.0.0/8 -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -i lo -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -d 127.0.0.0/8 -j RETURN
}

# backend_iptables_apply_tailscale_bypass
# Preserve tailscale transport and route ownership: bypass its interface,
# the tailnet v4 range, its fwmark, and the MagicDNS resolver.
backend_iptables_apply_tailscale_bypass() {
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -i "${BOX_TAILSCALE_IFACE}" -j RETURN
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -o "${BOX_TAILSCALE_IFACE}" -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -i "${BOX_TAILSCALE_IFACE}" -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -o "${BOX_TAILSCALE_IFACE}" -j RETURN

  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -d "${BOX_TAILNET_IPV4_CIDR}" -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -d "${BOX_TAILNET_IPV4_CIDR}" -j RETURN

  # Keep packets tailscale has already marked out of Box interception.
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -m mark --mark "${BOX_TAILSCALE_FWMARK}" -j RETURN

  # Preserve MagicDNS resolver reachability over both transports.
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_DNS_MANGLE}" -d "${BOX_TAILSCALE_DNS_RESOLVER}" -p udp --dport 53 -j RETURN
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_DNS_MANGLE}" -d "${BOX_TAILSCALE_DNS_RESOLVER}" -p tcp --dport 53 -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -d "${BOX_TAILSCALE_DNS_RESOLVER}" -p udp --dport 53 -j RETURN
  backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -d "${BOX_TAILSCALE_DNS_RESOLVER}" -p tcp --dport 53 -j RETURN

  # IPv6 tailnet bypass is handled by not touching ip6tables in this backend.
  # TODO(phase-3): add dedicated ip6tables/nft backend for explicit v6 chain rules.
}

# backend_iptables_apply_policy_placeholders
# TODO(phase-3): UID/GID/interface/MAC policy graph.
backend_iptables_apply_policy_placeholders() {
  backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -m comment --comment "BOX_POLICY_PLACEHOLDER" -j RETURN
}

# backend_iptables_ensure_policy_route
# Install the fwmark policy rule plus the local default route on lo in the
# Box routing table; prunes stale Box rules first.
backend_iptables_ensure_policy_route() {
  local route_bin
  route_bin="$(ip_cmd)"

  backend_iptables_prune_box_policy_rules
  if ! "${route_bin}" rule add fwmark "${BOX_FWMARK}" table "${BOX_ROUTE_TABLE}" pref "${BOX_ROUTE_PREF}" >/dev/null 2>&1; then
    FW_LAST_ERROR="failed to install ip rule fwmark=${BOX_FWMARK} table=${BOX_ROUTE_TABLE} pref=${BOX_ROUTE_PREF}"
    return "${E_FIREWALL_APPLY}"
  fi
  if ! "${route_bin}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; then
    if ! "${route_bin}" route add local default dev lo table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1; then
      FW_LAST_ERROR="failed to install local route for table=${BOX_ROUTE_TABLE}"
      return "${E_FIREWALL_APPLY}"
    fi
  fi

  return 0
}

# backend_iptables_rule_line_matches_box LINE
# True when LINE carries Box's own fwmark/table pair.
backend_iptables_rule_line_matches_box() {
  local line="${1:-}"
  backend_iptables_rule_line_matches_mark_table "${line}" "${BOX_FWMARK}" "${BOX_ROUTE_TABLE}"
}

# backend_iptables_rule_line_matches_mark_table LINE MARK TABLE
# True when LINE's fwmark equals MARK (after normalization) and its lookup
# table equals TABLE.
backend_iptables_rule_line_matches_mark_table() {
  local line="${1:-}"
  local mark="${2:?missing mark}"
  local table="${3:?missing table}"
  local found_mark found_table

  found_mark="$(backend_iptables_rule_line_fwmark "${line}" 2>/dev/null || true)"
  found_table="$(backend_iptables_rule_line_table "${line}" 2>/dev/null || true)"
  [[ -n "${found_mark}" && "${found_table}" == "${table}" ]] || return 1
  fwmark_equal "${found_mark}" "${mark}"
}

# backend_iptables_rule_line_pref LINE
# Print the numeric pref of an `ip rule list` line; fail when absent.
backend_iptables_rule_line_pref() {
  local line="${1:-}"
  [[ "${line}" =~ (^|[[:space:]])pref[[:space:]]+([0-9]+)($|[[:space:]]) ]] || return 1
  printf '%s\n' "${BASH_REMATCH[2]}"
}
+ "${ip_tool}" rule del fwmark "${BOX_FWMARK}" table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1 || true + fi + fi + done < <("${ip_tool}" rule list 2>/dev/null || true) +} + +backend_iptables_has_policy_rule() { + local mark="${1:?missing mark}" + local table="${2:?missing table}" + local ip_tool line + ip_tool="$(ip_cmd || true)" + [[ -n "${ip_tool}" ]] || return 1 + while IFS= read -r line; do + [[ -n "${line}" ]] || continue + if backend_iptables_rule_line_matches_mark_table "${line}" "${mark}" "${table}"; then + return 0 + fi + done < <("${ip_tool}" rule list 2>/dev/null || true) + return 1 +} + +backend_iptables_apply_mode_rules() { + local mode="${1:?missing mode}" + + case "${mode}" in + tun) + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -j RETURN + ;; + redirect) + backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -p tcp -j REDIRECT --to-ports "${BOX_REDIR_PORT}" + ;; + tproxy) + if backend_iptables_probe_tproxy \ + && iptables_try_add_if_missing_quiet mangle "${BOX_CHAIN_MANGLE}" -p tcp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}" \ + && iptables_try_add_if_missing_quiet mangle "${BOX_CHAIN_MANGLE}" -p udp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}"; then + : + else + iptables_delete_all mangle "${BOX_CHAIN_MANGLE}" -p tcp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}" + iptables_delete_all mangle "${BOX_CHAIN_MANGLE}" -p udp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}" + log "WARN" "firewall" "FW_TPROXY_DOWNGRADE" "TPROXY unavailable; using MARK fallback" + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -p tcp -j MARK --set-xmark "${BOX_FWMARK}" + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -p udp -j MARK --set-xmark "${BOX_FWMARK}" + fi + if ! 
backend_iptables_ensure_policy_route; then + return "${E_FIREWALL_APPLY}" + fi + ;; + mixed|enhance) + backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -p tcp -j REDIRECT --to-ports "${BOX_REDIR_PORT}" + if backend_iptables_probe_tproxy \ + && iptables_try_add_if_missing_quiet mangle "${BOX_CHAIN_MANGLE}" -p udp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}"; then + : + else + iptables_delete_all mangle "${BOX_CHAIN_MANGLE}" -p udp -j TPROXY --on-port "${BOX_TPROXY_PORT}" --tproxy-mark "${BOX_FWMARK}" + log "WARN" "firewall" "FW_TPROXY_DOWNGRADE" "TPROXY unavailable for UDP; using MARK fallback" + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -p udp -j MARK --set-xmark "${BOX_FWMARK}" + fi + if ! backend_iptables_ensure_policy_route; then + return "${E_FIREWALL_APPLY}" + fi + ;; + *) + FW_LAST_ERROR="unsupported network mode: ${mode}" + return "${E_FIREWALL_APPLY}" + ;; + esac + + return 0 +} + +backend_iptables_apply_dns_strategy() { + local dns_mode="${1:?missing dns mode}" + + case "${dns_mode}" in + disable) + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_DNS_MANGLE}" -j RETURN + backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -j RETURN + ;; + redirect) + backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -p udp --dport 53 -j REDIRECT --to-ports "${BOX_DNS_PORT}" + backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -p tcp --dport 53 -j REDIRECT --to-ports "${BOX_DNS_PORT}" + ;; + tproxy) + if backend_iptables_probe_tproxy \ + && iptables_try_add_if_missing_quiet mangle "${BOX_CHAIN_DNS_MANGLE}" -p udp --dport 53 -j TPROXY --on-port "${BOX_DNS_PORT}" --tproxy-mark "${BOX_FWMARK}" \ + && iptables_try_add_if_missing_quiet mangle "${BOX_CHAIN_DNS_MANGLE}" -p tcp --dport 53 -j TPROXY --on-port "${BOX_DNS_PORT}" --tproxy-mark "${BOX_FWMARK}"; then + if ! 
backend_iptables_ensure_policy_route; then + return "${E_FIREWALL_APPLY}" + fi + else + iptables_delete_all mangle "${BOX_CHAIN_DNS_MANGLE}" -p udp --dport 53 -j TPROXY --on-port "${BOX_DNS_PORT}" --tproxy-mark "${BOX_FWMARK}" + iptables_delete_all mangle "${BOX_CHAIN_DNS_MANGLE}" -p tcp --dport 53 -j TPROXY --on-port "${BOX_DNS_PORT}" --tproxy-mark "${BOX_FWMARK}" + log "WARN" "firewall" "FW_DNS_TPROXY_DOWNGRADE" "DNS tproxy unavailable; redirecting DNS instead" + backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -p udp --dport 53 -j REDIRECT --to-ports "${BOX_DNS_PORT}" + backend_iptables_add_rule_checked nat "${BOX_CHAIN_DNS_NAT}" -p tcp --dport 53 -j REDIRECT --to-ports "${BOX_DNS_PORT}" + fi + ;; + *) + FW_LAST_ERROR="unsupported dns_hijack_mode: ${dns_mode}" + return "${E_FIREWALL_APPLY}" + ;; + esac + + return 0 +} + +backend_iptables_create_base() { + backend_iptables_ensure_chain_checked mangle "${BOX_CHAIN_MANGLE}" + backend_iptables_ensure_chain_checked nat "${BOX_CHAIN_NAT}" + backend_iptables_ensure_chain_checked mangle "${BOX_CHAIN_DNS_MANGLE}" + backend_iptables_ensure_chain_checked nat "${BOX_CHAIN_DNS_NAT}" + + backend_iptables_add_rule_checked mangle PREROUTING -j "${BOX_CHAIN_MANGLE}" + backend_iptables_add_rule_checked mangle OUTPUT -j "${BOX_CHAIN_MANGLE}" + backend_iptables_add_rule_checked nat PREROUTING -j "${BOX_CHAIN_NAT}" + backend_iptables_add_rule_checked nat OUTPUT -j "${BOX_CHAIN_NAT}" + + backend_iptables_add_rule_checked mangle "${BOX_CHAIN_MANGLE}" -j "${BOX_CHAIN_DNS_MANGLE}" + backend_iptables_add_rule_checked nat "${BOX_CHAIN_NAT}" -j "${BOX_CHAIN_DNS_NAT}" +} + +backend_iptables_apply_mode() { + local mode="${1:?missing network mode}" + + FW_LAST_ERROR="" + if ! backend_iptables_init; then + FW_LAST_ERROR="${FW_LAST_ERROR:-failed to initialize iptables backend}" + return "${E_FIREWALL_APPLY}" + fi + + if [[ "${BOX_FIREWALL_DRY_RUN:-0}" == "1" ]]; then + backend_iptables_dry_run "${mode}" + return 0 + fi + + if ! 
backend_iptables_cleanup; then + FW_LAST_ERROR="failed to cleanup existing firewall state" + return "${E_FIREWALL_APPLY}" + fi + + if ! backend_iptables_create_base; then + FW_LAST_ERROR="failed to create base chains" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + + if ! backend_iptables_apply_anti_loop; then + FW_LAST_ERROR="failed to apply anti-loop rules" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + + if [[ "${BOX_DNS_COEXIST_MODE}" == "preserve_tailnet" ]]; then + if ! backend_iptables_apply_tailscale_bypass; then + FW_LAST_ERROR="failed to apply tailscale bypass" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + FW_TAILSCALE_BYPASS_APPLIED="true" + else + FW_TAILSCALE_BYPASS_APPLIED="false" + fi + + if ! backend_iptables_apply_policy_placeholders; then + FW_LAST_ERROR="failed to apply policy placeholder rules" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + + if ! backend_iptables_apply_mode_rules "${mode}"; then + FW_LAST_ERROR="${FW_LAST_ERROR:-failed to apply mode rules}" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + + if ! 
backend_iptables_apply_dns_strategy "${BOX_DNS_HIJACK_MODE}"; then + FW_LAST_ERROR="${FW_LAST_ERROR:-failed to apply dns strategy}" + backend_iptables_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + + return 0 +} + +backend_iptables_dry_run() { + local mode="${1:?missing mode}" + printf '# dry-run backend=iptables mode=%s dns=%s coexist=%s\n' "${mode}" "${BOX_DNS_HIJACK_MODE}" "${BOX_DNS_COEXIST_MODE}" + printf 'iptables -t mangle -D PREROUTING -j %s\n' "${BOX_CHAIN_MANGLE}" + printf 'iptables -t mangle -D OUTPUT -j %s\n' "${BOX_CHAIN_MANGLE}" + printf 'iptables -t nat -D PREROUTING -j %s\n' "${BOX_CHAIN_NAT}" + printf 'iptables -t nat -D OUTPUT -j %s\n' "${BOX_CHAIN_NAT}" + printf 'iptables -t mangle -N %s ; iptables -t nat -N %s\n' "${BOX_CHAIN_MANGLE}" "${BOX_CHAIN_NAT}" + printf 'iptables -t mangle -N %s ; iptables -t nat -N %s\n' "${BOX_CHAIN_DNS_MANGLE}" "${BOX_CHAIN_DNS_NAT}" + printf 'iptables mode rules for %s and dns strategy %s\n' "${mode}" "${BOX_DNS_HIJACK_MODE}" + printf 'ip rule add fwmark %s table %s pref %s\n' "${BOX_FWMARK}" "${BOX_ROUTE_TABLE}" "${BOX_ROUTE_PREF}" + printf 'ip route add local default dev lo table %s\n' "${BOX_ROUTE_TABLE}" +} + +backend_iptables_collect_status() { + local ipt ip_tool + + FW_BACKEND_AVAILABLE="false" + FW_CHAIN_MANGLE="false" + FW_CHAIN_NAT="false" + FW_CHAIN_DNS_MANGLE="false" + FW_CHAIN_DNS_NAT="false" + FW_ROUTE_RULE="false" + FW_ROUTE_TABLE_INSTALLED="false" + FW_CAP_TPROXY="false" + FW_TAILSCALE_MARK_RULE="false" + FW_TAILSCALE_TABLE_PRESENT="false" + FW_TAILSCALE_BYPASS_APPLIED="false" + FW_CAP_IPV4="false" + FW_CAP_IPV6="false" + FW_DRY_RUN_SUPPORTED="true" + FW_DNS_COEXIST_MODE_ACTIVE="${BOX_DNS_COEXIST_MODE}" + FW_CAP_DETAILS="backend=iptables,available=false,ipv4=false,ipv6=false,tproxy=false,dry_run=true" + FW_LAST_ERROR="" + + ipt="$(iptables_cmd || true)" + ip_tool="$(ip_cmd || true)" + + if [[ -z "${ipt}" || -z "${ip_tool}" ]]; then + FW_LAST_ERROR="iptables/ip command not found" + export 
FW_BACKEND_AVAILABLE FW_CHAIN_MANGLE FW_CHAIN_NAT FW_CHAIN_DNS_MANGLE FW_CHAIN_DNS_NAT + export FW_ROUTE_RULE FW_ROUTE_TABLE_INSTALLED FW_CAP_TPROXY FW_TAILSCALE_MARK_RULE FW_TAILSCALE_TABLE_PRESENT FW_TAILSCALE_BYPASS_APPLIED + export FW_CAP_IPV4 FW_CAP_IPV6 FW_DRY_RUN_SUPPORTED FW_DNS_COEXIST_MODE_ACTIVE FW_CAP_DETAILS FW_LAST_ERROR + return 0 + fi + + if ! "${ipt}" -t mangle -S >/dev/null 2>&1; then + FW_LAST_ERROR="iptables inspection unavailable (need root/CAP_NET_ADMIN or kernel support)" + export FW_BACKEND_AVAILABLE FW_CHAIN_MANGLE FW_CHAIN_NAT FW_CHAIN_DNS_MANGLE FW_CHAIN_DNS_NAT + export FW_ROUTE_RULE FW_ROUTE_TABLE_INSTALLED FW_CAP_TPROXY FW_TAILSCALE_MARK_RULE FW_TAILSCALE_TABLE_PRESENT FW_TAILSCALE_BYPASS_APPLIED + export FW_CAP_IPV4 FW_CAP_IPV6 FW_DRY_RUN_SUPPORTED FW_DNS_COEXIST_MODE_ACTIVE FW_CAP_DETAILS FW_LAST_ERROR + return 0 + fi + + FW_BACKEND_AVAILABLE="true" + FW_CAP_IPV4="true" + if iptables_table_has_chain mangle "${BOX_CHAIN_MANGLE}"; then FW_CHAIN_MANGLE="true"; fi + if iptables_table_has_chain nat "${BOX_CHAIN_NAT}"; then FW_CHAIN_NAT="true"; fi + if iptables_table_has_chain mangle "${BOX_CHAIN_DNS_MANGLE}"; then FW_CHAIN_DNS_MANGLE="true"; fi + if iptables_table_has_chain nat "${BOX_CHAIN_DNS_NAT}"; then FW_CHAIN_DNS_NAT="true"; fi + + if backend_iptables_has_policy_rule "${BOX_FWMARK}" "${BOX_ROUTE_TABLE}"; then FW_ROUTE_RULE="true"; fi + if "${ip_tool}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; then FW_ROUTE_TABLE_INSTALLED="true"; fi + if backend_iptables_probe_tproxy; then FW_CAP_TPROXY="true"; fi + FW_CAP_DETAILS="backend=iptables,available=true,ipv4=true,ipv6=false,tproxy=${FW_CAP_TPROXY},dry_run=true" + if backend_iptables_has_policy_rule "${BOX_TAILSCALE_FWMARK}" "${BOX_TAILSCALE_ROUTE_TABLE}"; then + FW_TAILSCALE_MARK_RULE="true" + fi + if "${ip_tool}" route show table "${BOX_TAILSCALE_ROUTE_TABLE}" 2>/dev/null | grep -q '.'; then + FW_TAILSCALE_TABLE_PRESENT="true" + fi + if 
iptables_rule_exists mangle "${BOX_CHAIN_MANGLE}" -i "${BOX_TAILSCALE_IFACE}" -j RETURN; then + FW_TAILSCALE_BYPASS_APPLIED="true" + fi + + export FW_BACKEND_AVAILABLE FW_CHAIN_MANGLE FW_CHAIN_NAT FW_CHAIN_DNS_MANGLE FW_CHAIN_DNS_NAT + export FW_ROUTE_RULE FW_ROUTE_TABLE_INSTALLED FW_CAP_TPROXY FW_TAILSCALE_MARK_RULE FW_TAILSCALE_TABLE_PRESENT FW_TAILSCALE_BYPASS_APPLIED + export FW_CAP_IPV4 FW_CAP_IPV6 FW_DRY_RUN_SUPPORTED FW_DNS_COEXIST_MODE_ACTIVE FW_CAP_DETAILS FW_LAST_ERROR +} diff --git a/lib/firewall/backend_nft.sh b/lib/firewall/backend_nft.sh new file mode 100644 index 0000000..ca9eb50 --- /dev/null +++ b/lib/firewall/backend_nft.sh @@ -0,0 +1,449 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +BOX_NFT_TABLE_INET="box_mangle" +BOX_NFT_TABLE_IP="box_nat" + +nft_cmd() { + if [[ -n "${BOX_NFT_CMD:-}" ]]; then + printf '%s\n' "${BOX_NFT_CMD}" + return 0 + fi + command -v nft >/dev/null 2>&1 && printf '%s\n' "nft" +} + +nft_ip_cmd() { + if [[ -n "${BOX_IP_CMD:-}" ]]; then + printf '%s\n' "${BOX_IP_CMD}" + return 0 + fi + command -v ip >/dev/null 2>&1 && printf '%s\n' "ip" +} + +nft_mark_value() { + local mark="${1:?missing mark}" + printf '%s\n' "${mark%%/*}" +} + +nft_mark_mask() { + local mark="${1:?missing mark}" + if [[ "${mark}" == */* ]]; then + printf '%s\n' "${mark##*/}" + else + printf '%s\n' "0xffffffff" + fi +} + +nft_fwmark_normalize() { + local raw="${1:?missing fwmark}" + local value mask + + if [[ "${raw}" == */* ]]; then + value="${raw%%/*}" + mask="${raw##*/}" + else + value="${raw}" + mask="0xffffffff" + fi + + if ! [[ "${value}" =~ ^(0[xX][0-9a-fA-F]+|[0-9]+)$ ]]; then + return 1 + fi + if ! 
[[ "${mask}" =~ ^(0[xX][0-9a-fA-F]+|[0-9]+)$ ]]; then
    return 1
  fi

  printf '%u/%u\n' "$((value))" "$((mask))"
}

# Compare two fwmark specs ("value" or "value/mask") for equality after
# normalizing both to decimal "value/mask" form.
# Returns 0 when equal, 1 otherwise (including when either side fails to
# normalize — normalization errors are deliberately swallowed).
nft_fwmark_equal() {
  local left="${1:?missing left fwmark}"
  local right="${2:?missing right fwmark}"
  local left_norm right_norm

  left_norm="$(nft_fwmark_normalize "${left}" 2>/dev/null || true)"
  right_norm="$(nft_fwmark_normalize "${right}" 2>/dev/null || true)"
  [[ -n "${left_norm}" && -n "${right_norm}" && "${left_norm}" == "${right_norm}" ]]
}

# Extract the token that follows "fwmark" in one `ip rule list` line.
# Prints the mark and returns 0, or returns 1 when no fwmark field exists.
backend_nft_rule_line_fwmark() {
  local line="${1:-}"
  local i
  # Fix: declare the split array local so it does not leak a global
  # `parts` variable into the calling shell.
  local -a parts=()
  read -r -a parts <<<"${line}"
  for ((i = 0; i < ${#parts[@]}; i++)); do
    if [[ "${parts[$i]}" == "fwmark" && $((i + 1)) -lt ${#parts[@]} ]]; then
      printf '%s\n' "${parts[$((i + 1))]}"
      return 0
    fi
  done
  return 1
}

# Extract the routing table that follows "lookup"/"table" in one
# `ip rule list` line. Prints the table and returns 0, else returns 1.
backend_nft_rule_line_table() {
  local line="${1:-}"
  local i
  # Fix: same local declaration as backend_nft_rule_line_fwmark.
  local -a parts=()
  read -r -a parts <<<"${line}"
  for ((i = 0; i < ${#parts[@]}; i++)); do
    if [[ ( "${parts[$i]}" == "lookup" || "${parts[$i]}" == "table" ) && $((i + 1)) -lt ${#parts[@]} ]]; then
      printf '%s\n' "${parts[$((i + 1))]}"
      return 0
    fi
  done
  return 1
}

# Decide whether nft TPROXY expressions may be used. Precedence:
# 1. BOX_NFT_FORCE_NO_TPROXY=1 forces the capability off;
# 2. an explicit BOX_CAP_TPROXY override (1/true => on, anything else off);
# 3. a live `nft describe tproxy` probe as the last resort.
backend_nft_probe_tproxy() {
  if [[ "${BOX_NFT_FORCE_NO_TPROXY:-0}" == "1" ]]; then
    return 1
  fi
  if [[ -n "${BOX_CAP_TPROXY:-}" ]]; then
    [[ "${BOX_CAP_TPROXY}" == "1" || "${BOX_CAP_TPROXY}" == "true" ]] && return 0
    return 1
  fi
  local nft
  nft="$(nft_cmd || true)"
  [[ -n "${nft}" ]] || return 1
  "${nft}" describe tproxy >/dev/null 2>&1
}

# Verify the nft/ip prerequisites exist before any apply/cleanup work.
# Sets FW_LAST_ERROR and returns E_FIREWALL_APPLY when either is missing.
backend_nft_init() {
  local nft ip_tool
  nft="$(nft_cmd || true)"
  ip_tool="$(nft_ip_cmd || true)"
  if [[ -z "${nft}" || -z "${ip_tool}" ]]; then
    FW_LAST_ERROR="nft/ip command not found"
    log "ERROR" "firewall" "E_FW_PREREQ" "${FW_LAST_ERROR}"
    return "${E_FIREWALL_APPLY}"
  fi
  return 0
}

# True when an `ip rule list` line carries this box's fwmark and route table.
backend_nft_rule_line_matches_box() {
  local line="${1:-}"
  local line_mark line_table
  line_mark="$(backend_nft_rule_line_fwmark "${line}" 2>/dev/null || true)"
  line_table="$(backend_nft_rule_line_table "${line}"
2>/dev/null || true)"
  [[ -n "${line_mark}" && "${line_table}" == "${BOX_ROUTE_TABLE}" ]] || return 1
  nft_fwmark_equal "${line_mark}" "${BOX_FWMARK}"
}

# Extract the numeric preference from a "pref NNN" field of an
# `ip rule list` line. Prints the number and returns 0, else returns 1.
backend_nft_rule_line_pref() {
  local line="${1:-}"
  if [[ "${line}" =~ (^|[[:space:]])pref[[:space:]]+([0-9]+)($|[[:space:]]) ]]; then
    printf '%s\n' "${BASH_REMATCH[2]}"
    return 0
  fi
  return 1
}

# Delete every policy rule owned by this box (matching BOX_FWMARK ->
# BOX_ROUTE_TABLE). Deleting by pref is preferred because it is
# unambiguous; the fwmark/table form is a best-effort fallback when the
# listing did not expose a pref. All deletions are tolerated to fail so
# pruning stays idempotent. Missing `ip` is treated as "nothing to prune".
backend_nft_prune_box_policy_rules() {
  local ip_tool line pref
  ip_tool="$(nft_ip_cmd || true)"
  [[ -n "${ip_tool}" ]] || return 0

  while IFS= read -r line; do
    [[ -n "${line}" ]] || continue
    if backend_nft_rule_line_matches_box "${line}"; then
      pref="$(backend_nft_rule_line_pref "${line}" 2>/dev/null || true)"
      if [[ -n "${pref}" ]]; then
        "${ip_tool}" rule del pref "${pref}" >/dev/null 2>&1 || true
      else
        "${ip_tool}" rule del fwmark "${BOX_FWMARK}" table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1 || true
      fi
    fi
  done < <("${ip_tool}" rule list 2>/dev/null || true)
}

# (Re)install the box policy rule and its local-route table entry.
# Idempotent: existing box rules are pruned first, then re-added.
# NOTE(review): unlike the iptables backend's ensure_policy_route, every
# failure here is suppressed with `|| true` and the function cannot report
# an apply error — confirm this asymmetry is intentional.
backend_nft_ensure_policy_route() {
  local ip_tool
  ip_tool="$(nft_ip_cmd)"
  backend_nft_prune_box_policy_rules

  "${ip_tool}" rule add fwmark "${BOX_FWMARK}" table "${BOX_ROUTE_TABLE}" pref "${BOX_ROUTE_PREF}" >/dev/null 2>&1 || true
  if !
"${ip_tool}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; then + "${ip_tool}" route add local default dev lo table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1 || true + fi +} + +backend_nft_cleanup() { + local nft ip_tool + nft="$(nft_cmd || true)" + ip_tool="$(nft_ip_cmd || true)" + + if [[ -n "${nft}" ]]; then + "${nft}" delete table inet "${BOX_NFT_TABLE_INET}" >/dev/null 2>&1 || true + "${nft}" delete table ip "${BOX_NFT_TABLE_IP}" >/dev/null 2>&1 || true + fi + + if [[ -n "${ip_tool}" ]]; then + backend_nft_prune_box_policy_rules + while "${ip_tool}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; do + "${ip_tool}" route del local default dev lo table "${BOX_ROUTE_TABLE}" >/dev/null 2>&1 || break + done + fi +} + +backend_nft_ruleset_debug_path() { + init_runtime_paths + printf '%s/state/nft-last-ruleset.nft\n' "${BOX_RUN_DIR}" +} + +backend_nft_stderr_debug_path() { + init_runtime_paths + printf '%s/state/nft-last-error.log\n' "${BOX_RUN_DIR}" +} + +backend_nft_record_failure_artifacts() { + local ruleset="${1:-}" + local stderr_file="${2:-}" + local ruleset_path error_path + + ruleset_path="$(backend_nft_ruleset_debug_path)" + error_path="$(backend_nft_stderr_debug_path)" + + printf '%s\n' "${ruleset}" >"${ruleset_path}" + if [[ -n "${stderr_file}" && -f "${stderr_file}" ]]; then + cp -f "${stderr_file}" "${error_path}" 2>/dev/null || true + else + : >"${error_path}" + fi + + log "ERROR" "firewall" "E_NFT_RULESET_DUMP" \ + "nft failure artifacts written ruleset=${ruleset_path} stderr=${error_path}" +} + +backend_nft_build_ruleset() { + local mode="${1:?missing mode}" + local mark_value tailscale_mark tailscale_mask + mark_value="$(nft_mark_value "${BOX_FWMARK}")" + tailscale_mark="$(nft_mark_value "${BOX_TAILSCALE_FWMARK}")" + tailscale_mask="$(nft_mark_mask "${BOX_TAILSCALE_FWMARK}")" + + cat </dev/null 2>"${stderr_file}"; then + # Some kernels expose nft userspace tproxy tokens but fail 
loading tproxy expressions. + # Retry once with explicit non-tproxy fallback before failing apply. + if backend_nft_probe_tproxy; then + log "WARN" "firewall" "FW_TPROXY_DOWNGRADE" "nft apply with tproxy failed; retrying without tproxy expressions" + backend_nft_cleanup || true + BOX_NFT_FORCE_NO_TPROXY=1 + ruleset="$(backend_nft_build_ruleset "${mode}")" || { + unset BOX_NFT_FORCE_NO_TPROXY + rm -f "${stderr_file}" + FW_LAST_ERROR="failed to build nft fallback ruleset" + return "${E_FIREWALL_APPLY}" + } + if ! printf '%s\n' "${ruleset}" | "${nft}" -f - >/dev/null 2>"${stderr_file}"; then + unset BOX_NFT_FORCE_NO_TPROXY + backend_nft_record_failure_artifacts "${ruleset}" "${stderr_file}" + rm -f "${stderr_file}" + FW_LAST_ERROR="nft apply failed" + backend_nft_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + unset BOX_NFT_FORCE_NO_TPROXY + else + backend_nft_record_failure_artifacts "${ruleset}" "${stderr_file}" + rm -f "${stderr_file}" + FW_LAST_ERROR="nft apply failed" + backend_nft_cleanup || true + return "${E_FIREWALL_APPLY}" + fi + fi + rm -f "${stderr_file}" + + case "${mode}" in + tproxy|mixed|enhance) backend_nft_ensure_policy_route ;; + esac + if [[ "${BOX_DNS_HIJACK_MODE}" == "tproxy" ]]; then + backend_nft_ensure_policy_route + fi + + if [[ "${BOX_DNS_COEXIST_MODE}" == "preserve_tailnet" ]]; then + FW_TAILSCALE_BYPASS_APPLIED="true" + fi + return 0 +} + +backend_nft_collect_status() { + local nft ip_tool + FW_BACKEND_AVAILABLE="false" + FW_CHAIN_MANGLE="false" + FW_CHAIN_NAT="false" + FW_CHAIN_DNS_MANGLE="false" + FW_CHAIN_DNS_NAT="false" + FW_ROUTE_RULE="false" + FW_ROUTE_TABLE_INSTALLED="false" + FW_CAP_TPROXY="false" + FW_TAILSCALE_MARK_RULE="false" + FW_TAILSCALE_TABLE_PRESENT="false" + FW_TAILSCALE_BYPASS_APPLIED="false" + FW_CAP_IPV4="false" + FW_CAP_IPV6="false" + FW_DRY_RUN_SUPPORTED="true" + FW_DNS_COEXIST_MODE_ACTIVE="${BOX_DNS_COEXIST_MODE}" + 
FW_CAP_DETAILS="backend=nftables,available=false,ipv4=false,ipv6=false,tproxy=false,dry_run=true" + FW_LAST_ERROR="" + + nft="$(nft_cmd || true)" + ip_tool="$(nft_ip_cmd || true)" + if [[ -z "${nft}" || -z "${ip_tool}" ]]; then + FW_LAST_ERROR="nft/ip command not found" + return 0 + fi + + if ! "${nft}" list tables >/dev/null 2>&1; then + FW_LAST_ERROR="nftables inspection unavailable (need root/CAP_NET_ADMIN or kernel support)" + return 0 + fi + + FW_BACKEND_AVAILABLE="true" + FW_CAP_IPV4="true" + if "${nft}" list table inet "${BOX_NFT_TABLE_INET}" >/dev/null 2>&1; then + FW_CHAIN_MANGLE="true" + FW_CHAIN_DNS_MANGLE="true" + fi + if "${nft}" list table ip "${BOX_NFT_TABLE_IP}" >/dev/null 2>&1; then + FW_CHAIN_NAT="true" + FW_CHAIN_DNS_NAT="true" + fi + if backend_nft_probe_tproxy; then FW_CAP_TPROXY="true"; fi + FW_CAP_DETAILS="backend=nftables,available=true,ipv4=true,ipv6=false,tproxy=${FW_CAP_TPROXY},dry_run=true" + if "${ip_tool}" rule list 2>/dev/null | grep -Eq "fwmark[[:space:]]+${BOX_FWMARK}[[:space:]]+(lookup|table)[[:space:]]+${BOX_ROUTE_TABLE}"; then FW_ROUTE_RULE="true"; fi + if "${ip_tool}" route show table "${BOX_ROUTE_TABLE}" 2>/dev/null | grep -Fq "local default dev lo"; then FW_ROUTE_TABLE_INSTALLED="true"; fi + if "${ip_tool}" rule list 2>/dev/null | grep -Eq "fwmark[[:space:]]+${BOX_TAILSCALE_FWMARK}[[:space:]]+(lookup|table)[[:space:]]+${BOX_TAILSCALE_ROUTE_TABLE}"; then + FW_TAILSCALE_MARK_RULE="true" + fi + if "${ip_tool}" route show table "${BOX_TAILSCALE_ROUTE_TABLE}" 2>/dev/null | grep -q '.'; then + FW_TAILSCALE_TABLE_PRESENT="true" + fi + if "${nft}" list chain inet "${BOX_NFT_TABLE_INET}" box_main 2>/dev/null | grep -Fq "iifname \"${BOX_TAILSCALE_IFACE}\" return"; then + FW_TAILSCALE_BYPASS_APPLIED="true" + fi +} diff --git a/lib/firewall/firewall.sh b/lib/firewall/firewall.sh new file mode 100644 index 0000000..6891d75 --- /dev/null +++ b/lib/firewall/firewall.sh @@ -0,0 +1,258 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set 
-euo pipefail + +source "${BOX_LIB_DIR}/firewall/backend_iptables.sh" +source "${BOX_LIB_DIR}/firewall/backend_nft.sh" + +firewall_state_file() { + init_runtime_paths + printf '%s/firewall.state\n' "${BOX_RUN_DIR}" +} + +firewall_write_state() { + local mode="${1:?missing mode}" + local status="${2:?missing status}" + local state_file + state_file="$(firewall_state_file)" + cat >"${state_file}" < [--json]\n' >&2 + return 2 + ;; + esac +} diff --git a/lib/policy/context.sh b/lib/policy/context.sh new file mode 100644 index 0000000..eb675bc --- /dev/null +++ b/lib/policy/context.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +POLICY_CTX_HAS_NETWORK="false" +POLICY_CTX_WIFI_CONNECTED="false" +POLICY_CTX_WIFI_CANDIDATE="false" +POLICY_CTX_SSID="" +POLICY_CTX_BSSID="" +declare -a POLICY_CTX_IFACES=() + +policy_ip_cmd() { + printf '%s\n' "${BOX_IP_CMD:-ip}" +} + +policy_nmcli_cmd() { + printf '%s\n' "${BOX_NMCLI_CMD:-nmcli}" +} + +policy_iw_cmd() { + printf '%s\n' "${BOX_IW_CMD:-iw}" +} + +policy_bool_true() { + case "${1:-false}" in + true|1|yes|on) return 0 ;; + *) return 1 ;; + esac +} + +policy_reset_context() { + POLICY_CTX_HAS_NETWORK="false" + POLICY_CTX_WIFI_CONNECTED="false" + POLICY_CTX_WIFI_CANDIDATE="false" + POLICY_CTX_SSID="" + POLICY_CTX_BSSID="" + POLICY_CTX_IFACES=() +} + +policy_collect_ifaces() { + local ip_tool line iface + local -a ifaces=() + + ip_tool="$(policy_ip_cmd)" + if ! 
command -v "${ip_tool}" >/dev/null 2>&1; then
    return 0
  fi

  while IFS= read -r line; do
    # `ip -o link` lines look like "3: wlan0: <...>"; capture the name
    # after the index, then strip any "@parent" suffix (VLAN/virtual links).
    iface="$(sed -n 's/^[0-9][0-9]*: \([^:[:space:]]\+\).*/\1/p' <<<"${line}")"
    iface="${iface%@*}"
    [[ -n "${iface}" && "${iface}" != "lo" ]] || continue
    case "${iface}" in
      # Purely name-based heuristic: a wifi NIC with an unusual name
      # (e.g. renamed by udev) is not flagged as a wifi candidate.
      wlan*|wl*|wifi*) POLICY_CTX_WIFI_CANDIDATE="true" ;;
    esac
    # De-duplicate while preserving first-seen order.
    if [[ " ${ifaces[*]} " != *" ${iface} "* ]]; then
      ifaces+=("${iface}")
    fi
  done < <("${ip_tool}" -o link show up 2>/dev/null || true)

  POLICY_CTX_IFACES=("${ifaces[@]}")
  if [[ "${#POLICY_CTX_IFACES[@]}" -gt 0 ]]; then
    POLICY_CTX_HAS_NETWORK="true"
  fi
}

# Fill wifi context from NetworkManager. Returns 0 when an active wifi
# connection was found, 1 when nmcli is missing or nothing is active.
policy_collect_wifi_nmcli() {
  local nmcli_tool line

  nmcli_tool="$(policy_nmcli_cmd)"
  command -v "${nmcli_tool}" >/dev/null 2>&1 || return 1

  while IFS= read -r line; do
    case "${line}" in
      # "yes:"/"true:" cover terse ACTIVE column values; "*" covers the
      # in-use marker some nmcli versions print first.
      yes:*|true:*|\**)
        POLICY_CTX_WIFI_CONNECTED="true"
        # NOTE(review): nmcli -t escapes ':' inside values as '\:'; this
        # IFS=: split keeps those backslashes in the captured BSSID (and
        # can misplace an SSID containing a colon) — confirm downstream
        # SSID/BSSID matching copes with that.
        IFS=: read -r _ POLICY_CTX_SSID POLICY_CTX_BSSID <<<"${line}"
        return 0
        ;;
    esac
  done < <("${nmcli_tool}" -t -f active,ssid,bssid dev wifi 2>/dev/null || true)
  return 1
}

# Fallback wifi detection via `iw dev <iface> link` for systems without
# NetworkManager. Scans the already-collected interfaces and returns 0 as
# soon as any of them reports an association.
policy_collect_wifi_iw() {
  local iw_tool iface line

  iw_tool="$(policy_iw_cmd)"
  command -v "${iw_tool}" >/dev/null 2>&1 || return 1

  for iface in "${POLICY_CTX_IFACES[@]}"; do
    while IFS= read -r line; do
      case "${line}" in
        Connected\ to\ *)
          POLICY_CTX_WIFI_CONNECTED="true"
          # "Connected to <bssid> (...)" — third field is the BSSID.
          POLICY_CTX_BSSID="$(awk '{print $3}' <<<"${line}")"
          ;;
        SSID:*)
          POLICY_CTX_SSID="${line#SSID: }"
          ;;
      esac
    done < <("${iw_tool}" dev "${iface}" link 2>/dev/null || true)
    if policy_bool_true "${POLICY_CTX_WIFI_CONNECTED}"; then
      return 0
    fi
  done
  return 1
}

# Rebuild the whole policy context snapshot: reset, collect interfaces
# first (wifi detection iterates them), then wifi identity via nmcli with
# an iw fallback. Never fails — missing tools just leave defaults.
policy_collect_context() {
  policy_reset_context
  policy_collect_ifaces
  policy_collect_wifi_nmcli || policy_collect_wifi_iw || true
}
diff --git a/lib/policy/engine.sh b/lib/policy/engine.sh
new file mode 100644
index 0000000..12e8dba
--- /dev/null
+++ b/lib/policy/engine.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# shellcheck shell=bash
+
+set -euo pipefail
+
+POLICY_DESIRED_STATE="disabled" +POLICY_APPLIED_STATE="unchanged" +POLICY_REASON="" +POLICY_LAST_ERROR="" + +policy_array_matches() { + local needle="${1:-}" + shift || true + local pattern glob + [[ -n "${needle}" ]] || return 1 + for pattern in "$@"; do + [[ -n "${pattern}" ]] || continue + glob="${pattern//+/*}" + # shellcheck disable=SC2254 + case "${needle}" in + ${glob}) return 0 ;; + esac + done + return 1 +} + +policy_ifaces_match_any() { + local iface + for iface in "${POLICY_CTX_IFACES[@]}"; do + if policy_array_matches "${iface}" "$@"; then + return 0 + fi + done + return 1 +} + +policy_disable_marker_present() { + [[ -n "${BOX_POLICY_DISABLE_MARKER}" && -e "${BOX_POLICY_DISABLE_MARKER}" ]] +} + +policy_service_healthy() { + local pid + pid="$(read_pid_file "$(service_pid_file_readonly)" || true)" + is_pid_alive "${pid}" +} + +policy_evaluate_desired_state() { + POLICY_DESIRED_STATE="disabled" + POLICY_REASON="" + POLICY_LAST_ERROR="" + + policy_collect_context + + if ! policy_bool_true "${BOX_POLICY_ENABLED}"; then + POLICY_REASON="policy disabled in config" + return 0 + fi + + if policy_disable_marker_present; then + POLICY_REASON="disable marker present" + return 0 + fi + + if policy_bool_true "${POLICY_CTX_WIFI_CANDIDATE}" && ! policy_bool_true "${POLICY_CTX_WIFI_CONNECTED}"; then + if policy_bool_true "${BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT}"; then + POLICY_DESIRED_STATE="enabled" + POLICY_REASON="wifi identity unavailable; using disconnect fallback" + else + POLICY_REASON="wifi identity unavailable; using disconnect fallback" + fi + return 0 + fi + + if ! policy_bool_true "${POLICY_CTX_HAS_NETWORK}" && ! 
policy_bool_true "${POLICY_CTX_WIFI_CONNECTED}"; then
    # No network at all: only the explicit disconnect-fallback option may
    # keep the module enabled.
    if policy_bool_true "${BOX_POLICY_USE_MODULE_ON_WIFI_DISCONNECT}"; then
      POLICY_DESIRED_STATE="enabled"
      POLICY_REASON="wifi disconnect fallback enabled"
    else
      POLICY_REASON="no active network"
    fi
    return 0
  fi

  # Mode-specific decision. NOTE(review): there is no *) default arm, so an
  # unknown BOX_POLICY_PROXY_MODE silently stays "disabled" with an empty
  # reason — confirm config validation rejects other values.
  case "${BOX_POLICY_PROXY_MODE}" in
    core)
      # core mode: always on once the earlier guards passed.
      POLICY_DESIRED_STATE="enabled"
      POLICY_REASON="core mode"
      ;;
    whitelist)
      # Enable only when any interface, SSID, or BSSID matches an allow
      # pattern.
      if policy_ifaces_match_any "${BOX_POLICY_ALLOW_IFACES[@]}" || \
        policy_array_matches "${POLICY_CTX_SSID}" "${BOX_POLICY_ALLOW_SSIDS[@]}" || \
        policy_array_matches "${POLICY_CTX_BSSID}" "${BOX_POLICY_ALLOW_BSSIDS[@]}"; then
        POLICY_DESIRED_STATE="enabled"
        POLICY_REASON="whitelist match"
      else
        POLICY_REASON="no whitelist match"
      fi
      ;;
    blacklist)
      # Inverse of whitelist: any ignore match keeps the module disabled.
      if policy_ifaces_match_any "${BOX_POLICY_IGNORE_IFACES[@]}" || \
        policy_array_matches "${POLICY_CTX_SSID}" "${BOX_POLICY_IGNORE_SSIDS[@]}" || \
        policy_array_matches "${POLICY_CTX_BSSID}" "${BOX_POLICY_IGNORE_BSSIDS[@]}"; then
        POLICY_REASON="blacklist match"
      else
        POLICY_DESIRED_STATE="enabled"
        POLICY_REASON="no blacklist match"
      fi
      ;;
  esac
}

# Drive the service toward the desired state ("enabled"/"disabled").
# Idempotent: when the service already matches, POLICY_APPLIED_STATE stays
# "unchanged" and nothing is touched. On start/stop failure the error text
# is stored in POLICY_LAST_ERROR and E_POLICY is returned.
policy_apply_desired_state() {
  local state="${1:?missing desired state}"

  POLICY_APPLIED_STATE="unchanged"
  case "${state}" in
    enabled)
      if policy_service_healthy; then
        POLICY_APPLIED_STATE="unchanged"
        return 0
      fi
      if ! service_start; then
        POLICY_LAST_ERROR="failed to start service for desired enabled state"
        return "${E_POLICY}"
      fi
      POLICY_APPLIED_STATE="started"
      ;;
    disabled)
      if ! policy_service_healthy; then
        POLICY_APPLIED_STATE="unchanged"
        return 0
      fi
      if !
service_stop; then + POLICY_LAST_ERROR="failed to stop service for desired disabled state" + return "${E_POLICY}" + fi + POLICY_APPLIED_STATE="stopped" + ;; + *) + POLICY_LAST_ERROR="unsupported desired state: ${state}" + return "${E_POLICY}" + ;; + esac +} + +policy_refresh_firewall_if_running() { + if policy_service_healthy; then + policy_spawn_firewall_refresh + fi +} diff --git a/lib/policy/policy.sh b/lib/policy/policy.sh new file mode 100644 index 0000000..413eccb --- /dev/null +++ b/lib/policy/policy.sh @@ -0,0 +1,430 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +source "${BOX_LIB_DIR}/policy/context.sh" +source "${BOX_LIB_DIR}/policy/engine.sh" + +policy_pid_file() { + init_runtime_paths + printf '%s/policy.pid\n' "${BOX_RUN_DIR}" +} + +policy_state_file() { + init_runtime_paths + printf '%s/policy.state\n' "${BOX_RUN_DIR}" +} + +policy_refresh_pid_file() { + init_runtime_paths + printf '%s/policy-refresh.pid\n' "${BOX_RUN_DIR}" +} + +policy_state_readonly_file() { + local run_dir + if [[ -d "${BOX_RUN_DIR}" ]]; then + run_dir="${BOX_RUN_DIR}" + elif [[ -d "${BOX_DEV_RUN_DIR}" ]]; then + run_dir="${BOX_DEV_RUN_DIR}" + else + run_dir="${BOX_RUN_DIR}" + fi + printf '%s/policy.state\n' "${run_dir}" +} + +policy_join_csv() { + local IFS=, + printf '%s' "$*" +} + +policy_json_array() { + local first=1 item + printf '[' + for item in "$@"; do + [[ -n "${item}" ]] || continue + if [[ "${first}" == "0" ]]; then + printf ',' + fi + first=0 + printf '"%s"' "$(json_escape "${item}")" + done + printf ']' +} + +policy_active_ifaces_json() { + local raw item + local -a items=() + + raw="$(policy_snapshot_get "active_ifaces" "")" + if [[ -z "${raw}" ]]; then + printf '[]' + return 0 + fi + + IFS=',' read -r -a items <<<"${raw}" + for item in "${items[@]}"; do + [[ -n "${item}" ]] || continue + policy_json_array "${items[@]}" + return 0 + done + + printf '[]' +} + +policy_write_state() { + local status="${1:?missing status}" + local 
last_event="${2:-}" + local last_event_ts="${3:-}" + local last_refresh_ts="${4:-}" + local state_file state_dir tmp_file + + state_file="$(policy_state_file)" + state_dir="$(dirname "${state_file}")" + tmp_file="$(mktemp "${state_dir}/policy.state.tmp.XXXXXX")" + cat >"${tmp_file}" <>"${BOX_LOG_DIR}/policy.log" 2>&1 + ) & + printf '%s\n' "$!" >"${pid_file}" +} + +policy_apply_cycle() { + local event_label="${1:-manual}" + local event_ts="${2:-$(timestamp_utc)}" + local refresh_requested="${3:-false}" + local refresh_ts="" + + POLICY_LAST_ERROR="" + if ! policy_evaluate_desired_state; then + POLICY_LAST_ERROR="policy evaluation failed" + policy_write_state "error" "${event_label}" "${event_ts}" "" + return "${E_POLICY}" + fi + + if ! policy_apply_desired_state "${POLICY_DESIRED_STATE}"; then + policy_write_state "error" "${event_label}" "${event_ts}" "" + return "${E_POLICY}" + fi + + if policy_bool_true "${refresh_requested}" && [[ "${POLICY_DESIRED_STATE}" == "enabled" ]]; then + if ! 
policy_refresh_firewall_if_running; then + POLICY_LAST_ERROR="firewall refresh failed after address event" + policy_write_state "error" "${event_label}" "${event_ts}" "" + return "${E_POLICY}" + fi + refresh_ts="$(timestamp_utc)" + fi + + policy_write_state "${POLICY_DESIRED_STATE}" "${event_label}" "${event_ts}" "${refresh_ts}" + log "INFO" "policy" "POLICY_APPLIED" \ + "desired=${POLICY_DESIRED_STATE} applied=${POLICY_APPLIED_STATE} reason=${POLICY_REASON} event=${event_label}" +} + +policy_monitor_loop() { + local pid_file line pending_event="" pending_ts="" refresh_requested="false" next_line + local marker_state previous_marker_state event_pid="" + + require_root || return 1 + load_config + init_runtime_paths + + pid_file="$(policy_pid_file)" + printf '%s\n' "$$" >"${pid_file}" + previous_marker_state="$(if policy_disable_marker_present; then printf 'present'; else printf 'absent'; fi)" + coproc POLICY_EVENTS { policy_event_source; } + event_pid="${POLICY_EVENTS_PID:-}" + trap '[[ -n "'"${event_pid}"'" ]] && kill "'"${event_pid}"'" >/dev/null 2>&1 || true; policy_monitor_cleanup; exit 0' EXIT INT TERM + + policy_apply_cycle "startup" "$(timestamp_utc)" "false" + + while true; do + marker_state="$(if policy_disable_marker_present; then printf 'present'; else printf 'absent'; fi)" + if [[ "${marker_state}" != "${previous_marker_state}" ]]; then + previous_marker_state="${marker_state}" + policy_apply_cycle "disable-marker" "$(timestamp_utc)" "false" + continue + fi + + if IFS= read -r -t 1 line <&"${POLICY_EVENTS[0]}"; then + pending_event="${line}" + pending_ts="$(timestamp_utc)" + case "${line}" in + *inet*|*address*|*Deleted*) refresh_requested="true" ;; + esac + + while IFS= read -r -t "${BOX_POLICY_DEBOUNCE_SECONDS}" next_line <&"${POLICY_EVENTS[0]}"; do + pending_event="${next_line}" + pending_ts="$(timestamp_utc)" + case "${next_line}" in + *inet*|*address*|*Deleted*) refresh_requested="true" ;; + esac + marker_state="$(if policy_disable_marker_present; 
then printf 'present'; else printf 'absent'; fi)" + if [[ "${marker_state}" != "${previous_marker_state}" ]]; then + previous_marker_state="${marker_state}" + pending_event="disable-marker" + pending_ts="$(timestamp_utc)" + refresh_requested="false" + break + fi + done + + policy_apply_cycle "${pending_event:-event}" "${pending_ts}" "${refresh_requested}" + refresh_requested="false" + continue + fi + done +} + +policy_enable() { + require_root || return 1 + load_config + init_runtime_paths + + local pid_file pid attempt + pid_file="$(policy_pid_file)" + pid="$(read_pid_file "${pid_file}" || true)" + if is_pid_alive "${pid}"; then + log "INFO" "policy" "POLICY_ALREADY_RUNNING" "policy watcher already running pid=${pid}" + return 0 + fi + + rm -f "${pid_file}" + nohup "${BOXCTL_SELF_PATH}" policy monitor >>"${BOX_LOG_DIR}/policy.log" 2>&1 & + disown || true + + for attempt in $(seq 1 15); do + sleep 0.2 + pid="$(read_pid_file "${pid_file}" || true)" + if is_pid_alive "${pid}"; then + log "INFO" "policy" "POLICY_STARTED" "policy watcher started pid=${pid}" + return 0 + fi + done + + if ! 
is_pid_alive "${pid:-}"; then
    log "ERROR" "policy" "E_POLICY" "failed to start policy watcher"
    return "${E_POLICY}"
  fi
}

# Stop the background policy watcher.
# Sends SIGTERM first, escalates to SIGKILL after a 1s grace period, then
# removes the pid file and the persisted policy state. Requires root.
# Always logs POLICY_STOPPED, even when no watcher was running (idempotent).
policy_disable() {
  require_root || return 1
  load_config
  init_runtime_paths

  local pid_file pid
  pid_file="$(policy_pid_file)"
  pid="$(read_pid_file "${pid_file}" || true)"
  if is_pid_alive "${pid}"; then
    kill -TERM "${pid}" >/dev/null 2>&1 || true
    sleep 1
    # Escalate only if the watcher ignored SIGTERM.
    if is_pid_alive "${pid}"; then
      kill -KILL "${pid}" >/dev/null 2>&1 || true
    fi
  fi
  rm -f "${pid_file}"
  policy_clear_state
  log "INFO" "policy" "POLICY_STOPPED" "policy watcher stopped"
}

# Run a single synchronous policy evaluation, outside the watcher loop.
# Requires root; delegates to policy_apply_cycle with a "manual-evaluate"
# event label and no forced firewall refresh.
policy_evaluate() {
  require_root || return 1
  load_config
  init_runtime_paths
  policy_apply_cycle "manual-evaluate" "$(timestamp_utc)" "false"
}

# Print the policy status as key=value lines on stdout.
# Values come from the persisted snapshot (policy_snapshot_get, with
# per-key defaults) plus a live check of the watcher pid; pid is reported
# as 0 when the watcher is not running.
policy_status_text() {
  load_config
  local pid_file pid watcher_running
  policy_snapshot_load

  pid_file="$(policy_pid_file)"
  pid="$(read_pid_file "${pid_file}" || true)"
  watcher_running="false"
  if is_pid_alive "${pid}"; then
    watcher_running="true"
  else
    pid="0"
  fi

  printf 'status=%s\n' "$(policy_snapshot_get "status" "inactive")"
  printf 'policy_enabled=%s\n' "${BOX_POLICY_ENABLED}"
  printf 'watcher_running=%s\n' "${watcher_running}"
  printf 'pid=%s\n' "${pid}"
  printf 'desired_state=%s\n' "$(policy_snapshot_get "desired_state" "disabled")"
  printf 'applied_state=%s\n' "$(policy_snapshot_get "applied_state" "unchanged")"
  printf 'proxy_mode=%s\n' "${BOX_POLICY_PROXY_MODE}"
  printf 'debounce_seconds=%s\n' "${BOX_POLICY_DEBOUNCE_SECONDS}"
  printf 'active_ifaces=%s\n' "$(policy_snapshot_get "active_ifaces" "")"
  printf 'wifi_connected=%s\n' "$(policy_snapshot_get "wifi_connected" "false")"
  printf 'ssid=%s\n' "$(policy_snapshot_get "ssid" "")"
  printf 'bssid=%s\n' "$(policy_snapshot_get "bssid" "")"
  printf 'disable_marker_present=%s\n' "$(policy_snapshot_get "disable_marker_present" "false")"
  printf 'last_reason=%s\n' "$(policy_snapshot_get "last_reason" "")"
  printf 'last_error=%s\n' \
"$(policy_snapshot_get "last_error" "")" + printf 'last_event=%s\n' "$(policy_snapshot_get "last_event" "")" + printf 'last_event_ts=%s\n' "$(policy_snapshot_get "last_event_ts" "")" + printf 'last_refresh_ts=%s\n' "$(policy_snapshot_get "last_refresh_ts" "")" +} + +policy_status_json() { + load_config + local pid_file pid watcher_running + policy_snapshot_load + + pid_file="$(policy_pid_file)" + pid="$(read_pid_file "${pid_file}" || true)" + watcher_running="false" + if is_pid_alive "${pid}"; then + watcher_running="true" + else + pid="0" + fi + + printf '{%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s}\n' \ + "$(json_pair "status" "$(policy_snapshot_get "status" "inactive")")" \ + "$(json_bool_pair "policy_enabled" "${BOX_POLICY_ENABLED}")" \ + "$(json_bool_pair "watcher_running" "${watcher_running}")" \ + "$(json_num_pair "pid" "${pid}")" \ + "$(json_pair "desired_state" "$(policy_snapshot_get "desired_state" "disabled")")" \ + "$(json_pair "applied_state" "$(policy_snapshot_get "applied_state" "unchanged")")" \ + "$(json_pair "proxy_mode" "${BOX_POLICY_PROXY_MODE}")" \ + "$(json_num_pair "debounce_seconds" "${BOX_POLICY_DEBOUNCE_SECONDS}")" \ + "\"active_ifaces\":$(policy_active_ifaces_json)" \ + "$(json_bool_pair "wifi_connected" "$(policy_snapshot_get "wifi_connected" "false")")" \ + "$(json_pair "ssid" "$(policy_snapshot_get "ssid" "")")" \ + "$(json_pair "bssid" "$(policy_snapshot_get "bssid" "")")" \ + "$(json_bool_pair "disable_marker_present" "$(policy_snapshot_get "disable_marker_present" "false")")" \ + "$(json_pair "last_reason" "$(policy_snapshot_get "last_reason" "")")" \ + "$(json_pair "last_error" "$(policy_snapshot_get "last_error" "")")" \ + "$(json_pair "last_event" "$(policy_snapshot_get "last_event" "")")" \ + "$(json_pair "last_event_ts" "$(policy_snapshot_get "last_event_ts" "")")" \ + "$(json_pair "last_refresh_ts" "$(policy_snapshot_get "last_refresh_ts" "")")" +} + +policy_status() { + if [[ "${BOX_OUTPUT_FORMAT}" == "json" ]]; 
then
    policy_status_json
  else
    policy_status_text
  fi
}

# Dispatch `boxctl policy <action>`.
# Returns 2 on an unknown/missing action; otherwise propagates the exit
# status of the selected handler.
policy_cmd() {
  local action="${1:-}"
  case "${action}" in
    evaluate) policy_evaluate ;;
    enable) policy_enable ;;
    disable) policy_disable ;;
    status) policy_status ;;
    monitor) policy_monitor_loop ;;
    *)
      # Fix: the usage text had lost its action placeholder (the angle-
      # bracketed list was stripped), so a bare `boxctl policy` gave no
      # hint about valid subcommands. Restore the explicit action list.
      printf 'usage: boxctl policy <evaluate|enable|disable|status|monitor> [--json]\n' >&2
      return 2
      ;;
  esac
}
diff --git a/lib/supervisor/adapter_mihomo.sh b/lib/supervisor/adapter_mihomo.sh
new file mode 100755
index 0000000..cf941e5
--- /dev/null
+++ b/lib/supervisor/adapter_mihomo.sh
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

# shellcheck shell=bash

set -euo pipefail

# Locate the mihomo binary: prefer one on PATH, fall back to the managed
# core bin directory. Prints the resolved path; returns 1 if none found.
adapter_mihomo_resolve_bin() {
  if command -v mihomo >/dev/null 2>&1; then
    command -v mihomo
    return 0
  fi

  if [[ -x "${BOX_CORE_BIN_DIR}/mihomo" ]]; then
    printf '%s\n' "${BOX_CORE_BIN_DIR}/mihomo"
    return 0
  fi

  return 1
}

# Validate a rendered config with `mihomo -t` without starting the core.
# Propagates mihomo's exit status; stdout is suppressed.
adapter_mihomo_check_config() {
  local bin="${1:?missing mihomo binary path}"
  local rendered_config="${2:?missing rendered config path}"
  local workdir="${3:?missing workdir}"
  "${bin}" -t -d "${workdir}" -f "${rendered_config}" >/dev/null
}

# Start mihomo in the background, appending all output to the service log.
# Prints the new background PID on stdout for the supervisor to record.
adapter_mihomo_start() {
  local bin="${1:?missing mihomo binary path}"
  local rendered_config="${2:?missing rendered config path}"
  local workdir="${3:?missing workdir}"
  local service_log="${4:?missing service log path}"

  "${bin}" -d "${workdir}" -f "${rendered_config}" >>"${service_log}" 2>&1 &
  printf '%s\n' "$!"
}

# Reload is not supported by the mihomo adapter; callers fall back to a
# full restart (see service_reload_locked). Logs a warning via `log` when
# that helper is loaded, otherwise to stderr. Always returns 1.
adapter_mihomo_reload() {
  local msg="mihomo reload is not implemented yet; restart is required"
  if declare -F log >/dev/null 2>&1; then
    log "WARN" "service" "MIHOMO_RELOAD_UNIMPLEMENTED" "${msg}"
  else
    printf 'WARN: %s\n' "${msg}" >&2
  fi
  return 1
}
diff --git a/lib/supervisor/adapter_sing_box.sh b/lib/supervisor/adapter_sing_box.sh
new file mode 100755
index 0000000..aca7414
--- /dev/null
+++ b/lib/supervisor/adapter_sing_box.sh
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

# shellcheck shell=bash

set -euo pipefail

# Locate the sing-box binary: prefer one on PATH, fall back to the managed
# core bin directory. Prints the resolved path; returns 1 if none found.
adapter_sing_box_resolve_bin() {
  if command -v sing-box >/dev/null 2>&1; then
    command -v sing-box
    return 0
  fi

  if [[ -x "${BOX_CORE_BIN_DIR}/sing-box" ]]; then
    printf '%s\n' "${BOX_CORE_BIN_DIR}/sing-box"
    return 0
  fi

  return 1
}

# Validate a rendered config with `sing-box check` without starting it.
# Propagates sing-box's exit status; stdout is suppressed.
adapter_sing_box_check_config() {
  local bin="${1:?missing sing-box binary path}"
  local rendered_config="${2:?missing rendered config path}"
  local workdir="${3:?missing workdir}"
  "${bin}" check -c "${rendered_config}" -D "${workdir}" >/dev/null
}

# Start sing-box in the background, appending all output to the service log.
# Prints the new background PID on stdout for the supervisor to record.
adapter_sing_box_start() {
  local bin="${1:?missing sing-box binary path}"
  local rendered_config="${2:?missing rendered config path}"
  local workdir="${3:?missing workdir}"
  local service_log="${4:?missing service log path}"

  "${bin}" run -c "${rendered_config}" -D "${workdir}" >>"${service_log}" 2>&1 &
  printf '%s\n' "$!"
+} + +adapter_sing_box_reload() { + local msg="sing-box reload is not implemented yet; restart is required" + if declare -F log >/dev/null 2>&1; then + log "WARN" "service" "SING_BOX_RELOAD_UNIMPLEMENTED" "${msg}" + else + printf 'WARN: %s\n' "${msg}" >&2 + fi + return 1 +} diff --git a/lib/supervisor/mutator_mihomo.sh b/lib/supervisor/mutator_mihomo.sh new file mode 100644 index 0000000..a756789 --- /dev/null +++ b/lib/supervisor/mutator_mihomo.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +yaml_set_scalar() { + local file="${1:?missing file}" + local key="${2:?missing key}" + local value="${3:?missing value}" + + if grep -Eq "^[[:space:]]*${key}:" "${file}"; then + sed -i -E "s|^[[:space:]]*${key}:.*$|${key}: ${value}|" "${file}" + else + printf '%s: %s\n' "${key}" "${value}" >>"${file}" + fi +} + +mutator_mihomo_render_overlay() { + local source_file="${1:?missing source file}" + local rendered_file="${2:?missing rendered file}" + + if [[ -f "${source_file}" ]]; then + cp -f "${source_file}" "${rendered_file}" + else + cat >"${rendered_file}" <>"${rendered_file}" +} diff --git a/lib/supervisor/mutator_sing_box.sh b/lib/supervisor/mutator_sing_box.sh new file mode 100644 index 0000000..46375e7 --- /dev/null +++ b/lib/supervisor/mutator_sing_box.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +mutator_sing_box_write_overlay_env() { + local rendered_file="${1:?missing rendered file}" + cat >"${rendered_file}.overlay.env" <"${rendered_file}" + mutator_sing_box_write_overlay_env "${rendered_file}" + return 0 + fi + + if [[ -f "${source_file}" ]]; then + cp -f "${source_file}" "${rendered_file}" + else + cat >"${rendered_file}" <"${state_file}" <"${pid_file}" + write_runtime_snapshot "starting" "${new_pid}" "${rendered_path}" + + if ! 
firewall_enable; then + log "ERROR" "service" "E_FIREWALL_APPLY" "firewall enable failed; stopping core" + kill -TERM "${new_pid}" >/dev/null 2>&1 || true + rm -f "${pid_file}" + write_runtime_snapshot "failed" "0" "${rendered_path}" + return "${E_FIREWALL_APPLY}" + fi + + write_runtime_snapshot "healthy" "${new_pid}" "${rendered_path}" + log "INFO" "service" "SERVICE_STARTED" "service started core=${BOX_CORE} pid=${new_pid} overlay=${rendered_path}" +} + +service_stop_locked() { + require_root || return 1 + load_config + init_runtime_paths + + local pid_file pid rendered_path + pid_file="$(service_pid_file)" + pid="$(read_pid_file "${pid_file}" || true)" + rendered_path="$(rendered_config_path)" + + firewall_disable || true + + if is_pid_alive "${pid}"; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + sleep 1 + if is_pid_alive "${pid}"; then + kill -KILL "${pid}" >/dev/null 2>&1 || true + fi + fi + + rm -f "${pid_file}" + write_runtime_snapshot "stopped" "0" "${rendered_path}" + log "INFO" "service" "SERVICE_STOPPED" "service stopped" +} + +service_start() { + with_lock "service" 30 service_start_locked +} + +service_stop() { + with_lock "service" 30 service_stop_locked +} + +service_restart_locked() { + service_stop_locked + service_start_locked +} + +service_restart() { + with_lock "service" 30 service_restart_locked +} + +service_reload_locked() { + local rc=0 + case "${BOX_CORE}" in + mihomo) + adapter_mihomo_reload >/dev/null 2>&1 || rc=$? + ;; + sing-box) + adapter_sing_box_reload >/dev/null 2>&1 || rc=$? 
+ ;; + *) + rc="${E_CORE_START}" + ;; + esac + + if [[ "${rc}" -eq 0 ]]; then + log "INFO" "service" "SERVICE_RELOADED" "service reloaded core=${BOX_CORE}" + return 0 + fi + + log "WARN" "service" "SERVICE_RELOAD_FALLBACK" "reload unsupported or failed for core=${BOX_CORE}; restarting" + service_restart_locked +} + +service_reload() { + with_lock "service" 30 service_reload_locked +} + +service_print_status_text() { + local status="${1:?missing status}" + local pid="${2:-0}" + local rendered_path="${3:-}" + printf 'status=%s\n' "${status}" + printf 'core=%s\n' "${BOX_CORE}" + printf 'pid=%s\n' "${pid:-0}" + printf 'mode=%s\n' "${BOX_NETWORK_MODE}" + printf 'dns_hijack_mode=%s\n' "${BOX_DNS_HIJACK_MODE}" + printf 'rendered_config=%s\n' "${rendered_path}" + printf 'config=%s\n' "${BOX_CONFIG_FILE:-none}" +} + +service_print_status_json() { + local status="${1:?missing status}" + local pid="${2:-0}" + local rendered_path="${3:-}" + printf '{%s,%s,%s,%s,%s,%s,%s}\n' \ + "$(json_pair "status" "${status}")" \ + "$(json_pair "core" "${BOX_CORE}")" \ + "$(json_num_pair "pid" "${pid:-0}")" \ + "$(json_pair "mode" "${BOX_NETWORK_MODE}")" \ + "$(json_pair "dns_hijack_mode" "${BOX_DNS_HIJACK_MODE}")" \ + "$(json_pair "rendered_config" "${rendered_path}")" \ + "$(json_pair "config" "${BOX_CONFIG_FILE:-none}")" +} + +service_status() { + local previous_log_to_file="${BOX_LOG_TO_FILE:-1}" + BOX_LOG_TO_FILE=0 + if ! 
load_config; then
    BOX_LOG_TO_FILE="${previous_log_to_file}"
    return "${E_CONFIG}"
  fi
  BOX_LOG_TO_FILE="${previous_log_to_file}"

  local pid_file pid status rendered_path
  pid_file="$(service_pid_file_readonly)"
  pid="$(read_pid_file "${pid_file}" || true)"
  rendered_path="$(rendered_config_expected_path)"

  # Status is derived purely from pid liveness; pid reported as 0 when down.
  if is_pid_alive "${pid}"; then
    status="healthy"
  else
    status="stopped"
    pid="0"
  fi

  if [[ "${BOX_OUTPUT_FORMAT}" == "json" ]]; then
    service_print_status_json "${status}" "${pid}" "${rendered_path}"
  else
    service_print_status_text "${status}" "${pid}" "${rendered_path}"
  fi
}

# Dispatch `boxctl service <action>`.
# Returns 2 on an unknown/missing action; otherwise propagates the exit
# status of the selected handler.
supervisor_cmd() {
  local action="${1:-}"
  case "${action}" in
    start) service_start ;;
    stop) service_stop ;;
    restart) service_restart ;;
    reload) service_reload ;;
    status) service_status ;;
    *)
      # Fix: the usage text had lost its action placeholder (the angle-
      # bracketed list was stripped), so a bare `boxctl service` gave no
      # hint about valid subcommands. Restore the explicit action list.
      printf 'usage: boxctl service <start|stop|restart|reload|status> [--json]\n' >&2
      return 2
      ;;
  esac
}
diff --git a/lib/updater/fetcher.sh b/lib/updater/fetcher.sh
new file mode 100644
index 0000000..7cba712
--- /dev/null
+++ b/lib/updater/fetcher.sh
@@ -0,0 +1,55 @@
#!/usr/bin/env bash

# shellcheck shell=bash

set -euo pipefail

# Print the curl command to use: BOX_CURL_CMD override wins, otherwise
# "curl" when present on PATH. Prints nothing (non-zero status) when
# neither is available; callers guard with `|| true` and re-check.
updater_curl_cmd() {
  if [[ -n "${BOX_CURL_CMD:-}" ]]; then
    printf '%s\n' "${BOX_CURL_CMD}"
    return 0
  fi
  command -v curl >/dev/null 2>&1 && printf '%s\n' "curl"
}

# Fetch a component source into `output`.
#   kind=file: copy a local file or directory (cp -a preserves dir trees).
#   kind=url:  download via curl; file:// URLs are rewritten to kind=file.
# Logs E_UPDATE_FETCH and returns E_UPDATE on any failure.
updater_fetch_ref() {
  local component="${1:?missing component}"
  local ref="${2:?missing ref}"
  local kind="${3:?missing kind}"
  local output="${4:?missing output path}"
  local curl_bin

  mkdir -p "$(dirname "${output}")"

  case "${kind}" in
    file)
      if [[ -d "${ref}" ]]; then
        cp -a "${ref}" "${output}"
      elif [[ -f "${ref}" ]]; then
        cp -f "${ref}" "${output}"
      else
        log "ERROR" "updater" "E_UPDATE_FETCH" "source file does not exist for component=${component}: ${ref}"
        return "${E_UPDATE}"
      fi
      ;;
    url)
      # Normalize file:// URLs to a plain local-file fetch.
      if [[ "${ref}" == file://* ]]; then
        updater_fetch_ref "${component}" "${ref#file://}" "file" "${output}"
        return 0
      fi
      curl_bin="$(updater_curl_cmd ||
true)" + if [[ -z "${curl_bin}" ]]; then + log "ERROR" "updater" "E_UPDATE_FETCH" "curl is required for url fetch: ${ref}" + return "${E_UPDATE}" + fi + if ! "${curl_bin}" -fsSL "${ref}" -o "${output}"; then + log "ERROR" "updater" "E_UPDATE_FETCH" "download failed for component=${component}: ${ref}" + return "${E_UPDATE}" + fi + ;; + *) + log "ERROR" "updater" "E_UPDATE_FETCH" "unsupported source kind=${kind} for component=${component}" + return "${E_UPDATE}" + ;; + esac +} diff --git a/lib/updater/installer.sh b/lib/updater/installer.sh new file mode 100644 index 0000000..c0b02e8 --- /dev/null +++ b/lib/updater/installer.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +UP_INSTALL_BACKUP="" +UP_INSTALL_CHANGED="false" +UP_INSTALL_CHECKSUM="" + +updater_install_reset() { + UP_INSTALL_BACKUP="" + UP_INSTALL_CHANGED="false" + UP_INSTALL_CHECKSUM="" +} + +updater_restore_backup() { + local target="${1:?missing target}" + if [[ -n "${UP_INSTALL_BACKUP}" && -e "${UP_INSTALL_BACKUP}" ]]; then + mkdir -p "$(dirname "${target}")" + rm -rf "${target}" + mv -f "${UP_INSTALL_BACKUP}" "${target}" + UP_INSTALL_BACKUP="" + fi +} + +updater_backup_target() { + local target="${1:?missing target}" + if [[ -e "${target}" ]]; then + UP_INSTALL_BACKUP="${target}.bak.$$" + rm -rf "${UP_INSTALL_BACKUP}" + mv -f "${target}" "${UP_INSTALL_BACKUP}" + fi +} + +updater_install_file() { + local component="${1:?missing component}" + local staged_file="${2:?missing staged file}" + local target="${3:?missing target}" + local make_executable="${4:-false}" + local target_checksum staged_checksum temp_target + + mkdir -p "$(dirname "${target}")" + staged_checksum="$(updater_compute_sha256 "${staged_file}" || true)" + target_checksum="$(updater_compute_sha256 "${target}" || true)" + if [[ -n "${staged_checksum}" && "${staged_checksum}" == "${target_checksum}" ]]; then + UP_INSTALL_CHANGED="false" + UP_INSTALL_CHECKSUM="${staged_checksum}" + return 0 + fi + 

  # Stage the new payload beside the target, then swap it in; a backup of
  # the old target is kept so a failed rename can be rolled back.
  temp_target="${target}.new.$$"
  rm -f "${temp_target}"
  if ! cp -f "${staged_file}" "${temp_target}"; then
    rm -f "${temp_target}"
    log "ERROR" "updater" "E_UPDATE_INSTALL" "failed to stage file install for component=${component} target=${target}"
    return "${E_UPDATE}"
  fi

  if [[ "${make_executable}" == "true" ]]; then
    chmod 0755 "${temp_target}"
  fi

  updater_backup_target "${target}"
  if ! mv -f "${temp_target}" "${target}"; then
    rm -f "${temp_target}"
    updater_restore_backup "${target}"
    log "ERROR" "updater" "E_UPDATE_INSTALL" "failed to finalize file install for component=${component} target=${target}"
    return "${E_UPDATE}"
  fi
  UP_INSTALL_CHANGED="true"
  UP_INSTALL_CHECKSUM="${staged_checksum}"
  log "INFO" "updater" "UPDATE_INSTALLED" "installed component=${component} target=${target}"
}

# Extract an archive into `destination`, choosing the tool by extension of
# `format_hint` (defaults to the archive path itself). Supports zip (needs
# unzip on PATH), tar, tar.gz/tgz and tar.xz. Logs E_UPDATE_ARCHIVE and
# returns E_UPDATE for unsupported formats or a missing unzip.
updater_extract_archive() {
  local archive="${1:?missing archive}"
  local destination="${2:?missing destination}"
  local format_hint="${3:-${archive}}"

  mkdir -p "${destination}"
  case "${format_hint}" in
    *.zip)
      if ! command -v unzip >/dev/null 2>&1; then
        log "ERROR" "updater" "E_UPDATE_ARCHIVE" "unzip is required for archive format: ${format_hint}"
        return "${E_UPDATE}"
      fi
      unzip -oq "${archive}" -d "${destination}" >/dev/null
      ;;
    *.tar.gz|*.tgz) tar -xzf "${archive}" -C "${destination}" ;;
    *.tar) tar -xf "${archive}" -C "${destination}" ;;
    *.tar.xz) tar -xJf "${archive}" -C "${destination}" ;;
    *)
      log "ERROR" "updater" "E_UPDATE_ARCHIVE" "unsupported archive format: ${format_hint}"
      return "${E_UPDATE}"
      ;;
  esac
}

# Decompress a single-file gzip payload to `destination` (not a tar — see
# updater_extract_archive for tarballs). Requires gzip on PATH; cleans up
# the partial output and returns E_UPDATE on failure.
updater_extract_gzip() {
  local archive="${1:?missing archive}"
  local destination="${2:?missing destination}"

  mkdir -p "$(dirname "${destination}")"
  if ! command -v gzip >/dev/null 2>&1; then
    log "ERROR" "updater" "E_UPDATE_ARCHIVE" "gzip is required for gzip payload extraction"
    return "${E_UPDATE}"
  fi
  if ! \
gzip -dc "${archive}" >"${destination}"; then + rm -f "${destination}" + log "ERROR" "updater" "E_UPDATE_ARCHIVE" "failed to extract gzip payload: ${archive}" + return "${E_UPDATE}" + fi +} + +updater_install_directory() { + local component="${1:?missing component}" + local staged_dir="${2:?missing staged dir}" + local target="${3:?missing target}" + local source_checksum="${4:-}" + local state_checksum="${5:-}" + local temp_target + + mkdir -p "$(dirname "${target}")" + if [[ -n "${source_checksum}" && -n "${state_checksum}" && "${source_checksum}" == "${state_checksum}" && -d "${target}" ]]; then + UP_INSTALL_CHANGED="false" + UP_INSTALL_CHECKSUM="${source_checksum}" + return 0 + fi + + temp_target="${target}.new.$$" + rm -rf "${temp_target}" + mkdir -p "${temp_target}" + if ! cp -a "${staged_dir}/." "${temp_target}/"; then + rm -rf "${temp_target}" + log "ERROR" "updater" "E_UPDATE_INSTALL" "failed to stage directory install for component=${component} target=${target}" + return "${E_UPDATE}" + fi + + updater_backup_target "${target}" + rm -rf "${target}" + if ! 
mv -f "${temp_target}" "${target}"; then + rm -rf "${temp_target}" + updater_restore_backup "${target}" + log "ERROR" "updater" "E_UPDATE_INSTALL" "failed to finalize directory install for component=${component} target=${target}" + return "${E_UPDATE}" + fi + UP_INSTALL_CHANGED="true" + UP_INSTALL_CHECKSUM="${source_checksum}" + log "INFO" "updater" "UPDATE_INSTALLED" "installed component=${component} target=${target}" +} + +updater_install_nochange() { + local checksum="${1:-}" + UP_INSTALL_CHANGED="false" + UP_INSTALL_CHECKSUM="${checksum}" +} diff --git a/lib/updater/resolver.sh b/lib/updater/resolver.sh new file mode 100644 index 0000000..c04a5c3 --- /dev/null +++ b/lib/updater/resolver.sh @@ -0,0 +1,501 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +UP_COMPONENT="" +UP_SOURCE_REF="" +UP_SOURCE_NAME="" +UP_SOURCE_KIND="" +UP_CHECKSUM_REF="" +UP_CHECKSUM_NAME="" +UP_CHECKSUM_KIND="" +UP_TARGET_PATH="" +UP_INSTALL_KIND="" +UP_INTERVAL="" +UP_REQUIRES_HANDOFF="false" +UP_ARCHIVE_MEMBER_REGEX="" + +updater_component_configured() { + local component="${1:?missing component}" + local source_mode source_ref="" release_repo="" release_api_url="" dashboard_target="" + + case "${component}" in + kernel) + source_mode="${BOX_UPDATER_KERNEL_SOURCE}" + source_ref="${BOX_UPDATER_KERNEL_URL:-${BOX_UPDATER_KERNEL_FILE:-}}" + release_repo="${BOX_UPDATER_KERNEL_RELEASE_REPO}" + release_api_url="${BOX_UPDATER_KERNEL_RELEASE_API_URL}" + ;; + subs) + source_mode="auto" + source_ref="${BOX_UPDATER_SUBS_URL:-${BOX_UPDATER_SUBS_FILE:-}}" + ;; + geo) + source_mode="${BOX_UPDATER_GEO_SOURCE}" + source_ref="${BOX_UPDATER_GEO_URL:-${BOX_UPDATER_GEO_FILE:-}}" + release_repo="${BOX_UPDATER_GEO_RELEASE_REPO}" + release_api_url="${BOX_UPDATER_GEO_RELEASE_API_URL}" + ;; + dashboard) + source_mode="auto" + source_ref="${BOX_UPDATER_DASHBOARD_URL:-${BOX_UPDATER_DASHBOARD_FILE:-}}" + 
dashboard_target="${BOX_UPDATER_DASHBOARD_TARGET:-$(updater_dashboard_resolve_from_core_config target || true)}"
      ;;
    *)
      return 1
      ;;
  esac

  # A component counts as "configured" when its source mode has enough
  # inputs: release mode needs a repo or explicit API URL; auto mode
  # accepts any of direct ref / repo / API URL / dashboard target; other
  # modes need a direct source ref. The [[ ]] status is the return value.
  case "${source_mode}" in
    release)
      [[ -n "${release_repo}" || -n "${release_api_url}" ]]
      ;;
    auto)
      [[ -n "${source_ref}" || -n "${release_repo}" || -n "${release_api_url}" || -n "${dashboard_target}" ]]
      ;;
    *)
      [[ -n "${source_ref}" ]]
      ;;
  esac
}

# Classify a source ref as "url" (http/https/file scheme) or "file"
# (anything else, i.e. a local path). Prints the kind on stdout.
updater_source_kind() {
  local ref="${1:-}"
  case "${ref}" in
    http://*|https://*|file://*) printf '%s\n' "url" ;;
    *) printf '%s\n' "file" ;;
  esac
}

# Map component + asset name to an install strategy:
#   archive-dir  - unpack archive into a directory (dashboard bundles)
#   directory    - copy a directory tree as-is
#   archive-file - unpack archive, install a single member
#   gzip-file    - gunzip a single-file payload (kernel *.gz)
#   file         - install the payload verbatim
updater_install_kind_for_name() {
  local component="${1:?missing component}"
  local name="${2:-}"
  case "${component}:${name}" in
    dashboard:*.zip|dashboard:*.tar|dashboard:*.tar.gz|dashboard:*.tgz|dashboard:*.tar.xz) printf '%s\n' "archive-dir" ;;
    dashboard:*) printf '%s\n' "directory" ;;
    kernel:*.tar|kernel:*.tar.gz|kernel:*.tgz|kernel:*.tar.xz) printf '%s\n' "archive-file" ;;
    kernel:*.gz) printf '%s\n' "gzip-file" ;;
    geo:*.tar|geo:*.tar.gz|geo:*.tgz|geo:*.tar.xz) printf '%s\n' "archive-file" ;;
    *) printf '%s\n' "file" ;;
  esac
}

# Print "jq" if jq is on PATH; non-zero status (no output) otherwise.
updater_jq_cmd() {
  command -v jq >/dev/null 2>&1 && printf '%s\n' "jq"
}

# Resolve the release OS token: an explicit override wins, otherwise
# lowercase `uname -s` mapped to the GitHub-release naming convention.
updater_release_os() {
  local override="${1:-}"
  local uname_s
  if [[ -n "${override}" ]]; then
    printf '%s\n' "${override}"
    return 0
  fi
  uname_s="$(uname -s | tr '[:upper:]' '[:lower:]')"
  case "${uname_s}" in
    linux) printf '%s\n' "linux" ;;
    darwin) printf '%s\n' "darwin" ;;
    *) printf '%s\n' "${uname_s}" ;;
  esac
}

# Resolve the release arch token: an explicit override wins, otherwise
# `uname -m` normalized to Go-style arch names (amd64, arm64, 386, ...).
updater_release_arch() {
  local override="${1:-}"
  local uname_m
  if [[ -n "${override}" ]]; then
    printf '%s\n' "${override}"
    return 0
  fi
  uname_m="$(uname -m)"
  case "${uname_m}" in
    x86_64|amd64) printf '%s\n' "amd64" ;;
    aarch64|arm64) printf '%s\n' "arm64" ;;
    i386|i686) printf '%s\n' "386" ;;
    armv7l|armv7|armhf) printf '%s\n' "armv7" ;;
    armv6l|armv6) printf '%s\n' "armv6" ;;
    *) printf '%s\n' "${uname_m}" ;;
  esac
}
# Expand a normalized arch token into a regex alternation covering the
# spellings seen in release asset names (used by the jq asset matcher).
updater_release_arch_regex() {
  local arch="${1:-}"
  case "${arch}" in
    amd64) printf '%s\n' '(amd64|x86_64)' ;;
    arm64) printf '%s\n' '(arm64|aarch64)' ;;
    386) printf '%s\n' '(386|i386|i686|x86)' ;;
    armv7) printf '%s\n' '(armv7|armv7l|armhf)' ;;
    armv6) printf '%s\n' '(armv6|armv6l)' ;;
    *) printf '%s\n' "${arch}" ;;
  esac
}

# Default GitHub repo per component and configured core; empty string when
# no sensible default exists.
updater_release_default_repo() {
  local component="${1:?missing component}"
  case "${component}:${BOX_CORE}" in
    kernel:mihomo) printf '%s\n' "MetaCubeX/mihomo" ;;
    kernel:sing-box) printf '%s\n' "SagerNet/sing-box" ;;
    *) printf '%s\n' "" ;;
  esac
}

# Default regex for picking a release asset: kernel assets must mention
# the core name, OS and arch (in that order); geo assets match the target
# basename. Empty for other components.
updater_release_default_asset_regex() {
  local component="${1:?missing component}"
  local os_name="${2:-}"
  local arch_name="${3:-}"
  local arch_regex
  arch_regex="$(updater_release_arch_regex "${arch_name}")"
  case "${component}" in
    kernel)
      printf '%s\n' "${BOX_CORE}.*${os_name}.*${arch_regex}"
      ;;
    geo)
      printf '%s\n' "$(basename "${BOX_UPDATER_GEO_TARGET:-geo.dat}")"
      ;;
    *)
      printf '%s\n' ''
      ;;
  esac
}

# Default regex for locating a checksum asset among release files.
updater_release_default_checksum_regex() {
  printf '%s\n' '(sha256|checksums?)'
}

# Default regex for selecting the payload member inside a release archive:
# the bare core binary for kernel, any .dat/.db/.mmdb file for geo.
updater_release_default_archive_member_regex() {
  local component="${1:?missing component}"
  case "${component}" in
    kernel) printf '%s\n' "(^|/)${BOX_CORE}$" ;;
    geo) printf '%s\n' "(^|/)[^/]+\\.(dat|db|mmdb)$" ;;
    *) printf '%s\n' '' ;;
  esac
}

# Fallback dashboard bundle when no URL is configured or discoverable.
updater_dashboard_default_url() {
  printf '%s\n' "https://github.com/Zephyruso/zashboard/releases/latest/download/dist.zip"
}

# Make a dashboard target path absolute: absolute paths pass through,
# relative paths are anchored at base_dir. Returns 1 for an empty target.
updater_dashboard_normalize_target() {
  local raw_target="${1:-}"
  local base_dir="${2:-}"
  if [[ -z "${raw_target}" ]]; then
    return 1
  fi
  case "${raw_target}" in
    /*) printf '%s\n' "${raw_target}" ;;
    *) printf '%s\n' "${base_dir}/${raw_target}" ;;
  esac
}

# Default dashboard install dir: "dashboard" beside the core config file.
# Returns 1 when the core config file does not exist.
updater_dashboard_default_target() {
  local config_file="${BOX_CORE_CONFIG_SOURCE}"
  local config_dir

  [[ -f "${config_file}" ]] || return 1
  config_dir="$(cd "$(dirname "${config_file}")" && pwd)"
  printf \
'%s\n' "${config_dir}/dashboard" +} + +updater_dashboard_read_mihomo_value() { + local config_file="${1:?missing config file}" + local key_regex="${2:?missing key regex}" + awk -F: -v wanted="${key_regex}" ' + $0 ~ "^[[:space:]]*" wanted "[[:space:]]*:" { + value = substr($0, index($0, ":") + 1) + gsub(/^[[:space:]]+|[[:space:]]+$/, "", value) + gsub(/^"|"$/, "", value) + print value + exit + } + ' "${config_file}" +} + +updater_dashboard_read_sing_box_value() { + local config_file="${1:?missing config file}" + local jq_expr="${2:?missing jq expression}" + local jq_bin + jq_bin="$(updater_jq_cmd || true)" + [[ -n "${jq_bin}" ]] || return 1 + "${jq_bin}" -r "${jq_expr} // empty" "${config_file}" +} + +updater_dashboard_resolve_from_core_config() { + local field="${1:?missing field}" + local config_file="${BOX_CORE_CONFIG_SOURCE}" + local config_dir result="" + + [[ -f "${config_file}" ]] || return 1 + config_dir="$(cd "$(dirname "${config_file}")" && pwd)" + + case "${BOX_CORE}:${field}" in + mihomo:target) + result="$(updater_dashboard_read_mihomo_value "${config_file}" 'external-ui' || true)" + ;; + mihomo:url) + result="$(updater_dashboard_read_mihomo_value "${config_file}" 'external-ui-download-url|external_ui_download_url|external-ui-url|external_ui_url' || true)" + ;; + sing-box:target) + result="$(updater_dashboard_read_sing_box_value "${config_file}" '.experimental.clash_api.external_ui // (.. | objects | .external_ui? // empty)' || true)" + ;; + sing-box:url) + result="$(updater_dashboard_read_sing_box_value "${config_file}" '.experimental.clash_api.external_ui_download_url // (.. | objects | .external_ui_download_url? 
// empty)' || true)" + ;; + esac + + if [[ "${field}" == "target" ]]; then + if [[ -n "${result}" ]]; then + updater_dashboard_normalize_target "${result}" "${config_dir}" + return 0 + fi + updater_dashboard_default_target + return 0 + fi + + [[ -n "${result}" ]] || return 1 + printf '%s\n' "${result}" +} + +updater_release_metadata_ref() { + local repo="${1:-}" + local channel="${2:-stable}" + local tag="${3:-}" + local api_url="${4:-}" + + if [[ -n "${api_url}" ]]; then + printf '%s\n' "${api_url}" + return 0 + fi + + if [[ -z "${repo}" ]]; then + return 1 + fi + + if [[ -n "${tag}" ]]; then + printf 'https://api.github.com/repos/%s/releases/tags/%s\n' "${repo}" "${tag}" + return 0 + fi + + case "${channel}" in + stable) printf 'https://api.github.com/repos/%s/releases/latest\n' "${repo}" ;; + prerelease|any) printf 'https://api.github.com/repos/%s/releases\n' "${repo}" ;; + *) return 1 ;; + esac +} + +updater_release_pick_filter() { + local channel="${1:-stable}" + case "${channel}" in + stable) printf '%s\n' '.prerelease != true' ;; + prerelease) printf '%s\n' '.prerelease == true' ;; + any) printf '%s\n' 'true' ;; + *) return 1 ;; + esac +} + +updater_release_select_asset() { + local metadata_file="${1:?missing metadata file}" + local channel="${2:?missing channel}" + local tag="${3:-}" + local asset_regex="${4:?missing asset regex}" + local jq_bin filter tag_filter + + jq_bin="$(updater_jq_cmd || true)" + if [[ -z "${jq_bin}" ]]; then + log "ERROR" "updater" "E_UPDATE_RESOLVE" "jq is required for release metadata resolution" + return "${E_UPDATE}" + fi + + filter="$(updater_release_pick_filter "${channel}")" + tag_filter='true' + if [[ -n "${tag}" ]]; then + filter='true' + tag_filter=".tag_name == \"${tag}\"" + fi + + "${jq_bin}" -r \ + --arg regex "${asset_regex}" \ + " + def selected_release: + if type == \"array\" then + ([ .[] | select(${filter}) | select(${tag_filter}) ][0]) + else + . + end; + selected_release + | .assets[]? 
+ | select(.name | test(\$regex; \"i\")) + | [.name, .browser_download_url] + | @tsv + " "${metadata_file}" | head -n 1 +} + +updater_resolve_release() { + local component="${1:?missing component}" + local repo="${2:-}" + local channel="${3:-stable}" + local tag="${4:-}" + local api_url="${5:-}" + local asset_regex="${6:-}" + local checksum_asset_regex="${7:-}" + local metadata_ref metadata_kind metadata_file selected_asset selected_checksum + + metadata_ref="$(updater_release_metadata_ref "${repo}" "${channel}" "${tag}" "${api_url}" || true)" + if [[ -z "${metadata_ref}" ]]; then + log "ERROR" "updater" "E_UPDATE_RESOLVE" "unable to build release metadata source for component=${component}" + return "${E_UPDATE}" + fi + metadata_kind="$(updater_source_kind "${metadata_ref}")" + metadata_file="$(mktemp)" + if ! updater_fetch_ref "${component}-metadata" "${metadata_ref}" "${metadata_kind}" "${metadata_file}"; then + rm -f "${metadata_file}" + log "ERROR" "updater" "E_UPDATE_RESOLVE" "failed to fetch release metadata for component=${component}" + return "${E_UPDATE}" + fi + + selected_asset="$(updater_release_select_asset "${metadata_file}" "${channel}" "${tag}" "${asset_regex}" || true)" + if [[ -z "${selected_asset}" ]]; then + rm -f "${metadata_file}" + log "ERROR" "updater" "E_UPDATE_RESOLVE" "no matching release asset for component=${component} regex=${asset_regex}" + return "${E_UPDATE}" + fi + + UP_SOURCE_NAME="${selected_asset%%$'\t'*}" + UP_SOURCE_REF="${selected_asset#*$'\t'}" + UP_SOURCE_KIND="$(updater_source_kind "${UP_SOURCE_REF}")" + + if [[ -n "${checksum_asset_regex}" && -z "${UP_CHECKSUM_REF}" ]]; then + selected_checksum="$(updater_release_select_asset "${metadata_file}" "${channel}" "${tag}" "${checksum_asset_regex}" || true)" + if [[ -n "${selected_checksum}" ]]; then + UP_CHECKSUM_NAME="${selected_checksum%%$'\t'*}" + UP_CHECKSUM_REF="${selected_checksum#*$'\t'}" + UP_CHECKSUM_KIND="$(updater_source_kind "${UP_CHECKSUM_REF}")" + fi + fi + + rm -f 
"${metadata_file}" +} + +updater_resolve_component() { + local component="${1:?missing component}" + local source_mode="auto" source_ref="" source_name="" checksum_ref="" checksum_name="" + local release_repo="" release_channel="stable" release_tag="" release_api_url="" asset_regex="" checksum_asset_regex="" + local release_os="linux" release_arch="" archive_member_regex="" + + UP_COMPONENT="${component}" + UP_SOURCE_REF="" + UP_SOURCE_NAME="" + UP_SOURCE_KIND="" + UP_CHECKSUM_REF="" + UP_CHECKSUM_NAME="" + UP_CHECKSUM_KIND="" + UP_TARGET_PATH="" + UP_INSTALL_KIND="file" + UP_INTERVAL="" + UP_REQUIRES_HANDOFF="false" + UP_ARCHIVE_MEMBER_REGEX="" + + case "${component}" in + kernel) + source_mode="${BOX_UPDATER_KERNEL_SOURCE}" + source_ref="${BOX_UPDATER_KERNEL_URL:-${BOX_UPDATER_KERNEL_FILE:-}}" + checksum_ref="${BOX_UPDATER_KERNEL_CHECKSUM_FILE:-${BOX_UPDATER_KERNEL_CHECKSUM:-}}" + UP_TARGET_PATH="${BOX_UPDATER_KERNEL_TARGET:-${BOX_CORE_BIN_DIR}/${BOX_CORE}}" + UP_INTERVAL="${BOX_UPDATER_KERNEL_INTERVAL}" + UP_REQUIRES_HANDOFF="true" + release_repo="${BOX_UPDATER_KERNEL_RELEASE_REPO}" + release_channel="${BOX_UPDATER_KERNEL_RELEASE_CHANNEL}" + release_tag="${BOX_UPDATER_KERNEL_RELEASE_TAG}" + release_api_url="${BOX_UPDATER_KERNEL_RELEASE_API_URL}" + release_os="$(updater_release_os "${BOX_UPDATER_KERNEL_RELEASE_OS}")" + release_arch="$(updater_release_arch "${BOX_UPDATER_KERNEL_RELEASE_ARCH}")" + asset_regex="${BOX_UPDATER_KERNEL_ASSET_REGEX:-$(updater_release_default_asset_regex kernel "${release_os}" "${release_arch}")}" + checksum_asset_regex="${BOX_UPDATER_KERNEL_CHECKSUM_ASSET_REGEX:-$(updater_release_default_checksum_regex)}" + archive_member_regex="${BOX_UPDATER_KERNEL_ARCHIVE_MEMBER_REGEX:-$(updater_release_default_archive_member_regex kernel)}" + ;; + subs) + source_ref="${BOX_UPDATER_SUBS_URL:-${BOX_UPDATER_SUBS_FILE:-}}" + checksum_ref="${BOX_UPDATER_SUBS_CHECKSUM_FILE:-${BOX_UPDATER_SUBS_CHECKSUM:-}}" + 
UP_TARGET_PATH="${BOX_UPDATER_SUBS_TARGET:-${BOX_CORE_CONFIG_SOURCE}}" + UP_INTERVAL="${BOX_UPDATER_SUBS_INTERVAL}" + UP_REQUIRES_HANDOFF="true" + ;; + geo) + source_mode="${BOX_UPDATER_GEO_SOURCE}" + source_ref="${BOX_UPDATER_GEO_URL:-${BOX_UPDATER_GEO_FILE:-}}" + checksum_ref="${BOX_UPDATER_GEO_CHECKSUM_FILE:-${BOX_UPDATER_GEO_CHECKSUM:-}}" + UP_TARGET_PATH="${BOX_UPDATER_GEO_TARGET:-${BOX_UPDATER_ARTIFACT_DIR}/geo/geo.dat}" + UP_INTERVAL="${BOX_UPDATER_GEO_INTERVAL}" + UP_REQUIRES_HANDOFF="false" + release_repo="${BOX_UPDATER_GEO_RELEASE_REPO}" + release_channel="${BOX_UPDATER_GEO_RELEASE_CHANNEL}" + release_tag="${BOX_UPDATER_GEO_RELEASE_TAG}" + release_api_url="${BOX_UPDATER_GEO_RELEASE_API_URL}" + release_os="$(updater_release_os "${BOX_UPDATER_GEO_RELEASE_OS}")" + release_arch="$(updater_release_arch "${BOX_UPDATER_GEO_RELEASE_ARCH}")" + asset_regex="${BOX_UPDATER_GEO_ASSET_REGEX:-$(updater_release_default_asset_regex geo "${release_os}" "${release_arch}")}" + checksum_asset_regex="${BOX_UPDATER_GEO_CHECKSUM_ASSET_REGEX}" + archive_member_regex="${BOX_UPDATER_GEO_ARCHIVE_MEMBER_REGEX:-$(updater_release_default_archive_member_regex geo)}" + ;; + dashboard) + source_ref="${BOX_UPDATER_DASHBOARD_URL:-${BOX_UPDATER_DASHBOARD_FILE:-}}" + checksum_ref="${BOX_UPDATER_DASHBOARD_CHECKSUM_FILE:-${BOX_UPDATER_DASHBOARD_CHECKSUM:-}}" + UP_TARGET_PATH="${BOX_UPDATER_DASHBOARD_TARGET:-$(updater_dashboard_resolve_from_core_config target || printf '%s' "${BOX_UPDATER_ARTIFACT_DIR}/dashboard/current")}" + UP_INTERVAL="${BOX_UPDATER_DASHBOARD_INTERVAL}" + if [[ -z "${source_ref}" ]]; then + source_ref="$(updater_dashboard_resolve_from_core_config url || true)" + fi + if [[ -z "${source_ref}" ]]; then + source_ref="$(updater_dashboard_default_url)" + fi + ;; + *) + log "ERROR" "updater" "E_UPDATE_COMPONENT" "unsupported update component: ${component}" + return "${E_UPDATE}" + ;; + esac + + case "${source_mode}" in + auto) + if [[ -n "${source_ref}" ]]; then + : + elif [[ -n 
"${release_repo}" || -n "${release_api_url}" ]]; then + source_mode="release" + fi + ;; + esac + + if [[ "${source_mode}" == "release" ]]; then + if [[ -z "${release_repo}" ]]; then + release_repo="$(updater_release_default_repo "${component}")" + fi + UP_CHECKSUM_REF="${checksum_ref}" + if [[ -n "${UP_CHECKSUM_REF}" ]]; then + UP_CHECKSUM_KIND="$(updater_source_kind "${UP_CHECKSUM_REF}")" + checksum_name="$(basename "${UP_CHECKSUM_REF}")" + UP_CHECKSUM_NAME="${checksum_name}" + fi + if ! updater_resolve_release "${component}" "${release_repo}" "${release_channel}" "${release_tag}" "${release_api_url}" "${asset_regex}" "${checksum_asset_regex}"; then + return "${E_UPDATE}" + fi + else + if [[ -z "${source_ref}" ]]; then + log "ERROR" "updater" "E_UPDATE_CONFIG" "no source configured for component=${component}" + return "${E_UPDATE}" + fi + source_name="$(basename "${source_ref}")" + UP_SOURCE_REF="${source_ref}" + UP_SOURCE_NAME="${source_name}" + UP_SOURCE_KIND="$(updater_source_kind "${UP_SOURCE_REF}")" + if [[ -n "${checksum_ref}" ]]; then + checksum_name="$(basename "${checksum_ref}")" + UP_CHECKSUM_REF="${checksum_ref}" + UP_CHECKSUM_NAME="${checksum_name}" + UP_CHECKSUM_KIND="$(updater_source_kind "${UP_CHECKSUM_REF}")" + fi + fi + + UP_INSTALL_KIND="$(updater_install_kind_for_name "${component}" "${UP_SOURCE_NAME:-$(basename "${UP_SOURCE_REF}")}")" + UP_ARCHIVE_MEMBER_REGEX="${archive_member_regex}" + return 0 +} diff --git a/lib/updater/updater.sh b/lib/updater/updater.sh new file mode 100644 index 0000000..babd607 --- /dev/null +++ b/lib/updater/updater.sh @@ -0,0 +1,561 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +source "${BOX_LIB_DIR}/updater/resolver.sh" +source "${BOX_LIB_DIR}/updater/fetcher.sh" +source "${BOX_LIB_DIR}/updater/verifier.sh" +source "${BOX_LIB_DIR}/updater/installer.sh" + +updater_state_root() { + init_runtime_paths + local path="${BOX_VAR_DIR}/state/update" + mkdir -p "${path}" + printf '%s\n' "${path}" 
+} + +updater_snapshot_file() { + init_runtime_paths + mkdir -p "${BOX_VAR_DIR}/state" + printf '%s/state/update-state.json\n' "${BOX_VAR_DIR}" +} + +updater_state_file() { + local component="${1:?missing component}" + printf '%s/%s.state\n' "$(updater_state_root)" "${component}" +} + +updater_read_state_value() { + local component="${1:?missing component}" + local key="${2:?missing key}" + local file + file="$(updater_state_file "${component}")" + [[ -f "${file}" ]] || return 1 + awk -F= -v wanted="${key}" '$1==wanted {print substr($0, index($0, "=")+1); exit}' "${file}" +} + +updater_write_state_component() { + local component="${1:?missing component}" + local status="${2:?missing status}" + local error_message="${3:-}" + local source_ref="${4:-}" + local target_path="${5:-}" + local checksum="${6:-}" + local handoff="${7:-none}" + local attempt_ts success_ts file + + file="$(updater_state_file "${component}")" + attempt_ts="$(timestamp_utc)" + success_ts="$(updater_read_state_value "${component}" "last_success_ts" || true)" + if [[ "${status}" == "success" || "${status}" == "unchanged" ]]; then + success_ts="${attempt_ts}" + fi + + cat >"${file}" <"${snapshot_file}" +} + +updater_interval_for() { + local component="${1:?missing component}" + case "${component}" in + kernel) printf '%s\n' "${BOX_UPDATER_KERNEL_INTERVAL}" ;; + subs) printf '%s\n' "${BOX_UPDATER_SUBS_INTERVAL}" ;; + geo) printf '%s\n' "${BOX_UPDATER_GEO_INTERVAL}" ;; + dashboard) printf '%s\n' "${BOX_UPDATER_DASHBOARD_INTERVAL}" ;; + *) printf '%s\n' "" ;; + esac +} + +updater_service_running() { + local pid + pid="$(read_pid_file "$(service_pid_file_readonly)" || true)" + is_pid_alive "${pid}" +} + +updater_handoff_runtime() { + local component="${1:?missing component}" + local active_bin + + if [[ "${component}" == "dashboard" || "${component}" == "geo" ]]; then + printf '%s\n' "none" + return 0 + fi + + if ! 
updater_service_running; then + printf '%s\n' "none" + return 0 + fi + + case "${component}" in + kernel) + active_bin="$(resolve_core_bin || true)" + if [[ -z "${active_bin}" || "${active_bin}" != "${UP_TARGET_PATH}" ]]; then + printf '%s\n' "none" + return 0 + fi + ;; + subs) + case "${BOX_CORE}" in + sing-box) + if adapter_sing_box_reload; then + printf '%s\n' "reload" + return 0 + fi + ;; + mihomo) + adapter_mihomo_reload >/dev/null 2>&1 || true + ;; + esac + ;; + esac + + if service_restart; then + printf '%s\n' "restart" + return 0 + fi + + log "ERROR" "updater" "E_UPDATE_HANDOFF" "runtime handoff failed for component=${component}" + return "${E_UPDATE}" +} + +updater_recover_runtime_after_restore() { + local component="${1:?missing component}" + + case "${component}" in + dashboard|geo) + return 0 + ;; + kernel) + if ! updater_service_running; then + if ! service_restart; then + return "${E_UPDATE}" + fi + else + if ! service_restart; then + return "${E_UPDATE}" + fi + fi + ;; + subs) + if ! service_restart; then + return "${E_UPDATE}" + fi + ;; + esac +} + +updater_validate_staged_component() { + local component="${1:?missing component}" + local staged_path="${2:?missing staged path}" + local core_bin rendered_tmp + + case "${component}" in + kernel) + if [[ ! -s "${staged_path}" ]]; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "kernel artifact is empty: ${staged_path}" + return "${E_UPDATE}" + fi + chmod 0755 "${staged_path}" + ;; + subs) + core_bin="$(resolve_core_bin || true)" + if [[ -z "${core_bin}" ]]; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "cannot validate subscriptions: core binary not found" + return "${E_UPDATE}" + fi + rendered_tmp="$(mktemp)" + if [[ "${BOX_CORE}" == "mihomo" ]]; then + rendered_tmp="${rendered_tmp}.yaml" + mutator_mihomo_render_overlay "${staged_path}" "${rendered_tmp}" + else + rendered_tmp="${rendered_tmp}.json" + mutator_sing_box_render_overlay "${staged_path}" "${rendered_tmp}" + fi + if ! 
check_core_config "${core_bin}" "${rendered_tmp}" "${BOX_CORE_WORKDIR}"; then + rm -f "${rendered_tmp}" "${rendered_tmp}.overlay.env" + log "ERROR" "updater" "E_UPDATE_VALIDATE" "subscription payload failed core validation" + return "${E_UPDATE}" + fi + rm -f "${rendered_tmp}" "${rendered_tmp}.overlay.env" + ;; + geo) + if [[ ! -s "${staged_path}" ]]; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "geo artifact is empty: ${staged_path}" + return "${E_UPDATE}" + fi + ;; + dashboard) + if [[ ! -d "${staged_path}" ]]; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "dashboard payload is not a directory: ${staged_path}" + return "${E_UPDATE}" + fi + if ! find "${staged_path}" -mindepth 1 -print -quit | grep -q .; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "dashboard payload is empty: ${staged_path}" + return "${E_UPDATE}" + fi + if [[ ! -f "${staged_path}/index.html" ]]; then + log "ERROR" "updater" "E_UPDATE_VALIDATE" "dashboard payload missing index.html: ${staged_path}" + return "${E_UPDATE}" + fi + ;; + esac +} + +updater_stage_dir() { + init_runtime_paths + mkdir -p "${BOX_UPDATER_STAGING_DIR}" + printf '%s/%s\n' "${BOX_UPDATER_STAGING_DIR}" "$1" +} + +updater_cleanup_paths() { + local path + for path in "$@"; do + [[ -n "${path}" ]] || continue + rm -rf "${path}" 2>/dev/null || true + done +} + +updater_component_checksum() { + local install_kind="${1:?missing install kind}" + local path="${2:?missing path}" + case "${install_kind}" in + file|archive-file|gzip-file) updater_compute_sha256 "${path}" ;; + directory|archive-dir) updater_compute_tree_sha256 "${path}" ;; + *) return 1 ;; + esac +} + +updater_find_archive_member() { + local component="${1:?missing component}" + local unpack_dir="${2:?missing unpack dir}" + local member_regex="${3:-}" + local candidate relative_path + + while IFS= read -r candidate; do + relative_path="${candidate#"${unpack_dir}"/}" + if [[ -z "${member_regex}" || "${relative_path}" =~ ${member_regex} ]]; then + printf '%s\n' 
# Locate the directory that actually holds the dashboard payload inside an
# unpacked archive.
# Preference order:
#   1. the shallowest directory containing an index.html — this is the payload
#      root. (Fix: the deepest match was kept before; SPA bundles routinely
#      contain nested per-route index.html files, so the deepest match could
#      select a sub-route directory instead of the bundle root.)
#   2. a single top-level entry that is a directory (archive wrapper dir),
#   3. the unpack directory itself.
# Always prints exactly one path and returns 0.
updater_dashboard_payload_root() {
  local unpack_dir="${1:?missing unpack dir}"
  local index_file candidate best_dir=""
  local top_entries=()

  while IFS= read -r index_file; do
    candidate="$(dirname "${index_file}")"
    # Keep the shortest (shallowest) path; ties broken by LC_ALL=C sort order.
    if [[ -z "${best_dir}" || "${#candidate}" -lt "${#best_dir}" ]]; then
      best_dir="${candidate}"
    fi
  done < <(find "${unpack_dir}" -type f -name 'index.html' | LC_ALL=C sort)

  if [[ -n "${best_dir}" ]]; then
    printf '%s\n' "${best_dir}"
    return 0
  fi

  mapfile -t top_entries < <(find "${unpack_dir}" -mindepth 1 -maxdepth 1 | LC_ALL=C sort)
  if [[ "${#top_entries[@]}" == "1" && -d "${top_entries[0]}" ]]; then
    printf '%s\n' "${top_entries[0]}"
    return 0
  fi

  printf '%s\n' "${unpack_dir}"
}
updater_verify_checksum "${component}" "${fetch_output}" "${UP_CHECKSUM_REF}" "${UP_CHECKSUM_KIND}"; then + updater_write_state_component "${component}" "error" "checksum verification failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" + return "${E_UPDATE}" + fi + + case "${UP_INSTALL_KIND}" in + file) + validate_path="${fetch_output}" + install_path="${fetch_output}" + ;; + gzip-file) + install_source="$(updater_stage_dir "${component}.extract.$$")" + updater_cleanup_paths "${install_source}" + if ! updater_extract_gzip "${fetch_output}" "${install_source}"; then + updater_write_state_component "${component}" "error" "gzip extraction failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${install_source}" + return "${E_UPDATE}" + fi + validate_path="${install_source}" + install_path="${install_source}" + ;; + archive-file|archive-dir) + unpack_dir="$(updater_stage_dir "${component}.unpack.$$")" + updater_cleanup_paths "${unpack_dir}" + if ! 
updater_extract_archive "${fetch_output}" "${unpack_dir}" "${UP_SOURCE_NAME:-${UP_SOURCE_REF}}"; then + updater_write_state_component "${component}" "error" "archive extraction failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + return "${E_UPDATE}" + fi + if [[ "${UP_INSTALL_KIND}" == "archive-file" ]]; then + install_source="$(updater_find_archive_member "${component}" "${unpack_dir}" "${UP_ARCHIVE_MEMBER_REGEX}")" || { + updater_write_state_component "${component}" "error" "archive member not found" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + return "${E_UPDATE}" + } + validate_path="${install_source}" + install_path="${install_source}" + else + install_source="$(updater_dashboard_payload_root "${unpack_dir}")" + validate_path="${install_source}" + install_path="${install_source}" + fi + ;; + directory) + validate_path="${fetch_output}" + install_path="${fetch_output}" + ;; + *) + updater_write_state_component "${component}" "error" "unsupported install kind" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" + return "${E_UPDATE}" + ;; + esac + + if ! 
updater_validate_staged_component "${component}" "${validate_path}"; then + updater_write_state_component "${component}" "error" "validation failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + return "${E_UPDATE}" + fi + + source_checksum="$(updater_component_checksum "${UP_INSTALL_KIND}" "${install_path}" || true)" + previous_checksum="$(updater_read_state_value "${component}" "installed_sha256" || true)" + if [[ -n "${source_checksum}" && -n "${previous_checksum}" && "${source_checksum}" == "${previous_checksum}" ]]; then + if [[ "${UP_INSTALL_KIND}" == "archive-dir" || "${UP_INSTALL_KIND}" == "directory" ]]; then + target_install_kind="directory" + else + target_install_kind="file" + fi + target_checksum="$(updater_component_checksum "${target_install_kind}" "${UP_TARGET_PATH}" || true)" + if [[ -n "${target_checksum}" && "${target_checksum}" == "${source_checksum}" ]]; then + updater_install_nochange "${source_checksum}" + fi + fi + + case "${UP_INSTALL_KIND}" in + file|archive-file|gzip-file) + if [[ "${UP_INSTALL_CHANGED}" != "false" || -z "${UP_INSTALL_CHECKSUM}" ]]; then + if ! updater_install_file "${component}" "${install_path}" "${UP_TARGET_PATH}" "$([[ "${component}" == "kernel" ]] && printf 'true' || printf 'false')"; then + updater_write_state_component "${component}" "error" "install failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + return "${E_UPDATE}" + fi + fi + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + ;; + archive-dir) + if [[ "${UP_INSTALL_CHANGED}" != "false" || -z "${UP_INSTALL_CHECKSUM}" ]]; then + if ! 
updater_install_directory "${component}" "${install_path}" "${UP_TARGET_PATH}" "${source_checksum}" "${previous_checksum}"; then + updater_write_state_component "${component}" "error" "install failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + return "${E_UPDATE}" + fi + fi + updater_cleanup_paths "${fetch_output}" "${unpack_dir}" + ;; + directory) + if [[ "${UP_INSTALL_CHANGED}" != "false" || -z "${UP_INSTALL_CHECKSUM}" ]]; then + if ! updater_install_directory "${component}" "${fetch_output}" "${UP_TARGET_PATH}" "${source_checksum}" "${previous_checksum}"; then + updater_write_state_component "${component}" "error" "install failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "" "none" + updater_cleanup_paths "${fetch_output}" + return "${E_UPDATE}" + fi + fi + updater_cleanup_paths "${fetch_output}" + ;; + esac + + if [[ "${UP_INSTALL_CHANGED}" == "true" && "${UP_REQUIRES_HANDOFF}" == "true" ]]; then + if ! handoff="$(updater_handoff_runtime "${component}")"; then + updater_restore_backup "${UP_TARGET_PATH}" + updater_recover_runtime_after_restore "${component}" || true + updater_write_state_component "${component}" "error" "runtime handoff failed" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "${UP_INSTALL_CHECKSUM}" "failed" + return "${E_UPDATE}" + fi + else + handoff="none" + fi + + result_status="success" + if [[ "${UP_INSTALL_CHANGED}" != "true" ]]; then + result_status="unchanged" + fi + updater_write_state_component "${component}" "${result_status}" "" "${UP_SOURCE_REF}" "${UP_TARGET_PATH}" "${UP_INSTALL_CHECKSUM}" "${handoff}" + return 0 +} + +updater_status_json() { + local snapshot + snapshot="$(updater_snapshot_file)" + if [[ -f "${snapshot}" ]]; then + cat "${snapshot}" + return 0 + fi + updater_write_snapshot + cat "${snapshot}" +} + +updater_status_text() { + local component configured status last_error target interval + printf 'artifact_dir=%s\n' "${BOX_UPDATER_ARTIFACT_DIR}" + printf 
'staging_dir=%s\n' "${BOX_UPDATER_STAGING_DIR}" + printf 'checksum_policy=%s\n' "${BOX_UPDATER_CHECKSUM_POLICY}" + for component in kernel subs geo dashboard; do + configured="false" + if updater_component_configured "${component}"; then + configured="true" + fi + status="$(updater_read_state_value "${component}" "last_status" || printf 'never')" + last_error="$(updater_read_state_value "${component}" "last_error" || true)" + target="$(updater_read_state_value "${component}" "target_path" || true)" + interval="$(updater_interval_for "${component}")" + printf '[%s]\n' "${component}" + printf 'configured=%s\n' "${configured}" + printf 'interval=%s\n' "${interval}" + printf 'status=%s\n' "${status}" + printf 'target=%s\n' "${target}" + printf 'last_error=%s\n' "${last_error}" + done +} + +updater_status() { + load_config + if [[ "${BOX_OUTPUT_FORMAT}" == "json" ]]; then + updater_status_json + else + updater_status_text + fi +} + +updater_run_locked() { + local component="${1:?missing component}" + load_config + init_runtime_paths + mkdir -p "${BOX_UPDATER_ARTIFACT_DIR}" "${BOX_UPDATER_STAGING_DIR}" "${BOX_VAR_DIR}/state" + + case "${component}" in + kernel|subs|geo|dashboard) + updater_apply_component "${component}" + ;; + all) + local current rc=0 any_configured=0 + for current in kernel subs geo dashboard; do + if updater_component_configured "${current}"; then + any_configured=1 + updater_apply_component "${current}" || rc=$? 
+ else + updater_write_state_component "${current}" "skipped" "component not configured" "" "" "" "none" + fi + done + if [[ "${any_configured}" != "1" ]]; then + log "ERROR" "updater" "E_UPDATE_CONFIG" "no updater components configured" + return "${E_UPDATE}" + fi + return "${rc}" + ;; + *) + log "ERROR" "updater" "E_UPDATE_COMPONENT" "unsupported update action=${component}" + return 2 + ;; + esac +} + +updater_run() { + local component="${1:?missing component}" + with_lock "update" 120 updater_run_locked "${component}" +} + +updater_cmd() { + local action="${1:-}" + case "${action}" in + kernel|subs|geo|dashboard|all) updater_run "${action}" ;; + status) updater_status ;; + *) + printf 'usage: boxctl update [--json]\n' >&2 + return 2 + ;; + esac +} diff --git a/lib/updater/verifier.sh b/lib/updater/verifier.sh new file mode 100644 index 0000000..6727eac --- /dev/null +++ b/lib/updater/verifier.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# shellcheck shell=bash + +set -euo pipefail + +updater_sha256_cmd() { + command -v sha256sum >/dev/null 2>&1 && printf '%s\n' "sha256sum" +} + +updater_compute_sha256() { + local path="${1:?missing path}" + local sha_cmd + sha_cmd="$(updater_sha256_cmd || true)" + [[ -n "${sha_cmd}" ]] || return 1 + "${sha_cmd}" "${path}" 2>/dev/null | awk '{print $1}' +} + +updater_compute_tree_sha256() { + local path="${1:?missing path}" + local sha_cmd + sha_cmd="$(updater_sha256_cmd || true)" + [[ -n "${sha_cmd}" ]] || return 1 + + if [[ -f "${path}" ]]; then + updater_compute_sha256 "${path}" + return 0 + fi + + [[ -d "${path}" ]] || return 1 + + ( + cd "${path}" + find . 
# Extract a 64-hex-digit SHA-256 value from one line of a checksum file.
# Generalized: handles both coreutils format ("<hex>  <name>") and BSD format
# ("SHA256 (<name>) = <hex>"); the BSD form previously yielded the literal
# word "SHA256". Whitespace trimming is done with parameter expansion instead
# of the external trim_space helper.
updater_parse_checksum_text() {
  local text="${1:-}"
  # Strip leading, then trailing, whitespace.
  text="${text#"${text%%[![:space:]]*}"}"
  text="${text%"${text##*[![:space:]]}"}"
  # BSD-style "SHA256 (file) = <hex>": take the value after '='.
  if [[ "${text}" =~ =[[:space:]]*([0-9a-fA-F]{64})[[:space:]]*$ ]]; then
    printf '%s\n' "${BASH_REMATCH[1]}"
    return 0
  fi
  # coreutils style: the digest is the first whitespace-delimited field.
  printf '%s\n' "${text%%[[:space:]]*}"
}

# Resolve the expected SHA-256 digest for an artifact.
#   $1 ref        - a literal 64-hex digest, or a file path / URL to fetch
#   $2 kind       - optional source kind; inferred via updater_source_kind
#                   when empty
#   $3 asset_name - optional (new, backward-compatible): when the fetched
#                   checksum file lists several entries, prefer the line that
#                   mentions this asset name; previously only the first line
#                   was consulted, which broke multi-asset checksum files.
# Prints the digest on stdout; returns non-zero when none can be resolved.
updater_expected_checksum() {
  local ref="${1:-}"
  local kind="${2:-}"
  local asset_name="${3:-}"
  local value="" line="" tmp_file

  [[ -n "${ref}" ]] || return 1

  # A bare 64-hex string is the checksum itself — no fetch needed.
  if [[ "${ref}" =~ ^[0-9a-fA-F]{64}$ ]]; then
    printf '%s\n' "${ref}"
    return 0
  fi
  if [[ -z "${kind}" ]]; then
    kind="$(updater_source_kind "${ref}")"
  fi

  tmp_file="$(mktemp)"
  if ! updater_fetch_ref "checksum" "${ref}" "${kind}" "${tmp_file}"; then
    rm -f "${tmp_file}"
    return 1
  fi
  # Prefer the entry naming the asset; fall back to the first line.
  if [[ -n "${asset_name}" ]]; then
    line="$(grep -F -m 1 -- "${asset_name}" "${tmp_file}" || true)"
  fi
  [[ -n "${line}" ]] || line="$(head -n 1 "${tmp_file}")"
  rm -f "${tmp_file}"
  value="$(updater_parse_checksum_text "${line}")"
  [[ "${value}" =~ ^[0-9a-fA-F]{64}$ ]] || return 1
  printf '%s\n' "${value}"
}

# Verify an artifact against the configured checksum policy
# (BOX_UPDATER_CHECKSUM_POLICY: off | optional-ish default | required).
# Directories are hashed as a deterministic tree digest, files directly.
# Returns 0 when the policy is satisfied, E_UPDATE otherwise.
updater_verify_checksum() {
  local component="${1:?missing component}"
  local artifact_path="${2:?missing artifact path}"
  local checksum_ref="${3:-}"
  local checksum_kind="${4:-}"
  local expected actual

  case "${BOX_UPDATER_CHECKSUM_POLICY}" in
    off) return 0 ;;
  esac

  if [[ -z "${checksum_ref}" ]]; then
    if [[ "${BOX_UPDATER_CHECKSUM_POLICY}" == "required" ]]; then
      log "ERROR" "updater" "E_UPDATE_CHECKSUM" "checksum required but not configured for component=${component}"
      return "${E_UPDATE}"
    fi
    # Advisory policy with no checksum configured: accept.
    return 0
  fi

  expected="$(updater_expected_checksum "${checksum_ref}" "${checksum_kind}" || true)"
  if [[ -z "${expected}" ]]; then
    log "ERROR" "updater" "E_UPDATE_CHECKSUM" "failed to resolve checksum for component=${component}"
    return "${E_UPDATE}"
  fi

  if [[ -d "${artifact_path}" ]]; then
    actual="$(updater_compute_tree_sha256 "${artifact_path}" || true)"
  else
    actual="$(updater_compute_sha256 "${artifact_path}" || true)"
  fi
  if [[ -z "${actual}" || "${actual}" != "${expected}" ]]; then
    log "ERROR" "updater" "E_UPDATE_CHECKSUM" \
      "checksum mismatch for component=${component} expected=${expected:-unknown} actual=${actual:-unknown}"
    return "${E_UPDATE}"
  fi
  # Explicit success (was previously the implicit status of the failed test).
  return 0
}
+ + install -d "${pkgdir}/usr/lib/box4linux" + cp -a "${repo_root}/lib" "${pkgdir}/usr/lib/box4linux/" + + install -Dm755 "${repo_root}/cmd/boxctl" "${pkgdir}/usr/lib/box4linux/cmd/boxctl" + install -d "${pkgdir}/usr/bin" + ln -sf ../lib/box4linux/cmd/boxctl "${pkgdir}/usr/bin/boxctl" + + install -Dm644 "${repo_root}/etc/box/box.toml" "${pkgdir}/etc/box/box.toml" + + local unit + for unit in "${repo_root}"/systemd/*; do + install -Dm644 "${unit}" "${pkgdir}/usr/lib/systemd/system/$(basename "${unit}")" + done + + install -Dm644 "${repo_root}/README.md" "${pkgdir}/usr/share/doc/${pkgname}/README.md" + install -d "${pkgdir}/usr/share/doc/${pkgname}/linux-port" + cp -a "${repo_root}/docs/linux-port/." "${pkgdir}/usr/share/doc/${pkgname}/linux-port/" + install -Dm755 "${repo_root}/packaging/scripts/systemd-lifecycle.sh" \ + "${pkgdir}/usr/share/doc/${pkgname}/systemd-lifecycle.sh" +} diff --git a/packaging/arch/box4linux.install b/packaging/arch/box4linux.install new file mode 100644 index 0000000..fc19083 --- /dev/null +++ b/packaging/arch/box4linux.install @@ -0,0 +1,47 @@ +post_install() { + if command -v systemctl >/dev/null 2>&1; then + systemctl daemon-reload >/dev/null 2>&1 || true + fi + printf 'box4linux installed.\n' + printf 'Enable services with: sudo /usr/share/doc/box4linux/systemd-lifecycle.sh enable\n' + printf 'Config file: /etc/box/box.toml (pacman backup entry; upgrades keep local edits as .pacnew)\n' +} + +post_upgrade() { + if command -v systemctl >/dev/null 2>&1; then + systemctl daemon-reload >/dev/null 2>&1 || true + fi + printf 'box4linux upgraded. 
Review /etc/box/box.toml.pacnew if present.\n' +} + +pre_remove() { + if command -v /usr/bin/boxctl >/dev/null 2>&1; then + /usr/bin/boxctl policy disable >/dev/null 2>&1 || true + /usr/bin/boxctl firewall disable >/dev/null 2>&1 || true + /usr/bin/boxctl service stop >/dev/null 2>&1 || true + fi + if command -v systemctl >/dev/null 2>&1; then + systemctl disable --now \ + box-policy.service \ + box-firewall.service \ + box.service \ + box-update-kernel.timer \ + box-update-subs.timer \ + box-update-geo.timer \ + box-update-dashboard.timer \ + box-update-all.timer \ + box-update-kernel.service \ + box-update-subs.service \ + box-update-geo.service \ + box-update-dashboard.service \ + box-update-all.service >/dev/null 2>&1 || true + systemctl daemon-reload >/dev/null 2>&1 || true + fi +} + +post_remove() { + if command -v systemctl >/dev/null 2>&1; then + systemctl daemon-reload >/dev/null 2>&1 || true + fi + printf 'box4linux removed. Runtime data under /var/lib/box and local config backups are preserved unless purged manually.\n' +} diff --git a/packaging/scripts/systemd-lifecycle.sh b/packaging/scripts/systemd-lifecycle.sh new file mode 100755 index 0000000..9f95d6b --- /dev/null +++ b/packaging/scripts/systemd-lifecycle.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +set -euo pipefail + +action="${1:-status}" + +require_systemctl() { + if ! command -v systemctl >/dev/null 2>&1; then + printf 'systemctl not found; this helper requires systemd.\n' >&2 + exit 1 + fi +} + +usage() { + cat < + +Notes: + - disable/restart actions only manage units and do not delete /etc/box or /var/lib/box. + - enable starts the core units, conditionally enables policy if configured, and enables the default + scheduled updater timer (box-update-all.timer). + - use package removal + manual purge only when full cleanup is explicitly desired. 
+USAGE +} + +policy_enabled_in_config() { + local boxctl_path output + + boxctl_path="$(command -v boxctl || true)" + [[ -n "${boxctl_path}" ]] || return 1 + + if ! output="$("${boxctl_path}" policy status --json 2>/dev/null)"; then + return 1 + fi + + grep -q '"policy_enabled":true' <<<"${output}" +} + +enable_units() { + require_systemctl + systemctl daemon-reload + systemctl enable box.service box-firewall.service + if policy_enabled_in_config; then + systemctl enable box-policy.service + systemctl start box-policy.service + else + systemctl disable box-policy.service >/dev/null 2>&1 || true + systemctl stop box-policy.service >/dev/null 2>&1 || true + fi + systemctl enable box-update-all.timer + systemctl start box.service + systemctl start box-update-all.timer + systemctl reload-or-restart box-firewall.service || true +} + +disable_units() { + require_systemctl + systemctl stop \ + box-policy.service \ + box-firewall.service \ + box.service \ + box-update-kernel.timer \ + box-update-subs.timer \ + box-update-geo.timer \ + box-update-dashboard.timer \ + box-update-all.timer || true + systemctl disable \ + box-policy.service \ + box-firewall.service \ + box.service \ + box-update-kernel.timer \ + box-update-subs.timer \ + box-update-geo.timer \ + box-update-dashboard.timer \ + box-update-all.timer || true + systemctl daemon-reload +} + +restart_units() { + require_systemctl + systemctl daemon-reload + systemctl restart box.service + if policy_enabled_in_config; then + systemctl try-restart box-policy.service || true + else + systemctl stop box-policy.service >/dev/null 2>&1 || true + fi + systemctl reload-or-restart box-firewall.service || true + systemctl try-restart \ + box-update-kernel.timer \ + box-update-subs.timer \ + box-update-geo.timer \ + box-update-dashboard.timer \ + box-update-all.timer || true +} + +status_units() { + require_systemctl + systemctl --no-pager --full status \ + box.service \ + box-firewall.service \ + box-policy.service \ + 
box-update-kernel.timer \ + box-update-subs.timer \ + box-update-geo.timer \ + box-update-dashboard.timer \ + box-update-all.timer || true +} + +case "${action}" in + enable) enable_units ;; + disable) disable_units ;; + restart) restart_units ;; + status) status_units ;; + -h|--help|help) + usage + ;; + *) + usage >&2 + exit 2 + ;; +esac diff --git a/systemd/box-firewall.service b/systemd/box-firewall.service new file mode 100644 index 0000000..6479698 --- /dev/null +++ b/systemd/box-firewall.service @@ -0,0 +1,17 @@ +[Unit] +Description=Box Linux Firewall Manager +PartOf=box.service +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/boxctl firewall enable +ExecStop=/usr/bin/boxctl firewall disable +ExecReload=/usr/bin/boxctl firewall renew +TimeoutStartSec=60 +TimeoutStopSec=30 + +[Install] +WantedBy=multi-user.target diff --git a/systemd/box-policy.service b/systemd/box-policy.service new file mode 100644 index 0000000..ee07c53 --- /dev/null +++ b/systemd/box-policy.service @@ -0,0 +1,16 @@ +[Unit] +Description=Box Linux Policy Watcher +PartOf=box.service +After=box.service network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/boxctl policy monitor +ExecStop=/usr/bin/boxctl policy disable +Restart=on-failure +RestartSec=3 +TimeoutStopSec=15 + +[Install] +WantedBy=multi-user.target diff --git a/systemd/box-update-all.service b/systemd/box-update-all.service new file mode 100644 index 0000000..466da88 --- /dev/null +++ b/systemd/box-update-all.service @@ -0,0 +1,9 @@ +[Unit] +Description=Box Combined Updater +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/boxctl update all +TimeoutStartSec=600 diff --git a/systemd/box-update-all.timer b/systemd/box-update-all.timer new file mode 100644 index 0000000..13de50a --- /dev/null +++ b/systemd/box-update-all.timer @@ -0,0 +1,11 @@ +[Unit] 
+Description=Schedule Combined Box Updates + +[Timer] +OnBootSec=30m +OnUnitActiveSec=12h +Persistent=true +Unit=box-update-all.service + +[Install] +WantedBy=timers.target diff --git a/systemd/box-update-dashboard.service b/systemd/box-update-dashboard.service new file mode 100644 index 0000000..5aacb11 --- /dev/null +++ b/systemd/box-update-dashboard.service @@ -0,0 +1,9 @@ +[Unit] +Description=Box Dashboard Updater +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/boxctl update dashboard +TimeoutStartSec=180 diff --git a/systemd/box-update-dashboard.timer b/systemd/box-update-dashboard.timer new file mode 100644 index 0000000..6a40bbe --- /dev/null +++ b/systemd/box-update-dashboard.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Schedule Box Dashboard Updates + +[Timer] +OnBootSec=20m +OnUnitActiveSec=7d +Persistent=true +Unit=box-update-dashboard.service + +[Install] +WantedBy=timers.target diff --git a/systemd/box-update-geo.service b/systemd/box-update-geo.service new file mode 100644 index 0000000..a87e33c --- /dev/null +++ b/systemd/box-update-geo.service @@ -0,0 +1,9 @@ +[Unit] +Description=Box Geo Data Updater +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/boxctl update geo +TimeoutStartSec=180 diff --git a/systemd/box-update-geo.timer b/systemd/box-update-geo.timer new file mode 100644 index 0000000..08bdbf5 --- /dev/null +++ b/systemd/box-update-geo.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Schedule Box Geo Data Updates + +[Timer] +OnBootSec=15m +OnUnitActiveSec=1d +Persistent=true +Unit=box-update-geo.service + +[Install] +WantedBy=timers.target diff --git a/systemd/box-update-kernel.service b/systemd/box-update-kernel.service new file mode 100644 index 0000000..9aeb93c --- /dev/null +++ b/systemd/box-update-kernel.service @@ -0,0 +1,9 @@ +[Unit] +Description=Box Kernel Updater +After=network-online.target +Wants=network-online.target + 
+[Service] +Type=oneshot +ExecStart=/usr/bin/boxctl update kernel +TimeoutStartSec=300 diff --git a/systemd/box-update-kernel.timer b/systemd/box-update-kernel.timer new file mode 100644 index 0000000..5a982d2 --- /dev/null +++ b/systemd/box-update-kernel.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Schedule Box Kernel Updates + +[Timer] +OnBootSec=10m +OnUnitActiveSec=1d +Persistent=true +Unit=box-update-kernel.service + +[Install] +WantedBy=timers.target diff --git a/systemd/box-update-subs.service b/systemd/box-update-subs.service new file mode 100644 index 0000000..2596150 --- /dev/null +++ b/systemd/box-update-subs.service @@ -0,0 +1,9 @@ +[Unit] +Description=Box Subscription Updater +After=network-online.target +Wants=network-online.target + +[Service] +Type=oneshot +ExecStart=/usr/bin/boxctl update subs +TimeoutStartSec=180 diff --git a/systemd/box-update-subs.timer b/systemd/box-update-subs.timer new file mode 100644 index 0000000..bf98a9f --- /dev/null +++ b/systemd/box-update-subs.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Schedule Box Subscription Updates + +[Timer] +OnBootSec=5m +OnUnitActiveSec=1h +Persistent=true +Unit=box-update-subs.service + +[Install] +WantedBy=timers.target diff --git a/systemd/box.service b/systemd/box.service new file mode 100644 index 0000000..abca728 --- /dev/null +++ b/systemd/box.service @@ -0,0 +1,18 @@ +[Unit] +Description=Box Linux Core Supervisor +After=network-online.target +Wants=network-online.target + +[Service] +Type=forking +PIDFile=/run/box/box.pid +ExecStart=/usr/bin/boxctl service start +ExecStop=/usr/bin/boxctl service stop +ExecReload=/usr/bin/boxctl service reload +Restart=on-failure +RestartSec=3 +TimeoutStartSec=120 +TimeoutStopSec=60 + +[Install] +WantedBy=multi-user.target diff --git a/tests/fixtures/mockbin/curl b/tests/fixtures/mockbin/curl new file mode 100755 index 0000000..67001f8 --- /dev/null +++ b/tests/fixtures/mockbin/curl @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -euo pipefail + +output="" 
+url="" + +while (($# > 0)); do + case "$1" in + -o) + output="${2:?missing curl output path}" + shift 2 + ;; + -f|-s|-S|-L|-fsSL) + shift + ;; + *) + url="$1" + shift + ;; + esac +done + +if [[ -n "${MOCK_CURL_FAIL_URL:-}" && "${url}" == "${MOCK_CURL_FAIL_URL}" ]]; then + printf 'mock curl failed for %s\n' "${url}" >&2 + exit 22 +fi + +if [[ -n "${MOCK_CURL_RESPONSE_FILE:-}" && -n "${output}" ]]; then + cp -f "${MOCK_CURL_RESPONSE_FILE}" "${output}" + exit 0 +fi + +printf 'mock curl has no response configured for %s\n' "${url}" >&2 +exit 22 diff --git a/tests/fixtures/mockbin/ip b/tests/fixtures/mockbin/ip new file mode 100755 index 0000000..3320aab --- /dev/null +++ b/tests/fixtures/mockbin/ip @@ -0,0 +1,100 @@ +#!/usr/bin/env bash + +set -euo pipefail + +STATE_FILE="${MOCK_IP_STATE:?MOCK_IP_STATE is required}" +mkdir -p "$(dirname "${STATE_FILE}")" +touch "${STATE_FILE}" + +opt_brief="false" +cmd="${1:-}" +if [[ "${cmd}" == "-o" ]]; then + opt_brief="true" + cmd="${2:-}" + subcmd="${3:-}" + shift 3 || true +else + subcmd="${2:-}" + shift 2 || true +fi + +extract_table_arg() { + local prev="" + local token + for token in "$@"; do + if [[ "${prev}" == "table" ]]; then + printf '%s\n' "${token}" + return 0 + fi + prev="${token}" + done + printf 'main\n' +} + +case "${cmd}:${subcmd}" in + link:show) + if [[ "${opt_brief}" == "true" && "${1:-}" == "up" && -n "${MOCK_IP_LINKS_FILE:-}" && -f "${MOCK_IP_LINKS_FILE}" ]]; then + cat "${MOCK_IP_LINKS_FILE}" + fi + ;; + addr:show) + if [[ "${opt_brief}" == "true" && -n "${MOCK_IP_ADDRS_FILE:-}" && -f "${MOCK_IP_ADDRS_FILE}" ]]; then + cat "${MOCK_IP_ADDRS_FILE}" + fi + ;; + monitor:link|monitor:route|monitor:address) + if [[ -n "${MOCK_IP_MONITOR_FILE:-}" ]]; then + mkdir -p "$(dirname "${MOCK_IP_MONITOR_FILE}")" + touch "${MOCK_IP_MONITOR_FILE}" + tail -n 0 -F "${MOCK_IP_MONITOR_FILE}" + fi + ;; + monitor:*) + if [[ -n "${MOCK_IP_MONITOR_FILE:-}" ]]; then + mkdir -p "$(dirname "${MOCK_IP_MONITOR_FILE}")" + touch 
"${MOCK_IP_MONITOR_FILE}" + tail -n 0 -F "${MOCK_IP_MONITOR_FILE}" + fi + ;; + rule:list) + grep -F "RULE|" "${STATE_FILE}" | sed 's/^RULE|//' || true + ;; + rule:add) + rule="$*" + if ! grep -Fqx "RULE|${rule}" "${STATE_FILE}"; then + printf 'RULE|%s\n' "${rule}" >>"${STATE_FILE}" + fi + ;; + rule:del) + rule="$*" + if [[ "${1:-}" == "pref" && -n "${2:-}" ]]; then + pref="${2}" + grep -Ev "RULE\\|.*(^| )pref ${pref}( |$)" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true + else + grep -Fv "RULE|${rule}" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true + fi + mv "${STATE_FILE}.tmp" "${STATE_FILE}" + ;; + route:show) + if [[ "${1:-}" == "table" ]]; then + table="${2:-main}" + grep -F "ROUTE|${table}|" "${STATE_FILE}" | sed "s/^ROUTE|${table}|//" || true + fi + ;; + route:add) + route="$*" + table="$(extract_table_arg "$@")" + if ! grep -Fqx "ROUTE|${table}|${route}" "${STATE_FILE}"; then + printf 'ROUTE|%s|%s\n' "${table}" "${route}" >>"${STATE_FILE}" + fi + ;; + route:del) + route="$*" + table="$(extract_table_arg "$@")" + grep -Fv "ROUTE|${table}|${route}" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true + mv "${STATE_FILE}.tmp" "${STATE_FILE}" + ;; + *) + exit 0 + ;; +esac diff --git a/tests/fixtures/mockbin/iptables b/tests/fixtures/mockbin/iptables new file mode 100755 index 0000000..b9fa844 --- /dev/null +++ b/tests/fixtures/mockbin/iptables @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +set -euo pipefail + +STATE_FILE="${MOCK_IPTABLES_STATE:?MOCK_IPTABLES_STATE is required}" +mkdir -p "$(dirname "${STATE_FILE}")" +touch "${STATE_FILE}" + +table="filter" +args=("$@") +idx=0 +while (( idx < ${#args[@]} )); do + if [[ "${args[idx]}" == "-t" ]]; then + table="${args[idx+1]}" + unset 'args[idx]' 'args[idx+1]' + break + fi + idx=$((idx + 1)) +done + +clean_args=() +for item in "${args[@]}"; do + [[ -n "${item:-}" ]] && clean_args+=("${item}") +done +args=("${clean_args[@]}") + +if [[ "${args[*]}" == *"TPROXY --help"* ]]; then + exit 0 +fi + +chain_exists() { + local chain="$1" + 
  # Built-in chains of each table always exist; user-defined chains are
  # looked up in the flat state file (format: CHAIN|<table>|<chain>).
  case "${table}:${chain}" in
    filter:INPUT|filter:FORWARD|filter:OUTPUT|\
    nat:PREROUTING|nat:INPUT|nat:OUTPUT|nat:POSTROUTING|\
    mangle:PREROUTING|mangle:INPUT|mangle:FORWARD|mangle:OUTPUT|mangle:POSTROUTING|\
    raw:PREROUTING|raw:OUTPUT|\
    security:INPUT|security:FORWARD|security:OUTPUT)
      return 0
      ;;
  esac
  grep -Fqx "CHAIN|${table}|${chain}" "${STATE_FILE}"
}

# Mimic iptables' stderr wording for a missing chain, then fail.
missing_chain_error() {
  printf 'No chain/target/match by that name.\n' >&2
  exit 1
}

# True when the exact rule text is recorded for <table>/<chain>.
rule_exists() {
  local chain="$1"
  local rule="$2"
  grep -Fqx "RULE|${table}|${chain}|${rule}" "${STATE_FILE}"
}

# Dispatch on the iptables operation flag (first remaining argument).
case "${args[0]:-}" in
  -S)
    # List chains (no arg) or one chain's rules in iptables-save syntax.
    if [[ "${#args[@]}" -eq 1 ]]; then
      grep -F "CHAIN|${table}|" "${STATE_FILE}" | sed "s/^CHAIN|${table}|/-N /" || true
      exit 0
    fi
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      exit 1
    fi
    printf -- '-N %s\n' "${chain}"
    grep -F "RULE|${table}|${chain}|" "${STATE_FILE}" | sed "s/^RULE|${table}|${chain}|/-A ${chain} /" || true
    ;;
  -N)
    # Create chain; fails if it already exists (matches real iptables).
    chain="${args[1]}"
    if chain_exists "${chain}"; then
      exit 1
    fi
    printf 'CHAIN|%s|%s\n' "${table}" "${chain}" >>"${STATE_FILE}"
    ;;
  -X)
    # Delete chain and any rules recorded under it.
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      missing_chain_error
    fi
    grep -Fv "CHAIN|${table}|${chain}" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true
    grep -Fv "RULE|${table}|${chain}|" "${STATE_FILE}.tmp" >"${STATE_FILE}.tmp2" || true
    mv "${STATE_FILE}.tmp2" "${STATE_FILE}"
    rm -f "${STATE_FILE}.tmp"
    ;;
  -F)
    # Flush: drop the chain's rules but keep the chain itself.
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      missing_chain_error
    fi
    grep -Fv "RULE|${table}|${chain}|" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true
    mv "${STATE_FILE}.tmp" "${STATE_FILE}"
    ;;
  -A)
    # Append rule; deduplicated so repeated renew runs stay idempotent.
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      missing_chain_error
    fi
    rule="${args[*]:2}"
    if ! rule_exists "${chain}" "${rule}"; then
      printf 'RULE|%s|%s|%s\n' "${table}" "${chain}" "${rule}" >>"${STATE_FILE}"
    fi
    ;;
  -C)
    # Check: exit status mirrors whether the exact rule is recorded.
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      missing_chain_error
    fi
    rule="${args[*]:2}"
    rule_exists "${chain}" "${rule}"
    ;;
  -D)
    # Delete a single rule by exact text match.
    chain="${args[1]}"
    if ! chain_exists "${chain}"; then
      missing_chain_error
    fi
    rule="${args[*]:2}"
    grep -Fv "RULE|${table}|${chain}|${rule}" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true
    mv "${STATE_FILE}.tmp" "${STATE_FILE}"
    ;;
  *)
    # Anything else (e.g. -P policy changes) is accepted as a no-op.
    exit 0
    ;;
esac
diff --git a/tests/fixtures/mockbin/iw b/tests/fixtures/mockbin/iw
new file mode 100755
index 0000000..2494ee2
--- /dev/null
+++ b/tests/fixtures/mockbin/iw
@@ -0,0 +1,8 @@
#!/usr/bin/env bash

set -euo pipefail

# Mock iw: replay canned "iw link" output from a fixture file, if configured.
if [[ -n "${MOCK_IW_LINK_FILE:-}" && -f "${MOCK_IW_LINK_FILE}" ]]; then
  cat "${MOCK_IW_LINK_FILE}"
fi

diff --git a/tests/fixtures/mockbin/mihomo b/tests/fixtures/mockbin/mihomo
new file mode 100755
index 0000000..2f87a49
--- /dev/null
+++ b/tests/fixtures/mockbin/mihomo
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -euo pipefail

# Mock mihomo core: config test ("-t") always passes; otherwise park until
# the supervisor sends TERM/INT, exiting cleanly like a well-behaved daemon.
if [[ "${1:-}" == "-t" ]]; then
  exit 0
fi

trap 'exit 0' TERM INT
while true; do
  sleep 1
done
diff --git a/tests/fixtures/mockbin/nft b/tests/fixtures/mockbin/nft
new file mode 100755
index 0000000..ef3109c
--- /dev/null
+++ b/tests/fixtures/mockbin/nft
@@ -0,0 +1,105 @@
#!/usr/bin/env bash

set -euo pipefail

# Mock nft: records tables/chains/rules in a flat state file
# (TABLE|<family>|<table>, CHAIN|...|<chain>, RULE|...|<rule text>).
STATE_FILE="${MOCK_NFT_STATE:?MOCK_NFT_STATE is required}"
mkdir -p "$(dirname "${STATE_FILE}")"
touch "${STATE_FILE}"

# Collapse duplicate entries so replayed rulesets stay idempotent.
dedupe() {
  sort -u "${STATE_FILE}" >"${STATE_FILE}.tmp"
  mv "${STATE_FILE}.tmp" "${STATE_FILE}"
}

# Drop a table plus every chain/rule recorded under it.
remove_table() {
  local family="${1:?missing family}"
  local table="${2:?missing table}"
  grep -Ev "^(TABLE|CHAIN|RULE)\|${family}\|${table}(\||$)" "${STATE_FILE}" >"${STATE_FILE}.tmp" || true
  mv "${STATE_FILE}.tmp" "${STATE_FILE}"
}

# Parse one line of an "nft -f" ruleset and mutate the state accordingly.
handle_ruleset_line() {
  local line="$1"
  local family table chain rest
  # Strip leading whitespace; skip blanks and comments.
  line="${line#"${line%%[![:space:]]*}"}"
  [[ -n "${line}" ]] || return 0
  [[ "${line}" == \#* ]] && return 0

  case "${line}" in
    add\ table\ *)
      read -r _ _ family table
<<<"${line}" + printf 'TABLE|%s|%s\n' "${family}" "${table}" >>"${STATE_FILE}" + ;; + add\ chain\ *) + family="$(awk '{print $3}' <<<"${line}")" + table="$(awk '{print $4}' <<<"${line}")" + chain="$(awk '{print $5}' <<<"${line}")" + printf 'CHAIN|%s|%s|%s\n' "${family}" "${table}" "${chain}" >>"${STATE_FILE}" + ;; + add\ rule\ *) + family="$(awk '{print $3}' <<<"${line}")" + table="$(awk '{print $4}' <<<"${line}")" + chain="$(awk '{print $5}' <<<"${line}")" + rest="${line#add rule ${family} ${table} ${chain} }" + printf 'RULE|%s|%s|%s|%s\n' "${family}" "${table}" "${chain}" "${rest}" >>"${STATE_FILE}" + ;; + delete\ table\ *) + read -r _ _ family table <<<"${line}" + remove_table "${family}" "${table}" + ;; + esac +} + +cmd="${1:-}" +case "${cmd}" in + describe) + if [[ "${2:-}" == "tproxy" ]]; then + exit 0 + fi + ;; + list) + case "${2:-}" in + table) + family="${3:-}" + table="${4:-}" + if grep -Fq "TABLE|${family}|${table}" "${STATE_FILE}"; then + printf 'table %s %s {}\n' "${family}" "${table}" + exit 0 + fi + exit 1 + ;; + chain) + family="${3:-}" + table="${4:-}" + chain="${5:-}" + if ! 
grep -Fq "CHAIN|${family}|${table}|${chain}" "${STATE_FILE}"; then + exit 1 + fi + grep -F "RULE|${family}|${table}|${chain}|" "${STATE_FILE}" | sed "s|^RULE|rule|" + exit 0 + ;; + esac + ;; + delete) + if [[ "${2:-}" == "table" ]]; then + remove_table "${3:-}" "${4:-}" + exit 0 + fi + ;; + -f) + input="${2:-}" + if [[ "${input}" == "-" ]]; then + while IFS= read -r line; do + handle_ruleset_line "${line}" + done + else + while IFS= read -r line; do + handle_ruleset_line "${line}" + done <"${input}" + fi + dedupe + exit 0 + ;; +esac + +exit 0 diff --git a/tests/fixtures/mockbin/nmcli b/tests/fixtures/mockbin/nmcli new file mode 100755 index 0000000..b57e198 --- /dev/null +++ b/tests/fixtures/mockbin/nmcli @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -euo pipefail + +args="$*" + +case "${args}" in + *"-t -f active,ssid,bssid dev wifi"*) + if [[ -n "${MOCK_NMCLI_WIFI_FILE:-}" && -f "${MOCK_NMCLI_WIFI_FILE}" ]]; then + cat "${MOCK_NMCLI_WIFI_FILE}" + fi + ;; + *"-t -f DEVICE,TYPE,STATE dev status"*) + if [[ -n "${MOCK_NMCLI_DEV_FILE:-}" && -f "${MOCK_NMCLI_DEV_FILE}" ]]; then + cat "${MOCK_NMCLI_DEV_FILE}" + fi + ;; +esac + diff --git a/tests/fixtures/mockbin/sing-box b/tests/fixtures/mockbin/sing-box new file mode 100755 index 0000000..a52f536 --- /dev/null +++ b/tests/fixtures/mockbin/sing-box @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -euo pipefail + +if [[ "${1:-}" == "check" ]]; then + exit 0 +fi + +if [[ "${1:-}" == "run" ]]; then + trap 'exit 0' TERM INT + while true; do + sleep 1 + done +fi + +exit 0 diff --git a/tests/integration/test_arch_package_smoke.sh b/tests/integration/test_arch_package_smoke.sh new file mode 100755 index 0000000..084c524 --- /dev/null +++ b/tests/integration/test_arch_package_smoke.sh @@ -0,0 +1,190 @@ +#!/usr/bin/env bash + +set -euo pipefail + +PKG_PATH="${1:-}" + +if [[ -z "${PKG_PATH}" ]]; then + printf 'usage: %s \n' "$0" >&2 + exit 2 +fi +if [[ ! 
-f "${PKG_PATH}" ]]; then + printf 'package not found: %s\n' "${PKG_PATH}" >&2 + exit 1 +fi + +TMP_ROOT="$(mktemp -d)" +CONFIG_PATH="${TMP_ROOT}/etc/box/box.smoke.toml" +cleanup() { + rm -rf "${TMP_ROOT}" +} +trap cleanup EXIT + +extract_pkg() { + local pkg="${1:?missing pkg path}" + local dst="${2:?missing destination}" + + if command -v bsdtar >/dev/null 2>&1; then + bsdtar -xf "${pkg}" -C "${dst}" + return 0 + fi + + if tar --help 2>/dev/null | grep -q -- '--zstd'; then + tar --zstd -xf "${pkg}" -C "${dst}" + return 0 + fi + + if command -v unzstd >/dev/null 2>&1; then + unzstd -c "${pkg}" | tar -xf - -C "${dst}" + return 0 + fi + + printf 'no extractor available for %s (need bsdtar or tar --zstd or unzstd)\n' "${pkg}" >&2 + return 1 +} + +assert_file() { + local path="${1:?missing path}" + if [[ ! -f "${path}" ]]; then + printf 'expected file missing: %s\n' "${path}" >&2 + exit 1 + fi +} + +run_installed_boxctl() { + BOX_LIB_DIR="${TMP_ROOT}/usr/lib/box4linux/lib" \ + BOX_CONFIG_FILE="${CONFIG_PATH}" \ + BOX_RUN_DIR="${TMP_ROOT}/run/box" \ + BOX_VAR_DIR="${TMP_ROOT}/var/lib/box" \ + BOX_LOG_DIR="${TMP_ROOT}/var/log/box" \ + BOX_LOG_TO_FILE=0 \ + "${TMP_ROOT}/usr/bin/boxctl" "$@" +} + +extract_pkg "${PKG_PATH}" "${TMP_ROOT}" + +assert_file "${TMP_ROOT}/usr/bin/boxctl" +assert_file "${TMP_ROOT}/usr/lib/box4linux/lib/common.sh" +assert_file "${TMP_ROOT}/usr/lib/box4linux/lib/updater/updater.sh" +assert_file "${TMP_ROOT}/etc/box/box.toml" +assert_file "${TMP_ROOT}/usr/lib/systemd/system/box.service" +assert_file "${TMP_ROOT}/usr/lib/systemd/system/box-firewall.service" +assert_file "${TMP_ROOT}/usr/lib/systemd/system/box-policy.service" +assert_file "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.service" +assert_file "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.timer" + +cat >"${CONFIG_PATH}" <"${TMP_ROOT}/tmp/geo.dat" + +service_json="$(run_installed_boxctl service status --json)" +firewall_json="$(run_installed_boxctl firewall status --json)" 
+policy_json="$(run_installed_boxctl policy status --json)" +update_json="$(run_installed_boxctl update status --json)" +run_installed_boxctl update geo >/dev/null +dry_run_output="$(run_installed_boxctl firewall dry-run)" + +if [[ "${service_json}" != *'"status"'* ]]; then + printf 'service status json missing status field: %s\n' "${service_json}" >&2 + exit 1 +fi +if [[ "${firewall_json}" != *'"backend"'* ]]; then + printf 'firewall status json missing backend field: %s\n' "${firewall_json}" >&2 + exit 1 +fi +if [[ "${policy_json}" != *'"policy_enabled"'* ]]; then + printf 'policy status json missing policy_enabled field: %s\n' "${policy_json}" >&2 + exit 1 +fi +if [[ "${update_json}" != *'"components"'* ]]; then + printf 'update status json missing components field: %s\n' "${update_json}" >&2 + exit 1 +fi +if [[ "${dry_run_output}" != *'# dry-run backend='* ]]; then + printf 'firewall dry-run output did not contain dry-run header\n' >&2 + printf '%s\n' "${dry_run_output}" >&2 + exit 1 +fi +if [[ ! -f "${TMP_ROOT}/var/lib/box/artifacts/geo/geo.dat" ]]; then + printf 'installed updater execution did not create geo artifact\n' >&2 + exit 1 +fi +for dep in curl gzip jq unzip; do + if ! grep -Fxq "depend = ${dep}" "${TMP_ROOT}/.PKGINFO"; then + printf 'package metadata missing expected updater dependency: %s\n' "${dep}" >&2 + cat "${TMP_ROOT}/.PKGINFO" >&2 + exit 1 + fi +done + +if command -v systemd-analyze >/dev/null 2>&1; then + mkdir -p "${TMP_ROOT}/usr/lib/systemd/system" + for target in sysinit.target basic.target network-online.target multi-user.target; do + if [[ ! 
-f "${TMP_ROOT}/usr/lib/systemd/system/${target}" ]]; then + cat >"${TMP_ROOT}/usr/lib/systemd/system/${target}" </dev/null | grep -q -- '--root='; then + systemd-analyze --root="${TMP_ROOT}" verify \ + "${TMP_ROOT}/usr/lib/systemd/system/box.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-firewall.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-policy.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.timer" >/dev/null + else + systemd-analyze verify \ + "${TMP_ROOT}/usr/lib/systemd/system/box.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-firewall.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-policy.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.service" \ + "${TMP_ROOT}/usr/lib/systemd/system/box-update-all.timer" >/dev/null + fi +else + printf 'SKIP: systemd-analyze not available\n' +fi + +printf 'PASS: arch package smoke test (%s)\n' "${PKG_PATH}" diff --git a/tests/integration/test_docker_privileged.sh b/tests/integration/test_docker_privileged.sh new file mode 100755 index 0000000..89e6e78 --- /dev/null +++ b/tests/integration/test_docker_privileged.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +ENV_FILE="${BOX_DOCKER_ENV_FILE:-${ROOT_DIR}/.box-test-subscription.env}" +IMAGE="${BOX_DOCKER_IMAGE:-archlinux:base-devel}" +CONTAINER_NAME="box4linux-test-$RANDOM-$$" +PHASE2_CMD="${BOX_DOCKER_PHASE2_CMD:-./tests/integration/test_phase2.sh}" +REAL_CMD="${BOX_DOCKER_REAL_CMD:-./tests/integration/test_real_kernel.sh}" +RUN_PACKAGE_BUILD="${BOX_DOCKER_RUN_PACKAGE_BUILD:-0}" + +cleanup() { + docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true +} +trap cleanup EXIT + +require_cmd() { + local cmd="${1:?missing command}" + if ! 
command -v "${cmd}" >/dev/null 2>&1; then + printf 'missing command: %s\n' "${cmd}" >&2 + exit 1 + fi +} + +log() { + printf '[docker-test] %s\n' "$1" +} + +require_cmd docker + +docker_env_args=() +if [[ -f "${ENV_FILE}" ]]; then + docker_env_args+=(--env-file "${ENV_FILE}") + log "using env file ${ENV_FILE}" +else + log "no env file found at ${ENV_FILE}; continuing without subscription env" +fi + +container_cmd=$'set -euo pipefail\n' +container_cmd+=$'pacman -Syu --noconfirm --needed bash coreutils gawk grep iproute2 iptables nftables procps-ng sed tar >/dev/null\n' +container_cmd+=$'cd /work\n' +container_cmd+=$'printf "[container] kernel=%s\\n" "$(uname -r)"\n' +container_cmd+=$'if [[ -n "${BOX_TEST_SUBSCRIPTION_URL:-}" ]]; then printf "[container] subscription env present\\n"; else printf "[container] subscription env absent\\n"; fi\n' +container_cmd+=$'bash -lc "${BOX_DOCKER_PHASE2_CMD}"\n' +container_cmd+=$'bash -lc "${BOX_DOCKER_REAL_CMD}"\n' +container_cmd+=$'if [[ "${BOX_DOCKER_RUN_PACKAGE_BUILD:-0}" == "1" ]]; then cd packaging/arch && makepkg --noconfirm -f; fi\n' + +log "starting privileged container ${CONTAINER_NAME} from ${IMAGE}" +docker run --rm --name "${CONTAINER_NAME}" \ + --privileged \ + --network host \ + -e BOX_DOCKER_RUN_PACKAGE_BUILD="${RUN_PACKAGE_BUILD}" \ + -e BOX_DOCKER_PHASE2_CMD="${PHASE2_CMD}" \ + -e BOX_DOCKER_REAL_CMD="${REAL_CMD}" \ + "${docker_env_args[@]}" \ + -v "${ROOT_DIR}:/work" \ + -w /work \ + "${IMAGE}" \ + bash -lc "${container_cmd}" diff --git a/tests/integration/test_phase2.sh b/tests/integration/test_phase2.sh new file mode 100755 index 0000000..13401ce --- /dev/null +++ b/tests/integration/test_phase2.sh @@ -0,0 +1,472 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +BOXCTL="${ROOT_DIR}/cmd/boxctl" +TMP_DIR="$(mktemp -d)" +MOCK_DIR="${ROOT_DIR}/tests/fixtures/mockbin" + +cleanup() { + if [[ -f "${TMP_DIR}/run/box.pid" ]]; then + pid="$(tr -d '[:space:]' <"${TMP_DIR}/run/box.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +export PATH="${MOCK_DIR}:${PATH}" +export MOCK_IPTABLES_STATE="${TMP_DIR}/mock/iptables.state" +export MOCK_IP_STATE="${TMP_DIR}/mock/ip.state" +export MOCK_NFT_STATE="${TMP_DIR}/mock/nft.state" +export BOX_IPTABLES_CMD="${MOCK_DIR}/iptables" +export BOX_IP_CMD="${MOCK_DIR}/ip" +export BOX_NFT_CMD="${MOCK_DIR}/nft" +export BOX_UNSAFE_SKIP_ROOT_CHECK=1 +export BOX_CAP_TPROXY=1 +export BOX_RUN_DIR="${TMP_DIR}/run" +export BOX_VAR_DIR="${TMP_DIR}/var" +export BOX_LOG_DIR="${TMP_DIR}/log" + +mkdir -p "${TMP_DIR}/mock" "${TMP_DIR}/profiles" "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" +touch "${MOCK_IPTABLES_STATE}" "${MOCK_IP_STATE}" "${MOCK_NFT_STATE}" + +MIHOMO_SOURCE="${TMP_DIR}/profiles/mihomo.yaml" +SING_SOURCE="${TMP_DIR}/profiles/sing-box.json" +CONFIG_FILE="${TMP_DIR}/box.toml" +export BOX_CONFIG_FILE="${CONFIG_FILE}" + +cat >"${MIHOMO_SOURCE}" <<'EOF' +mode: rule +mixed-port: 7890 +rules: + - MATCH,DIRECT +EOF + +cat >"${SING_SOURCE}" <<'EOF' +{ + "log": { "level": "info" }, + "outbounds": [ { "type": "direct", "tag": "direct" } ] +} +EOF + +write_config() { + local core="${1:?missing core}" + local mode="${2:?missing mode}" + local dns_mode="${3:?missing dns mode}" + local source="${4:?missing source config path}" + local coexist_mode="${5:-preserve_tailnet}" + local route_pref="${6:-100}" + local backend="${7:-iptables}" + cat >"${CONFIG_FILE}" <&1)"; then + printf 'FAILED: boxctl %s\n%s\n' "$*" "${output}" >&2 + exit 1 + fi + printf '%s\n' "${output}" +} + +must_fail() { + local output + if output="$("$BOXCTL" "$@" 2>&1)"; then + printf 'UNEXPECTED SUCCESS: boxctl %s\n%s\n' "$*" 
"${output}" >&2 + exit 1 + fi + printf '%s\n' "${output}" +} + +assert_contains() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + if [[ "${haystack}" != *"${needle}"* ]]; then + printf 'ASSERT CONTAINS FAILED: expected [%s] in [%s]\n' "${needle}" "${haystack}" >&2 + exit 1 + fi +} + +assert_not_contains() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + if [[ "${haystack}" == *"${needle}"* ]]; then + printf 'ASSERT NOT CONTAINS FAILED: unexpected [%s] in [%s]\n' "${needle}" "${haystack}" >&2 + exit 1 + fi +} + +assert_file_exists() { + local path="${1:?missing path}" + if [[ ! -f "${path}" ]]; then + printf 'ASSERT FILE FAILED: missing %s\n' "${path}" >&2 + exit 1 + fi +} + +assert_no_duplicate_rules_iptables() { + local dup_count + dup_count="$(grep '^RULE|' "${MOCK_IPTABLES_STATE}" | sort | uniq -d | wc -l | tr -d '[:space:]')" + if [[ "${dup_count}" != "0" ]]; then + printf 'ASSERT DUPLICATE RULE FAILED: %s duplicate rules in %s\n' "${dup_count}" "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi +} + +assert_no_duplicate_rules_nft() { + local dup_count + dup_count="$(grep '^RULE|' "${MOCK_NFT_STATE}" | sort | uniq -d | wc -l | tr -d '[:space:]')" + if [[ "${dup_count}" != "0" ]]; then + printf 'ASSERT DUPLICATE NFT RULE FAILED: %s duplicate rules in %s\n' "${dup_count}" "${MOCK_NFT_STATE}" >&2 + exit 1 + fi +} + +assert_no_box_artifacts() { + if grep -q 'BOX_' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT CLEANUP FAILED: BOX chains/rules still present\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if grep -Eq 'RULE\|.*(16777216/16777216|table 2024|lookup 2024)' "${MOCK_IP_STATE}" || \ + grep -Eq 'ROUTE\|2024\|' "${MOCK_IP_STATE}"; then + printf 'ASSERT CLEANUP FAILED: box ip rule/route artifacts still present\n' >&2 + cat "${MOCK_IP_STATE}" >&2 + exit 1 + fi + if grep -Eq '(TABLE|CHAIN|RULE)\|(inet|ip)\|(box_mangle|box_nat)' "${MOCK_NFT_STATE}"; then + printf 'ASSERT CLEANUP FAILED: nft 
BOX tables/chains/rules still present\n' >&2 + cat "${MOCK_NFT_STATE}" >&2 + exit 1 + fi +} + +seed_tailscale_state() { + cat >"${MOCK_IP_STATE}" <<'EOF' +RULE|fwmark 0x80000/0xff0000 lookup 52 +ROUTE|52|local 100.100.100.100 dev lo table 52 +EOF +} + +assert_tailscale_state_preserved() { + if ! grep -Fq 'RULE|fwmark 0x80000/0xff0000 lookup 52' "${MOCK_IP_STATE}"; then + printf 'ASSERT TAILSCALE FAILED: fwmark rule missing\n' >&2 + cat "${MOCK_IP_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'ROUTE|52|local 100.100.100.100 dev lo table 52' "${MOCK_IP_STATE}"; then + printf 'ASSERT TAILSCALE FAILED: table 52 route missing\n' >&2 + cat "${MOCK_IP_STATE}" >&2 + exit 1 + fi +} + +assert_magicdns_bypass_rules() { + if ! grep -Fq 'RULE|mangle|BOX_DNS_MANGLE|-d 100.100.100.100 -p udp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT MAGICDNS FAILED: UDP resolver bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'RULE|mangle|BOX_DNS_MANGLE|-d 100.100.100.100 -p tcp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT MAGICDNS FAILED: TCP resolver bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'RULE|nat|BOX_DNS_NAT|-d 100.100.100.100 -p udp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT MAGICDNS FAILED: NAT resolver bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'RULE|nat|BOX_DNS_NAT|-d 100.100.100.100 -p tcp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT MAGICDNS FAILED: NAT TCP resolver bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi +} + +assert_tailscale_bypass_rules() { + if ! grep -Fq 'RULE|mangle|BOX_MANGLE|-i tailscale0 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT TAILSCALE FAILED: iface bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if ! 
grep -Fq 'RULE|mangle|BOX_MANGLE|-m mark --mark 0x80000/0xff0000 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT TAILSCALE FAILED: mark bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'RULE|mangle|BOX_MANGLE|-d 100.64.0.0/10 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT TAILSCALE FAILED: CIDR bypass missing\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi +} + +assert_nft_tailscale_bypass_rules() { + if ! grep -Fq 'RULE|inet|box_mangle|box_main|iifname "tailscale0" return' "${MOCK_NFT_STATE}"; then + printf 'ASSERT NFT TAILSCALE FAILED: iface bypass missing\n' >&2 + cat "${MOCK_NFT_STATE}" >&2 + exit 1 + fi + if ! grep -Fq 'RULE|inet|box_mangle|box_dns|ip daddr 100.100.100.100 udp dport 53 return' "${MOCK_NFT_STATE}"; then + printf 'ASSERT NFT MAGICDNS FAILED: resolver bypass missing\n' >&2 + cat "${MOCK_NFT_STATE}" >&2 + exit 1 + fi +} + +assert_no_tailscale_bypass_rules() { + if grep -Fq 'RULE|mangle|BOX_MANGLE|-i tailscale0 -j RETURN' "${MOCK_IPTABLES_STATE}" || \ + grep -Fq 'RULE|mangle|BOX_MANGLE|-m mark --mark 0x80000/0xff0000 -j RETURN' "${MOCK_IPTABLES_STATE}" || \ + grep -Fq 'RULE|mangle|BOX_DNS_MANGLE|-d 100.100.100.100 -p udp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}" || \ + grep -Fq 'RULE|mangle|BOX_DNS_MANGLE|-d 100.100.100.100 -p tcp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}" || \ + grep -Fq 'RULE|nat|BOX_DNS_NAT|-d 100.100.100.100 -p udp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}" || \ + grep -Fq 'RULE|nat|BOX_DNS_NAT|-d 100.100.100.100 -p tcp --dport 53 -j RETURN' "${MOCK_IPTABLES_STATE}"; then + printf 'ASSERT STRICT BOX FAILED: tailscale bypass rule unexpectedly present\n' >&2 + cat "${MOCK_IPTABLES_STATE}" >&2 + exit 1 + fi +} + +assert_nft_no_tailscale_bypass_rules() { + if grep -Fq 'RULE|inet|box_mangle|box_main|iifname "tailscale0" return' "${MOCK_NFT_STATE}" || \ + grep -Fq 'RULE|inet|box_mangle|box_dns|ip daddr 100.100.100.100 udp dport 53 return' 
"${MOCK_NFT_STATE}"; then + printf 'ASSERT NFT STRICT BOX FAILED: tailscale bypass rule unexpectedly present\n' >&2 + cat "${MOCK_NFT_STATE}" >&2 + exit 1 + fi +} + +assert_box_route_pref() { + local expected_pref="${1:?missing expected pref}" + local matching total + matching="$(grep -Ec "^RULE\\|fwmark 16777216/16777216 (lookup|table) 2024 pref ${expected_pref}$" "${MOCK_IP_STATE}" || true)" + total="$(grep -Ec "^RULE\\|fwmark 16777216/16777216 (lookup|table) 2024 pref " "${MOCK_IP_STATE}" || true)" + if [[ "${matching}" != "1" || "${total}" != "1" ]]; then + printf 'ASSERT ROUTE PREF FAILED: expected exactly one pref=%s rule; found matching=%s total=%s\n' \ + "${expected_pref}" "${matching}" "${total}" >&2 + cat "${MOCK_IP_STATE}" >&2 + exit 1 + fi +} + +run_firewall_mode_case() { + local mode="${1:?missing mode}" + local dns_mode="${2:?missing dns mode}" + local coexist_mode="${3:-preserve_tailnet}" + local route_pref="${4:-100}" + local backend="${5:-iptables}" + local expect_box_route="false" + write_config "mihomo" "${mode}" "${dns_mode}" "${MIHOMO_SOURCE}" "${coexist_mode}" "${route_pref}" "${backend}" + seed_tailscale_state + + case "${mode}" in + tproxy|mixed|enhance) expect_box_route="true" ;; + esac + if [[ "${dns_mode}" == "tproxy" ]]; then + expect_box_route="true" + fi + + must_run firewall enable >/dev/null + must_run firewall renew >/dev/null + must_run firewall renew >/dev/null + if [[ "${backend}" == "iptables" ]]; then + assert_no_duplicate_rules_iptables + else + assert_no_duplicate_rules_nft + fi + if [[ "${coexist_mode}" == "preserve_tailnet" && "${backend}" == "iptables" ]]; then + assert_tailscale_bypass_rules + assert_magicdns_bypass_rules + elif [[ "${coexist_mode}" == "preserve_tailnet" && "${backend}" == "nftables" ]]; then + assert_nft_tailscale_bypass_rules + elif [[ "${backend}" == "iptables" ]]; then + assert_no_tailscale_bypass_rules + else + assert_nft_no_tailscale_bypass_rules + fi + assert_tailscale_state_preserved + + 
status_json="$(must_run firewall status --json)" + assert_contains "${status_json}" "\"status\":\"enabled\"" + assert_contains "${status_json}" "\"mode\":\"${mode}\"" + assert_contains "${status_json}" "\"backend\":\"${backend}\"" + assert_contains "${status_json}" "\"backend_selected\":\"${backend}\"" + assert_contains "${status_json}" "\"backend_capabilities\":" + assert_contains "${status_json}" "\"backend_available\":true" + assert_contains "${status_json}" "\"dns_hijack_mode\":\"${dns_mode}\"" + assert_contains "${status_json}" "\"dns_coexist_mode\":\"${coexist_mode}\"" + assert_contains "${status_json}" "\"dns_coexist_mode_active\":\"${coexist_mode}\"" + assert_contains "${status_json}" "\"cap_ipv4\":true" + assert_contains "${status_json}" "\"cap_ipv6\":false" + assert_contains "${status_json}" "\"dry_run_supported\":true" + assert_contains "${status_json}" "\"last_error\":\"\"" + assert_not_contains "${status_json}" "\"error\":" + assert_contains "${status_json}" "\"tailscale_iface\":\"tailscale0\"" + assert_contains "${status_json}" "\"tailscale_mark_rule\":true" + assert_contains "${status_json}" "\"tailscale_table_present\":true" + if [[ "${coexist_mode}" == "preserve_tailnet" ]]; then + assert_contains "${status_json}" "\"tailscale_bypass_applied\":true" + else + assert_contains "${status_json}" "\"tailscale_bypass_applied\":false" + fi + if [[ "${expect_box_route}" == "true" ]]; then + assert_box_route_pref "${route_pref}" + fi + + must_run firewall disable >/dev/null + assert_tailscale_state_preserved + assert_no_box_artifacts +} + +printf '[1/11] firewall mode/dns apply+renew+disable idempotency (preserve_tailnet)\n' +run_firewall_mode_case "tun" "disable" "preserve_tailnet" "100" "iptables" +run_firewall_mode_case "tproxy" "tproxy" "preserve_tailnet" "100" "iptables" +run_firewall_mode_case "redirect" "redirect" "preserve_tailnet" "100" "iptables" +run_firewall_mode_case "mixed" "tproxy" "preserve_tailnet" "100" "iptables" +run_firewall_mode_case 
"enhance" "redirect" "preserve_tailnet" "100" "iptables" + +printf '[2/11] nftables backend mode/dns apply+renew+disable idempotency (preserve_tailnet)\n' +run_firewall_mode_case "tun" "disable" "preserve_tailnet" "100" "nftables" +run_firewall_mode_case "tproxy" "tproxy" "preserve_tailnet" "100" "nftables" +run_firewall_mode_case "redirect" "redirect" "preserve_tailnet" "100" "nftables" +run_firewall_mode_case "mixed" "tproxy" "preserve_tailnet" "100" "nftables" +run_firewall_mode_case "enhance" "redirect" "preserve_tailnet" "100" "nftables" + +printf '[3/11] coexist mode strict_box rule differences\n' +run_firewall_mode_case "tproxy" "tproxy" "strict_box" "100" "iptables" +run_firewall_mode_case "tproxy" "tproxy" "strict_box" "100" "nftables" + +printf '[4/11] route_pref convergence across renew\n' +write_config "mihomo" "tproxy" "tproxy" "${MIHOMO_SOURCE}" "preserve_tailnet" "100" "iptables" +seed_tailscale_state +must_run firewall enable >/dev/null +assert_box_route_pref "100" +write_config "mihomo" "tproxy" "tproxy" "${MIHOMO_SOURCE}" "preserve_tailnet" "333" "iptables" +must_run firewall renew >/dev/null +assert_box_route_pref "333" +must_run firewall disable >/dev/null +assert_tailscale_state_preserved +assert_no_box_artifacts + +printf '[5/11] firewall dry-run surfaces intended operations\n' +write_config "mihomo" "tproxy" "tproxy" "${MIHOMO_SOURCE}" "preserve_tailnet" "100" "nftables" +dryrun_output="$(must_run firewall dry-run)" +assert_contains "${dryrun_output}" "dry-run" +assert_contains "${dryrun_output}" "backend=nftables" + +printf '[6/11] trace mode logs external commands with action context\n' +BOX_TRACE_COMMANDS=1 +export BOX_TRACE_COMMANDS +trace_output="$(must_run firewall status --json)" +unset BOX_TRACE_COMMANDS +assert_contains "${trace_output}" "event_id=TRACE_CMD" +assert_contains "${trace_output}" "action=status" +assert_contains "${trace_output}" "cmd=" + +printf '[7/11] explicit BOX_CONFIG_FILE missing fails fast\n' 
+missing_cfg="${TMP_DIR}/missing-explicit-box.toml" +BOX_CONFIG_FILE="${missing_cfg}" +export BOX_CONFIG_FILE +fail_output="$(must_fail firewall status --json)" +assert_contains "${fail_output}" "explicit BOX_CONFIG_FILE does not exist" +BOX_CONFIG_FILE="${CONFIG_FILE}" +export BOX_CONFIG_FILE + +printf '[8/11] status json conditional error field\n' +write_config "mihomo" "tun" "disable" "${MIHOMO_SOURCE}" "preserve_tailnet" "100" "iptables" +saved_iptables_cmd="${BOX_IPTABLES_CMD}" +BOX_IPTABLES_CMD="${TMP_DIR}/missing-iptables" +export BOX_IPTABLES_CMD +error_status_json="$(must_run firewall status --json)" +assert_contains "${error_status_json}" "\"last_error\":\"iptables inspection unavailable (need root/CAP_NET_ADMIN or kernel support)\"" +assert_contains "${error_status_json}" "\"error\":\"iptables inspection unavailable (need root/CAP_NET_ADMIN or kernel support)\"" +BOX_IPTABLES_CMD="${saved_iptables_cmd}" +export BOX_IPTABLES_CMD + +printf '[9/11] service status side-effect free\n' +write_config "mihomo" "tun" "disable" "${MIHOMO_SOURCE}" "preserve_tailnet" "100" "iptables" +rm -rf "${BOX_RUN_DIR}/rendered" +must_run service status --json >/dev/null +if [[ -d "${BOX_RUN_DIR}/rendered" ]]; then + printf 'ASSERT STATUS SIDE-EFFECT FAILED: rendered directory created by service status\n' >&2 + exit 1 +fi + +printf '[10/11] service lifecycle + mihomo overlay\n' +write_config "mihomo" "mixed" "redirect" "${MIHOMO_SOURCE}" "preserve_tailnet" "100" "iptables" +seed_tailscale_state +mihomo_checksum_before="$(sha256sum "${MIHOMO_SOURCE}" | awk '{print $1}')" +must_run service start >/dev/null +assert_tailscale_state_preserved +service_json="$(must_run service status --json)" +assert_contains "${service_json}" "\"status\":\"healthy\"" +assert_contains "${service_json}" "\"core\":\"mihomo\"" +assert_contains "${service_json}" "/rendered/mihomo/config.yaml" +assert_file_exists "${BOX_RUN_DIR}/rendered/mihomo/config.yaml" +mihomo_checksum_after="$(sha256sum 
"${MIHOMO_SOURCE}" | awk '{print $1}')" +if [[ "${mihomo_checksum_before}" != "${mihomo_checksum_after}" ]]; then + printf 'ASSERT SOURCE MUTATION FAILED: mihomo source config changed\n' >&2 + exit 1 +fi +must_run service restart >/dev/null +service_json="$(must_run service status --json)" +assert_contains "${service_json}" "\"status\":\"healthy\"" +must_run service reload >/dev/null +service_json="$(must_run service status --json)" +assert_contains "${service_json}" "\"status\":\"healthy\"" +must_run service stop >/dev/null +assert_tailscale_state_preserved +service_json="$(must_run service status --json)" +assert_contains "${service_json}" "\"status\":\"stopped\"" + +printf '[11/11] service lifecycle + sing-box overlay\n' +write_config "sing-box" "tproxy" "tproxy" "${SING_SOURCE}" "preserve_tailnet" "100" "iptables" +seed_tailscale_state +sing_checksum_before="$(sha256sum "${SING_SOURCE}" | awk '{print $1}')" +must_run service start >/dev/null +assert_tailscale_state_preserved +service_json="$(must_run service status --json)" +assert_contains "${service_json}" "\"core\":\"sing-box\"" +assert_contains "${service_json}" "/rendered/sing-box/config.json" +assert_file_exists "${BOX_RUN_DIR}/rendered/sing-box/config.json" +sing_checksum_after="$(sha256sum "${SING_SOURCE}" | awk '{print $1}')" +if [[ "${sing_checksum_before}" != "${sing_checksum_after}" ]]; then + printf 'ASSERT SOURCE MUTATION FAILED: sing-box source config changed\n' >&2 + exit 1 +fi +must_run service stop >/dev/null +assert_tailscale_state_preserved +assert_no_box_artifacts + +printf 'PASS: integration phase2 checks completed\n' diff --git a/tests/integration/test_policy.sh b/tests/integration/test_policy.sh new file mode 100755 index 0000000..277eb4d --- /dev/null +++ b/tests/integration/test_policy.sh @@ -0,0 +1,340 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +BOXCTL="${ROOT_DIR}/cmd/boxctl" +TMP_DIR="$(mktemp -d)" +MOCK_DIR="${ROOT_DIR}/tests/fixtures/mockbin" + +cleanup() { + local pid + if [[ -f "${BOX_RUN_DIR}/policy.pid" ]]; then + pid="$(tr -d '[:space:]' <"${BOX_RUN_DIR}/policy.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + if [[ -f "${BOX_RUN_DIR}/box.pid" ]]; then + pid="$(tr -d '[:space:]' <"${BOX_RUN_DIR}/box.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +export PATH="${MOCK_DIR}:${PATH}" +export MOCK_IPTABLES_STATE="${TMP_DIR}/mock/iptables.state" +export MOCK_IP_STATE="${TMP_DIR}/mock/ip.state" +export MOCK_NFT_STATE="${TMP_DIR}/mock/nft.state" +export MOCK_IP_LINKS_FILE="${TMP_DIR}/mock/ip.links" +export MOCK_IP_ADDRS_FILE="${TMP_DIR}/mock/ip.addrs" +export MOCK_IP_MONITOR_FILE="${TMP_DIR}/mock/ip.monitor" +export MOCK_NMCLI_WIFI_FILE="${TMP_DIR}/mock/nmcli.wifi" +export MOCK_NMCLI_DEV_FILE="${TMP_DIR}/mock/nmcli.dev" +export BOX_IPTABLES_CMD="${MOCK_DIR}/iptables" +export BOX_IP_CMD="${MOCK_DIR}/ip" +export BOX_NFT_CMD="${MOCK_DIR}/nft" +export BOX_NMCLI_CMD="${MOCK_DIR}/nmcli" +export BOX_IW_CMD="${MOCK_DIR}/iw" +export BOX_UNSAFE_SKIP_ROOT_CHECK=1 +export BOX_CAP_TPROXY=1 +export BOX_RUN_DIR="${TMP_DIR}/run" +export BOX_VAR_DIR="${TMP_DIR}/var" +export BOX_LOG_DIR="${TMP_DIR}/log" +export BOX_CONFIG_FILE="${TMP_DIR}/box.toml" + +PROFILE_DIR="${TMP_DIR}/profiles" +mkdir -p "${TMP_DIR}/mock" "${PROFILE_DIR}" "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" +touch "${MOCK_IPTABLES_STATE}" "${MOCK_IP_STATE}" "${MOCK_NFT_STATE}" "${MOCK_IP_MONITOR_FILE}" + +cat >"${PROFILE_DIR}/mihomo.yaml" <<'EOF_MIHOMO' +mode: rule +mixed-port: 7890 +rules: + - MATCH,DIRECT +EOF_MIHOMO + +must_run() { + local output + if ! 
output="$("${BOXCTL}" "$@" 2>&1)"; then + printf 'FAILED: boxctl %s\n%s\n' "$*" "${output}" >&2 + exit 1 + fi + printf '%s\n' "${output}" +} + +must_fail() { + local output + if output="$("${BOXCTL}" "$@" 2>&1)"; then + printf 'UNEXPECTED SUCCESS: boxctl %s\n%s\n' "$*" "${output}" >&2 + exit 1 + fi + printf '%s\n' "${output}" +} + +assert_contains() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + if [[ "${haystack}" != *"${needle}"* ]]; then + printf 'ASSERT CONTAINS FAILED: expected [%s] in [%s]\n' "${needle}" "${haystack}" >&2 + exit 1 + fi +} + +assert_not_contains() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + if [[ "${haystack}" == *"${needle}"* ]]; then + printf 'ASSERT NOT CONTAINS FAILED: unexpected [%s] in [%s]\n' "${needle}" "${haystack}" >&2 + exit 1 + fi +} + +force_reset_runtime() { + local pid + if [[ -f "${BOX_RUN_DIR}/policy.pid" ]]; then + pid="$(tr -d '[:space:]' <"${BOX_RUN_DIR}/policy.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + if [[ -f "${BOX_RUN_DIR}/box.pid" ]]; then + pid="$(tr -d '[:space:]' <"${BOX_RUN_DIR}/box.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + sleep 1 + rm -rf "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" + mkdir -p "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" + : >"${MOCK_IP_MONITOR_FILE}" +} + +wait_for_contains() { + local command=("${@:1:$#-1}") + local needle="${!#}" + local attempt output + for attempt in $(seq 1 50); do + output="$("${command[@]}" 2>&1 || true)" + if [[ "${output}" == *"${needle}"* ]]; then + printf '%s\n' "${output}" + return 0 + fi + sleep 0.2 + done + printf 'WAIT FAILED: missing [%s]\nLast output:\n%s\n' "${needle}" "${output:-}" >&2 + exit 1 +} + +service_status_json() { + must_run service status --json +} + +policy_status_json() { + must_run policy status --json +} + +write_policy_config() { + 
local policy_block="${1:-enabled = true +proxy_mode = \"core\" +debounce_seconds = 1 +use_module_on_wifi_disconnect = false +disable_marker = \"${BOX_RUN_DIR}/disable\" +allow_ifaces = [] +ignore_ifaces = [] +allow_ssids = [] +ignore_ssids = [] +allow_bssids = [] +ignore_bssids = []}" + cat >"${BOX_CONFIG_FILE}" <"${MOCK_IP_LINKS_FILE}" <<'EOF_LINK' +2: eth0: mtu 1500 state UP mode DEFAULT group default qlen 1000 +EOF_LINK + cat >"${MOCK_IP_ADDRS_FILE}" <<'EOF_ADDR' +2: eth0 inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0 +EOF_ADDR + : >"${MOCK_NMCLI_WIFI_FILE}" +} + +write_active_wifi() { + cat >"${MOCK_IP_LINKS_FILE}" <<'EOF_LINK' +3: wlan0: mtu 1500 state UP mode DEFAULT group default qlen 1000 +EOF_LINK + cat >"${MOCK_IP_ADDRS_FILE}" <<'EOF_ADDR' +3: wlan0 inet 10.0.0.20/24 brd 10.0.0.255 scope global wlan0 +EOF_ADDR + cat >"${MOCK_NMCLI_WIFI_FILE}" <<'EOF_WIFI' +yes:TrustedWiFi:AA:BB:CC:DD:EE:FF +EOF_WIFI +} + +write_wifi_without_identity() { + cat >"${MOCK_IP_LINKS_FILE}" <<'EOF_LINK' +3: wlan0: mtu 1500 state UP mode DEFAULT group default qlen 1000 +EOF_LINK + cat >"${MOCK_IP_ADDRS_FILE}" <<'EOF_ADDR' +3: wlan0 inet 10.0.0.20/24 brd 10.0.0.255 scope global wlan0 +EOF_ADDR + : >"${MOCK_NMCLI_WIFI_FILE}" +} + +write_no_network() { + : >"${MOCK_IP_LINKS_FILE}" + : >"${MOCK_IP_ADDRS_FILE}" + : >"${MOCK_NMCLI_WIFI_FILE}" +} + +printf '[1/8] policy evaluate starts service in core mode with active network\n' +write_active_ethernet +write_policy_config "" +must_run policy evaluate >/dev/null +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"healthy"' +policy_json="$(policy_status_json)" +assert_contains "${policy_json}" '"desired_state":"enabled"' +assert_contains "${policy_json}" '"applied_state":"started"' + +printf '[2/8] whitelist policy disables service when there is no match\n' +must_run service stop >/dev/null +wait_for_contains "${BOXCTL}" service status --json '"status":"stopped"' >/dev/null +write_active_ethernet 
+write_policy_config 'enabled = true +proxy_mode = "whitelist" +debounce_seconds = 1 +use_module_on_wifi_disconnect = false +disable_marker = "'"${BOX_RUN_DIR}"'/disable" +allow_ifaces = ["wlan+"]' +must_run policy evaluate >/dev/null +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"stopped"' +policy_json="$(policy_status_json)" +assert_contains "${policy_json}" '"desired_state":"disabled"' +assert_contains "${policy_json}" '"last_reason":"no whitelist match"' + +printf '[3/8] whitelist policy enables service from SSID match\n' +write_active_wifi +write_policy_config 'enabled = true +proxy_mode = "whitelist" +debounce_seconds = 1 +use_module_on_wifi_disconnect = false +disable_marker = "'"${BOX_RUN_DIR}"'/disable" +allow_ifaces = [] +allow_ssids = ["TrustedWiFi"]' +must_run policy evaluate >/dev/null +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"healthy"' +policy_json="$(policy_status_json)" +assert_contains "${policy_json}" '"ssid":"TrustedWiFi"' +assert_contains "${policy_json}" '"desired_state":"enabled"' + +printf '[4/8] disable marker forces disabled state\n' +touch "${BOX_RUN_DIR}/disable" +must_run policy evaluate >/dev/null +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"stopped"' +policy_json="$(policy_status_json)" +assert_contains "${policy_json}" '"disable_marker_present":true' +assert_contains "${policy_json}" '"desired_state":"disabled"' +rm -f "${BOX_RUN_DIR}/disable" + +printf '[5/8] wifi identity fallback uses disconnect policy on unknown wlan context\n' +force_reset_runtime +write_wifi_without_identity +write_policy_config 'enabled = true +proxy_mode = "whitelist" +debounce_seconds = 1 +use_module_on_wifi_disconnect = true +disable_marker = "'"${BOX_RUN_DIR}"'/disable" +allow_ifaces = [] +allow_ssids = ["TrustedWiFi"]' +must_run policy evaluate >/dev/null +policy_json="$(policy_status_json)" +assert_contains "${policy_json}" 
'"desired_state":"enabled"' +assert_contains "${policy_json}" '"last_reason":"wifi identity unavailable; using disconnect fallback"' + +printf '[6/8] policy watcher enable/disable manages lifecycle and status\n' +write_active_ethernet +write_policy_config "" +must_run policy enable >/dev/null +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"watcher_running":true')" +assert_contains "${policy_json}" '"policy_enabled":true' +must_run policy disable >/dev/null +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"watcher_running":false')" +assert_contains "${policy_json}" '"watcher_running":false' + +printf '[7/8] disable marker changes trigger watcher reevaluation\n' +force_reset_runtime +write_active_ethernet +write_policy_config "" +must_run service start >/dev/null +must_run policy enable >/dev/null +touch "${BOX_RUN_DIR}/disable" +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"last_reason":"disable marker present"')" +assert_contains "${policy_json}" '"desired_state":"disabled"' +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"stopped"' +rm -f "${BOX_RUN_DIR}/disable" +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"desired_state":"enabled"')" +status_json="$(wait_for_contains "${BOXCTL}" service status --json '"status":"healthy"')" +must_run policy disable >/dev/null + +printf '[8/8] address event triggers firewall refresh and blacklist transition stops service\n' +force_reset_runtime +write_active_ethernet +write_policy_config 'enabled = true +proxy_mode = "blacklist" +debounce_seconds = 1 +use_module_on_wifi_disconnect = false +disable_marker = "'"${BOX_RUN_DIR}"'/disable" +allow_ifaces = [] +ignore_ifaces = ["wlan+"]' +must_run service start >/dev/null +status_json="$(wait_for_contains "${BOXCTL}" service status --json '"status":"healthy"')" +must_run policy enable >/dev/null +printf 'Deleted 2: eth0 inet 192.168.1.10/24 scope global eth0\n' 
>>"${MOCK_IP_MONITOR_FILE}" +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"last_refresh_ts":"20')" +assert_contains "${policy_json}" '"watcher_running":true' +write_active_wifi +printf '3: wlan0: \n' >>"${MOCK_IP_MONITOR_FILE}" +policy_json="$(wait_for_contains "${BOXCTL}" policy status --json '"last_reason":"blacklist match"')" +assert_contains "${policy_json}" '"desired_state":"disabled"' +status_json="$(service_status_json)" +assert_contains "${status_json}" '"status":"stopped"' +must_run policy disable >/dev/null + +printf 'PASS: policy integration checks completed\n' diff --git a/tests/integration/test_real_kernel.sh b/tests/integration/test_real_kernel.sh new file mode 100755 index 0000000..8c60068 --- /dev/null +++ b/tests/integration/test_real_kernel.sh @@ -0,0 +1,417 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +BOXCTL="${ROOT_DIR}/cmd/boxctl" +TMP_DIR="$(mktemp -d)" +NS="box-rt-$RANDOM-$$" + +cleanup() { + ip netns del "${NS}" >/dev/null 2>&1 || true + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +skip() { + printf 'SKIP: %s\n' "$1" + exit 0 +} + +fail() { + printf 'FAIL: %s\n' "$1" >&2 + exit 1 +} + +require_cmd() { + local cmd="${1:?missing cmd}" + command -v "${cmd}" >/dev/null 2>&1 || skip "missing command: ${cmd}" +} + +assert_contains_text() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + local context="${3:-}" + if [[ "${haystack}" != *"${needle}"* ]]; then + fail "expected text not found: ${needle} (${context})" + fi +} + +assert_not_contains_text() { + local haystack="${1:?missing haystack}" + local needle="${2:?missing needle}" + local context="${3:-}" + if [[ "${haystack}" == *"${needle}"* ]]; then + fail "unexpected text found: ${needle} (${context})" + fi +} + +if [[ "${EUID}" -ne 0 ]]; then + skip "requires root/CAP_NET_ADMIN" +fi + +require_cmd ip + +if ! 
ip netns add "${NS}" >/dev/null 2>&1; then + skip "cannot create netns (CAP_SYS_ADMIN likely unavailable)" +fi +if ! ip netns exec "${NS}" ip link set lo up >/dev/null 2>&1; then + skip "cannot configure netns loopback" +fi + +BOX_RUN_DIR="${TMP_DIR}/run" +BOX_VAR_DIR="${TMP_DIR}/var" +BOX_LOG_DIR="${TMP_DIR}/log" +CONFIG_FILE="${TMP_DIR}/box.toml" +mkdir -p "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" + +write_config() { + local backend="${1:?missing backend}" + local coexist_mode="${2:-preserve_tailnet}" + local route_pref="${3:-180}" + + cat >"${CONFIG_FILE}" </dev/null 2>&1 || true + ip -n "${NS}" rule add fwmark 0x80000/0xff0000 table 52 pref 52 >/dev/null 2>&1 || true + ip -n "${NS}" route del local 100.100.100.100 dev lo table 52 >/dev/null 2>&1 || true + ip -n "${NS}" route add local 100.100.100.100 dev lo table 52 >/dev/null 2>&1 || true +} + +assert_tailscale_invariants() { + local rules routes + rules="$(ip -n "${NS}" rule list 2>/dev/null || true)" + routes="$(ip -n "${NS}" route show table 52 2>/dev/null || true)" + + if [[ ! "${rules}" =~ fwmark[[:space:]]+0x80000/0xff0000[[:space:]]+(lookup|table)[[:space:]]+52 ]]; then + fail "tailscale fwmark/table-52 rule missing" + fi + assert_contains_text "${routes}" "100.100.100.100" "tailscale table 52 route" +} + +normalize_fwmark() { + local raw="${1:-}" + local value mask + if [[ "${raw}" == */* ]]; then + value="${raw%%/*}" + mask="${raw##*/}" + else + value="${raw}" + mask="0xffffffff" + fi + if ! [[ "${value}" =~ ^(0[xX][0-9a-fA-F]+|[0-9]+)$ ]]; then + return 1 + fi + if ! 
[[ "${mask}" =~ ^(0[xX][0-9a-fA-F]+|[0-9]+)$ ]]; then + return 1 + fi + printf '%u/%u\n' "$((value))" "$((mask))" +} + +extract_rule_token_after() { + local line="${1:-}" + local token="${2:?missing token}" + local i + read -r -a parts <<<"${line}" + for ((i = 0; i < ${#parts[@]}; i++)); do + if [[ "${parts[$i]}" == "${token}" && $((i + 1)) -lt ${#parts[@]} ]]; then + printf '%s\n' "${parts[$((i + 1))]}" + return 0 + fi + done + return 1 +} + +extract_rule_table() { + local line="${1:-}" + local i + read -r -a parts <<<"${line}" + for ((i = 0; i < ${#parts[@]}; i++)); do + if [[ ( "${parts[$i]}" == "lookup" || "${parts[$i]}" == "table" ) && $((i + 1)) -lt ${#parts[@]} ]]; then + printf '%s\n' "${parts[$((i + 1))]}" + return 0 + fi + done + return 1 +} + +extract_rule_pref() { + local line="${1:-}" + if [[ "${line}" =~ ^[[:space:]]*([0-9]+): ]]; then + printf '%s\n' "${BASH_REMATCH[1]}" + return 0 + fi + if [[ "${line}" =~ (^|[[:space:]])pref[[:space:]]+([0-9]+)($|[[:space:]]) ]]; then + printf '%s\n' "${BASH_REMATCH[2]}" + return 0 + fi + return 1 +} + +assert_box_route_pref_singleton() { + local expected_pref="${1:?missing pref}" + local rules count line rule_mark rule_table rule_pref expected_mark_norm rule_mark_norm + rules="$(ip -n "${NS}" rule list 2>/dev/null || true)" + + expected_mark_norm="$(normalize_fwmark "16777216/16777216" 2>/dev/null || true)" + count=0 + while IFS= read -r line; do + [[ -n "${line}" ]] || continue + rule_mark="$(extract_rule_token_after "${line}" "fwmark" 2>/dev/null || true)" + rule_table="$(extract_rule_table "${line}" 2>/dev/null || true)" + rule_pref="$(extract_rule_pref "${line}" 2>/dev/null || true)" + [[ -n "${rule_mark}" && "${rule_table}" == "2024" && "${rule_pref}" == "${expected_pref}" ]] || continue + rule_mark_norm="$(normalize_fwmark "${rule_mark}" 2>/dev/null || true)" + [[ -n "${rule_mark_norm}" && "${rule_mark_norm}" == "${expected_mark_norm}" ]] || continue + count=$((count + 1)) + done <<<"${rules}" + + if [[ 
"${count}" != "1" ]]; then + fail "expected one BOX fwmark policy rule with pref=${expected_pref}; got ${count}" + fi +} + +iptables_box_object_count() { + local out_mangle out_nat + out_mangle="$(ip netns exec "${NS}" iptables -t mangle -S 2>/dev/null || true)" + out_nat="$(ip netns exec "${NS}" iptables -t nat -S 2>/dev/null || true)" + printf '%s\n%s\n' "${out_mangle}" "${out_nat}" | grep -c 'BOX_' || true +} + +nft_box_object_count() { + local tables inet_table ip_table + tables="$(ip netns exec "${NS}" nft list tables 2>/dev/null || true)" + inet_table="$(ip netns exec "${NS}" nft list table inet box_mangle 2>/dev/null || true)" + ip_table="$(ip netns exec "${NS}" nft list table ip box_nat 2>/dev/null || true)" + { + printf '%s\n' "${tables}" + printf '%s\n' "${inet_table}" + printf '%s\n' "${ip_table}" + } | grep -Ec '(box_mangle|box_nat|box_main|box_dns)' || true +} + +assert_no_box_artifacts() { + local backend="${1:?missing backend}" + local rules routes table + rules="$(ip -n "${NS}" rule list 2>/dev/null || true)" + routes="$(ip -n "${NS}" route show table 2024 2>/dev/null || true)" + + if printf '%s\n' "${rules}" | grep -Eq 'fwmark[[:space:]]+16777216/16777216[[:space:]]+(lookup|table)[[:space:]]+2024'; then + fail "BOX fwmark policy rule leaked" + fi + if printf '%s\n' "${routes}" | grep -Fq 'local default dev lo'; then + fail "BOX route table entry leaked" + fi + + case "${backend}" in + iptables) + table="$(ip netns exec "${NS}" iptables -t mangle -S 2>/dev/null || true)" + if printf '%s\n' "${table}" | grep -Fq 'BOX_'; then + fail "iptables mangle BOX artifacts leaked" + fi + table="$(ip netns exec "${NS}" iptables -t nat -S 2>/dev/null || true)" + if printf '%s\n' "${table}" | grep -Fq 'BOX_'; then + fail "iptables nat BOX artifacts leaked" + fi + ;; + nftables) + table="$(ip netns exec "${NS}" nft list tables 2>/dev/null || true)" + if printf '%s\n' "${table}" | grep -Eq 'table (inet box_mangle|ip box_nat)'; then + fail "nftables BOX tables 
leaked" + fi + ;; + esac +} + +assert_preserve_tailnet_rules() { + local backend="${1:?missing backend}" + local out + case "${backend}" in + iptables) + out="$(ip netns exec "${NS}" iptables -t mangle -S BOX_MANGLE 2>/dev/null || true)" + assert_contains_text "${out}" "-i tailscale0 -j RETURN" "iptables tailscale bypass" + out="$(ip netns exec "${NS}" iptables -t mangle -S BOX_DNS_MANGLE 2>/dev/null || true)" + assert_contains_text "${out}" "100.100.100.100" "iptables magicdns bypass" + ;; + nftables) + out="$(ip netns exec "${NS}" nft list chain inet box_mangle box_main 2>/dev/null || true)" + assert_contains_text "${out}" "iifname \"tailscale0\" return" "nft tailscale bypass" + out="$(ip netns exec "${NS}" nft list chain inet box_mangle box_dns 2>/dev/null || true)" + assert_contains_text "${out}" "100.100.100.100" "nft magicdns bypass" + ;; + esac +} + +assert_strict_box_rules() { + local backend="${1:?missing backend}" + local out + case "${backend}" in + iptables) + out="$(ip netns exec "${NS}" iptables -t mangle -S BOX_MANGLE 2>/dev/null || true)" + assert_not_contains_text "${out}" "-i tailscale0 -j RETURN" "iptables strict_box tailscale bypass" + out="$(ip netns exec "${NS}" iptables -t mangle -S BOX_DNS_MANGLE 2>/dev/null || true)" + assert_not_contains_text "${out}" "100.100.100.100" "iptables strict_box magicdns bypass" + ;; + nftables) + out="$(ip netns exec "${NS}" nft list chain inet box_mangle box_main 2>/dev/null || true)" + assert_not_contains_text "${out}" "iifname \"tailscale0\" return" "nft strict_box tailscale bypass" + out="$(ip netns exec "${NS}" nft list chain inet box_mangle box_dns 2>/dev/null || true)" + assert_not_contains_text "${out}" "100.100.100.100" "nft strict_box magicdns bypass" + ;; + esac +} + +backend_usable() { + local backend="${1:?missing backend}" + case "${backend}" in + iptables) + command -v iptables >/dev/null 2>&1 || return 1 + ip netns exec "${NS}" iptables -t mangle -S >/dev/null 2>&1 + ;; + nftables) + command -v 
nft >/dev/null 2>&1 || return 1 + ip netns exec "${NS}" nft list tables >/dev/null 2>&1 || return 1 + ip netns exec "${NS}" sh -c ' + set -e + nft delete table inet box_probe >/dev/null 2>&1 || true + nft delete table ip box_probe_nat >/dev/null 2>&1 || true + nft add table inet box_probe + nft add chain inet box_probe prerouting "{ type filter hook prerouting priority mangle; policy accept; }" + nft add chain inet box_probe output "{ type route hook output priority mangle; policy accept; }" + nft add table ip box_probe_nat + nft add chain ip box_probe_nat prerouting "{ type nat hook prerouting priority dstnat; policy accept; }" + nft add chain ip box_probe_nat output "{ type nat hook output priority -100; policy accept; }" + nft delete table inet box_probe + nft delete table ip box_probe_nat + ' >/dev/null 2>&1 + ;; + *) + return 1 + ;; + esac +} + +run_backend_case() { + local backend="${1:?missing backend}" + local count_before count_after status_json preflight_output + + if ! backend_usable "${backend}"; then + printf 'SKIP: backend=%s unavailable in this kernel/runtime\n' "${backend}" + return 0 + fi + + write_config "${backend}" "preserve_tailnet" "180" + seed_tailscale_state + if ! 
preflight_output="$(run_boxctl firewall enable 2>&1)"; then + printf 'SKIP: backend=%s runtime apply unavailable: %s\n' "${backend}" "${preflight_output##*$'\n'}" + run_boxctl firewall disable >/dev/null 2>&1 || true + return 0 + fi + run_boxctl firewall disable >/dev/null 2>&1 || true + + printf '[backend=%s] preserve_tailnet lifecycle\n' "${backend}" + write_config "${backend}" "preserve_tailnet" "180" + seed_tailscale_state + + run_boxctl firewall disable >/dev/null 2>&1 || true + run_boxctl firewall enable >/dev/null + status_json="$(run_boxctl firewall status --json)" + assert_contains_text "${status_json}" "\"backend\":\"${backend}\"" "status backend" + assert_contains_text "${status_json}" "\"backend_available\":true" "status backend availability" + assert_contains_text "${status_json}" "\"dns_coexist_mode_active\":\"preserve_tailnet\"" "status coexist active" + + if [[ "${backend}" == "iptables" ]]; then + count_before="$(iptables_box_object_count)" + else + count_before="$(nft_box_object_count)" + fi + + run_boxctl firewall renew >/dev/null + run_boxctl firewall renew >/dev/null + + if [[ "${backend}" == "iptables" ]]; then + count_after="$(iptables_box_object_count)" + else + count_after="$(nft_box_object_count)" + fi + if [[ "${count_before}" != "${count_after}" ]]; then + fail "backend=${backend} object count changed across renew (${count_before} -> ${count_after})" + fi + + assert_tailscale_invariants + assert_box_route_pref_singleton "180" + assert_preserve_tailnet_rules "${backend}" + + run_boxctl firewall disable >/dev/null + assert_tailscale_invariants + assert_no_box_artifacts "${backend}" + + printf '[backend=%s] strict_box rule-difference\n' "${backend}" + write_config "${backend}" "strict_box" "180" + seed_tailscale_state + run_boxctl firewall enable >/dev/null + status_json="$(run_boxctl firewall status --json)" + assert_contains_text "${status_json}" "\"dns_coexist_mode_active\":\"strict_box\"" "status coexist strict_box" + 
assert_contains_text "${status_json}" "\"tailscale_bypass_applied\":false" "status strict_box bypass flag" + assert_strict_box_rules "${backend}" + run_boxctl firewall disable >/dev/null + assert_tailscale_invariants + assert_no_box_artifacts "${backend}" + + printf 'PASS: backend=%s real-kernel checks\n' "${backend}" + return 0 +} + +executed=0 + +for backend in iptables nftables; do + if backend_usable "${backend}"; then + executed=$((executed + 1)) + fi + run_backend_case "${backend}" +done + +if [[ "${executed}" -eq 0 ]]; then + skip "no usable firewall backend in this environment" +fi + +printf 'PASS: real-kernel firewall validation completed\n' diff --git a/tests/integration/test_updater.sh b/tests/integration/test_updater.sh new file mode 100755 index 0000000..4dcf23b --- /dev/null +++ b/tests/integration/test_updater.sh @@ -0,0 +1,615 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +BOXCTL="${ROOT_DIR}/cmd/boxctl" +TMP_DIR="$(mktemp -d)" +MOCK_DIR="${ROOT_DIR}/tests/fixtures/mockbin" + +cleanup() { + local pid + if [[ -f "${BOX_RUN_DIR}/box.pid" ]]; then + pid="$(tr -d '[:space:]' <"${BOX_RUN_DIR}/box.pid" || true)" + if [[ -n "${pid:-}" ]]; then + kill -TERM "${pid}" >/dev/null 2>&1 || true + fi + fi + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +export PATH="${MOCK_DIR}:${PATH}" +export MOCK_IPTABLES_STATE="${TMP_DIR}/mock/iptables.state" +export MOCK_IP_STATE="${TMP_DIR}/mock/ip.state" +export MOCK_NFT_STATE="${TMP_DIR}/mock/nft.state" +export BOX_IPTABLES_CMD="${MOCK_DIR}/iptables" +export BOX_IP_CMD="${MOCK_DIR}/ip" +export BOX_NFT_CMD="${MOCK_DIR}/nft" +export BOX_CURL_CMD="${MOCK_DIR}/curl" +export BOX_UNSAFE_SKIP_ROOT_CHECK=1 +export BOX_CAP_TPROXY=1 +export BOX_RUN_DIR="${TMP_DIR}/run" +export BOX_VAR_DIR="${TMP_DIR}/var" +export BOX_LOG_DIR="${TMP_DIR}/log" + +CONFIG_FILE="${TMP_DIR}/box.toml" +INSTALLED_BIN_DIR="${TMP_DIR}/installed-bin" +PROFILE_DIR="${TMP_DIR}/profiles" 
+SOURCE_DIR="${TMP_DIR}/sources"
+ARTIFACT_DIR="${BOX_VAR_DIR}/artifacts"
+STAGING_DIR="${BOX_VAR_DIR}/staging"
+ACTIVE_MOCK_DIR="${TMP_DIR}/active-mockbin"
+
+export BOX_CONFIG_FILE="${CONFIG_FILE}"
+mkdir -p "${TMP_DIR}/mock" "${BOX_RUN_DIR}" "${BOX_VAR_DIR}" "${BOX_LOG_DIR}" \
+  "${INSTALLED_BIN_DIR}" "${PROFILE_DIR}" "${SOURCE_DIR}" "${ACTIVE_MOCK_DIR}"
+touch "${MOCK_IPTABLES_STATE}" "${MOCK_IP_STATE}" "${MOCK_NFT_STATE}"
+
+# Fail fast when the host lacks a tool the fixtures below depend on.
+for tool in jq zip unzip gzip; do
+  command -v "${tool}" >/dev/null 2>&1 || {
+    printf 'missing required test command: %s\n' "${tool}" >&2
+    exit 1
+  }
+done
+
+PATH_ORIG="${PATH}"
+# Populate a directory of mock binaries that can later be put first on PATH.
+for mock in curl ip iptables nft sing-box; do
+  ln -sf "${MOCK_DIR}/${mock}" "${ACTIVE_MOCK_DIR}/${mock}"
+done
+
+# Print the SHA-256 digest (hex field only) of the file at $1.
+sha256_of() {
+  local digest_line
+  digest_line="$(sha256sum "$1")"
+  printf '%s\n' "${digest_line%% *}"
+}
+
+# Run boxctl with the given arguments; abort the test run on failure.
+# On success, echoes the command's combined stdout+stderr.
+must_run() {
+  local captured
+  captured="$(${BOXCTL} "$@" 2>&1)" || {
+    printf 'FAILED: boxctl %s\n%s\n' "$*" "${captured}" >&2
+    exit 1
+  }
+  printf '%s\n' "${captured}"
+}
+
+# Run boxctl expecting failure; abort the test run if it unexpectedly
+# succeeds. Echoes the command's combined stdout+stderr either way.
+must_fail() {
+  local captured
+  if captured="$(${BOXCTL} "$@" 2>&1)"; then
+    printf 'UNEXPECTED SUCCESS: boxctl %s\n%s\n' "$*" "${captured}" >&2
+    exit 1
+  fi
+  printf '%s\n' "${captured}"
+}
+
+# Assert that $1 contains substring $2.
+assert_contains() {
+  local hay="${1:?missing haystack}"
+  local needle="${2:?missing needle}"
+  case "${hay}" in
+    *"${needle}"*) ;;
+    *)
+      printf 'ASSERT CONTAINS FAILED: expected [%s] in [%s]\n' "${needle}" "${hay}" >&2
+      exit 1
+      ;;
+  esac
+}
+
+# Assert that $1 does NOT contain substring $2.
+assert_not_contains() {
+  local hay="${1:?missing haystack}"
+  local needle="${2:?missing needle}"
+  case "${hay}" in
+    *"${needle}"*)
+      printf 'ASSERT NOT CONTAINS FAILED: unexpected [%s] in [%s]\n' "${needle}" "${hay}" >&2
+      exit 1
+      ;;
+  esac
+}
+
+assert_file_contains() {
+  local path="${1:?missing path}"
+  local needle="${2:?missing needle}"
+  if ! 
grep -Fq "${needle}" "${path}"; then + printf 'ASSERT FILE CONTAINS FAILED: expected [%s] in %s\n' "${needle}" "${path}" >&2 + cat "${path}" >&2 || true + exit 1 + fi +} + +assert_dir_exists() { + local path="${1:?missing path}" + if [[ ! -d "${path}" ]]; then + printf 'ASSERT DIR FAILED: missing %s\n' "${path}" >&2 + exit 1 + fi +} + +assert_pid_changed() { + local before="${1:?missing before pid}" + local after="${2:?missing after pid}" + if [[ -z "${before}" || -z "${after}" || "${before}" == "${after}" ]]; then + printf 'ASSERT PID CHANGED FAILED: before=%s after=%s\n' "${before}" "${after}" >&2 + exit 1 + fi +} + +assert_pid_same() { + local before="${1:?missing before pid}" + local after="${2:?missing after pid}" + if [[ -z "${before}" || -z "${after}" || "${before}" != "${after}" ]]; then + printf 'ASSERT PID SAME FAILED: before=%s after=%s\n' "${before}" "${after}" >&2 + exit 1 + fi +} + +service_pid() { + local status_json + status_json="$(must_run service status --json)" + printf '%s\n' "${status_json}" | sed -n 's/.*"pid":\([0-9][0-9]*\).*/\1/p' +} + +write_common_config() { + local core="${1:?missing core}" + local source="${2:?missing source}" + local updater_block="${3:-}" + cat >"${CONFIG_FILE}" <"${SOURCE_DIR}/kernel-v1" <<'EOF_KERNEL1' +#!/usr/bin/env bash +printf 'mock-kernel-v1\n' +EOF_KERNEL1 +chmod +x "${SOURCE_DIR}/kernel-v1" + +cat >"${SOURCE_DIR}/kernel-v2" <<'EOF_KERNEL2' +#!/usr/bin/env bash +printf 'mock-kernel-v2\n' +EOF_KERNEL2 +chmod +x "${SOURCE_DIR}/kernel-v2" + +cat >"${SOURCE_DIR}/geo-v1.dat" <<'EOF_GEO1' +geo-version-1 +EOF_GEO1 + +cat >"${SOURCE_DIR}/geo-v2.dat" <<'EOF_GEO2' +geo-version-2 +EOF_GEO2 + +cat >"${PROFILE_DIR}/mihomo-live.yaml" <<'EOF_MIHOMO_LIVE' +mode: rule +mixed-port: 7890 +rules: + - MATCH,DIRECT +EOF_MIHOMO_LIVE + +cat >"${PROFILE_DIR}/mihomo-dashboard.yaml" <"${SOURCE_DIR}/mihomo-updated.yaml" <<'EOF_MIHOMO_UPDATE' +mode: rule +mixed-port: 7891 +rules: + - MATCH,REJECT +EOF_MIHOMO_UPDATE + +cat 
>"${PROFILE_DIR}/sing-live.json" <<'EOF_SING_LIVE' +{ + "log": { "level": "info" }, + "outbounds": [ + { "type": "direct", "tag": "direct" } + ] +} +EOF_SING_LIVE + +cat >"${PROFILE_DIR}/sing-dashboard-generic.json" <"${SOURCE_DIR}/sing-updated.json" <<'EOF_SING_UPDATE' +{ + "log": { "level": "warn" }, + "outbounds": [ + { "type": "direct", "tag": "direct" } + ] +} +EOF_SING_UPDATE + +mkdir -p "${SOURCE_DIR}/dashboard-v1" +cat >"${SOURCE_DIR}/dashboard-v1/index.html" <<'EOF_DASH' +dashboard-v1 +EOF_DASH +( + cd "${SOURCE_DIR}/dashboard-v1" + tar -czf "${SOURCE_DIR}/dashboard-v1.tar.gz" . +) + +mkdir -p "${SOURCE_DIR}/dashboard-zip/dist" +cat >"${SOURCE_DIR}/dashboard-zip/dist/index.html" <<'EOF_DASH_ZIP' +dashboard-zip +EOF_DASH_ZIP +( + cd "${SOURCE_DIR}/dashboard-zip" + zip -qr "${SOURCE_DIR}/dashboard-v1.zip" . +) + +mkdir -p "${SOURCE_DIR}/sing-box-release" +cat >"${SOURCE_DIR}/sing-box-release/sing-box" <<'EOF_SING_RELEASE' +#!/usr/bin/env bash +printf 'mock-release-sing-box\n' +EOF_SING_RELEASE +chmod +x "${SOURCE_DIR}/sing-box-release/sing-box" +( + cd "${SOURCE_DIR}" + tar -czf "${SOURCE_DIR}/sing-box-1.0.0-linux-amd64.tar.gz" sing-box-release +) + +cat >"${SOURCE_DIR}/mihomo-release-bin" <<'EOF_MIHOMO_RELEASE' +#!/usr/bin/env bash +printf 'mock-release-mihomo\n' +EOF_MIHOMO_RELEASE +chmod +x "${SOURCE_DIR}/mihomo-release-bin" +gzip -c "${SOURCE_DIR}/mihomo-release-bin" >"${SOURCE_DIR}/mihomo-linux-amd64-v1.0.0.gz" + +cat >"${SOURCE_DIR}/geo-release-prerelease.dat" <<'EOF_GEO_RELEASE' +geo-release-prerelease +EOF_GEO_RELEASE + +KERNEL_V1_SHA="$(sha256_of "${SOURCE_DIR}/kernel-v1")" +KERNEL_V2_SHA="$(sha256_of "${SOURCE_DIR}/kernel-v2")" +GEO_V1_SHA="$(sha256_of "${SOURCE_DIR}/geo-v1.dat")" +DASHBOARD_ZIP_SHA="$(sha256_of "${SOURCE_DIR}/dashboard-v1.zip")" +SUBS_MIHOMO_SHA="$(sha256_of "${SOURCE_DIR}/mihomo-updated.yaml")" +SUBS_SING_SHA="$(sha256_of "${SOURCE_DIR}/sing-updated.json")" +RELEASE_KERNEL_SHA="$(sha256_of 
"${SOURCE_DIR}/sing-box-1.0.0-linux-amd64.tar.gz")" +RELEASE_KERNEL_BIN_SHA="$(sha256_of "${SOURCE_DIR}/sing-box-release/sing-box")" +MIHOMO_RELEASE_SHA="$(sha256_of "${SOURCE_DIR}/mihomo-linux-amd64-v1.0.0.gz")" +MIHOMO_RELEASE_BIN_SHA="$(sha256_of "${SOURCE_DIR}/mihomo-release-bin")" +RELEASE_GEO_SHA="$(sha256_of "${SOURCE_DIR}/geo-release-prerelease.dat")" + +cat >"${SOURCE_DIR}/sing-box-1.0.0-linux-amd64.sha256" <"${SOURCE_DIR}/geo-release-prerelease.sha256" <"${SOURCE_DIR}/mihomo-linux-amd64-v1.0.0.sha256" <"${SOURCE_DIR}/kernel-release-stable.json" <"${SOURCE_DIR}/mihomo-release-stable.json" <"${SOURCE_DIR}/geo-release-list.json" </dev/null +assert_file_contains "${INSTALLED_BIN_DIR}/mihomo" 'mock-kernel-v1' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"kernel":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"last_handoff":"none"' +must_run update kernel >/dev/null +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"kernel":{"configured":true,"status":"unchanged"' +assert_contains "${status_json}" '"installed_sha256":"'"${KERNEL_V1_SHA}"'"' + +printf '[3/16] checksum mismatch fails safely\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.kernel] +file = \"${SOURCE_DIR}/kernel-v2\" +checksum = \"0000000000000000000000000000000000000000000000000000000000000000\" +target = \"${INSTALLED_BIN_DIR}/mihomo\"" +must_fail update kernel >/dev/null +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"kernel":{"configured":true,"status":"error"' +assert_contains "${status_json}" '"last_error":"checksum verification failed"' +assert_file_contains "${INSTALLED_BIN_DIR}/mihomo" 'mock-kernel-v1' + +printf '[4/16] download failure is reported\n' +export MOCK_CURL_FAIL_URL='https://updates.invalid/geo.dat' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.geo] +url = \"https://updates.invalid/geo.dat\" 
+target = \"${ARTIFACT_DIR}/geo/geo.dat\"" +must_fail update geo >/dev/null +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"geo":{"configured":true,"status":"error"' +assert_contains "${status_json}" '"last_error":"download failed"' +unset MOCK_CURL_FAIL_URL + +printf '[5/16] update all installs geo and dashboard payloads\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.kernel] +file = \"${SOURCE_DIR}/kernel-v1\" +checksum = \"${KERNEL_V1_SHA}\" +target = \"${INSTALLED_BIN_DIR}/mihomo\" + +[updater.geo] +file = \"${SOURCE_DIR}/geo-v1.dat\" +checksum = \"${GEO_V1_SHA}\" +target = \"${ARTIFACT_DIR}/geo/geo.dat\" + +[updater.dashboard] +file = \"${SOURCE_DIR}/dashboard-v1.zip\" +checksum = \"${DASHBOARD_ZIP_SHA}\" +target = \"${ARTIFACT_DIR}/dashboard/current\"" +must_run update all >/dev/null +assert_file_contains "${ARTIFACT_DIR}/geo/geo.dat" 'geo-version-1' +assert_dir_exists "${ARTIFACT_DIR}/dashboard/current" +assert_file_contains "${ARTIFACT_DIR}/dashboard/current/index.html" 'dashboard-zip' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"geo":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"dashboard":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"subs":{"configured":false,"status":"skipped"' + +printf '[6/16] dashboard updater can derive target and url from core config\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-dashboard.yaml" "" +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"dashboard":{"configured":true' +must_run update dashboard >/dev/null +assert_file_contains "${PROFILE_DIR}/ui/dashboard/index.html" 'dashboard-zip' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" "\"target_path\":\"${PROFILE_DIR}/ui/dashboard\"" +rm -rf "${PROFILE_DIR}/ui/dashboard" +must_run update all >/dev/null +assert_file_contains 
"${PROFILE_DIR}/ui/dashboard/index.html" 'dashboard-zip' + +printf '[7/16] dashboard updater falls back to ./dashboard relative to core config\n' +export MOCK_CURL_RESPONSE_FILE="${SOURCE_DIR}/dashboard-v1.zip" +rm -rf "${PROFILE_DIR}/dashboard" +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "" +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"dashboard":{"configured":true' +must_run update all >/dev/null +assert_file_contains "${PROFILE_DIR}/dashboard/index.html" 'dashboard-zip' +unset MOCK_CURL_RESPONSE_FILE + +printf '[8/16] sing-box dashboard discovery accepts generic external_ui keys\n' +write_common_config "sing-box" "${PROFILE_DIR}/sing-dashboard-generic.json" "" +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"dashboard":{"configured":true' +must_run update dashboard >/dev/null +assert_file_contains "${PROFILE_DIR}/ui/sing-dashboard/index.html" 'dashboard-zip' + +printf '[9/16] kernel release resolver selects stable archive asset\n' +write_common_config "sing-box" "${PROFILE_DIR}/sing-live.json" "[updater.kernel] +source = \"release\" +release_api_url = \"file://${SOURCE_DIR}/kernel-release-stable.json\" +release_channel = \"stable\" +asset_regex = \"sing-box-.*linux.*(amd64|x86_64).*tar.gz$\" +checksum_asset_regex = \"sha256$\" +target = \"${INSTALLED_BIN_DIR}/sing-box\"" +must_run update kernel >/dev/null +assert_file_contains "${INSTALLED_BIN_DIR}/sing-box" 'mock-release-sing-box' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" "\"source_ref\":\"file://${SOURCE_DIR}/sing-box-1.0.0-linux-amd64.tar.gz\"" +assert_contains "${status_json}" "\"installed_sha256\":\"${RELEASE_KERNEL_BIN_SHA}\"" + +printf '[10/16] mihomo kernel release resolver installs gzip asset\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.kernel] +source = \"release\" +release_api_url = \"file://${SOURCE_DIR}/mihomo-release-stable.json\" 
+release_channel = \"stable\" +asset_regex = \"mihomo-.*linux.*(amd64|x86_64).*gz$\" +checksum_asset_regex = \"sha256$\" +target = \"${INSTALLED_BIN_DIR}/mihomo-release\"" +must_run update kernel >/dev/null +assert_file_contains "${INSTALLED_BIN_DIR}/mihomo-release" 'mock-release-mihomo' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" "\"source_ref\":\"file://${SOURCE_DIR}/mihomo-linux-amd64-v1.0.0.gz\"" +assert_contains "${status_json}" "\"installed_sha256\":\"${MIHOMO_RELEASE_BIN_SHA}\"" + +printf '[11/16] geo release resolver selects prerelease asset\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.geo] +source = \"release\" +release_api_url = \"file://${SOURCE_DIR}/geo-release-list.json\" +release_channel = \"prerelease\" +asset_regex = \"geo-release-prerelease.dat$\" +checksum_asset_regex = \"geo-release-prerelease.sha256$\" +target = \"${ARTIFACT_DIR}/geo/geo-release.dat\"" +must_run update geo >/dev/null +assert_file_contains "${ARTIFACT_DIR}/geo/geo-release.dat" 'geo-release-prerelease' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" "\"source_ref\":\"file://${SOURCE_DIR}/geo-release-prerelease.dat\"" + +printf '[12/16] geo update does not restart running service\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.geo] +file = \"${SOURCE_DIR}/geo-v2.dat\" +checksum = \"$(sha256_of "${SOURCE_DIR}/geo-v2.dat")\" +target = \"${ARTIFACT_DIR}/geo/geo.dat\"" +must_run service start >/dev/null +geo_pid_before="$(service_pid)" +must_run update geo >/dev/null +geo_pid_after="$(service_pid)" +assert_pid_same "${geo_pid_before}" "${geo_pid_after}" +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"geo":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"last_handoff":"none"' +must_run service stop >/dev/null + +printf '[13/16] inactive kernel target does not restart running service\n' 
+write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.kernel] +file = \"${SOURCE_DIR}/kernel-v2\" +checksum = \"${KERNEL_V2_SHA}\" +target = \"${INSTALLED_BIN_DIR}/mihomo\"" +must_run service start >/dev/null +kernel_pid_before="$(service_pid)" +must_run update kernel >/dev/null +kernel_pid_after="$(service_pid)" +assert_pid_same "${kernel_pid_before}" "${kernel_pid_after}" +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"kernel":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"last_handoff":"none"' +must_run service stop >/dev/null + +printf '[14/16] failed kernel handoff restores old binary and service\n' +cat >"${INSTALLED_BIN_DIR}/mihomo" <<'EOF_ACTIVE_GOOD' +#!/usr/bin/env bash +if [[ "${1:-}" == "-t" ]]; then + exit 0 +fi +trap 'exit 0' TERM INT +while true; do + sleep 1 +done +EOF_ACTIVE_GOOD +chmod +x "${INSTALLED_BIN_DIR}/mihomo" +cat >"${SOURCE_DIR}/kernel-bad" <<'EOF_BAD_KERNEL' +#!/usr/bin/env bash +exit 1 +EOF_BAD_KERNEL +chmod +x "${SOURCE_DIR}/kernel-bad" +PATH="${ACTIVE_MOCK_DIR}:/usr/bin:/bin" +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.kernel] +file = \"${SOURCE_DIR}/kernel-bad\" +checksum = \"$(sha256_of "${SOURCE_DIR}/kernel-bad")\" +target = \"${INSTALLED_BIN_DIR}/mihomo\"" +must_run service start >/dev/null +recovery_pid_before="$(service_pid)" +must_fail update kernel >/dev/null +recovery_pid_after="$(service_pid)" +assert_pid_changed "${recovery_pid_before}" "${recovery_pid_after}" +assert_file_contains "${INSTALLED_BIN_DIR}/mihomo" 'while true; do' +must_run service stop >/dev/null +PATH="${PATH_ORIG}" + +printf '[15/16] mihomo subscription update restarts running service\n' +write_common_config "mihomo" "${PROFILE_DIR}/mihomo-live.yaml" "[updater.subs] +file = \"${SOURCE_DIR}/mihomo-updated.yaml\" +checksum = \"${SUBS_MIHOMO_SHA}\" +target = \"${PROFILE_DIR}/mihomo-live.yaml\"" +must_run service start >/dev/null 
+mihomo_pid_before="$(service_pid)" +must_run update subs >/dev/null +mihomo_pid_after="$(service_pid)" +assert_pid_changed "${mihomo_pid_before}" "${mihomo_pid_after}" +assert_file_contains "${PROFILE_DIR}/mihomo-live.yaml" 'MATCH,REJECT' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"subs":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"last_handoff":"restart"' +must_run service stop >/dev/null + +printf '[16/16] sing-box subscription update falls back to restart\n' +write_common_config "sing-box" "${PROFILE_DIR}/sing-live.json" "[updater.subs] +file = \"${SOURCE_DIR}/sing-updated.json\" +checksum = \"${SUBS_SING_SHA}\" +target = \"${PROFILE_DIR}/sing-live.json\"" +must_run service start >/dev/null +sing_pid_before="$(service_pid)" +must_run update subs >/dev/null +sing_pid_after="$(service_pid)" +assert_pid_changed "${sing_pid_before}" "${sing_pid_after}" +assert_file_contains "${PROFILE_DIR}/sing-live.json" '"level": "warn"' +status_json="$(must_run update status --json)" +assert_contains "${status_json}" '"subs":{"configured":true,"status":"success"' +assert_contains "${status_json}" '"last_handoff":"restart"' +must_run service stop >/dev/null + +printf 'PASS: updater integration checks completed\n' diff --git a/tests/lint_shell.sh b/tests/lint_shell.sh new file mode 100755 index 0000000..5c7e3ce --- /dev/null +++ b/tests/lint_shell.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)"
+MODE="${1:-all}"
+cd "${ROOT_DIR}"
+
+# Shell sources to lint. Entries may be literal paths or glob patterns;
+# patterns are expanded where the list is consumed.
+shell_files=(
+  "cmd/boxctl"
+  "lib/*.sh"
+  "lib/firewall/*.sh"
+  "lib/policy/*.sh"
+  "lib/supervisor/*.sh"
+  "lib/updater/*.sh"
+  "tests/integration/test_phase2.sh"
+  "tests/integration/test_policy.sh"
+  "tests/integration/test_updater.sh"
+  "tests/integration/test_real_kernel.sh"
+  "tests/integration/test_arch_package_smoke.sh"
+  "tests/integration/test_docker_privileged.sh"
+  "tests/fixtures/mockbin/curl"
+  "tests/fixtures/mockbin/ip"
+  "tests/fixtures/mockbin/iptables"
+  "packaging/scripts/systemd-lifecycle.sh"
+  "packaging/arch/box4linux.install"
+)
+
+# Syntax-check every listed shell file.
+# FIX: the previous `bash -n ${shell_files[*]}` parsed only the FIRST file;
+# bash treats the remaining arguments as positional parameters of the
+# (never executed) script, so every other file was silently skipped.
+# Check each file individually instead.
+run_bash_syntax() {
+  local pattern file
+  for pattern in "${shell_files[@]}"; do
+    # Unquoted on purpose: entries such as "lib/*.sh" are glob patterns.
+    # shellcheck disable=SC2086
+    for file in ${pattern}; do
+      bash -n "${file}"
+    done
+  done
+}
+
+# Run shellcheck over the bash sources, then over the pacman install hook
+# in sh mode (it executes under /bin/sh). No-op when shellcheck is absent.
+run_shellcheck() {
+  if ! command -v shellcheck >/dev/null 2>&1; then
+    printf 'shellcheck not available; skipping\n' >&2
+    return 0
+  fi
+
+  # NOTE(review): this list has drifted from shell_files — it checks
+  # mockbin/iw and mockbin/nmcli but not mockbin/ip and mockbin/iptables.
+  # Kept as-is to preserve behavior; confirm which set is intended.
+  shellcheck -e SC1091,SC2034 \
+    cmd/boxctl \
+    lib/*.sh \
+    lib/firewall/*.sh \
+    lib/policy/*.sh \
+    lib/supervisor/*.sh \
+    lib/updater/*.sh \
+    tests/integration/test_phase2.sh \
+    tests/integration/test_policy.sh \
+    tests/integration/test_updater.sh \
+    tests/integration/test_real_kernel.sh \
+    tests/integration/test_arch_package_smoke.sh \
+    tests/integration/test_docker_privileged.sh \
+    tests/fixtures/mockbin/curl \
+    tests/fixtures/mockbin/iw \
+    tests/fixtures/mockbin/nmcli \
+    packaging/scripts/systemd-lifecycle.sh
+  shellcheck -e SC1091,SC2034 -s sh packaging/arch/box4linux.install
+}
+
+case "${MODE}" in
+  all)
+    run_bash_syntax
+    run_shellcheck
+    ;;
+  syntax)
+    run_bash_syntax
+    ;;
+  shellcheck)
+    run_shellcheck
+    ;;
+  *)
+    printf 'usage: %s [all|syntax|shellcheck]\n' "$0" >&2
+    exit 2
+    ;;
+esac