diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..e8f8444 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,212 @@ +name: Build & Package + +on: + push: + branches: [main] + tags: ['v*'] + pull_request: + branches: [main] + +env: + ARCH: arm64 + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install ruff + run: pip install ruff + + - name: Lint Python services + run: ruff check services/ + + # Reusable version computation — PR builds get a ~prN suffix + version: + name: Compute version + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.version }} + steps: + - uses: actions/checkout@v4 + + - name: Read version + id: version + run: | + base=$(cat VERSION | tr -d '[:space:]') + if [ "${{ github.event_name }}" = "pull_request" ]; then + echo "version=${base}~pr${{ github.event.number }}" >> "$GITHUB_OUTPUT" + else + echo "version=${base}" >> "$GITHUB_OUTPUT" + fi + + build-python-packages: + name: Package Python services + runs-on: ubuntu-latest + needs: [lint, version] + steps: + - uses: actions/checkout@v4 + + - name: Install tools + run: | + curl -sfL https://github.com/goreleaser/nfpm/releases/download/v2.41.1/nfpm_2.41.1_linux_amd64.tar.gz \ + | tar xzf - -C /usr/local/bin nfpm + pip install pyyaml + + - name: Generate packaging files + run: python3 packaging/generate.py + + - name: Build Python .deb packages + working-directory: packaging/generated + env: + VERSION: ${{ needs.version.outputs.version }} + run: | + mkdir -p ../../dist + for svc in autopilot-manager connection-manager service-manager system-manager; do + echo "Packaging ark-${svc}..." 
+ nfpm package --config "ark-${svc}.yaml" --packager deb --target ../../dist/ + done + + - name: Build Bash .deb packages + working-directory: packaging/generated + env: + VERSION: ${{ needs.version.outputs.version }} + run: | + for svc in hotspot-updater jetson-can; do + echo "Packaging ark-${svc}..." + nfpm package --config "ark-${svc}.yaml" --packager deb --target ../../dist/ + done + + - name: Upload Python/Bash package artifacts + uses: actions/upload-artifact@v4 + with: + name: python-bash-packages + path: dist/*.deb + + build-cpp-packages: + name: Build & package C++ services + runs-on: ubuntu-latest + needs: [lint, version] + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set up QEMU for ARM64 + uses: docker/setup-qemu-action@v3 + with: + platforms: arm64 + + - name: Build C++ services and package + run: | + docker buildx build \ + --platform linux/arm64 \ + --load \ + -t ark-builder \ + -f packaging/Dockerfile.build . 
+ + docker run --rm \ + --platform linux/arm64 \ + -v "$PWD:/build" \ + -w /build \ + -e VERSION=${{ needs.version.outputs.version }} \ + -e ARCH=arm64 \ + ark-builder \ + bash -c "packaging/build-packages.sh build-cpp && packaging/build-packages.sh package" + + - name: Upload C++ package artifacts + uses: actions/upload-artifact@v4 + with: + name: cpp-packages + path: dist/*.deb + + build-frontend-package: + name: Build & package frontend + runs-on: ubuntu-latest + needs: version + steps: + - uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install tools + run: | + curl -sfL https://github.com/goreleaser/nfpm/releases/download/v2.41.1/nfpm_2.41.1_linux_amd64.tar.gz \ + | tar xzf - -C /usr/local/bin nfpm + pip install pyyaml + + - name: Build frontend + working-directory: frontend/ark-ui/ark-ui + run: | + npm ci + npm run build + mkdir -p ../../../build/ark-ui + cp -r dist ../../../build/ark-ui/ + + - name: Generate packaging files + run: python3 packaging/generate.py + + - name: Package frontend + working-directory: packaging/generated + env: + VERSION: ${{ needs.version.outputs.version }} + run: | + mkdir -p ../../dist + nfpm package --config ark-ui.yaml --packager deb --target ../../dist/ + + - name: Upload frontend package artifact + uses: actions/upload-artifact@v4 + with: + name: frontend-package + path: dist/*.deb + + release: + name: Create Release + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/v') + needs: [build-python-packages, build-cpp-packages, build-frontend-package, version] + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: dist + merge-multiple: true + + - name: Build meta-packages + run: | + curl -sfL https://github.com/goreleaser/nfpm/releases/download/v2.41.1/nfpm_2.41.1_linux_amd64.tar.gz \ + | tar xzf - -C /usr/local/bin nfpm + pip install 
pyyaml + python3 packaging/generate.py + + cd packaging/generated + for pkg in ark-companion-base ark-companion-jetson ark-companion-pi ark-companion-ubuntu; do + echo "Packaging ${pkg}..." + VERSION="${{ needs.version.outputs.version }}" ARCH=arm64 nfpm package \ + --config "${pkg}.yaml" \ + --packager deb \ + --target ../../dist/ + done + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + files: dist/*.deb + generate_release_notes: true diff --git a/.github/workflows/publish-apt.yml b/.github/workflows/publish-apt.yml new file mode 100644 index 0000000..829fc1a --- /dev/null +++ b/.github/workflows/publish-apt.yml @@ -0,0 +1,93 @@ +name: Publish APT Repository + +on: + release: + types: [published] + +jobs: + publish: + name: Publish to APT repository + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Install reprepro + run: sudo apt-get update && sudo apt-get install -y reprepro + + - name: Import GPG key + env: + GPG_PRIVATE_KEY: ${{ secrets.APT_GPG_PRIVATE_KEY }} + GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }} + run: | + echo "${GPG_PRIVATE_KEY}" | gpg --batch --import + # Trust the imported key ultimately + GPG_KEYID=$(gpg --list-secret-keys --keyid-format long | grep sec | head -1 | awk '{print $2}' | cut -d'/' -f2) + echo "${GPG_KEYID}:6:" | gpg --import-ownertrust + + - name: Check out gh-pages branch + id: checkout + run: | + git init repo + cd repo + git remote add origin "https://x-access-token:${{ github.token }}@github.com/${{ github.repository }}.git" + + if git ls-remote --exit-code origin gh-pages; then + git fetch origin gh-pages + git checkout gh-pages + echo "existed=true" >> "$GITHUB_OUTPUT" + else + git checkout --orphan gh-pages + git commit --allow-empty -m "Initialize gh-pages" + echo "existed=false" >> "$GITHUB_OUTPUT" + fi + + - name: Check out packaging config from main + uses: actions/checkout@v4 + with: + path: main-src + sparse-checkout: packaging/apt + + - name: Initialize 
reprepro config + working-directory: repo + run: | + # Only copy config if conf/ doesn't exist yet (first run) + if [ ! -d conf ]; then + mkdir -p conf + cp ../main-src/packaging/apt/distributions conf/ + cp ../main-src/packaging/apt/options conf/ + fi + + - name: Download release .deb assets + env: + GH_TOKEN: ${{ github.token }} + run: | + mkdir -p debs + gh release download "${{ github.event.release.tag_name }}" \ + --repo "${{ github.repository }}" \ + --pattern "*.deb" \ + --dir debs + + - name: Add packages to repository + working-directory: repo + env: + GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }} + run: | + for deb in ../debs/*.deb; do + echo "Adding $(basename "$deb")..." + reprepro includedeb stable "$deb" + done + + - name: Export GPG public key + working-directory: repo + run: | + gpg --export --armor > ark-archive-keyring.asc + gpg --export > ark-archive-keyring.gpg + + - name: Commit and push to gh-pages + working-directory: repo + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add -A + git commit -m "Update APT repository for ${{ github.event.release.tag_name }}" || echo "No changes to commit" + git push origin gh-pages diff --git a/.gitignore b/.gitignore index 8df0344..4dce57c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,8 @@ libmavsdk* polaris.key output.txt user.env -CLAUDE.md +build/ +dist/ +packaging/generated/ +__pycache__/ +*.pyc diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 0000000..57f610a --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,207 @@ +# ARK-OS Architecture + +## Overview + +ARK-OS is a modular collection of services for drone companion computers. It runs on +ARK Electronics platforms (ARK Jetson Carrier, ARK Pi6X Flow) and can also be used on +Ubuntu desktop machines for development and testing. 
It provides mavlink routing, video +streaming, flight log management, firmware updates, RTK corrections, remote ID, and a +web-based management UI. + +### Design Principles + +- **Modular** — Each service is independent. Install only what you need. +- **Simple** — No orchestration frameworks. Just systemd, nginx, and straightforward + REST APIs. +- **Extensible** — Adding a new service means adding a directory, a manifest, and an + entry in `packages.yaml`. The system discovers and manages it automatically. +- **Consistent** — Every service follows the same structure: code, manifest, and + generated systemd unit. All services are packaged and installed as `.deb` packages. + +## System Architecture + +``` +┌──────────────────────────────────────────────────────┐ +│ Browser (ARK UI) │ +│ http://jetson.local or http://pi6x.local │ +└──────────────────┬───────────────────────────────────┘ + │ +┌──────────────────▼───────────────────────────────────┐ +│ nginx (port 80) │ +│ ├── / → Vue SPA static files │ +│ ├── /api/network/* → connection-manager :3001 │ +│ ├── /api/service/* → service-manager :3002 │ +│ ├── /api/autopilot/* → autopilot-manager :3003 │ +│ ├── /api/system/* → system-manager :3004 │ +│ └── /flight-review → flight-review :5006 │ +└──────────────────────────────────────────────────────┘ + │ +┌──────────────────▼───────────────────────────────────┐ +│ Backend Services (systemd user services) │ +│ │ +│ Python REST APIs C++ Services │ +│ ├── connection-manager ├── mavlink-router │ +│ ├── service-manager ├── logloader │ +│ ├── autopilot-manager ├── rtsp-server │ +│ └── system-manager ├── polaris │ +│ ├── dds-agent │ +│ └── rid-transmitter (Jetson) │ +└──────────────────┬───────────────────────────────────┘ + │ + ┌────────┴────────┐ + │ USB (MAVLink) │ High-speed UART (DDS) + ▼ ▼ +┌──────────────────────────────────────────────────────┐ +│ Flight Controller (PX4) │ +└──────────────────────────────────────────────────────┘ +``` + +## Platforms + +ARK-OS supports 
three platforms: + +| Platform | Description | Typical use | +|----------|-------------|-------------| +| `jetson` | NVIDIA Jetson (ARK Jetson Carrier) | Production flight computer | +| `pi` | Raspberry Pi (ARK Pi6X Flow) | Production flight computer | +| `ubuntu` | Ubuntu desktop/laptop | Development and testing | + +Each service declares which platforms it supports in its manifest (`platform` field). +The special value `"all"` means the service runs on all platforms including ubuntu. + +## Services + +| Service | Type | Port | Platform | Purpose | +|---------|------|------|----------|---------| +| mavlink-router | C++ | — | all | Routes MAVLink from FC USB to UDP endpoints | +| dds-agent | C++ | — | jetson, pi, ubuntu | Bridges PX4 uORB ↔ ROS2 topics over serial/UDP | +| logloader | C++ | — | jetson, pi, ubuntu | Downloads flight logs from FC, uploads to review servers | +| flight-review | Custom | 5006 | jetson, pi, ubuntu | Local PX4 Flight Review server | +| rtsp-server | C++ | 5600 | all | RTSP video stream from CSI cameras | +| polaris | C++ | — | jetson, pi, ubuntu | RTK corrections via PointOne GNSS service | +| service-manager | Python | 3002 | all | REST API for systemd service management | +| system-manager | Python | 3004 | all | REST API for system management (power, updates, etc.) 
| autopilot-manager | Python | 3003 | all | REST API for flight controller management | +| connection-manager | Python | 3001 | all | REST API for network/connection management | +| rid-transmitter | C++ | — | jetson | RemoteID broadcast via Bluetooth | +| jetson-can | Bash | — | jetson | Enables Jetson CAN bus interface | +| hotspot-updater | Bash | — | all | Updates default WiFi hotspot name | + +## Service Anatomy + +Every service follows the same structure: + +``` +services/<service>/ +├── <service>.manifest.json # Metadata (see below) +├── <entrypoint> # Python script, C++ source, or bash script +└── config.toml (optional) # Default configuration +``` + +Systemd unit files are **generated** by `packaging/generate.py` from `packages.yaml` — +they are not stored in the service directory. + +### Manifest Schema + +The manifest tells service-manager how to discover and present the service: + +```json +{ + "version": "1.0.0", + "displayName": "Human Readable Name", + "description": "What this service does.", + "platform": ["jetson", "pi", "ubuntu"], + "configFile": "config.toml", + "visible": true, + "requires_sudo": false +} +``` + +- **platform** — Which targets this service supports.
Values: `"jetson"`, `"pi"`, + `"ubuntu"`, or `"all"` (shorthand for all platforms) +- **visible** — Whether the service appears in the ARK UI for user enable/disable +- **requires_sudo** — Whether the systemd unit runs as a system service (vs user service) +- **configFile** — If set, the UI exposes a config editor for this service + +### Systemd Integration + +- User services: `/etc/systemd/user/<service>.service` +- System services (requires_sudo): `/etc/systemd/system/<service>.service` +- Core services auto-enable+start on deb install; optional services (`default_enabled: false` + in `packages.yaml`) are installed dormant — the user enables them via the web UI +- service-manager controls lifecycle via `systemctl --user` commands + +## Frontend + +- **Vue.js SPA** built with `npm run build`, served as static files by nginx +- **nginx** handles reverse proxying, CORS, WebSocket upgrades, and access logging +- Proxy config split into reusable snippets: `ark-proxy.conf` (HTTP) and `ark-ws.conf` (WebSocket) +- Source: `frontend/ark-ui/` +- Served from: `/var/www/ark-ui/html/` + +## Packaging & Deployment + +All services are distributed as Debian packages (`.deb`) built with [nfpm](https://nfpm.goreleaser.com/). +This is the **only** install method — both CI/CD and local development use deb packages. + +### Package Definitions + +All packages are defined in `packaging/packages.yaml`. Running `python3 packaging/generate.py` +produces nfpm configs, systemd units, and install/remove scripts in `packaging/generated/`.
+ +### Install Paths + +| Content | Path | +|---------|------| +| Binaries & scripts | `/opt/ark/bin/` | +| Default configs | `/opt/ark/share//` | +| Systemd units (user) | `/etc/systemd/user/` | +| Systemd units (system) | `/etc/systemd/system/` | +| Frontend | `/var/www/ark-ui/html/` | +| Nginx config | `/etc/nginx/sites-available/ark-ui` | + +### Local Development + +Use `service_control.sh` to build, package, and install services locally: + +```bash +./tools/service_control.sh install service-manager # Single service +./tools/service_control.sh install # All platform-appropriate services +./tools/service_control.sh uninstall service-manager # Remove +./tools/service_control.sh list # Show available + installed +./tools/service_control.sh status # Show systemd status +``` + +This requires `nfpm` to be installed locally. + +### Platform Meta-packages + +| Package | Description | +|---------|-------------| +| `ark-companion-base` | Core services for all platforms + optional services (installed disabled) | +| `ark-companion-jetson` | Base + Jetson-specific services (rid-transmitter, jetson-can) | +| `ark-companion-pi` | Base (Raspberry Pi) | +| `ark-companion-ubuntu` | Base (Ubuntu desktop dev) | + +### Package Lifecycle + +```bash +sudo dpkg -i ark-_1.0.0_arm64.deb # Install (core: enable+start; optional: dormant) +sudo dpkg -i ark-_1.1.0_arm64.deb # Update (same command) +sudo dpkg -r ark- # Remove (prerm stops + disables) +``` + +### CI/CD + +GitHub Actions pipeline (`.github/workflows/build.yml`): +1. **Lint** — ruff on Python services +2. **Build** — Cross-compile C++ for ARM64, package Python services, build frontend +3. **Release** — Attach `.deb` artifacts to GitHub Release on version tags + +## Adding a New Service + +1. Create `services//` with your code +2. Create `.manifest.json` following the schema above +3. Add an entry in `packaging/packages.yaml` defining the service type, dependencies, and systemd config +4. 
Run `python3 packaging/generate.py` to generate packaging files +5. The service will be auto-discovered by service-manager via its manifest diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..003b88b --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,182 @@ +# ARK-OS + +Modular drone companion computer platform by ARK Electronics. Runs on ARK Jetson +Carrier, ARK Pi6X Flow, and Ubuntu desktop (dev/test). Provides MAVLink routing, +video streaming, flight log management, firmware updates, RTK corrections, RemoteID, +and a web-based management UI. + +**Architecture details**: [`ARCHITECTURE.md`](ARCHITECTURE.md) +**Improvement roadmap**: [`claude_plan/CLAUDE.md`](claude_plan/CLAUDE.md) + +## Design Principles + +- **Modular** — Each service is an independent systemd unit + deb package +- **Simple** — systemd + nginx + REST APIs, no orchestration frameworks +- **Manifest-driven** — `packages.yaml` is the single source of truth for packaging +- **Consistent** — Every service follows the same structure: code, manifest, generated unit + +## Key Conventions + +### Source of Truth + +- **`packaging/packages.yaml`** defines all deb packages, dependencies, systemd config, and + install paths. Run `python3 packaging/generate.py` to regenerate nfpm configs, systemd + units, and install/remove scripts. +- **`.manifest.json`** in each service directory defines UI metadata (display name, + platform support, config file, visibility). +- **`default_enabled`** in `packages.yaml` controls whether a service is auto-started on deb + install. Core services default to `true`; optional services (logloader, flight-review, + polaris, rid-transmitter, rtsp-server, dds-agent, jetson-can) are `false` — installed but + dormant until the user enables them via the web UI. 
+ +### Install Paths (deb packages) + +| Content | Path | +|---------|------| +| Binaries & scripts | `/opt/ark/bin/` | +| Default configs | `/opt/ark/share//` | +| Systemd units (user) | `/etc/systemd/user/` | +| Systemd units (system) | `/etc/systemd/system/` | +| Frontend | `/var/www/ark-ui/html/` | +| Nginx config | `/etc/nginx/sites-available/ark-ui` | + +### Config Path Strategy + +Services use a two-tier config lookup: +1. **User config** at `~/.config/ark//config.toml` — writable, persists across upgrades +2. **Default config** at `/opt/ark/share//config.toml` — installed by deb, read-only + +Services that write runtime state (e.g. logloader's SQLite DB) use +`~/.local/share/ark//` as a writable data directory. + +## Repository Layout + +``` +ARK-OS/ +├── services/ # All 13 services (Python, C++, Bash) +├── frontend/ # Vue.js SPA + nginx config +├── packaging/ # packages.yaml, generate.py, build scripts +├── platform/ # Platform-specific scripts and configs +│ ├── common/ # Shared across all platforms +│ ├── jetson/ # NVIDIA Jetson specific +│ └── pi/ # Raspberry Pi specific +├── tools/ # service_control.sh, install_software.sh +├── libs/ # External libraries (mavsdk-examples) +├── tests/ # Test files +├── ARCHITECTURE.md # Full architecture documentation +└── VERSION # Current version (used by CI) +``` + +## Services + +| Service | Type | Port | Platform | Purpose | +|---------|------|------|----------|---------| +| mavlink-router | C++ | — | all | Routes MAVLink from FC to UDP endpoints | +| dds-agent | C++ | — | all | Bridges PX4 uORB ↔ ROS2 topics | +| logloader | C++ | — | all | Downloads flight logs, uploads to review servers | +| flight-review | Custom | 5006 | all | Local PX4 Flight Review server | +| rtsp-server | C++ | 5600 | all | RTSP video stream from CSI cameras | +| polaris | C++ | — | all | RTK corrections via PointOne GNSS | +| service-manager | Python | 3002 | all | REST API: systemd service management | +| system-manager | Python | 3004 | 
all | REST API: system management | +| autopilot-manager | Python | 3003 | all | REST API: flight controller management | +| connection-manager | Python | 3001 | all | REST API: network management | +| rid-transmitter | C++ | — | jetson | RemoteID broadcast via Bluetooth | +| jetson-can | Bash | — | jetson | Enables Jetson CAN bus interface | +| hotspot-updater | Bash | — | all | Updates default WiFi hotspot name | + +## Submodule Ownership + +| Submodule | Owner | Editable? | +|-----------|-------|-----------| +| `services/logloader/logloader` | ARK | Yes | +| `services/polaris/polaris-client-mavlink` | ARK | Yes | +| `services/rid-transmitter/RemoteIDTransmitter` | ARK | Yes | +| `services/rtsp-server/rtsp-server` | ARK | Yes | +| `libs/mavsdk-examples` | ARK | Yes | +| `services/flight-review/flight_review` | PX4 (upstream) | No | +| `services/dds-agent/Micro-XRCE-DDS-Agent` | eProsima (upstream) | No | +| `services/mavlink-router/mavlink-router` | upstream | No | + +## Install Workflows + +### 1. Source Build (development) + +For developers working on the codebase. Requires `nfpm` installed locally. + +```bash +git clone --recurse-submodules https://github.com/ARK-Electronics/ARK-OS.git +cd ARK-OS +./tools/service_control.sh install # Build + install all services +./tools/service_control.sh install logloader # Single service +./tools/service_control.sh status # Check running services +``` + +### 2. Deb Download (PR testing) + +Download `.deb` artifacts from a GitHub Actions CI run to test a PR. +Platform meta-packages: `ark-companion-jetson`, `ark-companion-pi`, `ark-companion-ubuntu` +(all depend on `ark-companion-base` which pulls in all services). + +```bash +# Download debs from the GitHub Actions artifacts for the PR +sudo dpkg -i ark-*.deb +``` + +### 3. APT Upgrade (end users) + +*Not yet implemented* — see [`claude_plan/P1-apt-repository.md`](claude_plan/P1-apt-repository.md). 
+Once the APT repository is set up: + +```bash +sudo apt update && sudo apt upgrade +``` + +## Common Tasks + +```bash +# Regenerate packaging files after editing packages.yaml +python3 packaging/generate.py + +# Build all deb packages locally +./packaging/build-packages.sh + +# Lint Python services +ruff check services/ + +# Run frontend dev server +cd frontend/ark-ui/ark-ui && npm run serve + +# Check service logs +journalctl --user -u logloader -f +``` + +## Platform Details + +| Platform | Hostname | User | Hardware | +|----------|----------|------|----------| +| jetson | jetson.local | jetson | ARK Jetson Carrier | +| pi | pi6x.local | pi | ARK Pi6X Flow | +| ubuntu | — | — | Desktop/laptop (dev) | + +## Build Notes + +- C++ services: CMake (or Meson for mavlink-router), cross-compiled for ARM64 in CI +- C++ flags: `-Wall -Wextra -Werror -Wpedantic`, C++20 +- Python: 3.9+, linted with ruff +- Frontend: Vue.js SPA, built with npm, served by nginx +- CI: GitHub Actions (`.github/workflows/build.yml`) — lint, build, package, release + +## Session Workflow + +When starting a new Claude session on this project: +1. Read this file for project context +2. Read `claude_plan/CLAUDE.md` for the current improvement roadmap +3. Check `claude_plan/completed/` for recently finished work + +Before ending a session that made changes: +1. Follow the **End-of-Session Checklist** in `claude_plan/CLAUDE.md` +2. Ensure this file, `ARCHITECTURE.md`, and `claude_plan/CLAUDE.md` are all up to date +3. 
Record completion notes with session IDs so future sessions can retrieve full context + +Session transcripts are stored at `~/.claude/projects/-home-jake-code-ark-ARK-OS/.jsonl` diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..3eefcb9 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +1.0.0 diff --git a/claude_plan/CLAUDE.md b/claude_plan/CLAUDE.md new file mode 100644 index 0000000..8dbb153 --- /dev/null +++ b/claude_plan/CLAUDE.md @@ -0,0 +1,123 @@ +# ARK-OS Improvement Plans + +This directory contains prioritized, actionable improvement plans for ARK-OS. Each plan +follows a consistent template and contains enough detail for implementation. + +## How This Works + +1. **Read the plan** — Each `.md` file is a self-contained improvement plan +2. **Check dependencies** — Some plans depend on others being completed first +3. **Implement** — Follow the steps in the plan file +4. **Record completion** — Move the plan to `completed/` with a date prefix and notes +5. **Update docs** — Verify and update top-level docs to reflect the changes + +### Recording Completed Work + +When a plan (or a significant chunk of a plan) is finished: + +```bash +mv claude_plan/P0-path-migration-cleanup.md claude_plan/completed/2025-01-15-P0-path-migration-cleanup.md +``` + +Add a **Completion Notes** section at the bottom of the moved file with: + +```markdown +## Completion Notes + +- **Date**: 2025-01-15 +- **Session ID**: 098dc835-7d1e-467f-8ec5-8e34d6687f4b +- **Transcript**: ~/.claude/projects/-home-jake-code-ark-ARK-OS/.jsonl +- **Planning session**: .jsonl (if different) +- **Summary**: <1-3 sentences on what was done> +- **Deviations**: +- **Follow-up**: +``` + +The session ID is the UUID filename of the `.jsonl` transcript in +`~/.claude/projects/-home-jake-code-ark-ARK-OS/`. Use `ls -lt` to find the most recent +one, or check the plan's original text for a transcript reference. 
+ +### End-of-Session Checklist + +**After every session that modifies code or completes a plan**, verify and update: + +1. **`CLAUDE.md` (project root)** — Does it still accurately describe: + - Install paths and conventions? + - Migration status (remove the note once P0-path-migration is done)? + - Services table (if services were added/removed/renamed)? + - Submodule ownership table? + - Install workflows? + +2. **`ARCHITECTURE.md`** — Does it still accurately describe: + - System architecture diagram? + - Service table (ports, platforms)? + - Packaging and deployment info? + - "Adding a New Service" instructions? + +3. **This file (`claude_plan/CLAUDE.md`)** — Update: + - Move completed plans to `completed/` + - Update priority matrix (remove completed rows, adjust dependencies) + - Add any new plans discovered during implementation + +4. **`~/.claude/projects/-home-jake-code-ark-ARK-OS/memory/MEMORY.md`** — Update: + - Confirmed patterns and conventions + - Any new architectural decisions + - Remove outdated information + +The goal: a future Claude session starting from `CLAUDE.md` should have an accurate, +up-to-date picture of the project without needing to re-discover anything. + +## Priority Matrix + +### P0 — Critical (fix now) + +These are bugs or security issues in the current codebase. 
+ +| Plan | Description | Dependencies | +|------|-------------|--------------| +| ~~P0-path-migration-cleanup~~ | ~~Complete `~/.local/` → `/opt/ark/` path migration~~ | **Done** ([completed/2026-02-24-P0-path-migration-cleanup.md](completed/2026-02-24-P0-path-migration-cleanup.md)) | +| ~~P0-security-hardening~~ | ~~Fix command injection, add input validation~~ | **Done** ([completed/2026-02-24-P0-security-hardening.md](completed/2026-02-24-P0-security-hardening.md)) | +| ~~P0-default-disabled-services~~ | ~~Default-disabled services + platform meta-packages~~ | **Done** ([completed/2026-02-24-P0-default-disabled-services.md](completed/2026-02-24-P0-default-disabled-services.md)) | + +### P1 — High (next quarter) + +Important improvements that add significant value. + +| Plan | Description | Dependencies | +|------|-------------|--------------| +| [P1-apt-repository.md](P1-apt-repository.md) | Hosted APT repo for OTA updates | ~~P0-default-disabled-services~~ | +| [P1-testing-framework.md](P1-testing-framework.md) | Unit/integration testing strategy | ~~P0-security~~ | +| [P1-flask-to-fastapi.md](P1-flask-to-fastapi.md) | Migrate Python services to FastAPI | ~~P0-path-migration~~, ~~P0-security~~ | + +### P2 — Medium (this half) + +Feature improvements and modernization. + +| Plan | Description | Dependencies | +|------|-------------|--------------| +| [P2-webrtc-video.md](P2-webrtc-video.md) | WebRTC video in UI + UVC camera support | None | +| [P2-vite-migration.md](P2-vite-migration.md) | Migrate vue-cli-service → Vite | None | + +### P3 — Backburner + +Future considerations, not actively planned. 
+ +| Plan | Description | Dependencies | +|------|-------------|--------------| +| [P3-mavlink2rest.md](P3-mavlink2rest.md) | MAVLink REST/WebSocket API bridge | P1-flask-to-fastapi | +| [P3-zenoh-support.md](P3-zenoh-support.md) | Zenoh daemon alongside DDS agent | None | + +## Plan File Template + +Every plan follows this structure: + +```markdown +# Title +## Problem +## Solution +## Files to Modify (exact paths) +## Implementation Steps +## Acceptance Criteria +## Dependencies +## Effort Estimate +``` diff --git a/claude_plan/P1-apt-repository.md b/claude_plan/P1-apt-repository.md new file mode 100644 index 0000000..d6ce95f --- /dev/null +++ b/claude_plan/P1-apt-repository.md @@ -0,0 +1,98 @@ +# P1: APT Repository for OTA Updates + +## Problem + +Currently, users install ARK-OS by either: +1. Running `install.sh` on-device (legacy, builds from source) +2. Downloading `.deb` files from GitHub Releases and manually installing with `dpkg` + +There is no `apt update && apt upgrade` workflow for end users. This means no automatic +dependency resolution, no easy rollback, and a manual update process. + +## Solution + +A hosted APT repository on GitHub Pages (gh-pages branch) managed by `reprepro`. +Devices can update with standard Debian tooling: + +```bash +sudo apt update && sudo apt upgrade # Updates all ARK packages +``` + +### Repository Structure + +``` +deb https://ark-electronics.github.io/ARK-OS stable main # Release builds +``` + +**Hosting**: GitHub Pages on `gh-pages` branch of ARK-OS. +**Scope**: Stable releases only (tagged `v*`). Testing repo deferred. +**Tool**: `reprepro` (simple, file-based, maintains state on gh-pages). 
+ +## Files + +| File | Purpose | +|------|---------| +| `packaging/apt/distributions` | reprepro distribution config (stable, arm64, main) | +| `packaging/apt/options` | reprepro options | +| `.github/workflows/publish-apt.yml` | CI workflow: on release, publish debs to gh-pages APT repo | +| `platform/common/scripts/setup_apt_repo.sh` | Device-side script to add the ARK APT source | + +## How It Works + +### CI Workflow (`publish-apt.yml`) + +Triggered by `release` event (type: `published`): + +1. Installs `reprepro` and imports GPG signing key from secrets +2. Checks out `gh-pages` branch (creates orphan branch on first run) +3. Copies reprepro config from `packaging/apt/` if not already initialized +4. Downloads all `.deb` assets from the GitHub Release +5. Runs `reprepro includedeb stable ` to add packages +6. Exports the public GPG key as `ark-archive-keyring.gpg` +7. Commits and pushes to `gh-pages` + +### Device Setup (`setup_apt_repo.sh`) + +Run once per device to configure the APT source: + +```bash +sudo bash platform/common/scripts/setup_apt_repo.sh +``` + +This downloads the GPG keyring, adds the source list entry, and runs `apt update`. + +## Manual Steps Required + +Before the workflow can run: + +1. **Generate GPG signing key** (one-time): + ```bash + gpg --full-generate-key # RSA 4096, "ARK Electronics " + ``` + +2. **Add GitHub Actions secrets** to the ARK-OS repo: + - `APT_GPG_PRIVATE_KEY`: output of `gpg --armor --export-secret-keys ` + - `APT_GPG_PASSPHRASE`: passphrase for the key (if set) + +3. **Enable GitHub Pages** on the repo: Settings > Pages > Source: `gh-pages` branch + +4. **(Optional)** Configure custom domain `apt.arkelectron.com` pointing to GitHub Pages. + If using a custom domain, update `REPO_URL` in `setup_apt_repo.sh`. 
+ +## Acceptance Criteria + +- [ ] `apt update` successfully fetches package list from the ARK repo +- [ ] `apt install ark-companion-base` installs all ARK-OS packages with dependencies +- [ ] `apt upgrade` updates installed packages to latest version +- [ ] Release tags in CI automatically publish to the stable repo +- [ ] Packages are GPG-signed and `apt` verifies signatures +- [ ] Setup script works on fresh Jetson and Pi devices + +## Dependencies + +None — all P0 prerequisites are complete. + +## Effort Estimate + +Small. The files are implemented; remaining work is manual setup (GPG key, secrets, +GitHub Pages) and verification on a real device. diff --git a/claude_plan/P1-flask-to-fastapi.md b/claude_plan/P1-flask-to-fastapi.md new file mode 100644 index 0000000..96cacfc --- /dev/null +++ b/claude_plan/P1-flask-to-fastapi.md @@ -0,0 +1,192 @@ +# P1: Migrate Python Services from Flask to FastAPI + +## Problem + +All four Python REST services use Flask, a synchronous WSGI framework. This works but has +limitations: +- No native async support (important for services that wait on subprocess/network calls) +- No automatic request validation or OpenAPI docs +- Manual JSON serialization +- Flask's development server is used in production (no gunicorn/uwsgi configured) + +## Solution + +Migrate each Python service from Flask to FastAPI. FastAPI provides: +- Automatic request/response validation via Pydantic models +- Auto-generated OpenAPI docs (useful for debugging on-device) +- Native async for subprocess calls +- Built-in CORS middleware +- Uvicorn as production ASGI server (lightweight, suitable for embedded) + +### Migration Strategy + +Migrate one service at a time, starting with the simplest (service-manager). Each migration +follows the same pattern: Flask routes → FastAPI routes, manual validation → Pydantic models. 
+ +## Files to Modify + +### Per service (repeat for each): + +| Service | Main file | Manifest | +|---------|-----------|----------| +| service-manager | `services/service-manager/service_manager.py` | `services/service-manager/service-manager.manifest.json` | +| connection-manager | `services/connection-manager/connection_manager.py` | `services/connection-manager/connection-manager.manifest.json` | +| system-manager | `services/system-manager/system_manager.py` | `services/system-manager/system-manager.manifest.json` | +| autopilot-manager | `services/autopilot-manager/autopilot_manager.py` | `services/autopilot-manager/autopilot-manager.manifest.json` | + +### Packaging changes: + +| File | Change | +|------|--------| +| `packaging/packages.yaml` | Update Python dependencies (flask → fastapi + uvicorn) | +| `packaging/generate.py` | Update exec_start template for uvicorn | +| `tests/test_*.py` | Update test clients (Flask test_client → FastAPI TestClient) | + +## Implementation Steps + +### Step 1: Migrate service-manager (simplest first) + +```python +# Before (Flask): +from flask import Flask, jsonify, request +app = Flask(__name__) + +@app.route("/services", methods=["GET"]) +def get_services(): + services = discover_services() + return jsonify(services) + +@app.route("/restart/", methods=["POST"]) +def restart_service(service_name): + validate_service_name(service_name) + result = run_systemctl("restart", service_name) + return jsonify(result) + +if __name__ == "__main__": + app.run(host="0.0.0.0", port=3002) +``` + +```python +# After (FastAPI): +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel, field_validator +import uvicorn + +app = FastAPI(title="ARK Service Manager") + +class ServiceName(BaseModel): + name: str + + @field_validator("name") + @classmethod + def validate_name(cls, v): + if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9._-]{0,63}$', v): + raise ValueError("Invalid service name") + return v + +@app.get("/services") +async 
def get_services():
+    services = await discover_services()
+    return services
+
+@app.post("/restart/{service_name}")
+async def restart_service(service_name: str):
+    ServiceName(name=service_name) # Validates
+    result = await run_systemctl("restart", service_name)
+    return result
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=3002)
+```
+
+### Step 2: Update systemd exec_start
+
+In `packaging/packages.yaml` or `generate.py`, the exec command stays the same:
+```
+python3 /opt/ark/bin/service_manager.py
+```
+(FastAPI's `uvicorn.run()` is called from the script itself, so no exec_start change is needed
+as long as the script uses `if __name__ == "__main__": uvicorn.run(...)`)
+
+### Step 3: Update dependencies in packages.yaml
+
+```yaml
+# Add to each Python service or create a shared dependency
+depends: [python3-fastapi, python3-uvicorn]
+# Or if not available as system packages:
+# Include a requirements.txt and pip install in postinst
+```
+
+Note: FastAPI and Uvicorn may need to be installed via pip on the target if not available
+as system deb packages. Consider vendoring or adding a pip install step to postinst.
+ +### Step 4: Make subprocess calls async + +```python +import asyncio + +async def run_systemctl(operation: str, service_name: str) -> dict: + proc = await asyncio.create_subprocess_exec( + "systemctl", "--user", operation, service_name, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE + ) + stdout, stderr = await proc.communicate() + return {"returncode": proc.returncode, "stdout": stdout.decode(), "stderr": stderr.decode()} +``` + +### Step 5: Update tests + +```python +# Before (Flask): +from service_manager import app +client = app.test_client() +response = client.get("/services") + +# After (FastAPI): +from fastapi.testclient import TestClient +from service_manager import app +client = TestClient(app) +response = client.get("/services") +# Same assertions work — TestClient has the same interface +``` + +### Step 6: Update nginx proxy config + +FastAPI/Uvicorn should work with the existing nginx proxy config since it still listens +on the same ports. No changes needed unless WebSocket endpoints are added. + +### Step 7: Migrate remaining services + +Repeat steps 1-5 for connection-manager, system-manager, and autopilot-manager. +Order by complexity: +1. service-manager (simplest, fewest endpoints) +2. system-manager (moderate) +3. autopilot-manager (moderate, has MAVLink-related logic) +4. connection-manager (most complex, nmcli interaction) + +## Acceptance Criteria + +- [ ] All four Python services use FastAPI + Uvicorn +- [ ] All existing API endpoints maintain the same URL structure and response format +- [ ] nginx reverse proxy still works without config changes +- [ ] OpenAPI docs accessible at `http://device.local/api/service/docs` (etc.) 
+- [ ] All subprocess calls use async (`asyncio.create_subprocess_exec`) +- [ ] Pydantic models validate all request inputs +- [ ] Tests updated and passing +- [ ] Services start and stop cleanly via systemd + +## Dependencies + +- **P0-path-migration-cleanup** — Complete first so we're not migrating code that will change +- **P0-security-hardening** — The validation logic from P0 should be built into Pydantic models + +## Effort Estimate + +Medium-large. Each service migration is ~1 session (mechanical translation). The main risk +is ensuring the nginx proxy and frontend still work correctly with the new backend. Testing +on-device is important. Total estimate: 4-5 sessions. diff --git a/claude_plan/P1-testing-framework.md b/claude_plan/P1-testing-framework.md new file mode 100644 index 0000000..92e724a --- /dev/null +++ b/claude_plan/P1-testing-framework.md @@ -0,0 +1,231 @@ +# P1: Testing Framework + +## Problem + +ARK-OS has no automated tests. This makes refactoring risky, security fixes hard to verify, +and regressions easy to introduce. The CI pipeline lints but doesn't test. + +A common concern: "How do you test drone software without hardware?" The answer is that +most of the codebase is standard software (HTTP APIs, config parsing, string formatting, +state machines) that has nothing to do with hardware. You test YOUR logic, not the hardware. + +## Solution + +Add pytest-based testing for Python services, with mocking for system calls. Integrate +into CI so tests run before package builds. + +### What You're Actually Testing + +| Layer | Example | Hardware needed? 
| +|-------|---------|-----------------| +| Input validation | Service name regex, hostname check | No | +| Config parsing | TOML loading, default values | No | +| API response shape | JSON structure, status codes | No | +| State machines | Service lifecycle transitions | No | +| Command construction | Correct systemctl arguments | No | +| Error handling | Graceful failure on bad input | No | +| Integration | Deb package contents, config generation | No | + +### What You're NOT Testing (yet) + +| Layer | Example | Approach | +|-------|---------|----------| +| MAVLink communication | Actual FC interaction | Hardware-in-the-loop (manual) | +| Camera streaming | RTSP pipeline | Device with camera (manual) | +| CAN bus | Jetson CAN interface | Jetson hardware (manual) | + +## Files to Modify + +| File | Change | +|------|--------| +| `.github/workflows/build.yml` | Add pytest job before build jobs | +| `pyproject.toml` | Add pytest config | +| New: `tests/conftest.py` | Shared fixtures | +| New: `tests/test_service_manager.py` | service-manager unit tests | +| New: `tests/test_connection_manager.py` | connection-manager unit tests | +| New: `tests/test_system_manager.py` | system-manager unit tests | +| New: `tests/test_autopilot_manager.py` | autopilot-manager unit tests | +| New: `tests/test_packaging.py` | Package generation integration tests | + +## Implementation Steps + +### Step 1: Set up pytest infrastructure + +Update `pyproject.toml`: +```toml +[tool.pytest.ini_options] +testpaths = ["tests"] +pythonpath = ["services/service-manager", "services/connection-manager", + "services/system-manager", "services/autopilot-manager"] +``` + +Create `tests/conftest.py` with shared fixtures: +```python +import pytest +from unittest.mock import patch + +@pytest.fixture +def mock_subprocess(): + """Mock subprocess.run for all tests that need it.""" + with patch("subprocess.run") as mock: + mock.return_value.returncode = 0 + mock.return_value.stdout = "" + 
mock.return_value.stderr = "" + yield mock +``` + +### Step 2: Write tests for P0-security fixes first + +These tests validate that the security hardening from P0 actually works: + +```python +# tests/test_service_manager.py + +import pytest +from service_manager import app, validate_service_name + +class TestServiceNameValidation: + def test_valid_names(self): + assert validate_service_name("logloader") == "logloader" + assert validate_service_name("mavlink-router") == "mavlink-router" + assert validate_service_name("dds-agent") == "dds-agent" + + def test_rejects_injection(self): + with pytest.raises(ValueError): + validate_service_name("; rm -rf /") + with pytest.raises(ValueError): + validate_service_name("foo$(whoami)") + with pytest.raises(ValueError): + validate_service_name("") + +class TestServiceManagerAPI: + @pytest.fixture + def client(self): + app.config["TESTING"] = True + with app.test_client() as client: + yield client + + def test_get_services(self, client, mock_subprocess): + response = client.get("/services") + assert response.status_code == 200 + assert isinstance(response.json, list) + + def test_restart_invalid_service(self, client): + response = client.post("/restart/; rm -rf /") + assert response.status_code == 400 + + def test_logs_invalid_service(self, client): + response = client.get("/logs/$(whoami)") + assert response.status_code == 400 +``` + +### Step 3: Write API tests for each service + +Use Flask's test client to test endpoints without running the server: + +```python +# tests/test_connection_manager.py + +class TestConnectionManagerAPI: + @pytest.fixture + def client(self): + from connection_manager import app + app.config["TESTING"] = True + with app.test_client() as client: + yield client + + def test_get_connections(self, client, mock_subprocess): + mock_subprocess.return_value.stdout = "WiFi:wifi:wlan0\n" + response = client.get("/connections") + assert response.status_code == 200 + + def 
test_set_hostname_rejects_invalid(self, client): + response = client.post("/hostname", json={"hostname": "; whoami"}) + assert response.status_code == 400 +``` + +### Step 4: Write config/packaging integration tests + +```python +# tests/test_packaging.py + +import yaml +import json +import os + +def test_packages_yaml_valid(): + """Verify packages.yaml parses and has required fields.""" + with open("packaging/packages.yaml") as f: + data = yaml.safe_load(f) + assert "services" in data + for name, svc in data["services"].items(): + assert "type" in svc, f"Service {name} missing type" + assert "description" in svc, f"Service {name} missing description" + +def test_manifests_valid(): + """Verify all manifest files parse and have required fields.""" + for root, dirs, files in os.walk("services"): + for f in files: + if f.endswith(".manifest.json"): + path = os.path.join(root, f) + with open(path) as fh: + manifest = json.load(fh) + assert "displayName" in manifest, f"{path} missing displayName" + assert "platform" in manifest, f"{path} missing platform" + +def test_every_service_has_manifest(): + """Every service in packages.yaml has a corresponding manifest.""" + with open("packaging/packages.yaml") as f: + data = yaml.safe_load(f) + for name in data["services"]: + manifest = f"services/{name}/{name}.manifest.json" + assert os.path.exists(manifest), f"Missing manifest: {manifest}" +``` + +### Step 5: Add pytest to CI + +Add to `.github/workflows/build.yml` before the build jobs: + +```yaml +test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: pip install pytest flask PyYAML + - name: Run tests + run: pytest tests/ -v +``` + +### Step 6: Add testing documentation + +Add a section to the project README or a `tests/README.md` explaining: +- How to run tests locally: `pytest tests/ -v` +- How to add tests for a new 
service +- The mocking philosophy (test YOUR code, mock system boundaries) + +## Acceptance Criteria + +- [ ] `pytest tests/ -v` passes with zero failures +- [ ] CI runs tests before package builds +- [ ] Every Python service has at least basic API endpoint tests +- [ ] Input validation from P0-security is covered by tests +- [ ] Config parsing and manifest loading are tested +- [ ] `packages.yaml` structure is validated by tests +- [ ] Tests run in <30 seconds (no hardware, no network) + +## Dependencies + +- **P0-security-hardening** — Tests should validate the security fixes. Can be developed + in parallel, but the validation functions being tested come from P0. + +## Effort Estimate + +Medium. Writing the test infrastructure (conftest.py, CI integration) is ~1 session. +Writing comprehensive tests for all 4 Python services is ~2-3 sessions. The packaging +tests are straightforward. Total estimate: 3-4 sessions. diff --git a/claude_plan/P2-vite-migration.md b/claude_plan/P2-vite-migration.md new file mode 100644 index 0000000..903426d --- /dev/null +++ b/claude_plan/P2-vite-migration.md @@ -0,0 +1,132 @@ +# P2: Migrate Frontend from vue-cli-service to Vite + +## Problem + +The ARK UI frontend uses `vue-cli-service` (webpack-based) for building. Vue CLI is in +maintenance mode — the Vue ecosystem has moved to Vite, which offers: +- 10-50x faster dev server startup (native ESM, no bundling in dev) +- Faster production builds (Rollup-based) +- Better Vue 3 integration +- Active maintenance and ecosystem support + +## Solution + +Migrate the frontend build tooling from vue-cli-service to Vite while keeping the +existing Vue components unchanged. 
+ +## Files to Modify + +| File | Change | +|------|--------| +| `frontend/ark-ui/ark-ui/package.json` | Replace vue-cli deps with vite + @vitejs/plugin-vue | +| `frontend/ark-ui/ark-ui/vite.config.js` | New Vite config (replaces vue.config.js) | +| `frontend/ark-ui/ark-ui/vue.config.js` | Delete | +| `frontend/ark-ui/ark-ui/index.html` | Move from public/ to root, add ` + + +``` + +### Step 4: Update import paths + +Vite uses native ESM, so some imports may need adjustment: +- `require()` → `import` +- Environment variables: `process.env.VUE_APP_*` → `import.meta.env.VITE_*` + +### Step 5: Update CI build command + +In `.github/workflows/build.yml`, the build step should still work if `npm run build` +is already used. Verify the output goes to `dist/` as expected. + +### Step 6: Test production build + +```bash +cd frontend/ark-ui/ark-ui +npm install +npm run build +# Verify dist/ contains the expected static files +# Verify the built app works when served by nginx +``` + +## Acceptance Criteria + +- [ ] `npm run dev` starts Vite dev server successfully +- [ ] `npm run build` produces production bundle in `dist/` +- [ ] All existing pages and components work identically +- [ ] API proxy works in development mode +- [ ] CI build completes successfully +- [ ] Built files serve correctly from nginx on device +- [ ] No vue-cli-service dependencies remain in package.json + +## Dependencies + +None — can be done independently. + +## Effort Estimate + +Small. This is a well-documented migration path. The main risk is environment variable +renaming and any `require()` calls that need to become `import`. Estimate 1-2 sessions. diff --git a/claude_plan/P2-webrtc-video.md b/claude_plan/P2-webrtc-video.md new file mode 100644 index 0000000..2d8dfc2 --- /dev/null +++ b/claude_plan/P2-webrtc-video.md @@ -0,0 +1,195 @@ +# P2: WebRTC Video in UI + UVC Camera Support + +## Problem + +The rtsp-server currently only supports CSI cameras via GStreamer. 
Users with USB cameras +(UVC devices) cannot stream video. Additionally, the ARK UI has no video page — users must +use a separate RTSP client (like VLC) to view the stream. + +WebRTC would allow video to be viewed directly in the browser without plugins or external +applications. + +## Solution + +Two improvements: +1. **UVC camera support** in rtsp-server — Add V4L2 source pipeline for USB cameras +2. **WebRTC streaming** — Add a WebRTC endpoint so the browser can receive video directly + +### Architecture + +``` +Camera (CSI/UVC) → GStreamer → rtsp-server → RTSP stream (existing) + → WebRTC stream (new) + +Browser → ARK UI → WebRTC JS client → rtsp-server WebRTC endpoint +``` + +## Files to Modify + +### rtsp-server (C++ submodule, ARK-owned) + +| File | Change | +|------|--------| +| `services/rtsp-server/rtsp-server/src/main.cpp` | Add V4L2 pipeline, WebRTC support | +| `services/rtsp-server/rtsp-server/config.toml` | Add `source_type` (csi/uvc/auto) | +| `services/rtsp-server/rtsp-server/CMakeLists.txt` | Add GStreamer WebRTC dependencies | + +### Frontend (ARK UI) + +| File | Change | +|------|--------| +| New: `frontend/ark-ui/ark-ui/src/views/VideoView.vue` | Video page with WebRTC player | +| `frontend/ark-ui/ark-ui/src/router/index.js` | Add /video route | +| `frontend/ark-ui/ark-ui/src/components/Sidebar.vue` (or equivalent) | Add Video nav link | + +### Packaging + +| File | Change | +|------|--------| +| `packaging/packages.yaml` | Add GStreamer WebRTC dependencies to rtsp-server | +| `frontend/ark-proxy.conf` or `ark-ui.nginx` | Add WebSocket proxy for WebRTC signaling | + +## Implementation Steps + +### Step 1: Add UVC camera detection + +Add auto-detection of camera type in rtsp-server: + +```cpp +// Detect available cameras +bool has_csi_camera(); // Check /dev/video0 with V4L2 caps +bool has_uvc_camera(); // Check /dev/video* for UVC devices + +std::string get_pipeline(const std::string& source_type) { + if (source_type == "csi" || (source_type 
== "auto" && has_csi_camera())) { + return "nvarguscamerasrc ! video/x-raw(memory:NVMM),width=1920,height=1080 ! ..."; + } else if (source_type == "uvc" || (source_type == "auto" && has_uvc_camera())) { + return "v4l2src device=/dev/video0 ! video/x-raw,width=1280,height=720 ! ..."; + } else { + return "videotestsrc ! ..."; // Test pattern fallback + } +} +``` + +### Step 2: Update rtsp-server config + +```toml +# config.toml +[camera] +source_type = "auto" # "csi", "uvc", "auto", "test" +device = "/dev/video0" # For UVC, auto-detected if not set +width = 1280 +height = 720 +framerate = 30 +``` + +### Step 3: Add WebRTC support via GStreamer + +Use GStreamer's `webrtcbin` element for WebRTC: + +```cpp +// WebRTC pipeline +// camera → encoder → payloader → webrtcbin +auto pipeline = gst_parse_launch( + "v4l2src ! videoconvert ! x264enc tune=zerolatency ! " + "rtph264pay ! webrtcbin name=webrtc", nullptr); +``` + +Add a simple HTTP/WebSocket signaling endpoint for SDP exchange. This could be: +- A lightweight embedded HTTP server (cpp-httplib, already vendored in logloader) +- A separate signaling endpoint proxied through nginx + +### Step 4: Create Video page in ARK UI + +```vue + + + + +``` + +### Step 5: Add nginx proxy for WebRTC signaling + +Add to nginx config: +```nginx +location /api/video/ws { + proxy_pass http://127.0.0.1:5601; # WebRTC signaling port + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; +} +``` + +### Step 6: Update packaging + +Add WebRTC GStreamer dependencies: +```yaml +rtsp-server: + depends: + - libgstreamer1.0-0 + - libgstreamer-plugins-base1.0-0 + - gstreamer1.0-plugins-ugly + - gstreamer1.0-rtsp + - gstreamer1.0-nice # ICE/STUN/TURN for WebRTC + - gstreamer1.0-plugins-bad # webrtcbin element +``` + +## Acceptance Criteria + +- [ ] rtsp-server detects and streams from UVC cameras (`/dev/video*`) +- [ ] `config.toml` supports `source_type` = csi, uvc, auto, test +- [ ] 
Auto-detection correctly identifies CSI vs UVC cameras +- [ ] WebRTC stream viewable in Chrome/Firefox without plugins +- [ ] ARK UI has a Video page accessible from navigation +- [ ] Video page shows live stream with <500ms latency +- [ ] RTSP stream still works alongside WebRTC (existing clients unaffected) +- [ ] Works on both Jetson (CSI + UVC) and Pi (UVC) + +## Dependencies + +None — can be developed independently, but benefits from P0-path-migration being done +first so the config paths are correct. + +## Effort Estimate + +Large. This is a significant feature addition: +- UVC support in rtsp-server: 1-2 sessions +- WebRTC via GStreamer webrtcbin: 2-3 sessions (signaling is the complex part) +- Frontend Video page: 1 session +- Testing on actual hardware: 1-2 sessions +Total estimate: 5-8 sessions. diff --git a/claude_plan/P3-mavlink2rest.md b/claude_plan/P3-mavlink2rest.md new file mode 100644 index 0000000..c631254 --- /dev/null +++ b/claude_plan/P3-mavlink2rest.md @@ -0,0 +1,112 @@ +# P3: MAVLink REST/WebSocket API Bridge + +## Problem + +Currently, the only way to interact with MAVLink data from the web UI is through the +Python services (autopilot-manager), which expose a limited set of MAVLink information +via custom REST endpoints. There is no general-purpose way to subscribe to arbitrary +MAVLink messages or send commands from the browser. + +## Solution + +Add a MAVLink-to-REST/WebSocket bridge that exposes the full MAVLink message set via +HTTP and WebSocket APIs. This enables: +- Real-time telemetry in the browser via WebSocket +- Sending MAVLink commands via REST +- Third-party integrations without custom service code + +### Options + +1. **mavlink2rest** (Rust) — Existing open-source project by Blue Robotics. Provides + REST + WebSocket API for MAVLink. Well-tested, used in BlueOS. + - Pro: Battle-tested, maintained, full MAVLink coverage + - Con: Adds Rust dependency, another service to maintain + +2. 
**Custom Python bridge** — Build on top of pymavlink in a new service + - Pro: Same tech stack as existing services + - Con: Significant effort to match mavlink2rest's feature set + +3. **Extend autopilot-manager** — Add WebSocket support to existing service + - Pro: No new service + - Con: Mixes concerns, harder to maintain + +**Recommended**: Option 1 (mavlink2rest) — it's proven and feature-complete. + +## Files to Modify + +| File | Change | +|------|--------| +| New: `services/mavlink2rest/` | Service directory with manifest | +| `packaging/packages.yaml` | Add mavlink2rest service definition | +| `frontend/ark-ui.nginx` | Add proxy for mavlink2rest API | + +## Implementation Steps + +### Step 1: Add mavlink2rest as a service + +```yaml +# packages.yaml +mavlink2rest: + type: custom + description: "MAVLink REST/WebSocket API bridge" + contents: + - src: services/mavlink2rest/mavlink2rest + dst: /opt/ark/bin/mavlink2rest + mode: "0755" + systemd: + exec_start: /opt/ark/bin/mavlink2rest --connect udpin:0.0.0.0:14551 + after: [mavlink-router.service] + wants: [mavlink-router.service] +``` + +### Step 2: Configure mavlink-router endpoint + +Add a UDP endpoint in mavlink-router config for mavlink2rest: +``` +[UdpEndpoint mavlink2rest] +Mode = Normal +Address = 127.0.0.1 +Port = 14551 +``` + +### Step 3: Add nginx proxy + +```nginx +location /api/mavlink/ { + proxy_pass http://127.0.0.1:8088/; +} +location /api/mavlink/ws { + proxy_pass http://127.0.0.1:8088/ws; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; +} +``` + +### Step 4: Frontend integration + +Use WebSocket in Vue components for real-time telemetry: +```javascript +const ws = new WebSocket(`ws://${location.host}/api/mavlink/ws`); +ws.onmessage = (event) => { + const msg = JSON.parse(event.data); + // Update telemetry display +}; +``` + +## Acceptance Criteria + +- [ ] mavlink2rest runs as a systemd service +- [ ] REST API accessible at 
`/api/mavlink/` +- [ ] WebSocket provides real-time MAVLink messages +- [ ] Integrates with mavlink-router via UDP endpoint +- [ ] Service discoverable by service-manager + +## Dependencies + +- **P1-flask-to-fastapi** — Not strictly required, but good to modernize backend first + +## Effort Estimate + +Medium. Most of the work is integration (packaging, nginx config, mavlink-router config). +The mavlink2rest binary itself is pre-built. Estimate 2-3 sessions including testing. diff --git a/claude_plan/P3-zenoh-support.md b/claude_plan/P3-zenoh-support.md new file mode 100644 index 0000000..c34399b --- /dev/null +++ b/claude_plan/P3-zenoh-support.md @@ -0,0 +1,106 @@ +# P3: Zenoh Support Alongside DDS Agent + +## Problem + +The current PX4-ROS2 bridge uses Micro-XRCE-DDS-Agent, which works but has limitations: +- DDS discovery can be slow and resource-heavy on embedded systems +- No native pub/sub for non-ROS2 consumers +- Limited to the DDS ecosystem + +Zenoh is a lightweight pub/sub protocol that can bridge DDS, MQTT, and REST. It's gaining +traction in the robotics community as a more efficient alternative to raw DDS for +resource-constrained systems. + +## Solution + +Add an optional Zenoh daemon that runs alongside the DDS agent, bridging DDS topics to +Zenoh's lightweight protocol. This enables: +- Efficient pub/sub for web clients and mobile apps +- Bridge to MQTT for IoT integrations +- Lower overhead than full DDS for companion computer use cases + +**Important**: This does NOT replace the DDS agent. It adds Zenoh as an additional +transport option. 
+ +## Files to Modify + +| File | Change | +|------|--------| +| New: `services/zenoh-daemon/` | Service directory with manifest and config | +| `packaging/packages.yaml` | Add zenoh-daemon service definition | + +## Implementation Steps + +### Step 1: Evaluate Zenoh-DDS bridge + +Test the `zenoh-bridge-dds` binary: +```bash +# Run alongside DDS agent +zenoh-bridge-dds --scope /ark --dds-domain 0 +``` + +Verify it correctly bridges PX4 uORB topics published by the DDS agent. + +### Step 2: Add as an ARK-OS service + +```yaml +# packages.yaml +zenoh-daemon: + type: custom + description: "Zenoh daemon with DDS bridge for lightweight pub/sub" + contents: + - src: services/zenoh-daemon/zenoh-bridge-dds + dst: /opt/ark/bin/zenoh-bridge-dds + mode: "0755" + - src: services/zenoh-daemon/config.json5 + dst: /opt/ark/share/zenoh-daemon/config.json5 + type: config + systemd: + exec_start: /opt/ark/bin/zenoh-bridge-dds -c /opt/ark/share/zenoh-daemon/config.json5 + after: [dds-agent.service] + wants: [dds-agent.service] +``` + +### Step 3: Configure Zenoh + +```json5 +// config.json5 +{ + mode: "peer", + listen: { endpoints: ["tcp/0.0.0.0:7447"] }, + plugins: { + dds: { + scope: "/ark", + domain: 0, + allow: "VehicleStatus|SensorCombined|VehicleGpsPosition" + } + } +} +``` + +### Step 4: Frontend integration (optional) + +Zenoh has a JavaScript client that could connect directly from the browser: +```javascript +const z = await zenoh.open({ connect: { endpoints: [`tcp/${location.hostname}:7447`] } }); +const sub = z.subscribe('/ark/**', (sample) => { + console.log(sample.key, sample.value); +}); +``` + +## Acceptance Criteria + +- [ ] Zenoh daemon runs alongside DDS agent without conflicts +- [ ] PX4 uORB topics are accessible via Zenoh protocol +- [ ] Service is optional (not included in ark-companion meta-package) +- [ ] Configurable topic filtering +- [ ] Resource usage is acceptable on Jetson/Pi + +## Dependencies + +None — independent of other plans. 
+ +## Effort Estimate + +Medium. Main effort is testing Zenoh-DDS bridge compatibility with the PX4 topic set +and measuring resource usage on embedded targets. Estimate 2-3 sessions. diff --git a/claude_plan/completed/.gitkeep b/claude_plan/completed/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/claude_plan/completed/2026-02-24-P0-default-disabled-services.md b/claude_plan/completed/2026-02-24-P0-default-disabled-services.md new file mode 100644 index 0000000..5eeaf78 --- /dev/null +++ b/claude_plan/completed/2026-02-24-P0-default-disabled-services.md @@ -0,0 +1,53 @@ +# Default-disabled services + platform meta-packages + +## Problem + +Every service was `systemctl enable`d + `restart`ed on deb install. The old install flow +used interactive prompts and env vars (`default.env`) to selectively install services and +configure them. This was fragile, non-standard, and incompatible with a future apt repository. + +## Solution + +Install all services via platform meta-packages, but only enable core infrastructure by +default. Optional services are installed dormant — users enable them via the web UI. + +## Changes Made + +1. **`packaging/packages.yaml`** — Added `default_enabled: false` to 7 optional services; + replaced single `ark-companion` meta-package with 4 platform-specific ones + (ark-companion-base, ark-companion-jetson, ark-companion-pi, ark-companion-ubuntu) + +2. **`packaging/generate.py`** — Modified postinst generators to accept `default_enabled`; + when `False`, postinst only runs `daemon-reload`. Fixed content path prefix (`../../`) + for correct resolution from `packaging/generated/`. + +3. **`tools/service_control.sh`** — Removed `is_service_enabled()` and `configure_service()`; + added explicit `systemctl enable+restart` after `dpkg -i` so dev workflow always starts. + +4. **Submodule install.sh scripts** — Removed env var config substitution blocks from + logloader, polaris, rid-transmitter. + +5. 
**All 13 manifests** — Removed `env_var`, `install_script`, `install_files` fields; + changed `jetson-can` to `visible: true`. + +6. **`tools/install_software.sh`** — Removed `default.env` sourcing, `ask_yes_no()`, + interactive prompts, installation summary, `INSTALL_JETPACK` conditional. + +7. **`default.env`** — Deleted. + +8. **`.github/workflows/build.yml`** — Updated all nfpm steps to run `generate.py` first + and work from `packaging/generated/`; release builds 4 platform meta-packages. + +## Completion Notes + +- **Date**: 2026-02-24 +- **Session ID**: 86152669-592c-43af-b1b9-11ceba788866 +- **Transcript**: ~/.claude/projects/-home-jake-code-ark-ARK-OS/86152669-592c-43af-b1b9-11ceba788866.jsonl +- **Planning session**: fcbeabc2-9653-48b4-96d3-39cd0d5c1d4a.jsonl +- **Summary**: Implemented default-disabled services and platform meta-packages. Core services + auto-enable on deb install; optional services install dormant. Removed all legacy env var + config logic and interactive prompts. Fixed CI to generate packaging files before nfpm. +- **Deviations**: Also fixed pre-existing CI bug where nfpm config paths assumed `packaging/` + but generated files are in `packaging/generated/`. Fixed content source paths from `../` to + `../../` in generate.py for correct resolution. +- **Follow-up**: None diff --git a/claude_plan/completed/2026-02-24-P0-path-migration-cleanup.md b/claude_plan/completed/2026-02-24-P0-path-migration-cleanup.md new file mode 100644 index 0000000..e11f3fd --- /dev/null +++ b/claude_plan/completed/2026-02-24-P0-path-migration-cleanup.md @@ -0,0 +1,187 @@ +# P0: Complete Path Migration Cleanup + +## Problem + +The project recently migrated install paths from `~/.local/bin/` + `~/.local/share/` (XDG) +to `/opt/ark/bin/` + `/opt/ark/share/`. Packaging (`packages.yaml`) and systemd units are +correct, but **service source code still hardcodes old paths**. 
This is a runtime bug: +debs install configs to `/opt/ark/share/<service>/` but services look for them in +`~/.local/share/<service>/`. + +Additionally, old per-service `install.sh` scripts remain in submodules, creating confusion +about how installation works. + +## Solution + +### Config Path Strategy (two-tier lookup) + +Services need a two-tier config lookup: +1. **User config** at `~/.config/ark/<service>/config.toml` — writable, persists across upgrades +2. **Default config** at `/opt/ark/share/<service>/config.toml` — installed by deb, read-only + +On startup, each service checks for user config first, falls back to default. Services that +write runtime state (logloader's SQLite DB, downloaded logs) use `~/.local/share/ark/<service>/` +as a writable data directory. + +``` +Read config: ~/.config/ark/<service>/config.toml → /opt/ark/share/<service>/config.toml +Write data: ~/.local/share/ark/<service>/ +Binaries: /opt/ark/bin/ +``` + +## Files to Modify + +### C++ submodules (ARK-owned, we can edit) + +| File | Line(s) | Current Path | New Behavior | +|------|---------|-------------|--------------| +| `services/logloader/logloader/src/main.cpp` | 21, 38 | `$HOME/.local/share/logloader/` | Two-tier config + writable data dir | +| `services/polaris/polaris-client-mavlink/src/main.cpp` | 22 | `$HOME/.local/share/polaris/config.toml` | Two-tier config lookup | +| `services/rid-transmitter/RemoteIDTransmitter/src/main.cpp` | 23 | `$HOME/.local/share/rid-transmitter/config.toml` | Two-tier config lookup | +| `services/rtsp-server/rtsp-server/src/main.cpp` | 24 | `$HOME/.local/share/rtsp-server/config.toml` | Two-tier config lookup | + +### Python services + +| File | Line(s) | Issue | +|------|---------|-------| +| `services/service-manager/service_manager.py` | 68, 88, 105 | Hardcoded `~/.local/share` for manifest/config discovery and `~/.config/systemd/user` for unit files | +| `services/autopilot-manager/autopilot_manager.py` | 458, 538 | Hardcoded `~/.local/bin/` for `reset_fmu_*.py` and `px_uploader.py` | + +### Shell scripts + +| 
File | Line(s) | Issue | +|------|---------|-------| +| `platform/common/scripts/flash_firmware.sh` | 23, 28, 34 | Hardcoded `~/.local/bin/` for `reset_fmu_wait_bl.py`, `px_uploader.py`, `reset_fmu_fast.py` | +| `services/flight-review/start_flight_review.sh` | 5 | Hardcoded `~/.local/share/flight_review/app/serve.py` | + +### Legacy files to delete + +These old per-service install scripts are superseded by deb packaging: + +- `services/logloader/logloader/install.sh` +- `services/rid-transmitter/RemoteIDTransmitter/install.sh` +- `services/polaris/polaris-client-mavlink/install.sh` +- `services/rtsp-server/rtsp-server/install.sh` + +### Documentation to update + +- `services/logloader/logloader/README.md` — Update path references +- `services/rid-transmitter/RemoteIDTransmitter/README.md` — Update config path +- `README.md` — Clarify deb as primary install method, `install.sh` as legacy/dev + +## Implementation Steps + +### Step 1: Create shared C++ config helper + +Create a small header-only utility (or inline in each `main.cpp`) for two-tier config lookup: + +```cpp +#include <cstdlib> +#include <filesystem> +#include <string> + +namespace ark { + +// Returns path to config file: user override > default +inline std::string find_config(const std::string& service_name, + const std::string& filename = "config.toml") { + const char* home = std::getenv("HOME"); + if (!home) home = "/tmp"; + + // User config (writable, survives upgrades) + auto user_config = std::filesystem::path(home) / ".config/ark" / service_name / filename; + if (std::filesystem::exists(user_config)) { + return user_config.string(); + } + + // Default config (installed by deb) + auto default_config = std::filesystem::path("/opt/ark/share") / service_name / filename; + if (std::filesystem::exists(default_config)) { + return default_config.string(); + } + + // Fallback to user location (will be created on first run) + return user_config.string(); +} + +// Returns writable data directory for a service +inline std::string 
data_dir(const std::string& service_name) { + const char* home = std::getenv("HOME"); + if (!home) home = "/tmp"; + auto dir = std::filesystem::path(home) / ".local/share/ark" / service_name; + std::filesystem::create_directories(dir); + return dir.string(); +} + +} // namespace ark +``` + +### Step 2: Update C++ services + +For each C++ service (`logloader`, `polaris`, `rid-transmitter`, `rtsp-server`): +1. Replace hardcoded `$HOME/.local/share/<service>/config.toml` with `ark::find_config("<service>")` +2. For logloader: use `ark::data_dir("logloader")` for SQLite DB and log storage +3. Build and verify config is found correctly + +### Step 3: Update Python services + +**service_manager.py:** +- Change manifest discovery path from `~/.local/share` to `/opt/ark/share` +- Change config lookup to check `~/.config/ark/<service>/` then `/opt/ark/share/<service>/` +- Update systemd unit path: `/etc/systemd/user/` (debs install here, not `~/.config/systemd/user/`) + +**autopilot_manager.py:** +- Change `~/.local/bin/` references to `/opt/ark/bin/` + +### Step 4: Update shell scripts + +**flash_firmware.sh:** +- Change `~/.local/bin/reset_fmu_wait_bl.py` → `/opt/ark/bin/reset_fmu_wait_bl.py` +- Change `~/.local/bin/px_uploader.py` → `/opt/ark/bin/px_uploader.py` +- Change `~/.local/bin/reset_fmu_fast.py` → `/opt/ark/bin/reset_fmu_fast.py` + +**start_flight_review.sh:** +- Change `~/.local/share/flight_review/app/serve.py` → `/opt/ark/share/flight-review/app/serve.py` + +### Step 5: Delete legacy install scripts + +Remove the four `install.sh` files from submodules. These will need to be committed in +their respective submodule repos. 
+ +### Step 6: Update documentation + +- Update `README.md` to clarify the three install workflows +- Update submodule READMEs with correct paths + +## Acceptance Criteria + +- [ ] All C++ services find config at `/opt/ark/share/<service>/config.toml` (default) + or `~/.config/ark/<service>/config.toml` (user override) +- [ ] Logloader writes data to `~/.local/share/ark/logloader/` (not `~/.local/share/logloader/`) +- [ ] `service_manager.py` discovers manifests and configs from `/opt/ark/share/` +- [ ] `autopilot_manager.py` references scripts at `/opt/ark/bin/` +- [ ] `flash_firmware.sh` references scripts at `/opt/ark/bin/` +- [ ] `start_flight_review.sh` references the correct serve.py path +- [ ] No `install.sh` files remain in submodules +- [ ] `grep -r '~/.local' services/ platform/` returns zero hits (excluding git history) +- [ ] Services start correctly after a fresh `dpkg -i` install + +## Dependencies + +None — this is a foundational fix. + +## Effort Estimate + +Medium. ~15 files across 6 repos (main + 4 C++ submodules + flight-review wrapper). +The C++ changes are mechanical. The service_manager.py changes require care to maintain +backward compatibility during transition. Estimate 2-3 focused sessions. + +## Completion Notes + +- **Date**: 2026-02-24 +- **Session ID**: fc9f2ca7-2003-48fe-b4f3-9213def9ca93 +- **Transcript**: ~/.claude/projects/-home-jake-code-ark-ARK-OS/fc9f2ca7-2003-48fe-b4f3-9213def9ca93.jsonl +- **Planning session**: a0399aa8-6f9f-4357-8e39-c814c7e82711.jsonl +- **Summary**: Completed full path migration from `~/.local/` to `/opt/ark/`. Updated all 4 C++ submodules (logloader, polaris, rid-transmitter, rtsp-server) with two-tier config lookup. Fixed Python services (service_manager.py, autopilot_manager.py) and shell scripts (flash_firmware.sh, start_flight_review.sh). Added config.toml, manifest.json, and helper scripts to deb packages via packages.yaml + generate.py auto-include. Deleted 4 legacy install.sh files from submodules. 
Updated submodule READMEs. +- **Deviations**: Used inline two-tier config lookup in each main.cpp rather than a shared header, since the 4 submodules are separate repos and a shared header would need to be duplicated anyway. The `reset_fmu_*.py` scripts are not included in the autopilot-manager deb (platform-specific GPIO dependencies — noted in the plan as future work). +- **Follow-up**: Submodule changes need to be committed and pushed in their respective repos (logloader, polaris-client-mavlink, RemoteIDTransmitter, rtsp-server). The main repo needs the submodule pointers updated after those pushes. diff --git a/claude_plan/completed/2026-02-24-P0-security-hardening.md b/claude_plan/completed/2026-02-24-P0-security-hardening.md new file mode 100644 index 0000000..dc4b03a --- /dev/null +++ b/claude_plan/completed/2026-02-24-P0-security-hardening.md @@ -0,0 +1,189 @@ +# P0: Security Hardening + +## Problem + +Several Python services use `subprocess.run(command, shell=True)` with string-interpolated +user input, creating command injection vulnerabilities. Service names and other parameters +from HTTP requests are inserted directly into shell commands without validation. + +### Current Vulnerabilities + +**service_manager.py** — 3 instances of `shell=True`: +- Line 33: `run_systemctl()` — `f"systemctl --user {operation} {service_name}"` +- Line 56: `get_service_status()` — `f"systemctl --user is-{status_type} {service_name}"` +- Line 213: `get_logs()` — `f"journalctl --user -u {service_name} -n {num_lines} --no-pager -o cat"` + +**connection_manager.py** — 1 instance: +- Line 68: `CommandExecutor.run_command()` — Generic command executor with `shell=True` + +An attacker with network access to the management ports could inject shell commands via +crafted service names (e.g., `; rm -rf /`) or connection parameters. + +## Solution + +1. Replace all `shell=True` with parameterized `subprocess.run([...])` (list form) +2. Add input validation for all external inputs +3. 
Validate service names, hostnames, and config content at API boundaries + +## Files to Modify + +| File | Changes | +|------|---------| +| `services/service-manager/service_manager.py` | Replace shell=True (3 sites), add service name validation | +| `services/connection-manager/connection_manager.py` | Replace shell=True (1 site), add hostname/SSID validation | +| `services/system-manager/system_manager.py` | Audit for injection, add input validation | +| `services/autopilot-manager/autopilot_manager.py` | Audit for injection, add input validation | + +## Implementation Steps + +### Step 1: Add input validation helpers + +Add a validation module or inline validators for common inputs: + +```python +import re + +def validate_service_name(name: str) -> str: + """Validate service name: alphanumeric, hyphens, underscores only.""" + if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9._-]{0,63}$', name): + raise ValueError(f"Invalid service name: {name}") + return name + +def validate_hostname(hostname: str) -> str: + """Validate hostname per RFC 1123.""" + if not re.match(r'^[a-zA-Z0-9]([a-zA-Z0-9.-]{0,253}[a-zA-Z0-9])?$', hostname): + raise ValueError(f"Invalid hostname: {hostname}") + return hostname + +def validate_positive_int(value, max_val: int = 10000) -> int: + """Validate positive integer within bounds.""" + n = int(value) + if n < 1 or n > max_val: + raise ValueError(f"Value out of range: {n}") + return n +``` + +### Step 2: Fix service_manager.py + +Replace shell=True with list-form subprocess calls: + +```python +# Before (vulnerable): +command = f"systemctl --user {operation} {service_name}" +process = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=10) + +# After (safe): +service_name = validate_service_name(service_name) +process = subprocess.run( + ["systemctl", "--user", operation, service_name], + capture_output=True, text=True, timeout=10 +) +``` + +Apply same pattern to `get_service_status()` and `get_logs()`: + +```python +# 
get_logs - before: +command = f"journalctl --user -u {service_name} -n {num_lines} --no-pager -o cat" + +# get_logs - after: +service_name = validate_service_name(service_name) +num_lines = validate_positive_int(num_lines, max_val=10000) +process = subprocess.run( + ["journalctl", "--user", "-u", service_name, "-n", str(num_lines), "--no-pager", "-o", "cat"], + capture_output=True, text=True, timeout=10 +) +``` + +### Step 3: Fix connection_manager.py + +Replace the generic `CommandExecutor.run_command()` shell executor. Instead of passing +full command strings, use list-form for each specific nmcli operation: + +```python +# Before (vulnerable): +result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True) + +# After (safe) — one method per operation: +def get_connections(self): + return subprocess.run( + ["nmcli", "-t", "-f", "NAME,TYPE,DEVICE", "connection", "show", "--active"], + capture_output=True, text=True, check=True + ) + +def connect_wifi(self, ssid: str, password: str): + validate_ssid(ssid) + return subprocess.run( + ["nmcli", "device", "wifi", "connect", ssid, "password", password], + capture_output=True, text=True, check=True, timeout=30 + ) +``` + +### Step 4: Add config file validation + +For any endpoint that accepts config file content: +- Enforce size limit (e.g., 64KB max) +- Prevent path traversal in filenames (reject `..`, absolute paths) +- Validate TOML syntax before writing + +```python +def validate_config_content(content: str, max_size: int = 65536) -> str: + if len(content) > max_size: + raise ValueError(f"Config too large: {len(content)} bytes (max {max_size})") + # Verify valid TOML + import tomllib + tomllib.loads(content) + return content + +def validate_config_path(path: str, allowed_dir: str) -> str: + resolved = os.path.realpath(path) + if not resolved.startswith(os.path.realpath(allowed_dir)): + raise ValueError(f"Path traversal detected: {path}") + return resolved +``` + +### Step 5: Audit remaining 
services + +Review `system_manager.py` and `autopilot_manager.py` for similar patterns: +- Search for `subprocess.run`, `os.system`, `os.popen` +- Ensure all external input is validated before use +- Replace any remaining `shell=True` usage + +## Acceptance Criteria + +- [x] Zero instances of `shell=True` in Python services +- [x] All service names validated with `^[a-zA-Z0-9][a-zA-Z0-9._-]{0,63}$` +- [x] All hostnames validated per RFC 1123 +- [x] Config file writes enforce size limits and path traversal prevention +- [x] `grep -r 'shell=True' services/` returns zero hits (only upstream mavlink-router submodule) +- [ ] All existing API endpoints still work correctly after changes (needs runtime testing) +- [x] Invalid inputs return 400 status with descriptive error messages + +## Dependencies + +None — this is a standalone security fix. Should be done before P1-testing-framework +so tests can validate the security fixes. + +## Effort Estimate + +Small-medium. ~4 files to modify, mostly mechanical replacement of subprocess calls. +The connection_manager.py refactor is the most involved since it has a generic command +executor that needs to be split into specific methods. Estimate 1-2 focused sessions. + +## Completion Notes + +- **Date**: 2026-02-24 +- **Session ID**: b906d938-9539-443c-b27c-bff4b9713b85 +- **Transcript**: ~/.claude/projects/-home-jake-code-ark-ARK-OS/b906d938-9539-443c-b27c-bff4b9713b85.jsonl +- **Summary**: Eliminated all `shell=True` subprocess calls in ARK-OS Python services. + Added input validation (service names, connection names, SSIDs, hostnames, IP addresses, + APNs, interface names) at API boundaries. Added config file size limits and path traversal + prevention. Converted ~40 string-form shell commands in connection_manager.py to list-form. + Replaced piped shell commands with Python-native parsing. Audited system_manager.py and + autopilot_manager.py (both already clean). 
+- **Deviations**: Kept `CommandExecutor` class structure in connection_manager.py rather than + splitting into per-operation methods — the list-form approach is equally safe and minimizes + code churn. Did not add TOML syntax validation to config writes (configs may not always be + TOML format per manifest configFile). +- **Follow-up**: Runtime testing needed on target devices to confirm all API endpoints work + correctly with the list-form subprocess calls. diff --git a/default.env b/default.env deleted file mode 100644 index 3818104..0000000 --- a/default.env +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# Software ENV variables -export INSTALL_DDS_AGENT="y" -export INSTALL_RTSP_SERVER="y" -export INSTALL_RID_TRANSMITTER="y" -export MANUFACTURER_CODE="ARK1" -export SERIAL_NUMBER="C0FFEE123" -export INSTALL_LOGLOADER="y" -export USER_EMAIL="" -export UPLOAD_TO_FLIGHT_REVIEW="n" -export PUBLIC_LOGS="n" -export INSTALL_POLARIS="y" -export POLARIS_API_KEY="" -export INSTALL_JETPACK="y" diff --git a/frontend/ark-proxy.conf b/frontend/ark-proxy.conf new file mode 100644 index 0000000..b2ae849 --- /dev/null +++ b/frontend/ark-proxy.conf @@ -0,0 +1,5 @@ +proxy_http_version 1.1; +proxy_set_header Host $host; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_cache_bypass $http_upgrade; diff --git a/frontend/ark-ui.nginx b/frontend/ark-ui.nginx index 937ca48..73d8490 100644 --- a/frontend/ark-ui.nginx +++ b/frontend/ark-ui.nginx @@ -3,34 +3,63 @@ server { server_name localhost; client_max_body_size 500M; - # Static files + # CORS headers (replaces Express cors middleware) + add_header Access-Control-Allow-Origin * always; + add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" always; + add_header Access-Control-Allow-Headers "Content-Type, Authorization" always; + if ($request_method = OPTIONS) { return 204; } + + # Access logging (replaces Express morgan middleware) + access_log 
/var/log/nginx/ark-ui-access.log; + + # Static SPA location / { root /var/www/ark-ui/html; index index.html; try_files $uri $uri/ /index.html; } - # API Gateway - all API traffic goes to Express server - location /api/ { - proxy_pass http://localhost:3000; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_cache_bypass $http_upgrade; + # --- Direct service proxies (no Express middleman) --- + + location /api/network/ { + proxy_pass http://localhost:3001; + include /etc/nginx/snippets/ark-proxy.conf; } - # Unified WebSocket handling for all Socket.IO connections - location /socket.io/ { - proxy_pass http://localhost:3000; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_cache_bypass $http_upgrade; + location /socket.io/network-stats/ { + proxy_pass http://localhost:3001; + include /etc/nginx/snippets/ark-ws.conf; } + location /api/service/ { + proxy_pass http://localhost:3002; + include /etc/nginx/snippets/ark-proxy.conf; + } + location /api/autopilot/ { + proxy_pass http://localhost:3003; + include /etc/nginx/snippets/ark-proxy.conf; + } + + location /socket.io/autopilot-firmware-upload/ { + proxy_pass http://localhost:3003; + include /etc/nginx/snippets/ark-ws.conf; + } + + location /api/system/ { + proxy_pass http://localhost:3004; + include /etc/nginx/snippets/ark-proxy.conf; + } + + # JSON error responses (replaces Express error handler) + error_page 502 /502.json; + location = /502.json { + internal; + default_type application/json; + return 502 '{"error": "Service unavailable"}'; + } + + # Flight review location /flight-review { rewrite ^/flight-review$ /flight-review/browse redirect; rewrite ^/flight-review/(.*)$ /$1 break; @@ -47,6 +76,5 @@ server { sub_filter '"/js/' '"/flight-review/js/'; sub_filter '"/css/' '"/flight-review/css/'; sub_filter_once off; - } } diff 
--git a/frontend/ark-ui/install.sh b/frontend/ark-ui/install.sh index eb2505e..f82e6aa 100755 --- a/frontend/ark-ui/install.sh +++ b/frontend/ark-ui/install.sh @@ -33,12 +33,8 @@ nvm alias default 20 # Install global Vue CLI npm install -g @vue/cli @vue/cli-service@latest -# Install backend dependencies -cd backend -npm install - # Install frontend dependencies and build project -cd ../ark-ui +cd ark-ui npm install npm run build diff --git a/frontend/ark-ws.conf b/frontend/ark-ws.conf new file mode 100644 index 0000000..06e78a4 --- /dev/null +++ b/frontend/ark-ws.conf @@ -0,0 +1,7 @@ +proxy_http_version 1.1; +proxy_set_header Host $host; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection "upgrade"; +proxy_cache_bypass $http_upgrade; diff --git a/packaging/Dockerfile.build b/packaging/Dockerfile.build new file mode 100644 index 0000000..835be8a --- /dev/null +++ b/packaging/Dockerfile.build @@ -0,0 +1,50 @@ +FROM ubuntu:22.04 + +ARG DEBIAN_FRONTEND=noninteractive + +# Base build dependencies +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + ninja-build \ + meson \ + pkg-config \ + git \ + curl \ + python3 \ + python3-pip \ + # Logloader / Polaris / RID dependencies + libssl-dev \ + libsqlite3-dev \ + libgflags-dev \ + libgoogle-glog-dev \ + libboost-all-dev \ + libbluetooth-dev \ + # RTSP server dependencies + libgstreamer1.0-dev \ + libgstreamer-plugins-base1.0-dev \ + libgstreamer-plugins-bad1.0-dev \ + libgstrtspserver-1.0-dev \ + # Python + YAML for package generation + python3-yaml \ + # nfpm for packaging + && rm -rf /var/lib/apt/lists/* + +# Build MAVSDK as a static library (pinned version — bump as needed) +ARG MAVSDK_VERSION=v3.15.0 +RUN git clone --recurse-submodules --depth=1 -b ${MAVSDK_VERSION} \ + https://github.com/mavlink/MAVSDK.git /tmp/mavsdk && \ + cmake -B /tmp/mavsdk/build -S /tmp/mavsdk \ + 
-DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DCMAKE_INSTALL_PREFIX=/usr/local \ + && \ + cmake --build /tmp/mavsdk/build -j$(nproc) && \ + cmake --install /tmp/mavsdk/build && \ + ldconfig && \ + rm -rf /tmp/mavsdk + +# Install nfpm +RUN curl -sfL https://install.goreleaser.com/github.com/goreleaser/nfpm.sh | sh -s -- -b /usr/local/bin + +WORKDIR /build diff --git a/packaging/README.md b/packaging/README.md new file mode 100644 index 0000000..3324191 --- /dev/null +++ b/packaging/README.md @@ -0,0 +1,181 @@ +# ARK-OS Packaging + +ARK-OS services are packaged as individual `.deb` files. Each service can be installed, updated, and rolled back independently using standard Debian package tools (`dpkg`, `apt`). + +## Quick Reference + +```bash +# Install a service +sudo dpkg -i ark-autopilot-manager_1.0.0_arm64.deb + +# Update a service (same command — installs over the old version) +sudo dpkg -i ark-autopilot-manager_1.1.0_arm64.deb + +# Rollback to a previous version +sudo dpkg -i ark-autopilot-manager_1.0.0_arm64.deb + +# Remove a service +sudo dpkg -r ark-autopilot-manager + +# Install everything at once (meta-package) +sudo dpkg -i ark-companion_1.0.0_arm64.deb +``` + +## How It Works + +All packaging is driven by a single config file and a generator: + +``` +packaging/ +├── packages.yaml # Single source of truth — all packages declared here +├── generate.py # Reads packages.yaml + service manifests → generates all configs +├── build-packages.sh # Calls generate.py, then builds and packages with nfpm +├── Dockerfile.build # CI build environment (cross-compilation) +├── README.md +└── generated/ # gitignored — produced by generate.py + ├── ark-*.yaml # nfpm package configs + ├── scripts/ # postinst/prerm shell scripts + └── service-files/ # systemd unit files +``` + +### Adding a New Service + +Add ~5 lines to `packages.yaml`: + +```yaml +services: + my-new-service: + type: python # or cpp, bash, custom + description: "My new service" + script: 
my_new_service.py + depends: [some-package] +``` + +Then run `python3 generate.py` — it produces the nfpm config, systemd unit, and install/remove scripts automatically. + +### Type Defaults + +Each service type has sensible defaults (see `generate.py` TYPE_DEFAULTS): + +| | `python` | `cpp` | `bash` | +|---|---|---|---| +| depends | `python3, python3-flask` + extras | extras only | extras only | +| exec_start | `python3 /opt/ark/bin/{script}` | `/opt/ark/bin/{binary}` | `/opt/ark/bin/{script}` | +| environment | `PYTHONUNBUFFERED=1` | — | — | +| restart | `on-failure` | `on-failure` | `on-failure` | + +Services only specify what differs from the defaults. + +## Packages + +| Package | Type | Contents | +|---------|------|----------| +| `ark-autopilot-manager` | Python | autopilot_manager.py + systemd unit | +| `ark-connection-manager` | Python | connection_manager.py + systemd unit | +| `ark-service-manager` | Python | service_manager.py + systemd unit | +| `ark-system-manager` | Python | system_manager.py + systemd unit | +| `ark-logloader` | C++ binary | logloader + systemd unit | +| `ark-mavlink-router` | C++ binary | mavlink-routerd + start script + config + systemd unit | +| `ark-polaris` | C++ binary | polaris-client-mavlink + systemd unit | +| `ark-rid-transmitter` | C++ binary | rid-transmitter + systemd unit (Jetson only) | +| `ark-rtsp-server` | C++ binary | rtsp-server + systemd unit | +| `ark-dds-agent` | C++ binary | MicroXRCEAgent + start script + systemd unit | +| `ark-flight-review` | Python app | flight_review app + start script + systemd unit | +| `ark-ui` | Frontend | Vue dist + nginx config + proxy snippets | +| `ark-hotspot-updater` | Bash | update script + systemd unit | +| `ark-jetson-can` | Bash | CAN scripts + systemd unit (Jetson only) | +| **`ark-companion`** | **Meta** | **Depends on all core packages above** | + +## Install Paths + +Packaged services install to standardized paths: + +| What | Path | +|------|------| +| Binaries & 
scripts | `/opt/ark/bin/` | +| Default configs | `/opt/ark/share/<service>/` | +| Systemd units (user) | `/etc/systemd/user/` | +| Systemd units (root) | `/etc/systemd/system/` | +| Frontend files | `/var/www/ark-ui/html/` | +| Nginx config | `/etc/nginx/sites-available/ark-ui` | + +## What Happens on Install/Update + +Each `.deb` includes postinst/prerm scripts that automatically: + +1. **On install/update (postinst):** reload systemd, enable the service, restart it +2. **On remove (prerm):** stop the service, disable it + +You don't need to manually restart services after installing a `.deb`. + +## PR Testing Workflow + +This is the primary use case for packaging — testing a single service change from a PR without rebuilding everything: + +1. Developer pushes a PR that modifies `autopilot-manager` +2. GitHub Actions CI builds `ark-autopilot-manager_1.0.0~pr42_arm64.deb` +3. The `.deb` is attached as a build artifact on the PR +4. Tester downloads it and copies to the device: + ```bash + scp ark-autopilot-manager_1.0.0~pr42_arm64.deb user@device:~ + ssh user@device sudo dpkg -i ark-autopilot-manager_1.0.0~pr42_arm64.deb + ``` +5. The service restarts automatically with the new code +6. To rollback: install the previous `.deb` or the stable release version + +## Building Packages Locally + +### Python/Bash services (no compilation needed) + +```bash +cd packaging +python3 generate.py +VERSION=1.0.0 ARCH=arm64 nfpm package --config generated/ark-autopilot-manager.yaml --packager deb --target ../dist/ +``` + +Or use the build script: + +```bash +./packaging/build-packages.sh package-python +``` + +### C++ services (need ARM64 compilation) + +On an ARM64 device (Jetson/Pi), you can build natively: + +```bash +./packaging/build-packages.sh all +# Packages appear in dist/ +``` + +For cross-compilation (x86 host → ARM64 target), the CI uses Docker + QEMU. See `.github/workflows/build.yml`. 
+ +### Frontend + +```bash +cd frontend/ark-ui/ark-ui +npm ci && npm run build +mkdir -p build/ark-ui && cp -r dist build/ark-ui/ +cd ../../../packaging +python3 generate.py +VERSION=1.0.0 ARCH=arm64 nfpm package --config generated/ark-ui.yaml --packager deb --target ../dist/ +``` + +## How This Relates to the Legacy install.sh + +The existing `install.sh` / `tools/install_software.sh` still works and builds everything from source on-device. The `.deb` packages are a parallel, better path: + +- **Legacy (`install.sh`):** Clones submodules, compiles on-device, copies files to `~/.local/bin/`. Good for development. +- **Packages (`.deb`):** Pre-built binaries, installs to `/opt/ark/bin/`, managed by dpkg. Good for deployment and updates. + +Both can coexist. Once packaging is stable, `install.sh` can become a thin wrapper that installs the `.deb` packages. + +## Architecture: What Docker Is For + +The `Dockerfile.build` is **not deployed to the device**. It's a CI build environment that contains all the compiler toolchains and libraries needed to cross-compile ARM64 C++ binaries on GitHub's x86 runners. The flow is: + +``` +GitHub Actions (x86) → Docker + QEMU (emulated ARM64) → compile C++ → nfpm → .deb files → GitHub Release +``` + +The device only ever sees the final `.deb` files. 
diff --git a/packaging/apt/distributions b/packaging/apt/distributions new file mode 100644 index 0000000..b4f9145 --- /dev/null +++ b/packaging/apt/distributions @@ -0,0 +1,8 @@ +Origin: ARK Electronics +Label: ARK-OS +Suite: stable +Codename: stable +Architectures: arm64 +Components: main +Description: ARK-OS companion computer packages +SignWith: default diff --git a/packaging/apt/options b/packaging/apt/options new file mode 100644 index 0000000..10c27c2 --- /dev/null +++ b/packaging/apt/options @@ -0,0 +1,2 @@ +verbose +ask-passphrase diff --git a/packaging/build-packages.sh b/packaging/build-packages.sh new file mode 100755 index 0000000..7103d60 --- /dev/null +++ b/packaging/build-packages.sh @@ -0,0 +1,187 @@ +#!/bin/bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +BUILD_DIR="$PROJECT_ROOT/build" +OUTPUT_DIR="$PROJECT_ROOT/dist" +VERSION=$(cat "$PROJECT_ROOT/VERSION" | tr -d '[:space:]') +ARCH="${ARCH:-arm64}" + +echo "=== ARK-OS Package Builder ===" +echo "Version: $VERSION" +echo "Architecture: $ARCH" +echo "" + +mkdir -p "$BUILD_DIR" "$OUTPUT_DIR" + +# ─── Generate packaging files from packages.yaml ─── + +generate() { + echo "Generating packaging files..." + python3 "$SCRIPT_DIR/generate.py" --output-dir "$SCRIPT_DIR/generated" +} + +# ─── Build C++ submodule services ─── + +build_cpp_service() { + local name="$1" + local src_dir="$2" + local build_subdir="$3" + local extra_cmake_args="${4:-}" + + echo "Building $name..." + mkdir -p "$BUILD_DIR/$build_subdir" + pushd "$src_dir" > /dev/null + + if [ -f "Makefile" ] && grep -q "cmake" Makefile 2>/dev/null; then + # cmake-based (logloader, rid-transmitter) + cmake -B "$BUILD_DIR/$build_subdir" -H. 
$extra_cmake_args + cmake --build "$BUILD_DIR/$build_subdir" -j"$(nproc)" + elif [ -f "meson.build" ]; then + # meson-based (mavlink-router) + meson setup "$BUILD_DIR/$build_subdir" --prefix=/opt/ark -Dsystemdsystemunitdir= + ninja -C "$BUILD_DIR/$build_subdir" + elif [ -f "CMakeLists.txt" ]; then + cmake -B "$BUILD_DIR/$build_subdir" -H. $extra_cmake_args + cmake --build "$BUILD_DIR/$build_subdir" -j"$(nproc)" + fi + + popd > /dev/null + echo "$name built successfully" +} + +build_frontend() { + echo "Building frontend..." + pushd "$PROJECT_ROOT/frontend/ark-ui/ark-ui" > /dev/null + npm ci + npm run build + mkdir -p "$BUILD_DIR/ark-ui" + cp -r dist "$BUILD_DIR/ark-ui/" + popd > /dev/null +} + +# ─── Package services with nfpm ─── + +package_service() { + local yaml_file="$1" + local pkg_name + pkg_name=$(basename "$yaml_file" .yaml) + + echo "Packaging $pkg_name..." + pushd "$SCRIPT_DIR/generated" > /dev/null + VERSION="$VERSION" ARCH="$ARCH" nfpm package \ + --config "$yaml_file" \ + --packager deb \ + --target "$OUTPUT_DIR/" + popd > /dev/null + echo "$pkg_name packaged" +} + +# ─── Main ─── + +case "${1:-all}" in + build-cpp) + echo "--- Building C++ services ---" + build_cpp_service logloader "$PROJECT_ROOT/services/logloader/logloader" logloader + build_cpp_service mavlink-router "$PROJECT_ROOT/services/mavlink-router/mavlink-router" mavlink-router + build_cpp_service dds-agent "$PROJECT_ROOT/services/dds-agent/Micro-XRCE-DDS-Agent" dds-agent + build_cpp_service polaris "$PROJECT_ROOT/services/polaris/polaris-client-mavlink" polaris + build_cpp_service rid-transmitter "$PROJECT_ROOT/services/rid-transmitter/RemoteIDTransmitter" rid-transmitter + build_cpp_service rtsp-server "$PROJECT_ROOT/services/rtsp-server/rtsp-server" rtsp-server + ;; + build-frontend) + echo "--- Building frontend ---" + build_frontend + ;; + package) + echo "--- Packaging all services ---" + generate + for yaml in "$SCRIPT_DIR/generated"/ark-*.yaml; do + package_service "$yaml" + done 
+ ;; + package-python) + echo "--- Packaging Python services (no build needed) ---" + generate + for svc in autopilot-manager connection-manager service-manager system-manager; do + package_service "$SCRIPT_DIR/generated/ark-${svc}.yaml" + done + ;; + package-bash) + echo "--- Packaging Bash services (no build needed) ---" + generate + for svc in hotspot-updater jetson-can; do + package_service "$SCRIPT_DIR/generated/ark-${svc}.yaml" + done + ;; + all) + echo "--- Full build + package ---" + build_cpp_service logloader "$PROJECT_ROOT/services/logloader/logloader" logloader + build_cpp_service mavlink-router "$PROJECT_ROOT/services/mavlink-router/mavlink-router" mavlink-router + build_cpp_service dds-agent "$PROJECT_ROOT/services/dds-agent/Micro-XRCE-DDS-Agent" dds-agent + build_cpp_service polaris "$PROJECT_ROOT/services/polaris/polaris-client-mavlink" polaris + build_cpp_service rid-transmitter "$PROJECT_ROOT/services/rid-transmitter/RemoteIDTransmitter" rid-transmitter + build_cpp_service rtsp-server "$PROJECT_ROOT/services/rtsp-server/rtsp-server" rtsp-server + build_frontend + + generate + for yaml in "$SCRIPT_DIR/generated"/ark-*.yaml; do + package_service "$yaml" + done + + echo "" + echo "=== Build complete ===" + echo "Packages in: $OUTPUT_DIR/" + ls -lh "$OUTPUT_DIR/"*.deb 2>/dev/null || echo "(no packages found)" + ;; + build-service) + # Build a single service by name (reads type from packages.yaml) + SERVICE_NAME="${2:?Usage: $0 build-service }" + SERVICE_TYPE=$(python3 -c " +import yaml +with open('$SCRIPT_DIR/packages.yaml') as f: + cfg = yaml.safe_load(f) +svc = cfg.get('services', {}).get('$SERVICE_NAME', {}) +print(svc.get('type', 'unknown')) +") + case "$SERVICE_TYPE" in + cpp) + BUILD_SRC=$(python3 -c " +import yaml +with open('$SCRIPT_DIR/packages.yaml') as f: + cfg = yaml.safe_load(f) +svc = cfg['services']['$SERVICE_NAME'] +print(svc.get('build_dir', '')) +") + if [ -z "$BUILD_SRC" ]; then + echo "Error: No build_dir for $SERVICE_NAME" + exit 
1 + fi + build_cpp_service "$SERVICE_NAME" "$PROJECT_ROOT/$BUILD_SRC" "$SERVICE_NAME" + ;; + python|bash|custom) + echo "$SERVICE_NAME is type '$SERVICE_TYPE' — no build step needed." + ;; + *) + echo "Error: Unknown service '$SERVICE_NAME'" + exit 1 + ;; + esac + ;; + package-service) + # Package a single service by name + SERVICE_NAME="${2:?Usage: $0 package-service }" + generate + YAML_FILE="$SCRIPT_DIR/generated/ark-${SERVICE_NAME}.yaml" + if [ ! -f "$YAML_FILE" ]; then + echo "Error: No generated config for $SERVICE_NAME (expected $YAML_FILE)" + exit 1 + fi + package_service "$YAML_FILE" + ;; + *) + echo "Usage: $0 [build-cpp|build-frontend|package|package-python|package-bash|build-service|package-service|all]" + exit 1 + ;; +esac diff --git a/packaging/generate.py b/packaging/generate.py new file mode 100644 index 0000000..037a1b9 --- /dev/null +++ b/packaging/generate.py @@ -0,0 +1,424 @@ +#!/usr/bin/env python3 +"""Generate nfpm configs, systemd units, and install scripts from packages.yaml. 
+ +Usage: python3 generate.py [--output-dir DIR] +""" + +import argparse +import json +import os +from pathlib import Path + +import yaml + +SCRIPT_DIR = Path(__file__).resolve().parent +PROJECT_ROOT = SCRIPT_DIR.parent + +# ─── Type defaults ───────────────────────────────────────────────────────────── + +TYPE_DEFAULTS = { + "python": { + "base_depends": ["python3", "python3-flask"], + "exec_start": lambda name, cfg: f"python3 /opt/ark/bin/{cfg['script']}", + "contents_src": lambda name, cfg: f"services/{name}/{cfg['script']}", + "contents_dst": lambda name, cfg: f"/opt/ark/bin/{cfg['script']}", + "after": ["network-online.target", "syslog.target"], + "wants": ["network.target", "network-online.target", "syslog.target"], + "environment": {"PYTHONUNBUFFERED": "1"}, + "restart": "on-failure", + }, + "cpp": { + "base_depends": [], + "exec_start": lambda name, cfg: f"/opt/ark/bin/{cfg.get('binary', name)}", + "contents_src": lambda name, cfg: f"build/{name}/{cfg.get('binary', name)}", + "contents_dst": lambda name, cfg: f"/opt/ark/bin/{cfg.get('binary', name)}", + "after": ["syslog.target", "network.target"], + "wants": ["network.target"], + "environment": {}, + "restart": "on-failure", + }, + "bash": { + "base_depends": [], + "exec_start": lambda name, cfg: f"/opt/ark/bin/{cfg['script']}", + "contents_src": lambda name, cfg: f"services/{name}/{cfg['script']}", + "contents_dst": lambda name, cfg: f"/opt/ark/bin/{cfg['script']}", + "after": ["network-online.target", "syslog.target"], + "wants": ["network.target"], + "environment": {}, + "restart": "on-failure", + }, +} + +# ─── Manifest reading ───────────────────────────────────────────────────────── + +def read_manifest(name): + """Read a service's .manifest.json if it exists.""" + manifest_path = PROJECT_ROOT / "services" / name / f"{name}.manifest.json" + if manifest_path.exists(): + with open(manifest_path) as f: + return json.load(f) + return {} + + +def is_system_service(name, manifest): + """Determine if a 
service runs as a system (root) service.""" + return manifest.get("requires_sudo", False) + +# ─── Systemd unit generation ────────────────────────────────────────────────── + +def generate_systemd_unit(name, cfg, manifest): + """Generate a systemd .service file.""" + svc_type = cfg.get("type", "custom") + defaults = TYPE_DEFAULTS.get(svc_type, TYPE_DEFAULTS["cpp"]) + sd = cfg.get("systemd", {}) + system_svc = is_system_service(name, manifest) + + # Description: use explicit override, or derive from manifest displayName + description = sd.get("description") + if not description: + display_name = manifest.get("displayName", name.replace("-", " ").title()) + description = f"ARK {display_name}" + + after_list = sd.get("after", defaults["after"]) + wants_list = sd.get("wants", defaults["wants"]) + + # [Unit] + unit_lines = [ + "[Unit]", + f"Description={description}", + ] + + if sd.get("condition_path_is_directory"): + unit_lines.append(f"ConditionPathIsDirectory={sd['condition_path_is_directory']}") + + if wants_list: + unit_lines.append(f"Wants={' '.join(wants_list)}") + if after_list: + unit_lines.append(f"After={' '.join(after_list)}") + + # [Service] + svc_lines = ["", "[Service]"] + + service_type = sd.get("type", "simple") + svc_lines.append(f"Type={service_type}") + + # Environment + env = dict(defaults["environment"]) + env.update(sd.get("environment", {})) + for key, val in env.items(): + svc_lines.append(f'Environment="{key}={val}"') + + # ExecStartPre + if sd.get("exec_start_pre"): + svc_lines.append(f"ExecStartPre={sd['exec_start_pre']}") + + # ExecStart + exec_start = sd.get("exec_start") + if not exec_start: + fn = defaults.get("exec_start") + exec_start = fn(name, cfg) if callable(fn) else fn + svc_lines.append(f"ExecStart={exec_start}") + + # Restart + restart = sd.get("restart", defaults["restart"]) + if restart and restart is not False: + svc_lines.append(f"Restart={restart}") + svc_lines.append("RestartSec=5") + + # Resource controls + if 
sd.get("nice") is not None: + svc_lines.append(f"Nice={sd['nice']}") + if sd.get("cpu_weight") is not None: + svc_lines.append(f"CPUWeight={sd['cpu_weight']}") + if sd.get("kill_mode"): + svc_lines.append(f"KillMode={sd['kill_mode']}") + + # [Install] + wanted_by = "multi-user.target" if system_svc else "default.target" + install_lines = ["", "[Install]", f"WantedBy={wanted_by}"] + + return "\n".join(unit_lines + svc_lines + install_lines) + "\n" + +# ─── Script generation ───────────────────────────────────────────────────────── + +def generate_postinst_user(name, default_enabled=True): + enable_lines = "" + if default_enabled: + enable_lines = f""" sudo -u "$SUDO_USER" XDG_RUNTIME_DIR="$RUNTIME_DIR" systemctl --user enable "{name}.service" + sudo -u "$SUDO_USER" XDG_RUNTIME_DIR="$RUNTIME_DIR" systemctl --user restart "{name}.service" +""" + enable_lines_no_sudo = f""" systemctl --user enable "{name}.service" + systemctl --user restart "{name}.service" +""" + else: + enable_lines = "" + enable_lines_no_sudo = "" + + return f"""#!/bin/bash +loginctl enable-linger "${{SUDO_USER:-$USER}}" 2>/dev/null || true +if [ -n "$SUDO_USER" ]; then + RUNTIME_DIR="/run/user/$(id -u "$SUDO_USER")" + sudo -u "$SUDO_USER" XDG_RUNTIME_DIR="$RUNTIME_DIR" systemctl --user daemon-reload +{enable_lines}else + systemctl --user daemon-reload +{enable_lines_no_sudo}fi +""" + + +def generate_prerm_user(name): + return f"""#!/bin/bash +if [ -n "$SUDO_USER" ]; then + RUNTIME_DIR="/run/user/$(id -u "$SUDO_USER")" + sudo -u "$SUDO_USER" XDG_RUNTIME_DIR="$RUNTIME_DIR" systemctl --user stop "{name}.service" 2>/dev/null || true + sudo -u "$SUDO_USER" XDG_RUNTIME_DIR="$RUNTIME_DIR" systemctl --user disable "{name}.service" 2>/dev/null || true +else + systemctl --user stop "{name}.service" 2>/dev/null || true + systemctl --user disable "{name}.service" 2>/dev/null || true +fi +""" + + +def generate_postinst_system(name, default_enabled=True): + if default_enabled: + return f"""#!/bin/bash +systemctl 
daemon-reload +systemctl enable "{name}.service" +systemctl restart "{name}.service" +""" + else: + return f"""#!/bin/bash +systemctl daemon-reload +""" + + +def generate_prerm_system(name): + return f"""#!/bin/bash +systemctl stop "{name}.service" 2>/dev/null || true +systemctl disable "{name}.service" 2>/dev/null || true +""" + +# ─── nfpm YAML generation (string-based for exact formatting) ────────────────── + +def _q(s): + """Quote a string for nfpm YAML output.""" + return f'"{s}"' + + +def _content_block(src, dst, mode=None, entry_type=None): + """Format a single nfpm contents entry.""" + lines = [f" - src: {src}", f" dst: {dst}"] + if mode: + lines.append(" file_info:") + lines.append(f" mode: {mode}") + if entry_type: + lines.append(f" type: {entry_type}") + return "\n".join(lines) + + +def generate_nfpm_yaml(name, cfg, defaults_cfg, manifest): + """Generate an nfpm YAML config string for a service package.""" + svc_type = cfg.get("type", "custom") + type_defaults = TYPE_DEFAULTS.get(svc_type, {}) + system_svc = is_system_service(name, manifest) + pkg_name = f"ark-{name}" + + # Header + lines = [ + f"name: {pkg_name}", + f'version: "${{VERSION}}"', + f'arch: "${{ARCH}}"', + "platform: linux", + f'maintainer: {_q(defaults_cfg["maintainer"])}', + f'description: {_q(cfg.get("description", manifest.get("description", "")))}', + f'vendor: {_q(defaults_cfg["vendor"])}', + f'homepage: {_q(defaults_cfg["homepage"])}', + f'license: {_q(defaults_cfg["license"])}', + ] + + # Dependencies + base_deps = list(type_defaults.get("base_depends", [])) + extra_deps = cfg.get("depends", []) + all_deps = base_deps + extra_deps + if all_deps: + lines.append("") + lines.append("depends:") + for dep in all_deps: + lines.append(f" - {dep}") + + # Contents + content_blocks = [] + + if svc_type == "custom": + for item in cfg.get("contents", []): + mode = item.get("mode") + content_blocks.append( + _content_block(f"../../{item['src']}", item["dst"], + mode=mode, 
entry_type=item.get("type"))) + else: + fn_src = type_defaults["contents_src"] + fn_dst = type_defaults["contents_dst"] + content_blocks.append( + _content_block(f"../../{fn_src(name, cfg)}", fn_dst(name, cfg), mode="0755")) + + for item in cfg.get("extra_contents", []): + if item.get("type"): + content_blocks.append( + _content_block(f"../../{item['src']}", item["dst"], + entry_type=item["type"])) + else: + content_blocks.append( + _content_block(f"../../{item['src']}", item["dst"], mode="0755")) + + # Auto-include manifest.json if it exists + manifest_src = PROJECT_ROOT / "services" / name / f"{name}.manifest.json" + if manifest_src.exists(): + content_blocks.append( + _content_block(f"../../services/{name}/{name}.manifest.json", + f"/opt/ark/share/{name}/{name}.manifest.json")) + + # Systemd unit + unit_dir = "/etc/systemd/system" if system_svc else "/etc/systemd/user" + content_blocks.append( + _content_block(f"./service-files/{name}.service", + f"{unit_dir}/{name}.service", entry_type="config")) + + lines.append("") + lines.append("contents:") + lines.append(("\n\n").join(content_blocks)) + + # Scripts + lines.append("") + lines.append("scripts:") + lines.append(f" postinstall: ./scripts/postinst-{name}.sh") + lines.append(f" preremove: ./scripts/prerm-{name}.sh") + + return "\n".join(lines) + "\n" + + +def generate_nfpm_custom_yaml(pkg_name, cfg, defaults_cfg): + """Generate an nfpm YAML config string for a custom (non-service) package.""" + lines = [ + f"name: {pkg_name}", + f'version: "${{VERSION}}"', + f'arch: "${{ARCH}}"', + "platform: linux", + f'maintainer: {_q(defaults_cfg["maintainer"])}', + f'description: {_q(cfg.get("description", ""))}', + f'vendor: {_q(defaults_cfg["vendor"])}', + f'homepage: {_q(defaults_cfg["homepage"])}', + f'license: {_q(defaults_cfg["license"])}', + ] + + if cfg.get("depends"): + lines.append("") + lines.append("depends:") + for dep in cfg["depends"]: + lines.append(f" - {dep}") + + if cfg.get("contents"): + content_blocks = 
[] + for item in cfg["contents"]: + content_blocks.append( + _content_block(f"../../{item['src']}", item["dst"], + entry_type=item.get("type"))) + lines.append("") + lines.append("contents:") + lines.append(("\n\n").join(content_blocks)) + + has_scripts = cfg.get("postinst") or cfg.get("prerm") + if has_scripts: + lines.append("") + lines.append("scripts:") + if cfg.get("postinst"): + lines.append(f" postinstall: ./scripts/postinst-{pkg_name}.sh") + if cfg.get("prerm"): + lines.append(f" preremove: ./scripts/prerm-{pkg_name}.sh") + + return "\n".join(lines) + "\n" + +# ─── Main ────────────────────────────────────────────────────────────────────── + +def main(): + parser = argparse.ArgumentParser(description="Generate packaging files from packages.yaml") + parser.add_argument("--output-dir", default=str(SCRIPT_DIR / "generated"), + help="Output directory (default: packaging/generated/)") + args = parser.parse_args() + + output_dir = Path(args.output_dir) + + with open(SCRIPT_DIR / "packages.yaml") as f: + config = yaml.safe_load(f) + + defaults_cfg = config["defaults"] + services = config.get("services", {}) + custom_packages = config.get("custom_packages", {}) + + (output_dir / "scripts").mkdir(parents=True, exist_ok=True) + (output_dir / "service-files").mkdir(parents=True, exist_ok=True) + + generated_files = [] + + # ── Generate service packages ── + + for name, cfg in services.items(): + manifest = read_manifest(name) + system_svc = is_system_service(name, manifest) + + # Systemd unit + unit = generate_systemd_unit(name, cfg, manifest) + unit_path = output_dir / "service-files" / f"{name}.service" + unit_path.write_text(unit) + generated_files.append(str(unit_path.relative_to(output_dir))) + + # Install/remove scripts + default_enabled = cfg.get("default_enabled", True) + if system_svc: + postinst = generate_postinst_system(name, default_enabled) + prerm = generate_prerm_system(name) + else: + postinst = generate_postinst_user(name, default_enabled) + prerm = 
generate_prerm_user(name) + + postinst_path = output_dir / "scripts" / f"postinst-{name}.sh" + prerm_path = output_dir / "scripts" / f"prerm-{name}.sh" + postinst_path.write_text(postinst) + prerm_path.write_text(prerm) + os.chmod(postinst_path, 0o755) + os.chmod(prerm_path, 0o755) + generated_files.extend([ + str(postinst_path.relative_to(output_dir)), + str(prerm_path.relative_to(output_dir)), + ]) + + # nfpm config + nfpm_yaml = generate_nfpm_yaml(name, cfg, defaults_cfg, manifest) + nfpm_path = output_dir / f"ark-{name}.yaml" + nfpm_path.write_text(nfpm_yaml) + generated_files.append(str(nfpm_path.relative_to(output_dir))) + + # ── Generate custom packages ── + + for pkg_name, cfg in custom_packages.items(): + nfpm_yaml = generate_nfpm_custom_yaml(pkg_name, cfg, defaults_cfg) + nfpm_path = output_dir / f"{pkg_name}.yaml" + nfpm_path.write_text(nfpm_yaml) + generated_files.append(str(nfpm_path.relative_to(output_dir))) + + if cfg.get("postinst"): + p = output_dir / "scripts" / f"postinst-{pkg_name}.sh" + p.write_text(cfg["postinst"]) + os.chmod(p, 0o755) + generated_files.append(str(p.relative_to(output_dir))) + if cfg.get("prerm"): + p = output_dir / "scripts" / f"prerm-{pkg_name}.sh" + p.write_text(cfg["prerm"]) + os.chmod(p, 0o755) + generated_files.append(str(p.relative_to(output_dir))) + + print(f"Generated {len(generated_files)} files in {output_dir}/") + for f in sorted(generated_files): + print(f" {f}") + + +if __name__ == "__main__": + main() diff --git a/packaging/packages.yaml b/packaging/packages.yaml new file mode 100644 index 0000000..2ccb01b --- /dev/null +++ b/packaging/packages.yaml @@ -0,0 +1,274 @@ +# ─── ARK-OS Package Registry ─── +# Single source of truth for all .deb packages. +# Run `python3 generate.py` to produce nfpm configs, systemd units, and scripts. 
+ +defaults: + maintainer: "ARK Electronics " + vendor: "ARK Electronics" + homepage: "https://github.com/ARK-Electronics/ARK-OS" + license: "MIT" + +# ─── Services ────────────────────────────────────────────────────────────────── +# Each service type has sensible defaults (see generate.py TYPE_DEFAULTS). +# Only specify what differs from the defaults. + +services: + + # ── Python services ── + + autopilot-manager: + type: python + description: "Autopilot management service for ARK companion computers" + script: autopilot_manager.py + depends: [python3-pymavlink] + extra_contents: + - src: platform/common/scripts/px_uploader.py + dst: /opt/ark/bin/px_uploader.py + - src: platform/common/scripts/flash_firmware.sh + dst: /opt/ark/bin/flash_firmware.sh + systemd: + after: [default.target, network-online.target, syslog.target] + wants: [default.target, network-online.target] + + connection-manager: + type: python + description: "Network connection management service for ARK companion computers" + script: connection_manager.py + depends: [network-manager] + systemd: + description: "ARK Connection Manager" + after: [network-online.target, syslog.target, NetworkManager.service, ModemManager.service] + wants: [network.target, network-online.target, syslog.target, NetworkManager.service, ModemManager.service] + + service-manager: + type: python + description: "Systemd service manager for ARK companion computers" + script: service_manager.py + + system-manager: + type: python + description: "Linux system management service for ARK companion computers" + script: system_manager.py + depends: [python3-psutil] + + # ── C++ services ── + + logloader: + type: cpp + default_enabled: false + description: "Automatic ULog download and upload service for ARK companion computers" + build_dir: services/logloader/logloader + depends: [libssl3, libsqlite3-0] + extra_contents: + - src: services/logloader/logloader/config.toml + dst: /opt/ark/share/logloader/config.toml + type: config + 
systemd: + after: [syslog.target, network.target, mavlink-router.service] + wants: [network.target] + environment: + SSL_CERT_FILE: /etc/ssl/certs/ca-certificates.crt + restart: always + nice: 10 + cpu_weight: 50 + + mavlink-router: + type: cpp + description: "MAVLink message router for ARK companion computers" + binary: mavlink-routerd + build_dir: services/mavlink-router/mavlink-router + build_system: meson + extra_contents: + - src: services/mavlink-router/start_mavlink_router.sh + dst: /opt/ark/bin/start_mavlink_router.sh + - src: services/mavlink-router/main.conf + dst: /opt/ark/share/mavlink-router/main.conf + type: config + - src: platform/common/scripts/vbus_enable.py + dst: /opt/ark/bin/vbus_enable.py + - src: platform/common/scripts/vbus_disable.py + dst: /opt/ark/bin/vbus_disable.py + systemd: + type: exec + exec_start: /opt/ark/bin/start_mavlink_router.sh + after: [network-online.target, syslog.target] + wants: [network.target] + + polaris: + type: cpp + default_enabled: false + description: "Polaris RTK corrections client for MAVLink" + binary: polaris-client-mavlink + build_dir: services/polaris/polaris-client-mavlink + depends: [libssl3, libgflags2.2, libgoogle-glog0v6, libboost-system1.74.0] + extra_contents: + - src: services/polaris/polaris-client-mavlink/config.toml + dst: /opt/ark/share/polaris/config.toml + type: config + systemd: + description: "ARK Polaris RTK Corrections" + after: [syslog.target, network.target, mavlink-router.service] + wants: [network.target] + restart: always + + rid-transmitter: + type: cpp + default_enabled: false + description: "RemoteID Bluetooth transmitter for ARK companion computers" + build_dir: services/rid-transmitter/RemoteIDTransmitter + depends: [libbluetooth3] + extra_contents: + - src: services/rid-transmitter/RemoteIDTransmitter/config.toml + dst: /opt/ark/share/rid-transmitter/config.toml + type: config + systemd: + description: "ARK RemoteID Bluetooth Transmitter" + condition_path_is_directory: 
/sys/class/bluetooth + after: [syslog.target, network-online.target] + wants: [network-online.target] + restart: false + + rtsp-server: + type: cpp + default_enabled: false + description: "RTSP server for connected cameras on ARK companion computers" + build_dir: services/rtsp-server/rtsp-server + extra_contents: + - src: services/rtsp-server/rtsp-server/config.toml + dst: /opt/ark/share/rtsp-server/config.toml + type: config + depends: + - libgstreamer1.0-0 + - libgstreamer-plugins-base1.0-0 + - gstreamer1.0-plugins-ugly + - gstreamer1.0-rtsp + systemd: + after: [syslog.target, network.target, mavlink-router.service] + wants: [network.target] + restart: always + + dds-agent: + type: cpp + default_enabled: false + description: "Micro XRCE-DDS Agent for PX4-ROS2 bridge on ARK companion computers" + binary: MicroXRCEAgent + build_dir: services/dds-agent/Micro-XRCE-DDS-Agent + extra_contents: + - src: services/dds-agent/start_dds_agent.sh + dst: /opt/ark/bin/start_dds_agent.sh + systemd: + description: "ARK Micro-XRCE-DDS-Agent" + exec_start: /opt/ark/bin/start_dds_agent.sh + after: [dev-ttyTHS1.device, dev-ttyAMA4.device, syslog.target, network-online.target] + wants: [network.target] + exec_start_pre: /bin/sleep 2 + + flight-review: + type: custom + default_enabled: false + description: "PX4 Flight Review server for ARK companion computers" + depends: [python3, python3-pip, libsqlite3-0, libfftw3-3] + contents: + - src: services/flight-review/flight_review/app/ + dst: /opt/ark/share/flight-review/app/ + - src: services/flight-review/start_flight_review.sh + dst: /opt/ark/bin/start_flight_review.sh + mode: "0755" + systemd: + description: "ARK PX4 Flight Review" + exec_start: /opt/ark/bin/start_flight_review.sh + after: [syslog.target, network.target, nginx.service] + wants: [network.target] + restart: always + + # ── Bash services ── + + hotspot-updater: + type: bash + description: "Hotspot name updater for ARK companion computers" + script: 
update_hotspot_default.sh + depends: [network-manager] + systemd: + description: "ARK Hotspot Name Updater" + after: [network-online.target, syslog.target, NetworkManager.service] + wants: [network.target, network-online.target, syslog.target, NetworkManager.service] + kill_mode: process + + jetson-can: + type: bash + default_enabled: false + description: "Jetson CAN interface enabler for ARK companion computers" + script: start_can_interface.sh + extra_contents: + - src: services/jetson-can/stop_can_interface.sh + dst: /opt/ark/bin/stop_can_interface.sh + systemd: + description: "ARK Jetson CAN Interface" + after: [syslog.target, network-online.target] + wants: [network.target] + kill_mode: process + +# ─── Custom packages (non-service) ──────────────────────────────────────────── + +custom_packages: + + ark-ui: + description: "ARK UI web application with nginx reverse proxy" + depends: [nginx] + contents: + - src: build/ark-ui/dist/ + dst: /var/www/ark-ui/html/ + - src: frontend/ark-ui.nginx + dst: /etc/nginx/sites-available/ark-ui + type: config + - src: frontend/ark-proxy.conf + dst: /etc/nginx/snippets/ark-proxy.conf + type: config + - src: frontend/ark-ws.conf + dst: /etc/nginx/snippets/ark-ws.conf + type: config + postinst: | + #!/bin/bash + # Enable nginx site and reload + ln -sf /etc/nginx/sites-available/ark-ui /etc/nginx/sites-enabled/ark-ui + rm -f /etc/nginx/sites-enabled/default 2>/dev/null + nginx -t && systemctl reload nginx + prerm: | + #!/bin/bash + rm -f /etc/nginx/sites-enabled/ark-ui 2>/dev/null + nginx -t && systemctl reload nginx 2>/dev/null || true + + ark-companion-base: + description: "ARK-OS base — core services for all platforms" + depends: + # Core (enabled by default) + - ark-autopilot-manager + - ark-connection-manager + - ark-service-manager + - ark-system-manager + - ark-mavlink-router + - ark-hotspot-updater + - ark-ui + # Optional (installed disabled) + - ark-logloader + - ark-polaris + - ark-dds-agent + - ark-rtsp-server + - 
ark-flight-review + + ark-companion-jetson: + description: "ARK-OS for Jetson" + depends: + - ark-companion-base + - ark-rid-transmitter + - ark-jetson-can + + ark-companion-pi: + description: "ARK-OS for Raspberry Pi" + depends: + - ark-companion-base + + ark-companion-ubuntu: + description: "ARK-OS for Ubuntu desktop (dev)" + depends: + - ark-companion-base diff --git a/platform/common/scripts/flash_firmware.sh b/platform/common/scripts/flash_firmware.sh index ede3578..3c5e104 100755 --- a/platform/common/scripts/flash_firmware.sh +++ b/platform/common/scripts/flash_firmware.sh @@ -20,18 +20,18 @@ fi systemctl --user stop mavlink-router &>/dev/null -python3 ~/.local/bin/reset_fmu_wait_bl.py &>/dev/null +python3 /opt/ark/bin/reset_fmu_wait_bl.py &>/dev/null echo "Flashing $SERIALDEVICE" # If the device is found and file exists, run the uploader script and filter JSON output -python3 -u ~/.local/bin/px_uploader.py --json-progress --port $SERIALDEVICE $FW_PATH 2>&1 | while IFS= read -r line +python3 -u /opt/ark/bin/px_uploader.py --json-progress --port $SERIALDEVICE $FW_PATH 2>&1 | while IFS= read -r line do echo "$line" | jq -c 'select(type == "object")' 2>/dev/null || : done # TODO: maybe need a delay here for ardupilot -python3 ~/.local/bin/reset_fmu_fast.py &>/dev/null +python3 /opt/ark/bin/reset_fmu_fast.py &>/dev/null sleep 3 diff --git a/platform/common/scripts/setup_apt_repo.sh b/platform/common/scripts/setup_apt_repo.sh new file mode 100755 index 0000000..b00ca0c --- /dev/null +++ b/platform/common/scripts/setup_apt_repo.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Add ARK-OS APT repository to system +set -euo pipefail + +REPO_URL="https://ark-electronics.github.io/ARK-OS" +KEYRING_PATH="/usr/share/keyrings/ark-archive-keyring.gpg" + +# Download and install GPG keyring +curl -fsSL "${REPO_URL}/ark-archive-keyring.gpg" | sudo tee "${KEYRING_PATH}" > /dev/null + +# Add repository source +echo "deb [signed-by=${KEYRING_PATH} arch=arm64] ${REPO_URL} stable main" \ + | 
sudo tee /etc/apt/sources.list.d/ark.list > /dev/null

# apt-get has a stable CLI for scripts; apt(8) warns against non-interactive use.
sudo apt-get update
echo "ARK-OS APT repository configured successfully."
diff --git a/platform/common/scripts/vbus_disable.py b/platform/common/scripts/vbus_disable.py
new file mode 100644
index 0000000..843bc50
--- /dev/null
+++ b/platform/common/scripts/vbus_disable.py
@@ -0,0 +1,82 @@
#!/usr/bin/env python3

# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+ +import os +import sys + + +def detect_platform(): + try: + with open("/proc/device-tree/model") as f: + model = f.read() + if "NVIDIA" in model: + return "jetson" + if "Raspberry Pi" in model: + return "pi" + except FileNotFoundError: + pass + return "ubuntu" + + +def disable_jetson(): + import Jetson.GPIO as GPIO + + vbus_det_pin = 32 + + with open("/etc/nv_tegra_release") as f: + jetpack_version = f.read() + if "R36" in jetpack_version: + print("Jetpack version is R36, skipping VBUS Disable") + return + + GPIO.setmode(GPIO.BOARD) + GPIO.setup(vbus_det_pin, GPIO.OUT, initial=GPIO.HIGH) + value = GPIO.LOW + print("Outputting {} to pin {}".format(value, vbus_det_pin)) + GPIO.output(vbus_det_pin, value) + + +def disable_pi(): + import RPi.GPIO as GPIO + + vbus_det_pin = 27 + + GPIO.setwarnings(False) + GPIO.setmode(GPIO.BCM) + GPIO.setup(vbus_det_pin, GPIO.OUT, initial=GPIO.HIGH) + value = GPIO.LOW + print("Outputting {} to pin {}".format(value, vbus_det_pin)) + GPIO.output(vbus_det_pin, value) + + +def main(): + platform = detect_platform() + if platform == "jetson": + disable_jetson() + elif platform == "pi": + disable_pi() + else: + print("VBUS disable not supported on platform: {}".format(platform)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/platform/common/scripts/vbus_enable.py b/platform/common/scripts/vbus_enable.py new file mode 100644 index 0000000..ae2a228 --- /dev/null +++ b/platform/common/scripts/vbus_enable.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. 
+# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +import os +import sys + + +def detect_platform(): + try: + with open("/proc/device-tree/model") as f: + model = f.read() + if "NVIDIA" in model: + return "jetson" + if "Raspberry Pi" in model: + return "pi" + except FileNotFoundError: + pass + return "ubuntu" + + +def enable_jetson(): + import Jetson.GPIO as GPIO + + vbus_det_pin = 32 + + with open("/etc/nv_tegra_release") as f: + jetpack_version = f.read() + if "R36" in jetpack_version: + print("Jetpack version is R36, skipping VBUS Enable") + return + + GPIO.setmode(GPIO.BOARD) + GPIO.setup(vbus_det_pin, GPIO.OUT, initial=GPIO.HIGH) + value = GPIO.HIGH + print("Outputting {} to pin {}".format(value, vbus_det_pin)) + GPIO.output(vbus_det_pin, value) + + +def enable_pi(): + import RPi.GPIO as GPIO + + vbus_det_pin = 27 + + GPIO.setwarnings(False) + GPIO.setmode(GPIO.BCM) + GPIO.setup(vbus_det_pin, GPIO.OUT, initial=GPIO.HIGH) + value = GPIO.HIGH + print("Outputting {} to pin {}".format(value, vbus_det_pin)) + GPIO.output(vbus_det_pin, value) + + +def main(): + platform = detect_platform() + if platform == "jetson": + enable_jetson() + elif platform == "pi": + enable_pi() + else: + print("VBUS enable not supported on platform: {}".format(platform)) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..6ff3155 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,12 @@ +[tool.ruff] +line-length = 120 +target-version = "py39" +extend-exclude = [ + "services/flight-review", + "**/third_party", + "**/libraries", +] + +[tool.ruff.lint] +select = ["E", "F", "W"] +ignore = ["E501", "E402", "E722"] diff --git a/services/ark-ui-backend/ark-ui-backend.manifest.json b/services/ark-ui-backend/ark-ui-backend.manifest.json deleted file mode 100644 index e63d458..0000000 --- a/services/ark-ui-backend/ark-ui-backend.manifest.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "displayName": "ARK UI Backend", - "description": "Backend for ARK UI Web App.", - 
"platform": ["jetson", "pi"], - "configFile": "", - "visible": false, - "requires_sudo": false, - "env_var": "", - "install_script": "install_ark_ui.sh", - "install_files": ["start_ark_ui_backend.sh"] -} diff --git a/services/ark-ui-backend/ark-ui-backend.service b/services/ark-ui-backend/ark-ui-backend.service deleted file mode 100644 index 1088516..0000000 --- a/services/ark-ui-backend/ark-ui-backend.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=ARK UI Backend Service -Wants=default.target network-online.target -After=default.target network-online.target syslog.target nginx.service - -[Service] -Type=simple -ExecStart=%h/.local/bin/start_ark_ui_backend.sh -Restart=on-failure -Environment="PATH=%h/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin" - -[Install] -WantedBy=default.target diff --git a/services/ark-ui-backend/install_ark_ui.sh b/services/ark-ui-backend/install_ark_ui.sh deleted file mode 100755 index 6765773..0000000 --- a/services/ark-ui-backend/install_ark_ui.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing ARK-UI" - -# clean up old nginx -sudo rm /etc/nginx/sites-enabled/ark-ui &>/dev/null -sudo rm /etc/nginx/sites-available/ark-ui &>/dev/null -sudo rm -rf /var/www/ark-ui &>/dev/null - -pushd . 
-cd $PROJECT_ROOT/frontend/ark-ui -./install.sh -popd - -DEPLOY_PATH="/var/www/ark-ui" - -# Copy nginx config -sudo cp $PROJECT_ROOT/frontend/ark-ui.nginx /etc/nginx/sites-available/ark-ui - -# Copy frontend and backend files to deployment path -sudo mkdir -p $DEPLOY_PATH/html -sudo mkdir -p $DEPLOY_PATH/api -sudo cp -r $PROJECT_ROOT/frontend/ark-ui/ark-ui/dist/* $DEPLOY_PATH/html/ -sudo cp -r $PROJECT_ROOT/frontend/ark-ui/backend/* $DEPLOY_PATH/api/ - -# Set permissions: www-data owns the path and has read/write permissions -sudo chown -R www-data:www-data $DEPLOY_PATH -sudo chmod -R 755 $DEPLOY_PATH - -if [ ! -L /etc/nginx/sites-enabled/ark-ui ]; then - sudo ln -s /etc/nginx/sites-available/ark-ui /etc/nginx/sites-enabled/ark-ui -fi - -# Remove default configuration -sudo rm /etc/nginx/sites-enabled/default &>/dev/null - -# To check that it can run -sudo -u www-data stat $DEPLOY_PATH - -# Test the configuration and restart nginx -sudo nginx -t -sudo systemctl restart nginx - -echo "Finished $(basename $BASH_SOURCE)" diff --git a/services/ark-ui-backend/start_ark_ui_backend.sh b/services/ark-ui-backend/start_ark_ui_backend.sh deleted file mode 100755 index 6f7f9e8..0000000 --- a/services/ark-ui-backend/start_ark_ui_backend.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -export NVM_DIR="$HOME/.config/nvm" -source $NVM_DIR/nvm.sh - -# Specify the Node version -nvm use 20.15.0 - -# Start your application -cd /var/www/ark-ui/api -exec npm start diff --git a/services/autopilot-manager/autopilot-manager.manifest.json b/services/autopilot-manager/autopilot-manager.manifest.json index 0e3b58d..97d31f5 100644 --- a/services/autopilot-manager/autopilot-manager.manifest.json +++ b/services/autopilot-manager/autopilot-manager.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Autopilot Manager", "description": "Microservice backend for managing the autopilot", "platform": ["all"], "configFile": "", "visible": false, - "requires_sudo": false, - "env_var": 
"", - "install_script": "", - "install_files": ["autopilot_manager.py"] + "requires_sudo": false } diff --git a/services/autopilot-manager/autopilot-manager.service b/services/autopilot-manager/autopilot-manager.service deleted file mode 100644 index eb4ff7c..0000000 --- a/services/autopilot-manager/autopilot-manager.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Microservice backend for autopilot mavlink interactions -After=network-online.target syslog.target -Wants=network.target network-online.target syslog.target - -[Service] -Type=simple -ExecStart=python3 %h/.local/bin/autopilot_manager.py -Restart=on-failure -RestartSec=5 -Environment="PYTHONUNBUFFERED=1" - -[Install] -WantedBy=default.target diff --git a/services/autopilot-manager/autopilot_manager.py b/services/autopilot-manager/autopilot_manager.py index 06190a6..5b14a6e 100644 --- a/services/autopilot-manager/autopilot_manager.py +++ b/services/autopilot-manager/autopilot_manager.py @@ -6,6 +6,7 @@ import time import argparse import logging +import select from datetime import datetime import socket from flask import Flask, jsonify, request @@ -208,8 +209,10 @@ def process_messages(self): try: current_time = time.time() - # Periodically check device status (every 2 seconds) - if current_time - last_device_check_time > 2: + # Check device status via USB — skip when MAVLink is connected + # since heartbeats already confirm the device is present + mavlink_connected = self.is_mavlink_connected() + if not mavlink_connected and current_time - last_device_check_time > 5: self.update_device_status() last_device_check_time = current_time @@ -217,9 +220,23 @@ def process_messages(self): time.sleep(1) continue - # Use blocking mode with a timeout - this is more efficient than sleep - # as it will wake up immediately when a message arrives - msg = self.mav_connection.recv_match(blocking=True, timeout=0.5) + # Use select() on the underlying socket for efficient waiting. 
+ # This wakes instantly on data and sleeps cleanly when idle. + try: + sock = self.mav_connection.port.fileno() if hasattr(self.mav_connection.port, 'fileno') else None + if sock is not None: + ready, _, _ = select.select([self.mav_connection.port], [], [], 2.0) + if not ready: + # Timeout — no data available, loop back for housekeeping + msg = None + else: + msg = self.mav_connection.recv_match(blocking=False) + else: + # Fallback for connections without a selectable socket + msg = self.mav_connection.recv_match(blocking=True, timeout=2.0) + except (AttributeError, OSError): + # Fallback if socket access fails + msg = self.mav_connection.recv_match(blocking=True, timeout=2.0) if msg: # Ignore messages that do not originate from the autopilot @@ -438,7 +455,7 @@ def reset_fmu(self, mode="wait_bl"): script = "reset_fmu_wait_bl.py" if mode == "wait_bl" else "reset_fmu_fast.py" try: logger.debug(f"Resetting FMU using {script}") - result = subprocess.run(["python3", os.path.expanduser(f"~/.local/bin/{script}")], + result = subprocess.run(["python3", f"/opt/ark/bin/{script}"], check=False, capture_output=True, text=True) @@ -515,10 +532,10 @@ def flash_firmware(self, firmware_path, socket_id): self.reset_fmu(mode="wait_bl") # Run px_uploader.py with JSON progress output - logger.debug(f"Starting firmware upload using px_uploader.py") + logger.debug("Starting firmware upload using px_uploader.py") command = [ "python3", "-u", - os.path.expanduser("~/.local/bin/px_uploader.py"), + "/opt/ark/bin/px_uploader.py", "--json-progress", "--port", serial_device, firmware_path ] diff --git a/services/connection-manager/connection-manager.manifest.json b/services/connection-manager/connection-manager.manifest.json index 5956106..f21ea0f 100644 --- a/services/connection-manager/connection-manager.manifest.json +++ b/services/connection-manager/connection-manager.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Connections Manager", "description": "Microservice 
backend for managing connections", "platform": ["all"], "configFile": "", "visible": false, - "requires_sudo": false, - "env_var": "", - "install_script": "", - "install_files": ["connection_manager.py"] + "requires_sudo": false } diff --git a/services/connection-manager/connection-manager.service b/services/connection-manager/connection-manager.service deleted file mode 100644 index f3e54e3..0000000 --- a/services/connection-manager/connection-manager.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Manages network connections via NetworkManager using Python and nmcli -After=network-online.target syslog.target NetworkManager.service ModemManager.service -Wants=network.target network-online.target syslog.target NetworkManager.service ModemManager.service - -[Service] -Type=simple -ExecStart=python3 %h/.local/bin/connection_manager.py -Restart=on-failure -RestartSec=5 -Environment="PYTHONUNBUFFERED=1" - -[Install] -WantedBy=default.target diff --git a/services/connection-manager/connection_manager.py b/services/connection-manager/connection_manager.py index 5660530..70f50d0 100644 --- a/services/connection-manager/connection_manager.py +++ b/services/connection-manager/connection_manager.py @@ -13,21 +13,15 @@ import eventlet eventlet.monkey_patch() -import os -import sys -import json import time import logging import threading import subprocess import re -import collections from flask import Flask, jsonify, request from flask_cors import CORS -from flask_socketio import SocketIO, emit, disconnect -import psutil +from flask_socketio import SocketIO import argparse -from pathlib import Path def setup_logging(): """Setup simple logging that will be captured by journald via stdout""" @@ -59,19 +53,64 @@ class State: stats_thread_active = False stats_thread = None +_ANSI_RE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') +# NM connection names: printable chars, no shell metacharacters +_CONNECTION_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9 
._-]{0,127}$') +_HOSTNAME_RE = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$') +_SSID_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9 ._-]{0,31}$') +_IP_ADDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}(/\d{1,2})?$') +_APN_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9._-]{0,99}$') +_IFACE_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9._-]{0,15}$') + + def strip_ansi_colors(text): - ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') - return ansi_escape.sub('', text) + return _ANSI_RE.sub('', text) + + +def validate_connection_name(name): + if not name or not _CONNECTION_NAME_RE.match(name): + raise ValueError(f"Invalid connection name: {name}") + return name + + +def validate_ssid(ssid): + if not ssid or not _SSID_RE.match(ssid): + raise ValueError(f"Invalid SSID: {ssid}") + return ssid + + +def validate_ip_address(addr): + if not addr or not _IP_ADDR_RE.match(addr): + raise ValueError(f"Invalid IP address: {addr}") + return addr + + +def validate_apn(apn): + if not apn or not _APN_RE.match(apn): + raise ValueError(f"Invalid APN: {apn}") + return apn + + +def validate_interface(iface): + if not iface or not _IFACE_RE.match(iface): + raise ValueError(f"Invalid interface: {iface}") + return iface + + +def validate_autoconnect(value): + if value not in ("yes", "no"): + raise ValueError(f"Invalid autoconnect value: {value}") + return value + class CommandExecutor: @staticmethod def run_command(command, timeout=30): - """Run a shell command and return its output""" + """Run a command (list form) and return its output""" try: logger.debug(f"Running command: {command}") result = subprocess.run( command, - shell=True, check=True, capture_output=True, text=True, @@ -102,7 +141,8 @@ class NetworkConnectionManager: def get_network_connections(): connections = [] - output = CommandExecutor.safe_run_command("nmcli -t -f NAME,TYPE,DEVICE,AUTOCONNECT,ACTIVE connection show") + output = CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", 
"NAME,TYPE,DEVICE,AUTOCONNECT,ACTIVE", "connection", "show"]) if not output: return connections @@ -135,25 +175,33 @@ def get_network_connections(): # Get interface specific properties if type == 'wifi': - connection['mode'] = CommandExecutor.safe_run_command(f"nmcli -g 802-11-wireless.mode con show \"{name}\"") - connection['ssid'] = CommandExecutor.safe_run_command(f"nmcli -g 802-11-wireless.ssid con show \"{name}\"") - connection['ipAddress'] = CommandExecutor.safe_run_command(f"nmcli -g IP4.ADDRESS con show \"{name}\"") + connection['mode'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "802-11-wireless.mode", "con", "show", name]) + connection['ssid'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "802-11-wireless.ssid", "con", "show", name]) + connection['ipAddress'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "IP4.ADDRESS", "con", "show", name]) elif type == 'ethernet': - connection['ipAddress'] = CommandExecutor.safe_run_command(f"nmcli -g ipv4.addresses con show \"{name}\"") - connection['ipMethod'] = CommandExecutor.safe_run_command(f"nmcli -g ipv4.method con show \"{name}\"") + connection['ipAddress'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "ipv4.addresses", "con", "show", name]) + connection['ipMethod'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "ipv4.method", "con", "show", name]) if type == 'lte': - connection['apn'] = CommandExecutor.safe_run_command(f"nmcli -g gsm.apn con show \"{name}\"") + connection['apn'] = CommandExecutor.safe_run_command( + ["nmcli", "-g", "gsm.apn", "con", "show", name]) connections.append(connection) # Get all Wifi signal strengths wifi_signals = {} - output = CommandExecutor.safe_run_command("nmcli -t -f SSID,SIGNAL device wifi") - for line in output.strip().split('\n'): - parts = line.split(':') - if len(parts) >= 2: - ssid, signal = parts[:2] - wifi_signals[ssid] = signal + output = CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "SSID,SIGNAL", "device", "wifi"]) 
+ if output: + for line in output.strip().split('\n'): + parts = line.split(':') + if len(parts) >= 2: + ssid, signal = parts[:2] + wifi_signals[ssid] = signal # Add signal strength to all matching wifi connections for connection in connections: @@ -192,42 +240,50 @@ def _create_wifi_connection(data): if not ssid: return {'success': False, 'error': 'SSID is required'} - if len(password) < 8 or len(password) > 63: + try: + validate_ssid(ssid) + validate_autoconnect(autoconnect) + except ValueError as e: + return {'success': False, 'error': str(e)} + + if not password or len(password) < 8 or len(password) > 63: return {'success': False, 'error': 'Invalid password'} - if not mode: - return {'success': False, 'error': 'Mode is required'} + if mode not in ('ap', 'infrastructure'): + return {'success': False, 'error': 'Mode must be ap or infrastructure'} # Check if connection with this name already exists - command = f"nmcli -t -f NAME con show" - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "NAME", "con", "show"]) - if re.search(rf"^{re.escape(ssid)}$", result, re.MULTILINE): + if result and re.search(rf"^{re.escape(ssid)}$", result, re.MULTILINE): return {'success': False, 'error': 'Connection already exists'} # Create the connection - command = f"nmcli con add type wifi ifname '*' con-name \"{ssid}\" autoconnect {autoconnect} ssid \"{ssid}\"" + cmd = ["nmcli", "con", "add", "type", "wifi", "ifname", "*", + "con-name", ssid, "autoconnect", autoconnect, "ssid", ssid] if mode == 'ap': - command += f" 802-11-wireless.mode ap 802-11-wireless.band bg ipv4.method shared" + cmd += ["802-11-wireless.mode", "ap", "802-11-wireless.band", "bg", "ipv4.method", "shared"] - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command(cmd) if result is None: - return {f"success': False, 'error': 'Failed to create {mode} connection"} + return {'success': False, 'error': 
f'Failed to create {mode} connection'} # Add password to connection - command = f"nmcli con modify \"{ssid}\" wifi-sec.key-mgmt wpa-psk wifi-sec.psk \"{password}\"" + cmd = ["nmcli", "con", "modify", ssid, + "wifi-sec.key-mgmt", "wpa-psk", "wifi-sec.psk", password] if mode == 'ap': - command += f" 802-11-wireless-security.pmf disable connection.autoconnect-priority -1" + cmd += ["802-11-wireless-security.pmf", "disable", "connection.autoconnect-priority", "-1"] - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command(cmd) if result is None: return {'success': False, 'error': 'Failed to set password'} # Query SSID to confirm creation - command = f"nmcli -g 802-11-wireless.ssid con show \"{ssid}\"" - ssid = CommandExecutor.safe_run_command(command) + ssid = CommandExecutor.safe_run_command( + ["nmcli", "-g", "802-11-wireless.ssid", "con", "show", ssid]) return {'success': True, 'ssid': ssid, 'mode': mode} @@ -242,27 +298,43 @@ def _create_ethernet_connection(data): if not name: return {'success': False, 'error': 'Name is required'} - if ipMethod == 'manual' and not ipAddress: - return {'success': False, 'error': 'IP address required for static IP'} + try: + validate_connection_name(name) + validate_autoconnect(autoconnect) + except ValueError as e: + return {'success': False, 'error': str(e)} + + if ipMethod not in ('auto', 'manual'): + return {'success': False, 'error': 'Invalid IP method'} + + if ipMethod == 'manual': + if not ipAddress: + return {'success': False, 'error': 'IP address required for static IP'} + try: + validate_ip_address(ipAddress) + except ValueError as e: + return {'success': False, 'error': str(e)} # Check if connection with this name already exists - command = f"nmcli -t -f NAME con show" - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "NAME", "con", "show"]) - if re.search(rf"^{re.escape(name)}$", result, re.MULTILINE): + if 
result and re.search(rf"^{re.escape(name)}$", result, re.MULTILINE): return {'success': False, 'error': 'Connection already exists'} # Create base ethernet connection - cmd = f"nmcli connection add type ethernet con-name \"{name}\" ifname '*' autoconnect {autoconnect}" - result = CommandExecutor.safe_run_command(cmd) - + result = CommandExecutor.safe_run_command( + ["nmcli", "connection", "add", "type", "ethernet", + "con-name", name, "ifname", "*", "autoconnect", autoconnect]) + if result is None: return {'success': False, 'error': 'Failed to create ethernet connection'} - + if ipMethod == 'manual' and ipAddress: - command = f"nmcli connection modify \"{name}\" ipv4.method manual ipv4.addresses {ipAddress}" - CommandExecutor.safe_run_command(command) - + CommandExecutor.safe_run_command( + ["nmcli", "connection", "modify", name, + "ipv4.method", "manual", "ipv4.addresses", ipAddress]) + return {'success': True, 'name': name} @staticmethod @@ -275,9 +347,17 @@ def _create_lte_connection(data): if not name: return {'success': False, 'error': 'Name is required'} + try: + validate_connection_name(name) + validate_autoconnect(autoconnect) + if apn: + validate_apn(apn) + except ValueError as e: + return {'success': False, 'error': str(e)} + # Check if any LTE connection already exists. We can only allow 1. 
- command = f"nmcli -t -f TYPE con show" - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "TYPE", "con", "show"]) if result is None: return {'success': False, 'error': 'Failed to query connections'} @@ -286,14 +366,13 @@ def _create_lte_connection(data): return {'success': False, 'error': 'An LTE connection already exists'} # Create LTE connection - cmd = f"nmcli connection add type gsm con-name \"{name}\" gsm.apn \"{apn}\" autoconnect {autoconnect}" + cmd = ["nmcli", "connection", "add", "type", "gsm", + "con-name", name, "gsm.apn", apn, "autoconnect", autoconnect] result = CommandExecutor.safe_run_command(cmd) if result is None: return {'success': False, 'error': 'Failed to create LTE connection'} - # TODO: do we need to modify any settings? - return {'success': True, 'name': name} @@ -316,18 +395,27 @@ def _update_wifi_connection(name, data): ssid = data.get('ssid') password = data.get('password') autoconnect = data.get('autoconnect', 'yes') - mode = data.get('mode', 'infrastructure') - command = f"nmcli connection modify \"{name}\"" + try: + validate_connection_name(name) + validate_autoconnect(autoconnect) + if ssid: + validate_ssid(ssid) + except ValueError as e: + return {'success': False, 'error': str(e)} + + cmd = ["nmcli", "connection", "modify", name] if ssid: - command += f" 802-11-wireless.ssid \"{ssid}\"" + cmd += ["802-11-wireless.ssid", ssid] if autoconnect: - command +=f" autoconnect {autoconnect}" + cmd += ["autoconnect", autoconnect] if password: - command += f" wifi-sec.key-mgmt wpa-psk wifi-sec.psk \"{password}\"" + if len(password) < 8 or len(password) > 63: + return {'success': False, 'error': 'Invalid password'} + cmd += ["wifi-sec.key-mgmt", "wpa-psk", "wifi-sec.psk", password] - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command(cmd) return {'success': result is not None} @staticmethod @@ -336,18 +424,31 @@ def 
_update_ethernet_connection(name, data): autoconnect = data.get('autoconnect', 'yes') ipAddress = data.get('ipAddress') - command = f"nmcli connection modify \"{name}\"" + try: + validate_connection_name(name) + validate_autoconnect(autoconnect) + except ValueError as e: + return {'success': False, 'error': str(e)} + + if ipMethod not in ('auto', 'manual'): + return {'success': False, 'error': 'Invalid IP method'} + + cmd = ["nmcli", "connection", "modify", name] if autoconnect: - command +=f" autoconnect {autoconnect}" + cmd += ["autoconnect", autoconnect] if ipMethod == 'auto': - command += " ipv4.method auto" + cmd += ["ipv4.method", "auto"] elif ipMethod == 'manual' and ipAddress: - command += f" ipv4.method manual ipv4.addresses {ipAddress}" + try: + validate_ip_address(ipAddress) + except ValueError as e: + return {'success': False, 'error': str(e)} + cmd += ["ipv4.method", "manual", "ipv4.addresses", ipAddress] else: return {'success': False, 'error': 'Missing ipAddress'} - result = CommandExecutor.safe_run_command(command) + result = CommandExecutor.safe_run_command(cmd) return {'success': result is not None} @staticmethod @@ -356,12 +457,20 @@ def _update_lte_connection(name, data): apn = data.get('apn') autoconnect = data.get('autoconnect', 'yes') - cmd = f"nmcli connection modify \"{name}\"" + try: + validate_connection_name(name) + validate_autoconnect(autoconnect) + if apn: + validate_apn(apn) + except ValueError as e: + return {'success': False, 'error': str(e)} + + cmd = ["nmcli", "connection", "modify", name] if apn: - cmd += f" gsm.apn \"{apn}\"" + cmd += ["gsm.apn", apn] if autoconnect: - cmd += f" autoconnect {autoconnect}" + cmd += ["autoconnect", autoconnect] logger.info(f"Updating LTE connection {name}") logger.info(f"apn {apn}") @@ -377,13 +486,14 @@ class WiFiNetworkManager: def scan_wifi_networks(): networks = [] - CommandExecutor.safe_run_command("nmcli device wifi rescan") + CommandExecutor.safe_run_command(["nmcli", "device", "wifi", 
"rescan"]) # Wait for scan to complete time.sleep(2) # Get scan results - output = CommandExecutor.safe_run_command("nmcli -f SSID,SIGNAL,SECURITY,CHAN device wifi list") + output = CommandExecutor.safe_run_command( + ["nmcli", "-f", "SSID,SIGNAL,SECURITY,CHAN", "device", "wifi", "list"]) if not output: return networks @@ -420,7 +530,7 @@ class HostnameManager: @staticmethod def get_hostname(): """Get the system hostname""" - return CommandExecutor.safe_run_command("hostname") + return CommandExecutor.safe_run_command(["hostname"]) @staticmethod def set_hostname(new_hostname): @@ -429,11 +539,12 @@ def set_hostname(new_hostname): return False, "No hostname provided" # Validate hostname format - if not re.match(r'^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$', new_hostname): + if not _HOSTNAME_RE.match(new_hostname): return False, "Invalid hostname format" # Change hostname using hostnamectl - result = CommandExecutor.safe_run_command(f"sudo hostnamectl set-hostname {new_hostname}") + result = CommandExecutor.safe_run_command( + ["sudo", "hostnamectl", "set-hostname", new_hostname]) if result is None: return False, "Failed to set hostname" @@ -451,7 +562,7 @@ def get_lte_status(): Returns a dictionary with all modem status information """ - if not CommandExecutor.safe_run_command("systemctl is-active ModemManager"): + if not CommandExecutor.safe_run_command(["systemctl", "is-active", "ModemManager"]): return {"status": "not_found", "message": "ModemManager is not running"} # Initialize comprehensive status structure with all possible fields @@ -504,13 +615,18 @@ def get_lte_status(): try: # Get modem index - modem_index = CommandExecutor.safe_run_command("mmcli -L | grep -oP '(?<=/Modem/)\d+' || echo ''") + modem_list = CommandExecutor.safe_run_command(["mmcli", "-L"]) + modem_index = None + if modem_list: + match = re.search(r'/Modem/(\d+)', modem_list) + if match: + modem_index = match.group(1) if not modem_index: logger.warning("No modem found") return status # 
Get modem information - modem_info = CommandExecutor.safe_run_command(f"mmcli -m {modem_index}") + modem_info = CommandExecutor.safe_run_command(["mmcli", "-m", modem_index]) if not modem_info: logger.warning(f"Failed to get information for modem {modem_index}") return status @@ -581,7 +697,7 @@ def get_lte_status(): # If we have a SIM path, get SIM info if sim_path: - sim_info = CommandExecutor.safe_run_command(f"mmcli -m {modem_index} --sim {sim_path}") + sim_info = CommandExecutor.safe_run_command(["mmcli", "-m", modem_index, "--sim", sim_path]) if sim_info: for line in sim_info.split('\n'): line = line.strip() @@ -596,7 +712,7 @@ def get_lte_status(): # If we have a bearer path, get bearer info for interface and IP details if bearer_path: - bearer_info = CommandExecutor.safe_run_command(f"mmcli -m {modem_index} --bearer={bearer_path}") + bearer_info = CommandExecutor.safe_run_command(["mmcli", "-m", modem_index, f"--bearer={bearer_path}"]) if bearer_info: for line in bearer_info.split('\n'): line = line.strip() @@ -622,12 +738,18 @@ def get_lte_status(): # Check interface status if we have one if status["interface"]: - interface_status = CommandExecutor.safe_run_command(f"ip link show {status['interface']} | grep 'state'") - if interface_status: - if "UP" in interface_status: - status["interfaceState"] = "up" - else: - status["interfaceState"] = "down" + try: + validate_interface(status["interface"]) + except ValueError: + pass + else: + link_output = CommandExecutor.safe_run_command( + ["ip", "link", "show", status["interface"]]) + if link_output: + if "state UP" in link_output: + status["interfaceState"] = "up" + else: + status["interfaceState"] = "down" # Suggest APN depending on SIM operator if status["simOperatorName"]: @@ -658,7 +780,8 @@ def collect_interface_stats(): stats = {} try: # Get all activated NetworkManager connections - nm_output = CommandExecutor.safe_run_command("nmcli -t -f NAME,TYPE,STATE connection show") + nm_output = 
CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "NAME,TYPE,STATE", "connection", "show"]) if not nm_output: logger.warning("Failed to get NetworkManager connections") return stats @@ -684,7 +807,8 @@ def collect_interface_stats(): interface_type = 'other' # Get the actual IP interface for this connection - ip_iface = CommandExecutor.safe_run_command(f"nmcli -g GENERAL.IP-IFACE connection show \"{name}\"") + ip_iface = CommandExecutor.safe_run_command( + ["nmcli", "-g", "GENERAL.IP-IFACE", "connection", "show", name]) if ip_iface and ip_iface.strip(): device = ip_iface.strip() @@ -703,7 +827,7 @@ def collect_interface_stats(): # Process each active connection to collect statistics for device, info in active_connections.items(): # Get detailed stats using 'ip -s link show' command - stats_output = CommandExecutor.safe_run_command(f"ip -s link show {device}") + stats_output = CommandExecutor.safe_run_command(["ip", "-s", "link", "show", device]) if not stats_output: logger.warning(f"Failed to get stats for device {device}") continue @@ -715,9 +839,12 @@ def collect_interface_stats(): continue # Get IP address - ip_output = CommandExecutor.safe_run_command( - f"ip addr show {device} | grep -w inet | head -1 | awk '{{print $2}}' | cut -d/ -f1" - ) + ip_raw = CommandExecutor.safe_run_command(["ip", "addr", "show", device]) + ip_output = None + if ip_raw: + ip_match = re.search(r'\binet\s+(\d+\.\d+\.\d+\.\d+)', ip_raw) + if ip_match: + ip_output = ip_match.group(1) # Create the stats entry with simplified data device_stats = { @@ -742,25 +869,27 @@ def collect_interface_stats(): # Add connection-type specific information if info['type'] == 'wifi': # Get signal strength for WiFi - signal_output = CommandExecutor.safe_run_command( - f"nmcli -f SIGNAL device wifi list ifname {device} | grep -v SIGNAL | head -1 | awk '{{print $1}}'" - ) - if signal_output and signal_output.isdigit(): - device_stats['signal_strength'] = int(signal_output) + signal_raw = 
CommandExecutor.safe_run_command( + ["nmcli", "-t", "-f", "SIGNAL", "device", "wifi", "list", "ifname", device]) + if signal_raw: + # -t mode outputs one signal value per line + first_line = signal_raw.strip().split('\n')[0].strip() + if first_line.isdigit(): + device_stats['signal_strength'] = int(first_line) # Get SSID for WiFi - ssid_output = CommandExecutor.safe_run_command(f"nmcli -g 802-11-wireless.ssid connection show '{info['name']}'") + ssid_output = CommandExecutor.safe_run_command( + ["nmcli", "-g", "802-11-wireless.ssid", "connection", "show", info['name']]) if ssid_output: device_stats['ssid'] = ssid_output elif info['type'] == 'lte': # For LTE connections, try to get signal strength from ModemManager - # This is optional and only works if ModemManager is available - signal_output = CommandExecutor.safe_run_command( - "mmcli -m 0 | grep 'signal quality' | awk -F': ' '{print $2}' | awk '{print $1}' | tr -d '%'" - ) - if signal_output and signal_output.isdigit(): - device_stats['signal_strength'] = int(signal_output) + lte_raw = CommandExecutor.safe_run_command(["mmcli", "-m", "0"]) + if lte_raw: + sig_match = re.search(r'signal quality:\s*(\d+)%', lte_raw) + if sig_match: + device_stats['signal_strength'] = int(sig_match.group(1)) stats[device] = device_stats logger.debug(f"Collected stats for {device} ({info['name']}): RX={device_stats['rx_bytes']}, TX={device_stats['tx_bytes']}") @@ -934,7 +1063,7 @@ def get_interface_usage_summary(): if not State.interface_stats: logger.debug("No interface stats available, collecting now") NetworkStatsProcessor.update_interface_stats() - + if not State.interface_stats: logger.debug("No active network interfaces found") return [] @@ -963,15 +1092,15 @@ def get_interface_usage_summary(): 'rxPackets': stats.get('rx_packets', 0), 'txPackets': stats.get('tx_packets', 0) } - + if stats.get('type') == 'wifi': interface_summary['signal'] = stats.get('signal_strength', 0) - + summary.append(interface_summary) # Sort by total 
bytes (most traffic first) summary.sort(key=lambda x: -x.get('totalBytes', 0)) - + logger.debug(f"Generated summary for {len(summary)} active interfaces") return summary @@ -1016,7 +1145,7 @@ def stats_collection_thread(): while State.stats_thread_active and len(State.active_stats_clients) > 0: try: # Collect interface stats at the configured interval - stats = NetworkStatsProcessor.update_interface_stats() + NetworkStatsProcessor.update_interface_stats() # Send updates to clients at the report interval StatsThread._send_stats_to_clients() @@ -1046,12 +1175,12 @@ def _send_stats_to_clients(): try: # Generate the summary to send to clients summary = NetworkReporting.get_interface_usage_summary() - + # Make sure we have data to send if not summary: logger.warning("No network interfaces found to report stats") return - + # Send update to all connected clients if State.active_stats_clients: socketio.emit('network_stats_update', summary) @@ -1128,7 +1257,7 @@ def handle_stats_disconnect(reason=None): # Stop thread if no clients remain if remaining == 0 and State.stats_thread_active: StatsThread.stop_collection_thread() - + except Exception as e: logger.error(f"Error handling client disconnect: {e}") @@ -1158,24 +1287,40 @@ def api_create_connection(): @app.route('/connections/<name>', methods=['DELETE']) def api_delete_connection(name): logger.info(f"DELETE /connections/{name} called") - result = CommandExecutor.safe_run_command(f"nmcli connection delete \"{name}\"") + try: + validate_connection_name(name) + except ValueError: + return jsonify({'success': False, 'error': 'Invalid connection name'}), 400 + result = CommandExecutor.safe_run_command(["nmcli", "connection", "delete", name]) return jsonify({'success': result is not None}) @app.route('/connections/<name>', methods=['PUT']) def api_update_connection(name): logger.info(f"PUT /connections/{name} called") + try: + validate_connection_name(name) + except ValueError: + return jsonify({'success': False, 'error': 'Invalid
connection name'}), 400 return jsonify(ConnectionManager.update_connection(name, request.json)) @app.route('/connections//connect', methods=['POST']) def api_connect_to_network(name): logger.info(f"POST /connections/{name}/connect called") - result = CommandExecutor.safe_run_command(f"nmcli con up \"{name}\"") + try: + validate_connection_name(name) + except ValueError: + return jsonify({'success': False, 'error': 'Invalid connection name'}), 400 + result = CommandExecutor.safe_run_command(["nmcli", "con", "up", name]) return jsonify({'success': result is not None}) @app.route('/connections//disconnect', methods=['POST']) def api_disconnect_from_network(name): logger.info(f"POST /connections/{name}/disconnect called") - result = CommandExecutor.safe_run_command(f"nmcli con down \"{name}\"") + try: + validate_connection_name(name) + except ValueError: + return jsonify({'success': False, 'error': 'Invalid connection name'}), 400 + result = CommandExecutor.safe_run_command(["nmcli", "con", "down", name]) return jsonify({'success': result is not None}) @app.route('/wifi/scan', methods=['GET']) @@ -1216,15 +1361,15 @@ def main(): default='/this/is/an/example', help='Example arg' ) - args = parser.parse_args() + parser.parse_args() ApplicationRunner.start_server() - + @staticmethod def start_server(): host = '127.0.0.1' port = 3001 - debug = False; + debug = False logger.info(f"Starting SocketIO server on {host}:{port}") try: diff --git a/services/dds-agent/dds-agent.manifest.json b/services/dds-agent/dds-agent.manifest.json index ce36a7d..c89993c 100644 --- a/services/dds-agent/dds-agent.manifest.json +++ b/services/dds-agent/dds-agent.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "DDS Agent", "description": "DDS Bridge between PX4 and ROS2 over High Speed UART.", - "platform": ["jetson", "pi"], + "platform": ["jetson", "pi", "ubuntu"], "configFile": "", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_DDS_AGENT", - 
"install_script": "install_dds_agent.sh", - "install_files": ["start_dds_agent.sh"] + "requires_sudo": false } diff --git a/services/dds-agent/dds-agent.service b/services/dds-agent/dds-agent.service deleted file mode 100644 index 88b8d18..0000000 --- a/services/dds-agent/dds-agent.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Micro-XRCE-DDS-Agent -Wants=network.target -After=dev-ttyTHS1.device dev-ttyAMA4.device syslog.target network-online.target - -[Service] -Type=simple -ExecStart=%h/.local/bin/start_dds_agent.sh -Restart=on-failure -RestartSec=5 -ExecStartPre=/bin/sleep 2 - -[Install] -WantedBy=default.target diff --git a/services/dds-agent/install_dds_agent.sh b/services/dds-agent/install_dds_agent.sh deleted file mode 100755 index 7684154..0000000 --- a/services/dds-agent/install_dds_agent.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -echo "Installing micro-xrce-dds-agent" -pushd . -cd Micro-XRCE-DDS-Agent -mkdir build && cd build -cmake .. -make -j$(nproc) -sudo make install -sudo ldconfig -popd diff --git a/services/dds-agent/start_dds_agent.sh b/services/dds-agent/start_dds_agent.sh index 0d03a20..067aa39 100644 --- a/services/dds-agent/start_dds_agent.sh +++ b/services/dds-agent/start_dds_agent.sh @@ -33,15 +33,15 @@ echo "Detected platform: $TARGET" case "$TARGET" in jetson) echo "Starting DDS agent for Jetson platform" - exec MicroXRCEAgent serial -b 3000000 -D /dev/ttyTHS1 + exec /opt/ark/bin/MicroXRCEAgent serial -b 3000000 -D /dev/ttyTHS1 ;; pi) echo "Starting DDS agent for Raspberry Pi platform" - exec MicroXRCEAgent serial -b 3000000 -D /dev/ttyAMA4 + exec /opt/ark/bin/MicroXRCEAgent serial -b 3000000 -D /dev/ttyAMA4 ;; ubuntu) echo "Starting DDS agent for Ubuntu desktop" - exec MicroXRCEAgent udp4 -p 8888 + exec /opt/ark/bin/MicroXRCEAgent udp4 -p 8888 ;; *) echo "Unknown platform" diff --git a/services/flight-review/flight-review.manifest.json b/services/flight-review/flight-review.manifest.json index 15174bd..5bab681 100644 --- 
a/services/flight-review/flight-review.manifest.json +++ b/services/flight-review/flight-review.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Flight Review", "description": "Flight Review server hosted locally.", - "platform": ["jetson", "pi"], + "platform": ["jetson", "pi", "ubuntu"], "configFile": "", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_LOGLOADER", - "install_script": "install_flight_review.sh", - "install_files": ["start_flight_review.sh"] + "requires_sudo": false } diff --git a/services/flight-review/flight-review.service b/services/flight-review/flight-review.service deleted file mode 100644 index 03df4fa..0000000 --- a/services/flight-review/flight-review.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=PX4 Flight Review -Wants=network.target -After=syslog.target network.target nginx.service - -[Service] -Type=simple -ExecStart=%h/.local/bin/start_flight_review.sh -Restart=always -RestartSec=5 - -[Install] -WantedBy=default.target diff --git a/services/flight-review/install_flight_review.sh b/services/flight-review/install_flight_review.sh deleted file mode 100755 index c5a7818..0000000 --- a/services/flight-review/install_flight_review.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing flight_review" - -sudo rm -rf /opt/flight_review - -pushd . 
-cd flight_review - -# Install dependencies -if [ "$TARGET" = "jetson" ]; then - apt_get_install install -y sqlite3 fftw3 libfftw3-dev - sudo pip install -r app/requirements.txt - sudo python3 -m pip install --upgrade pandas scipy matplotlib - -elif [ "$TARGET" = "pi" ]; then - apt_get_install install -y sqlite3 fftw3 libfftw3-dev - # https://www.raspberrypi.com/documentation/computers/os.html#python-on-raspberry-pi - sudo pip install --break-system-packages -r app/requirements.txt - sudo pip install --break-system-packages --upgrade pandas scipy matplotlib -fi - -# Create user config overrides -mkdir -p $XDG_CONFIG_HOME/flight_review -CONFIG_USER_FILE="$XDG_CONFIG_HOME/flight_review/config_user.ini" -touch $CONFIG_USER_FILE - -echo "[general]" >> $CONFIG_USER_FILE -echo "domain_name = $(hostname -f)/flight-review" >> $CONFIG_USER_FILE -echo "verbose_output = 1" >> $CONFIG_USER_FILE -echo "storage_path = /opt/flight_review/data" >> $CONFIG_USER_FILE - -# Copy the app to $XDG_DATA_HOME -APP_DIR="$XDG_DATA_HOME/flight_review/app" -mkdir -p $APP_DIR -cp -r app/* $APP_DIR/ - -popd - -# Make user owner -sudo chown -R $USER:$USER $XDG_DATA_HOME/flight_review - -# Initialize database -$APP_DIR/setup_db.py diff --git a/services/flight-review/start_flight_review.sh b/services/flight-review/start_flight_review.sh index 37e38e2..883f31e 100755 --- a/services/flight-review/start_flight_review.sh +++ b/services/flight-review/start_flight_review.sh @@ -2,4 +2,4 @@ HOSTNAME="$(hostname -f).local" PORT=5006 -exec python3 ~/.local/share/flight_review/app/serve.py --port=$PORT --address=0.0.0.0 --use-xheaders --allow-websocket-origin=$HOSTNAME --allow-websocket-origin=$HOSTNAME:$PORT +exec python3 /opt/ark/share/flight-review/app/serve.py --port=$PORT --address=0.0.0.0 --use-xheaders --allow-websocket-origin=$HOSTNAME --allow-websocket-origin=$HOSTNAME:$PORT diff --git a/services/hotspot-updater/hotspot-updater.manifest.json b/services/hotspot-updater/hotspot-updater.manifest.json 
index b62fd62..18a4d9c 100644 --- a/services/hotspot-updater/hotspot-updater.manifest.json +++ b/services/hotspot-updater/hotspot-updater.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Hotspot Updater", "description": "Updates the hotspot default name", "platform": ["all"], "configFile": "", "visible": false, - "requires_sudo": true, - "env_var": "", - "install_script": "", - "install_files": ["update_hotspot_default.sh"] + "requires_sudo": true } diff --git a/services/hotspot-updater/hotspot-updater.service b/services/hotspot-updater/hotspot-updater.service deleted file mode 100644 index 6882818..0000000 --- a/services/hotspot-updater/hotspot-updater.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Updates the hotspot name from hotspot-default to - -After=network-online.target syslog.target NetworkManager.service -Wants=network.target network-online.target syslog.target NetworkManager.service - -[Service] -Type=simple -ExecStart=/usr/local/bin/update_hotspot_default.sh -Restart=on-failure -RestartSec=5 -KillMode=process - -[Install] -WantedBy=multi-user.target diff --git a/services/jetson-can/jetson-can.manifest.json b/services/jetson-can/jetson-can.manifest.json index 69f3010..ec2090f 100644 --- a/services/jetson-can/jetson-can.manifest.json +++ b/services/jetson-can/jetson-can.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Jetson CAN", "description": "Enables Jetson CAN interface", "platform": ["jetson"], "configFile": "", - "visible": false, - "requires_sudo": true, - "env_var": "", - "install_script": "", - "install_files": ["start_can_interface.sh", "stop_can_interface.sh"] + "visible": true, + "requires_sudo": true } diff --git a/services/jetson-can/jetson-can.service b/services/jetson-can/jetson-can.service deleted file mode 100644 index de9248a..0000000 --- a/services/jetson-can/jetson-can.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Enable Jetson CAN interfaces -Wants=network.target 
-After=syslog.target network-online.target - -[Service] -Type=simple -ExecStart=/usr/local/bin/start_can_interface.sh -Restart=on-failure -RestartSec=5 -KillMode=process - -[Install] -WantedBy=multi-user.target diff --git a/services/logloader/install_logloader.sh b/services/logloader/install_logloader.sh deleted file mode 100755 index 4fb87b8..0000000 --- a/services/logloader/install_logloader.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing logloader" - -pushd . -cd logloader -make install -sudo ldconfig -popd - diff --git a/services/logloader/logloader b/services/logloader/logloader index ec08ce8..af24421 160000 --- a/services/logloader/logloader +++ b/services/logloader/logloader @@ -1 +1 @@ -Subproject commit ec08ce8bd9a52783a8be500acc8ab225d517da8f +Subproject commit af2442141943e0eea37a01f239a06dbcacc6b644 diff --git a/services/logloader/logloader.manifest.json b/services/logloader/logloader.manifest.json index 096571f..5bf53e7 100644 --- a/services/logloader/logloader.manifest.json +++ b/services/logloader/logloader.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Logloader", "description": "Automatically downloads log files from the vehicle SD card and optionally uploads to both a local and remote server.", - "platform": ["jetson", "pi"], + "platform": ["jetson", "pi", "ubuntu"], "configFile": "config.toml", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_LOGLOADER", - "install_script": "install_logloader.sh", - "install_files": [] + "requires_sudo": false } diff --git a/services/logloader/logloader.service b/services/logloader/logloader.service deleted file mode 100644 index c9d83dd..0000000 --- a/services/logloader/logloader.service +++ /dev/null @@ 
-1,16 +0,0 @@ -[Unit] -Description=Automatic ulog download and upload -Wants=network.target -After=syslog.target network.target mavlink-router.service - -[Service] -Type=simple -Environment=SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt -ExecStart=%h/.local/bin/logloader -Restart=always -RestartSec=5 -Nice=10 -CPUWeight=50 - -[Install] -WantedBy=default.target diff --git a/services/mavlink-router/install_mavlink_router.sh b/services/mavlink-router/install_mavlink_router.sh deleted file mode 100755 index 4b78753..0000000 --- a/services/mavlink-router/install_mavlink_router.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing mavlink-router" - -# remove old config, source, and binary -sudo rm -rf /etc/mavlink-router &>/dev/null -sudo rm -rf ~/code/mavlink-router &>/dev/null -sudo rm /usr/bin/mavlink-routerd &>/dev/null - -pushd . 
-cd mavlink-router -meson setup build --prefix=$HOME/.local -Dsystemdsystemunitdir= -ninja -C build install -popd - -mkdir -p $XDG_DATA_HOME/mavlink-router/ -cp main.conf $XDG_DATA_HOME/mavlink-router/main.conf diff --git a/services/mavlink-router/mavlink-router.manifest.json b/services/mavlink-router/mavlink-router.manifest.json index 89d1539..1e88f70 100644 --- a/services/mavlink-router/mavlink-router.manifest.json +++ b/services/mavlink-router/mavlink-router.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Mavlink Router", "description": "Routes mavlink data between endpoints.", "platform": ["all"], "configFile": "main.conf", "visible": true, - "requires_sudo": false, - "env_var": "", - "install_script": "install_mavlink_router.sh", - "install_files": ["start_mavlink_router.sh"] + "requires_sudo": false } diff --git a/services/mavlink-router/mavlink-router.service b/services/mavlink-router/mavlink-router.service deleted file mode 100644 index 91ce4e8..0000000 --- a/services/mavlink-router/mavlink-router.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Mavlink Router -Wants=network.target -After=network-online.target syslog.target - -[Service] -Type=exec -ExecStart=%h/.local/bin/start_mavlink_router.sh -Restart=on-failure -RestartSec=5 - -[Install] -WantedBy=default.target diff --git a/services/mavlink-router/start_mavlink_router.sh b/services/mavlink-router/start_mavlink_router.sh index 69971dc..76a7b95 100755 --- a/services/mavlink-router/start_mavlink_router.sh +++ b/services/mavlink-router/start_mavlink_router.sh @@ -1,7 +1,7 @@ #!/bin/bash # Assumes there is a conf file here -export MAVLINK_ROUTERD_CONF_FILE="/home/$USER/.local/share/mavlink-router/main.conf" +export MAVLINK_ROUTERD_CONF_FILE="/opt/ark/share/mavlink-router/main.conf" # 1) Read the currently configured Device path from main.conf CONFIGURED_PATH=$(grep -E '^Device\s*=' "$MAVLINK_ROUTERD_CONF_FILE" \ @@ -28,9 +28,9 @@ else fi # Enable mavlink USB stream first 
-python3 ~/.local/bin/vbus_enable.py +python3 /opt/ark/bin/vbus_enable.py sleep 3 # Finally, launch mavlink-routerd -~/.local/bin/mavlink-routerd +/opt/ark/bin/mavlink-routerd diff --git a/services/polaris/install_polaris.sh b/services/polaris/install_polaris.sh deleted file mode 100755 index 3d28ace..0000000 --- a/services/polaris/install_polaris.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing polaris-client-mavlink" - -# Clean up directories -sudo rm -rf ~/polaris-client-mavlink &>/dev/null -sudo rm -rf $XDG_DATA_HOME/polaris-client-mavlink &>/dev/null -sudo rm /usr/local/bin/polaris-client-mavlink &>/dev/null -sudo rm /usr/local/bin/polaris &>/dev/null - -# Install dependencies -apt_get_install install -y libssl-dev libgflags-dev libgoogle-glog-dev libboost-all-dev -pushd . 
-cd polaris-client-mavlink -make install -sudo ldconfig -popd diff --git a/services/polaris/polaris-client-mavlink b/services/polaris/polaris-client-mavlink index 0f996e4..6478175 160000 --- a/services/polaris/polaris-client-mavlink +++ b/services/polaris/polaris-client-mavlink @@ -1 +1 @@ -Subproject commit 0f996e4e21472b7aed6f270645aab89428ead82d +Subproject commit 64781759c09e7adb65a859f8e5c1e261d9333e62 diff --git a/services/polaris/polaris.manifest.json b/services/polaris/polaris.manifest.json index 880180b..181bbc6 100644 --- a/services/polaris/polaris.manifest.json +++ b/services/polaris/polaris.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Polaris Mavlink", "description": "Client for the Polaris RTK Corrections Network to provide corrections over Mavlink.", - "platform": ["jetson", "pi"], + "platform": ["jetson", "pi", "ubuntu"], "configFile": "config.toml", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_POLARIS", - "install_script": "install_polaris.sh", - "install_files": [] + "requires_sudo": false } diff --git a/services/polaris/polaris.service b/services/polaris/polaris.service deleted file mode 100644 index 2b9cbd0..0000000 --- a/services/polaris/polaris.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Polaris GNSS corrections service client for MAVLink -Wants=network.target -After=syslog.target network.target mavlink-router.service - -[Service] -Type=simple -ExecStart=%h/.local/bin/polaris-client-mavlink -Restart=always -RestartSec=5 - -[Install] -WantedBy=default.target diff --git a/services/rid-transmitter/RemoteIDTransmitter b/services/rid-transmitter/RemoteIDTransmitter index ad8299e..871cd48 160000 --- a/services/rid-transmitter/RemoteIDTransmitter +++ b/services/rid-transmitter/RemoteIDTransmitter @@ -1 +1 @@ -Subproject commit ad8299e96d17b2031a5b202a0e7bfe323d3f289c +Subproject commit 871cd481bf2b64f1221ff0c11f689b809371afc3 diff --git a/services/rid-transmitter/install_rid_transmitter.sh 
b/services/rid-transmitter/install_rid_transmitter.sh deleted file mode 100755 index 057d110..0000000 --- a/services/rid-transmitter/install_rid_transmitter.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing RemoteIDTransmitter" - -pushd . -cd RemoteIDTransmitter -make install -sudo ldconfig -popd diff --git a/services/rid-transmitter/rid-transmitter.manifest.json b/services/rid-transmitter/rid-transmitter.manifest.json index 7a36c65..d53665a 100644 --- a/services/rid-transmitter/rid-transmitter.manifest.json +++ b/services/rid-transmitter/rid-transmitter.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "RemoteID Transmitter", "description": "Transmit RemoteID packets via Bluetooth", "platform": ["jetson"], "configFile": "config.toml", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_RID_TRANSMITTER", - "install_script": "install_rid_transmitter.sh", - "install_files": [] + "requires_sudo": false } diff --git a/services/rid-transmitter/rid-transmitter.service b/services/rid-transmitter/rid-transmitter.service deleted file mode 100644 index 389b6c9..0000000 --- a/services/rid-transmitter/rid-transmitter.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Broadcasts Remote ID data via Bluetooth -ConditionPathIsDirectory=/sys/class/bluetooth -Wants=network-online.target -After=syslog.target network-online.target - -[Service] -Type=simple -ExecStart=%h/.local/bin/rid-transmitter - -[Install] -WantedBy=default.target diff --git a/services/rtsp-server/install_rtsp_server.sh b/services/rtsp-server/install_rtsp_server.sh deleted file mode 100755 index 3f65cbe..0000000 --- a/services/rtsp-server/install_rtsp_server.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# 
Determine PROJECT_ROOT as two levels up from this script's location -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." &> /dev/null && pwd )" -source "$PROJECT_ROOT/tools/functions.sh" - -echo "Installing rtsp-server" - -apt_get_install install -y \ - libgstreamer1.0-dev \ - libgstreamer-plugins-base1.0-dev \ - libgstreamer-plugins-bad1.0-dev \ - libgstrtspserver-1.0-dev \ - gstreamer1.0-plugins-ugly \ - gstreamer1.0-tools \ - gstreamer1.0-gl \ - gstreamer1.0-gtk3 \ - gstreamer1.0-rtsp - -if [ "$TARGET" = "pi" ]; then - apt_get_install install -y gstreamer1.0-libcamera - -else - # Ubuntu 22.04, see antimof/UxPlay#121 - sudo apt remove gstreamer1.0-vaapi -fi - -pushd . -cd rtsp-server -make install -sudo ldconfig -popd diff --git a/services/rtsp-server/rtsp-server b/services/rtsp-server/rtsp-server index 978eae6..34d0c1e 160000 --- a/services/rtsp-server/rtsp-server +++ b/services/rtsp-server/rtsp-server @@ -1 +1 @@ -Subproject commit 978eae69c4637fa25c79ca1b0e7abdc5ac71108d +Subproject commit 34d0c1e701fd3df94bd137944f738686ce1ce7ff diff --git a/services/rtsp-server/rtsp-server.manifest.json b/services/rtsp-server/rtsp-server.manifest.json index a355ab6..988e4d1 100644 --- a/services/rtsp-server/rtsp-server.manifest.json +++ b/services/rtsp-server/rtsp-server.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "RTSP Server", "description": "RTSP Server for connected cameras.", "platform": ["all"], "configFile": "config.toml", "visible": true, - "requires_sudo": false, - "env_var": "INSTALL_RTSP_SERVER", - "install_script": "install_rtsp_server.sh", - "install_files": [] + "requires_sudo": false } diff --git a/services/rtsp-server/rtsp-server.service b/services/rtsp-server/rtsp-server.service deleted file mode 100644 index e0dc64c..0000000 --- a/services/rtsp-server/rtsp-server.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=RTSP Server -Wants=network.target 
-After=syslog.target network.target mavlink-router.service - -[Service] -Type=simple -ExecStart=%h/.local/bin/rtsp-server -Restart=always -RestartSec=5 - -[Install] -WantedBy=default.target diff --git a/services/service-manager/service-manager.manifest.json b/services/service-manager/service-manager.manifest.json index 21d27b6..788cfef 100644 --- a/services/service-manager/service-manager.manifest.json +++ b/services/service-manager/service-manager.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "Service Manager", "description": "Microservice backend for managing systemd service", "platform": ["all"], "configFile": "config.toml", "visible": false, - "requires_sudo": false, - "env_var": "", - "install_script": "", - "install_files": ["service_manager.py"] + "requires_sudo": false } diff --git a/services/service-manager/service-manager.service b/services/service-manager/service-manager.service deleted file mode 100644 index 7b2ae9b..0000000 --- a/services/service-manager/service-manager.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Microservice backend for managing systemd user services -After=network-online.target syslog.target -Wants=network.target network-online.target syslog.target - -[Service] -Type=simple -ExecStart=python3 %h/.local/bin/service_manager.py -Restart=on-failure -RestartSec=5 -Environment="PYTHONUNBUFFERED=1" - -[Install] -WantedBy=default.target diff --git a/services/service-manager/service_manager.py b/services/service-manager/service_manager.py index 7e09353..1cb0bec 100644 --- a/services/service-manager/service_manager.py +++ b/services/service-manager/service_manager.py @@ -13,7 +13,6 @@ """ import os -import sys import json import subprocess import re @@ -23,55 +22,74 @@ app = Flask(__name__) CORS(app) +# Input validation +_ANSI_RE = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') +_SERVICE_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9._-]{0,63}$') +_VALID_SYSTEMCTL_OPS = frozenset({"start", "stop", 
"restart", "enable", "disable"}) +_VALID_STATUS_TYPES = frozenset({"active", "enabled"}) + + +def validate_service_name(name): + if not name or not _SERVICE_NAME_RE.match(name): + raise ValueError(f"Invalid service name: {name}") + return name + + +def validate_positive_int(value, max_val=10000): + n = int(value) + if n < 1 or n > max_val: + raise ValueError(f"Value out of range: {n}") + return n + + class ServiceManager: - + @staticmethod def run_systemctl(operation, service_name): - command = f"systemctl --user {operation} {service_name}" + if operation not in _VALID_SYSTEMCTL_OPS: + return False, f"Invalid operation: {operation}" + service_name = validate_service_name(service_name) try: process = subprocess.run( - command, - shell=True, + ["systemctl", "--user", operation, service_name], capture_output=True, text=True, timeout=10 ) - - ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') - output = ansi_escape.sub('', process.stdout + process.stderr).strip() - + + output = _ANSI_RE.sub('', process.stdout + process.stderr).strip() + if process.returncode == 0: return True, "" else: return False, output or f"Failed to {operation} service (exit code {process.returncode})" - + except Exception as e: return False, str(e) - + @staticmethod def get_service_status(service_name, status_type="active"): - command = f"systemctl --user is-{status_type} {service_name}" + if status_type not in _VALID_STATUS_TYPES: + return "unknown" + service_name = validate_service_name(service_name) try: process = subprocess.run( - command, - shell=True, - capture_output=True, + ["systemctl", "--user", f"is-{status_type}", service_name], + capture_output=True, text=True ) - ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') - return ansi_escape.sub('', process.stdout).strip() or process.stderr.strip() + return _ANSI_RE.sub('', process.stdout).strip() or process.stderr.strip() except: return "unknown" - + @staticmethod def get_service_config_file(service_name): - 
base_dir = os.path.expanduser("~/.local/share") - service_dir = os.path.join(base_dir, service_name) - + share_dir = f"/opt/ark/share/{service_name}" + config_file_name = "config.toml" - - manifest_file = os.path.join(service_dir, f"{service_name}.manifest.json") + + manifest_file = os.path.join(share_dir, f"{service_name}.manifest.json") if os.path.isfile(manifest_file): try: with open(manifest_file, 'r') as f: @@ -81,14 +99,18 @@ def get_service_config_file(service_name): config_file_name = manifest_config except Exception as e: print(f"Error reading manifest file for {service_name}: {e}") - - return os.path.join(service_dir, config_file_name) - + + # Check user config override first, then fall back to installed default + user_config = os.path.expanduser(f"~/.config/ark/{service_name}/{config_file_name}") + if os.path.isfile(user_config): + return user_config + + return os.path.join(share_dir, config_file_name) + @staticmethod def is_service_visible(service_name): - base_dir = os.path.expanduser("~/.local/share") - manifest_file = os.path.join(base_dir, service_name, f"{service_name}.manifest.json") - + manifest_file = f"/opt/ark/share/{service_name}/{service_name}.manifest.json" + if os.path.isfile(manifest_file): try: with open(manifest_file, 'r') as f: @@ -96,32 +118,31 @@ def is_service_visible(service_name): return str(manifest_data.get("visible", True)).lower() == "true" except: pass - + return True - + @staticmethod def get_service_statuses(): services = [] - - service_dir = os.path.expanduser("~/.config/systemd/user") - base_dir = os.path.expanduser("~/.local/share") - + + service_dir = "/etc/systemd/user" + if not os.path.isdir(service_dir): return {"services": []} - + service_files = [f for f in os.listdir(service_dir) if f.endswith('.service')] - + for service_file in service_files: service_name = service_file.replace('.service', '') - + enabled_status = ServiceManager.get_service_status(service_name, "enabled") active_status = 
ServiceManager.get_service_status(service_name, "active") - + config_file = ServiceManager.get_service_config_file(service_name) config_file_name = os.path.basename(config_file) if os.path.isfile(config_file) else "" - + visible = "true" if ServiceManager.is_service_visible(service_name) else "false" - + services.append({ "name": service_name, "enabled": enabled_status, @@ -129,141 +150,173 @@ def get_service_statuses(): "config_file": config_file_name, "visible": visible }) - + return {"services": services} - + @staticmethod def start_service(service_name): if not service_name: return {"status": "fail", "message": "No service name provided"} - + success, message = ServiceManager.run_systemctl("start", service_name) - + if success: status = ServiceManager.get_service_status(service_name) if status == "active": return {"status": "success", "service": service_name, "active": status} else: - return {"status": "fail", "service": service_name, + return {"status": "fail", "service": service_name, "message": f"Service started but status is '{status}' instead of 'active'"} else: return {"status": "fail", "service": service_name, "message": message} - + @staticmethod def stop_service(service_name): if not service_name: return {"status": "fail", "message": "No service name provided"} - + success, message = ServiceManager.run_systemctl("stop", service_name) - + if success: status = ServiceManager.get_service_status(service_name) if status == "inactive": return {"status": "success", "service": service_name, "active": status} else: - return {"status": "fail", "service": service_name, + return {"status": "fail", "service": service_name, "message": f"Service stopped but status is '{status}' instead of 'inactive'"} else: return {"status": "fail", "service": service_name, "message": message} - + @staticmethod def restart_service(service_name): if not service_name: return {"status": "fail", "message": "No service name provided"} - + success, message = 
ServiceManager.run_systemctl("restart", service_name) - + if success: status = ServiceManager.get_service_status(service_name) return {"status": "success", "service": service_name, "active": status} else: return {"status": "fail", "service": service_name, "message": message} - + @staticmethod def enable_service(service_name): if not service_name: return {"status": "fail", "message": "No service name provided"} - + success, message = ServiceManager.run_systemctl("enable", service_name) - + if success: return {"status": "success", "service": service_name, "enabled": "enabled"} else: return {"status": "fail", "service": service_name, "message": message} - + @staticmethod def disable_service(service_name): if not service_name: return {"status": "fail", "message": "No service name provided"} - + success, message = ServiceManager.run_systemctl("disable", service_name) - + if success: return {"status": "success", "service": service_name, "enabled": "disabled"} else: return {"status": "fail", "service": service_name, "message": message} - + @staticmethod def get_logs(service_name, num_lines=50): if not service_name: return {"status": "fail", "message": "No service name provided"} - + try: - command = f"journalctl --user -u {service_name} -n {num_lines} --no-pager -o cat" + service_name = validate_service_name(service_name) + num_lines = validate_positive_int(num_lines, max_val=10000) process = subprocess.run( - command, - shell=True, + ["journalctl", "--user", "-u", service_name, "-n", str(num_lines), "--no-pager", "-o", "cat"], capture_output=True, text=True, timeout=10 ) - - ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])') - logs = ansi_escape.sub('', process.stdout).strip() - + + logs = _ANSI_RE.sub('', process.stdout).strip() + return {"status": "success", "service": service_name, "logs": logs} + except ValueError as e: + return {"status": "fail", "service": service_name, "message": str(e)} except Exception as e: return {"status": "fail", "service": 
service_name, "message": str(e)} - + @staticmethod def get_config(service_name): if not service_name: return {"status": "fail", "data": "No service name provided"} - + config_file = ServiceManager.get_service_config_file(service_name) - + if not os.path.isfile(config_file): config_file_name = os.path.basename(config_file) service_dir = os.path.dirname(config_file) return {"status": "fail", "data": f"{config_file_name} not found in {service_dir}"} - + try: with open(config_file, 'r') as f: config_data = f.read() return {"status": "success", "data": config_data} except Exception as e: return {"status": "fail", "data": f"Error reading config file: {str(e)}"} - + @staticmethod def save_config(service_name, config_data): if not service_name: return {"status": "fail", "data": "No service name provided"} - + + try: + service_name = validate_service_name(service_name) + except ValueError as e: + return {"status": "fail", "data": str(e)} + + # Validate config content size + if len(config_data) > 65536: + return {"status": "fail", "data": f"Config too large: {len(config_data)} bytes (max 65536)"} + config_file = ServiceManager.get_service_config_file(service_name) - + config_file_name = os.path.basename(config_file) + if not os.path.isfile(config_file): - config_file_name = os.path.basename(config_file) service_dir = os.path.dirname(config_file) return {"status": "fail", "data": f"{config_file_name} not found in {service_dir}"} - + + # Always save to user-writable location (~/.config/ark//) + user_config_dir = os.path.expanduser(f"~/.config/ark/{service_name}") + user_config_file = os.path.join(user_config_dir, config_file_name) + + # Prevent path traversal + allowed_base = os.path.realpath(os.path.expanduser("~/.config/ark")) + if not os.path.realpath(user_config_file).startswith(allowed_base + os.sep): + return {"status": "fail", "data": "Invalid config path"} + try: - with open(config_file, 'w') as f: + os.makedirs(user_config_dir, exist_ok=True) + with 
open(user_config_file, 'w') as f: f.write(config_data) return {"status": "success", "data": "Configuration saved successfully"} except Exception as e: return {"status": "fail", "data": f"Error saving config file: {str(e)}"} # API endpoints +def _get_validated_service_name(): + """Extract and validate service name from request args. Returns (name, error_response).""" + service_name = request.args.get('serviceName') + if not service_name: + return None, (jsonify({"status": "fail", "message": "No service name provided"}), 400) + try: + validate_service_name(service_name) + except ValueError: + return None, (jsonify({"status": "fail", "message": f"Invalid service name: {service_name}"}), 400) + return service_name, None + + @app.route('/statuses', methods=['GET']) def get_service_statuses(): print("GET /statuses called") @@ -271,56 +324,72 @@ def get_service_statuses(): @app.route('/start', methods=['POST']) def start_service(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"POST /start called for {service_name}") result = ServiceManager.start_service(service_name) return jsonify(result) @app.route('/stop', methods=['POST']) def stop_service(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"POST /stop called for {service_name}") result = ServiceManager.stop_service(service_name) return jsonify(result) @app.route('/restart', methods=['POST']) def restart_service(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"POST /restart called for {service_name}") result = ServiceManager.restart_service(service_name) return jsonify(result) @app.route('/enable', methods=['POST']) def enable_service(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err 
print(f"POST /enable called for {service_name}") result = ServiceManager.enable_service(service_name) return jsonify(result) @app.route('/disable', methods=['POST']) def disable_service(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"POST /disable called for {service_name}") result = ServiceManager.disable_service(service_name) return jsonify(result) @app.route('/logs', methods=['GET']) def get_service_logs(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"GET /logs called for {service_name}") result = ServiceManager.get_logs(service_name) return jsonify(result) @app.route('/config', methods=['GET']) def get_service_config(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err print(f"GET /config called for {service_name}") result = ServiceManager.get_config(service_name) return jsonify(result) @app.route('/config', methods=['POST']) def save_service_config(): - service_name = request.args.get('serviceName') + service_name, err = _get_validated_service_name() + if err: + return err config_data = request.json.get('config') print(f"POST /config called for {service_name}") diff --git a/services/system-manager/system-manager.manifest.json b/services/system-manager/system-manager.manifest.json index 0cb82a6..3849722 100644 --- a/services/system-manager/system-manager.manifest.json +++ b/services/system-manager/system-manager.manifest.json @@ -1,11 +1,9 @@ { + "version": "1.0.0", "displayName": "System Manager", "description": "Microservice backend for managing the linux system", "platform": ["all"], "configFile": "", "visible": false, - "requires_sudo": false, - "env_var": "", - "install_script": "", - "install_files": ["system_manager.py"] + "requires_sudo": false } diff --git a/services/system-manager/system-manager.service 
b/services/system-manager/system-manager.service deleted file mode 100644 index 1c8d732..0000000 --- a/services/system-manager/system-manager.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description=Microservice backend for monitoring and managing the linux system -After=network-online.target syslog.target -Wants=network.target network-online.target syslog.target - -[Service] -Type=simple -ExecStart=python3 %h/.local/bin/system_manager.py -Restart=on-failure -RestartSec=5 -Environment="PYTHONUNBUFFERED=1" - -[Install] -WantedBy=default.target diff --git a/services/system-manager/system_manager.py b/services/system-manager/system_manager.py index 06b21b9..48a2e0a 100644 --- a/services/system-manager/system_manager.py +++ b/services/system-manager/system_manager.py @@ -502,7 +502,7 @@ def update_hostname(): host = '127.0.0.1' port = 3004 print(f"Starting System Manager on {host}:{port}") - print(f"Device type detection in progress...") + print("Device type detection in progress...") # Quick device detection for startup message if JetsonCollector.is_jetson(): diff --git a/tools/functions.sh b/tools/functions.sh index 07cd167..572bae4 100755 --- a/tools/functions.sh +++ b/tools/functions.sh @@ -29,12 +29,14 @@ function detect_platform() { fi if [ -f /proc/device-tree/model ] && grep -q "NVIDIA" /proc/device-tree/model; then export TARGET=jetson return 0 fi - return 1 + # Default to ubuntu for development machines + export TARGET=ubuntu + return 0 } detect_platform diff --git a/tools/install_mavsdk.sh b/tools/install_mavsdk.sh index fe80603..f5d5b94 100755 --- a/tools/install_mavsdk.sh +++ b/tools/install_mavsdk.sh @@ -1,50 +1,46 @@ #!/bin/bash +set -euo pipefail + +# Pinned MAVSDK version — bump as needed +MAVSDK_VERSION="v3.15.0" + function git_clone_retry() { - local url="$1" dir="$2" branch="$3" retries=3 delay=5 + local url="$1" dir="$2" branch="${3:-}" retries=3 delay=5 + local clone_args=(--recurse-submodules) if [ -n "$branch" ]; then - # Clone with a
specific branch and avoid shallow clone - until git clone --recurse-submodules -b "$branch" "$url" "$dir"; do - ((retries--)) || return 1 - echo "git clone failed, retrying in $delay seconds..." - rm -rf "$dir" &>/dev/null - sleep $delay - done + clone_args+=(-b "$branch") else - # Shallow clone if no branch is specified - until git clone --recurse-submodules --depth=1 --shallow-submodules "$url" "$dir"; do - ((retries--)) || return 1 - echo "git clone failed, retrying in $delay seconds..." - rm -rf "$dir" &>/dev/null - sleep $delay - done + clone_args+=(--depth=1 --shallow-submodules) fi + + until git clone "${clone_args[@]}" "$url" "$dir"; do + ((retries--)) || return 1 + echo "git clone failed, retrying in $delay seconds..." + rm -rf "$dir" &>/dev/null + sleep $delay + done } -# Check if we are on 20.04 or 22.04 codename=$(lsb_release -c | awk '{print $2}') + if [ "$codename" = "focal" ]; then - echo "Ubuntu 20.04 detected, building MAVSDK from source" - pushd . + echo "Ubuntu 20.04 detected, building MAVSDK ${MAVSDK_VERSION} from source" sudo rm -rf ~/code/MAVSDK - git_clone_retry https://github.com/mavlink/MAVSDK.git ~/code/MAVSDK - cd ~/code/MAVSDK - cmake -Bbuild/default -DCMAKE_BUILD_TYPE=Release -H. + git_clone_retry https://github.com/mavlink/MAVSDK.git ~/code/MAVSDK "$MAVSDK_VERSION" + pushd ~/code/MAVSDK + cmake -B build/default -DCMAKE_BUILD_TYPE=Release -H. 
cmake --build build/default -j$(nproc) sudo cmake --build build/default --target install sudo ldconfig popd -elif [ "$codename" = "jammy" ] || [ "$codename" = "bookworm" ]; then - echo "Debian 12 detected, downloading the latest release of mavsdk" - release_info=$(curl -s https://api.github.com/repos/mavlink/MAVSDK/releases/latest) - # Assumes arm64 - download_url=$(echo "$release_info" | grep "browser_download_url.*debian12_arm64.deb" | awk -F '"' '{print $4}') - file_name=$(echo "$release_info" | grep "name.*debian12_arm64.deb" | awk -F '"' '{print $4}') - if [ -z "$download_url" ]; then - echo "Download URL not found for arm64.deb package" - exit 1 - fi +elif [ "$codename" = "jammy" ] || [ "$codename" = "bookworm" ]; then + echo "Installing MAVSDK ${MAVSDK_VERSION} .deb package" + # Strip leading 'v' for the download URL + version_num="${MAVSDK_VERSION#v}" + file_name="libmavsdk-dev_${version_num}_debian12_arm64.deb" + download_url="https://github.com/mavlink/MAVSDK/releases/download/${MAVSDK_VERSION}/${file_name}" max_attempts=5 attempt_num=1 @@ -58,24 +54,24 @@ elif [ "$codename" = "jammy" ] || [ "$codename" = "bookworm" ]; then ((attempt_num++)) done -if [ "$success" = true ]; then - echo "Downloading completed successfully." - echo "Installing $file_name" + if [ "$success" = true ]; then + echo "Installing $file_name" + for attempt in {1..5}; do + sudo dpkg -i "$file_name" && { attempt=0; break; } || sleep 5 + done - for attempt in {1..5}; do - sudo dpkg -i "$file_name" && break || sleep 5 - done + if [ "$attempt" -ne 0 ]; then + echo "Failed to install $file_name after 5 attempts." + exit 1 + fi - if [ $attempt -eq 5 ]; then - echo "Failed to install $file_name after 5 attempts." + rm -f "$file_name" + sudo ldconfig + else + echo "Failed to download after $max_attempts attempts." exit 1 fi - - sudo rm "$file_name" - sudo ldconfig -else - echo "Failed to download the file after $max_attempts attempts."
- fi else - echo "Unsupported Ubuntu version, not installing MAVSDK" + echo "Unsupported distro: $codename" + exit 1 fi diff --git a/tools/install_software.sh b/tools/install_software.sh index f141c04..d960e59 100755 --- a/tools/install_software.sh +++ b/tools/install_software.sh @@ -7,11 +7,8 @@ DEFAULT_XDG_DATA_HOME="$HOME/.local/share" export XDG_CONFIG_HOME="${XDG_CONFIG_HOME:-$DEFAULT_XDG_CONF_HOME}" export XDG_DATA_HOME="${XDG_DATA_HOME:-$DEFAULT_XDG_DATA_HOME}" -if ! detect_platform; then - echo "ERROR: This script should be run on the target device (Jetson or Raspberry Pi)." - echo "Running this script on a host computer may cause unintended system modifications." - exit 1 -fi +detect_platform +echo "Detected platform: $TARGET" # Check if system is holding package management lock check_apt_locks() { @@ -49,9 +46,6 @@ if ! check_apt_locks; then echo "Continuing anyway -- apt commands will wait for locks to be released." fi -# Load helper functions -source $(dirname $BASH_SOURCE)/functions.sh - function sudo_refresh_loop() { while true; do sudo -v @@ -59,26 +53,6 @@ function sudo_refresh_loop() { done } -function ask_yes_no() { - local prompt="$1" - local var_name="$2" - local default="$3" - local default_display="${!var_name^^}" # Convert to uppercase for display purposes - - while true; do - echo "$prompt (y/n) [default: $default_display]" - read -r REPLY - if [ -z "$REPLY" ]; then - REPLY="${!var_name}" - fi - case "$REPLY" in - y|Y) eval "export $var_name='y'"; break ;; - n|N) eval "export $var_name='n'"; break ;; - *) echo "Invalid input. Please enter y or n." ;; - esac - done -} - function check_and_add_alias() { local name="$1" local command="$2" @@ -112,118 +86,14 @@ sudo -v sudo_refresh_loop & SUDO_PID=$! -# Source the main configuration file -if [ -f "default.env" ]; then - source "default.env" -else - echo "Configuration file default.env not found!" 
- exit 1 -fi - export TARGET_DIR="$PWD/platform/$TARGET" export COMMON_DIR="$PWD/platform/common" -if [ -f "user.env" ]; then - echo "Found user.env, skipping interactive prompt" - source "user.env" -else - ask_yes_no "Install micro-xrce-dds-agent?" INSTALL_DDS_AGENT - ask_yes_no "Install logloader?" INSTALL_LOGLOADER - - if [ "$INSTALL_LOGLOADER" = "y" ]; then - ask_yes_no "Upload automatically to PX4 Flight Review?" UPLOAD_TO_FLIGHT_REVIEW - if [ "$UPLOAD_TO_FLIGHT_REVIEW" = "y" ]; then - echo "Please enter your email: " - read -r USER_EMAIL - ask_yes_no "Do you want your logs to be public?" PUBLIC_LOGS - fi - fi - - ask_yes_no "Install rtsp-server?" INSTALL_RTSP_SERVER - - if [ "$TARGET" = "jetson" ]; then - ask_yes_no "Install rid-transmitter?" INSTALL_RID_TRANSMITTER - if [ "$INSTALL_RID_TRANSMITTER" = "y" ]; then - while true; do - echo "Enter Manufacturer Code (4 characters, digits and uppercase letters only, no O or I) [default: $MANUFACTURER_CODE]: " - read -r input - if [ -z "$input" ]; then - # Use the preset value if the input is empty - input=$MANUFACTURER_CODE - fi - if [[ $input =~ ^[A-HJ-NP-Z0-9]{4}$ ]]; then - MANUFACTURER_CODE=$input - break - else - echo "Invalid Manufacturer Code. Please try again." - fi - done - - while true; do - echo "Enter Serial Number (1-15 characters, digits and uppercase letters only, no O or I) [default: $SERIAL_NUMBER]: " - read -r input - if [ -z "$input" ]; then - # Use the preset value if the input is empty - input=$SERIAL_NUMBER - fi - if [[ $input =~ ^[A-HJ-NP-Z0-9]{1,15}$ ]]; then - SERIAL_NUMBER=$input - break - else - echo "Invalid Serial Number. Please try again." - fi - done - fi - fi - - ask_yes_no "Install polaris-client-mavlink?" INSTALL_POLARIS - - if [ "$INSTALL_POLARIS" = "y" ]; then - echo "Enter API key: [default: none]" - read -r POLARIS_API_KEY - fi - - if [ "$TARGET" = "jetson" ]; then - ask_yes_no "Install JetPack?" 
INSTALL_JETPACK - fi -fi - -echo "" -echo "=== Installation Summary ===" -echo "The following components will be installed:" -echo "" - -[ "$INSTALL_DDS_AGENT" = "y" ] && echo " ✓ micro-xrce-dds-agent" -[ "$INSTALL_LOGLOADER" = "y" ] && echo " ✓ logloader" -[ "$INSTALL_LOGLOADER" = "y" ] && [ "$UPLOAD_TO_FLIGHT_REVIEW" = "y" ] && echo " - Auto-upload to PX4 Flight Review: Yes (Email: $USER_EMAIL, Public: $PUBLIC_LOGS)" -[ "$INSTALL_RTSP_SERVER" = "y" ] && echo " ✓ rtsp-server" - -if [ "$TARGET" = "jetson" ]; then - [ "$INSTALL_RID_TRANSMITTER" = "y" ] && echo " ✓ rid-transmitter (Manufacturer: $MANUFACTURER_CODE, Serial: $SERIAL_NUMBER)" - [ "$INSTALL_JETPACK" = "y" ] && echo " ✓ JetPack" -fi - -[ "$INSTALL_POLARIS" = "y" ] && echo " ✓ polaris-client-mavlink" -[ "$INSTALL_POLARIS" = "y" ] && [ -n "$POLARIS_API_KEY" ] && echo " - API Key configured" - -echo "" -echo "Plus standard components:" -echo " ✓ MAVSDK" -echo " ✓ MAVSDK Examples" -echo " ✓ System dependencies and configuration" -echo "" -echo "============================" -echo "" - ########## validate submodules ########## git submodule update --init --recursive git submodule foreach git reset --hard git submodule foreach git clean -fd -# TODO: some of these dependencies should be part of -# the specific service install script. Next pass should be to install -# each service independently to determine what deps are missing. - ########## jetson-specific holds (before apt update) ########## if [ "$TARGET" = "jetson" ]; then # Upgrading these packages can break things like Wi-Fi. Don't allow these to be updated. 
@@ -259,12 +129,6 @@ apt_get_install install -y \ ########## jetson dependencies ########## if [ "$TARGET" = "jetson" ]; then - if [ "$INSTALL_JETPACK" = "y" ]; then - echo "Installing JetPack" - apt_get_install install -y nvidia-jetpack - echo "JetPack installation finished" - fi - # Required for FW updating ARK LTE apt_get_install install libqmi-utils -y @@ -282,6 +146,10 @@ elif [ "$TARGET" = "pi" ]; then sudo nmcli radio wifi on # https://www.raspberrypi.com/documentation/computers/os.html#python-on-raspberry-pi PI_PYTHON_INSTALL_ARG="--break-system-packages" + +########## ubuntu (dev machine) — skip hardware-specific packages ########## +elif [ "$TARGET" = "ubuntu" ]; then + echo "Ubuntu dev machine detected — skipping hardware-specific packages" fi # Common python dependencies @@ -302,7 +170,6 @@ sudo usermod -a -G dialout $USER sudo groupadd -f -r gpio sudo usermod -a -G gpio $USER sudo usermod -a -G i2c $USER -mkdir -p $XDG_CONFIG_HOME/systemd/user/ if [ "$TARGET" = "jetson" ]; then sudo systemctl stop nvgetty @@ -329,10 +196,12 @@ sudo systemctl restart systemd-journald journalctl --disk-usage ########## scripts ########## -echo "Installing scripts" -mkdir -p ~/.local/bin -cp $TARGET_DIR/scripts/* ~/.local/bin -cp $COMMON_DIR/scripts/* ~/.local/bin +if [ "$TARGET" != "ubuntu" ]; then + echo "Installing platform scripts" + mkdir -p ~/.local/bin + cp $TARGET_DIR/scripts/* ~/.local/bin + cp $COMMON_DIR/scripts/* ~/.local/bin +fi ########## sudoers permissions ########## echo "Adding sudoers" @@ -360,11 +229,11 @@ done ########## create hotspot connection ########## ~/.local/bin/create_hotspot_default.sh -########## Always install MAVSDK ########## -./tools/install_mavsdk.sh - -########## mavsdk-examples ########## -./tools/install_mavsdk_examples.sh +########## MAVSDK (skip on ubuntu dev machines) ########## +if [ "$TARGET" != "ubuntu" ]; then + ./tools/install_mavsdk.sh + ./tools/install_mavsdk_examples.sh +fi ########## install services ########## echo 
"Installing services..." diff --git a/tools/service_control.sh b/tools/service_control.sh index a7b3f75..8628aad 100755 --- a/tools/service_control.sh +++ b/tools/service_control.sh @@ -1,14 +1,41 @@ #!/bin/bash -# Source functions and configuration +# Service control script — builds and installs services as .deb packages via nfpm. +# +# Usage: +# ./service_control.sh install [service] Build+package+install a service (or all) +# ./service_control.sh uninstall Remove a service's deb package +# ./service_control.sh list List available and installed services +# ./service_control.sh status Show systemd status of services + +set -euo pipefail + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" source "$SCRIPT_DIR/functions.sh" -# Set up paths SERVICES_DIR="$(realpath "$SCRIPT_DIR/../services")" +PACKAGING_DIR="$(realpath "$SCRIPT_DIR/../packaging")" +BUILD_DIR="$PROJECT_ROOT/build" +OUTPUT_DIR="$PROJECT_ROOT/dist" +VERSION=$(cat "$PROJECT_ROOT/VERSION" | tr -d '[:space:]') +ARCH=$(dpkg --print-architecture) + +# ─── Helpers ────────────────────────────────────────────────────────────────── + +check_nfpm() { + if ! command -v nfpm &>/dev/null; then + echo "ERROR: nfpm is not installed." 
+ echo "" + echo "Install it with:" + echo " go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest" + echo " or: curl -sfL https://install.goreleaser.com/github.com/goreleaser/nfpm.sh | sh" + echo "" + echo "See: https://nfpm.goreleaser.com/install/" + exit 1 + fi +} -# Function to read values from a JSON manifest using jq -function read_json_value() { +read_json_value() { local json_file="$1" local property="$2" local default_value="$3" @@ -18,7 +45,8 @@ function read_json_value() { return fi - local value=$(jq -r ".$property // \"$default_value\"" "$json_file" 2>/dev/null) + local value + value=$(jq -r ".$property // \"$default_value\"" "$json_file" 2>/dev/null) if [ "$value" = "null" ]; then echo "$default_value" else @@ -26,23 +54,19 @@ function read_json_value() { fi } -# Function to check if current platform is in the platform list -function is_platform_supported() { +is_platform_supported() { local manifest_file="$1" - # Always expect an array from JSON - local platforms=$(jq -r '.platform | if type == "array" then . else [.] end | @json' "$manifest_file" 2>/dev/null) + local platforms + platforms=$(jq -r '.platform | if type == "array" then . else [.] 
end | @json' "$manifest_file" 2>/dev/null) - # If jq fails or returns null, default to ["all"] if [ -z "$platforms" ] || [ "$platforms" = "null" ]; then platforms='["all"]' fi - # Check if "all" is in the array if [[ "$platforms" == *"\"all\""* ]]; then return 0 fi - # Check if current platform is in the array if [[ "$platforms" == *"\"$TARGET\""* ]]; then return 0 fi @@ -50,276 +74,242 @@ function is_platform_supported() { return 1 } -# Check if a service is enabled based on its environment variable -function is_service_enabled() { - local manifest_file="$1" - - # Check if service has an environment variable for enabling - local env_var=$(read_json_value "$manifest_file" "env_var" "") +# Look up a service's type from packages.yaml +get_service_type() { + local name="$1" + python3 -c " +import yaml, sys +with open('$PACKAGING_DIR/packages.yaml') as f: + cfg = yaml.safe_load(f) +svc = cfg.get('services', {}).get('$name') +if svc: + print(svc.get('type', 'custom')) +else: + print('unknown') +" +} - # If no environment variable specified, service is always enabled - if [ -z "$env_var" ]; then - return 0 - fi +# ─── Build ──────────────────────────────────────────────────────────────────── - # Check the value of the environment variable - if [ "${!env_var}" = "y" ]; then - return 0 - fi +build_service() { + local name="$1" - return 1 + "$PACKAGING_DIR/build-packages.sh" build-service "$name" } -# Function to install files specified in manifest -function install_service_files() { - local SERVICE_NAME="$1" - local SERVICE_DIR="$2" - local MANIFEST_FILE="$3" - local REQUIRES_SUDO="$4" - - # Get the install files list from the manifest - local install_files=$(jq -r '.install_files | if . == null then [] else . end | .[]' "$MANIFEST_FILE" 2>/dev/null) - - if [ -n "$install_files" ]; then - echo "Installing files for $SERVICE_NAME..." 
- - for file in $install_files; do - if [ -f "$SERVICE_DIR/$file" ]; then - if [ "$REQUIRES_SUDO" = "true" ]; then - # Install to /usr/local/bin with sudo - sudo cp "$SERVICE_DIR/$file" /usr/local/bin/ - sudo chmod +x "/usr/local/bin/$file" - echo " - Installed $file to /usr/local/bin/" - else - # Install to ~/.local/bin - mkdir -p ~/.local/bin - cp "$SERVICE_DIR/$file" ~/.local/bin/ - chmod +x "$HOME/.local/bin/$file" - echo " - Installed $file to ~/.local/bin/" - fi - else - echo " - Warning: File $file not found in $SERVICE_DIR/" - fi - done - fi -} +# ─── Package ────────────────────────────────────────────────────────────────── -function uninstall_service() { - sudo systemctl stop "$1.service" &>/dev/null - sudo systemctl disable "$1.service" &>/dev/null - systemctl --user stop "$1.service" &>/dev/null - systemctl --user disable "$1.service" &>/dev/null - sudo rm "/etc/systemd/system/$1.service" &>/dev/null - sudo rm "/lib/systemd/system/$1.service" &>/dev/null - rm "$XDG_CONFIG_HOME/systemd/user/$1.service" &>/dev/null - rm -rf "$XDG_DATA_HOME/$1" - sudo systemctl daemon-reload - systemctl --user daemon-reload +package_service() { + local name="$1" + + "$PACKAGING_DIR/build-packages.sh" package-service "$name" } -function install_service() { - local SERVICE_NAME="$1" - local SERVICE_DIR="$SERVICES_DIR/$SERVICE_NAME" - local MANIFEST_FILE="$SERVICE_DIR/$SERVICE_NAME.manifest.json" +# ─── Install ────────────────────────────────────────────────────────────────── + +install_service() { + local name="$1" + local manifest_file="$SERVICES_DIR/$name/$name.manifest.json" - echo "Installing $SERVICE_NAME..." + echo "=== Installing $name ===" - # Check if manifest exists - if [ ! -f "$MANIFEST_FILE" ]; then - echo "Error: Manifest file for $SERVICE_NAME not found." + if [ ! -f "$manifest_file" ]; then + echo "Error: Manifest file for $name not found." return 1 fi - # Check if this service is supported on the current platform - if ! 
is_platform_supported "$MANIFEST_FILE"; then - echo "Service $SERVICE_NAME is not supported on platform $TARGET, skipping." + if ! is_platform_supported "$manifest_file"; then + echo "Service $name is not supported on platform $TARGET, skipping." return 0 fi - # Check if service is enabled in configuration - if ! is_service_enabled "$MANIFEST_FILE"; then - echo "Service $SERVICE_NAME is disabled in configuration, skipping." - uninstall_service "$SERVICE_NAME" - return 0 + local svc_type + svc_type=$(get_service_type "$name") + + # Build if needed + if [ "$svc_type" = "cpp" ]; then + build_service "$name" fi - # Uninstall first to ensure clean installation - uninstall_service "$SERVICE_NAME" + # Generate configs + package + package_service "$name" - # Get the install script from the manifest - local install_script=$(read_json_value "$MANIFEST_FILE" "install_script" "") + # Install the deb + local deb_file + deb_file=$(ls -t "$OUTPUT_DIR"/ark-"${name}"_*.deb 2>/dev/null | head -1) - # Run service-specific install script if specified in manifest - if [ -n "$install_script" ] && [ -f "$SERVICE_DIR/$install_script" ]; then - echo "Running installation script: $install_script" - # Execute the script with proper path - (cd "$SERVICE_DIR" && bash "./$install_script") + if [ -z "$deb_file" ]; then + echo "Error: No .deb package found for $name" + return 1 fi - # Check if service requires sudo - local requires_sudo=$(read_json_value "$MANIFEST_FILE" "requires_sudo" "false") + echo "Installing $deb_file..." + sudo dpkg -i "$deb_file" - # Install service files specified in manifest - install_service_files "$SERVICE_NAME" "$SERVICE_DIR" "$MANIFEST_FILE" "$requires_sudo" + # Dev workflow: always enable+start the service after install, even if + # the deb postinst doesn't (default_enabled: false services). 
+ local requires_sudo + requires_sudo=$(read_json_value "$manifest_file" "requires_sudo" "false") + if [ "$requires_sudo" = "true" ]; then + sudo systemctl enable "$name.service" + sudo systemctl restart "$name.service" + else + systemctl --user enable "$name.service" + systemctl --user restart "$name.service" + fi - # Install service file - if [ -f "$SERVICE_DIR/$SERVICE_NAME.service" ]; then - if [ "$requires_sudo" = "true" ]; then - # Install as root service - sudo cp "$SERVICE_DIR/$SERVICE_NAME.service" /etc/systemd/system/ - sudo systemctl daemon-reload - sudo systemctl enable "$SERVICE_NAME.service" - sudo systemctl restart "$SERVICE_NAME.service" - else - # Install as user service - mkdir -p $XDG_CONFIG_HOME/systemd/user/ - cp "$SERVICE_DIR/$SERVICE_NAME.service" "$XDG_CONFIG_HOME/systemd/user/" + echo "$name installed successfully." +} + +install_all_services() { + for service_dir in "$SERVICES_DIR"/*/; do + if [ -d "$service_dir" ]; then + local service_name + service_name=$(basename "$service_dir") + local manifest_file="$service_dir/$service_name.manifest.json" - # Add manifest to user data directory - mkdir -p "$XDG_DATA_HOME/$SERVICE_NAME" - cp "$MANIFEST_FILE" "$XDG_DATA_HOME/$SERVICE_NAME/" + # Skip services without a manifest + [ -f "$manifest_file" ] || continue - systemctl --user daemon-reload - systemctl --user enable "$SERVICE_NAME.service" - systemctl --user restart "$SERVICE_NAME.service" + # Skip services not in packages.yaml (e.g. removed/legacy) + local svc_type + svc_type=$(get_service_type "$service_name") + [ "$svc_type" = "unknown" ] && continue + + install_service "$service_name" fi + done +} - echo "$SERVICE_NAME installed successfully." +# ─── Uninstall ──────────────────────────────────────────────────────────────── + +uninstall_service() { + local name="$1" + local pkg_name="ark-${name}" + + if dpkg -s "$pkg_name" &>/dev/null; then + echo "Removing $pkg_name..." + sudo dpkg -r "$pkg_name" + echo "$pkg_name removed." 
else - echo "Error: Service file for $SERVICE_NAME not found." - return 1 + echo "$pkg_name is not installed." fi - - return 0 } -# Function to install all services -function install_all_services() { - # Discover all service directories - for service_dir in "$SERVICES_DIR"/*; do +# ─── List ───────────────────────────────────────────────────────────────────── + +list_services() { + echo "Available services (platform: $TARGET):" + echo "" + + for service_dir in "$SERVICES_DIR"/*/; do if [ -d "$service_dir" ]; then + local service_name service_name=$(basename "$service_dir") - install_service "$service_name" + local manifest_file="$service_dir/$service_name.manifest.json" + + [ -f "$manifest_file" ] || continue + + if ! is_platform_supported "$manifest_file"; then + continue + fi + + local display_name + display_name=$(read_json_value "$manifest_file" "displayName" "$service_name") + local description + description=$(read_json_value "$manifest_file" "description" "") + local pkg_name="ark-${service_name}" + + local installed="not installed" + if dpkg -s "$pkg_name" &>/dev/null; then + local pkg_ver + pkg_ver=$(dpkg -s "$pkg_name" 2>/dev/null | grep '^Version:' | awk '{print $2}') + installed="installed ($pkg_ver)" + fi + + echo " $display_name ($service_name)" + [ -n "$description" ] && echo " $description" + echo " Package: $pkg_name — $installed" + echo "" fi done } -# Process command line arguments -if [ $# -gt 0 ]; then - case "$1" in - install) - if [ -n "$2" ]; then - install_service "$2" +# ─── Status ─────────────────────────────────────────────────────────────────── + +show_status() { + echo "Service status:" + for service_dir in "$SERVICES_DIR"/*/; do + if [ -d "$service_dir" ]; then + local service_name + service_name=$(basename "$service_dir") + local manifest_file="$service_dir/$service_name.manifest.json" + + [ -f "$manifest_file" ] || continue + + if ! 
is_platform_supported "$manifest_file"; then + continue + fi + + local requires_sudo + requires_sudo=$(read_json_value "$manifest_file" "requires_sudo" "false") + + echo -n " $service_name: " + local status + if [ "$requires_sudo" = "true" ]; then + status=$(systemctl is-active "$service_name.service" 2>/dev/null) else - install_all_services + status=$(systemctl --user is-active "$service_name.service" 2>/dev/null) fi - ;; - uninstall) - if [ -n "$2" ]; then - uninstall_service "$2" - echo "$2 uninstalled." + + if [ "$status" = "active" ]; then + echo "running" else - echo "Error: Please specify a service to uninstall." - exit 1 + echo "not running" fi - ;; - list) - echo "Available services:" - for service_dir in "$SERVICES_DIR"/*; do - if [ -d "$service_dir" ]; then - service_name=$(basename "$service_dir") - manifest_file="$service_dir/$service_name.manifest.json" - - if [ -f "$manifest_file" ]; then - display_name=$(read_json_value "$manifest_file" "displayName" "$service_name") - description=$(read_json_value "$manifest_file" "description" "") - platforms=$(read_json_value "$manifest_file" "platform" "all") - requires_sudo=$(read_json_value "$manifest_file" "requires_sudo" "false") - env_var=$(read_json_value "$manifest_file" "env_var" "") - install_script=$(read_json_value "$manifest_file" "install_script" "") - install_files=$(jq -r '.install_files | if . == null then "none" else (. | join(", ")) end' "$manifest_file" 2>/dev/null) - - # Skip services that aren't supported on this platform - if ! 
is_platform_supported "$manifest_file"; then - continue - fi - - # Determine if the service is enabled - if is_service_enabled "$manifest_file"; then - enabled="enabled" - else - enabled="disabled" - fi - - echo " - $display_name ($service_name)" - if [ -n "$description" ]; then - echo " Description: $description" - fi - echo " Platforms: $platforms" - echo " Requires sudo: $requires_sudo" - if [ -n "$env_var" ]; then - echo " Environment variable: $env_var=${!env_var}" - fi - if [ -n "$install_script" ]; then - echo " Install script: $install_script" - fi - echo " Install files: $install_files" - echo " Status: $enabled" - echo "" - else - echo " - $service_name (no manifest)" - fi - fi - done - ;; - status) - echo "Service status:" - for service_dir in "$SERVICES_DIR"/*; do - if [ -d "$service_dir" ]; then - service_name=$(basename "$service_dir") - manifest_file="$service_dir/$service_name.manifest.json" - - # Skip services that aren't supported on this platform - if [ -f "$manifest_file" ] && ! is_platform_supported "$manifest_file"; then - continue - fi - - requires_sudo="false" - if [ -f "$manifest_file" ]; then - requires_sudo=$(read_json_value "$manifest_file" "requires_sudo" "false") - fi - - echo -n " - $service_name: " - if [ "$requires_sudo" = "true" ]; then - status=$(systemctl is-active "$service_name.service" 2>/dev/null) - else - status=$(systemctl --user is-active "$service_name.service" 2>/dev/null) - fi - - if [ "$status" = "active" ]; then - echo "running" - else - echo "not running" - fi - fi - done - ;; - help|--help|-h) - echo "Usage: $0 [command] [service]" - echo "Commands:" - echo " install [service] - Install all services or a specific service" - echo " uninstall - Uninstall a specific service" - echo " list - List available services" - echo " status - Show the status of all services" - echo " help - Show this help message" - ;; - *) - echo "Unknown command: $1" - echo "Use '$0 help' for usage information." 
+ fi + done +} + +# ─── Main ───────────────────────────────────────────────────────────────────── + +case "${1:-help}" in + install) + check_nfpm + mkdir -p "$BUILD_DIR" "$OUTPUT_DIR" + if [ -n "${2:-}" ]; then + install_service "$2" + else + install_all_services + fi + ;; + uninstall) + if [ -n "${2:-}" ]; then + uninstall_service "$2" + else + echo "Error: Please specify a service to uninstall." exit 1 - ;; - esac -else - # Default: install all services - install_all_services -fi + fi + ;; + list) + list_services + ;; + status) + show_status + ;; + help|--help|-h) + echo "Usage: $0 [service]" + echo "" + echo "Commands:" + echo " install [service] Build, package, and install a service (or all)" + echo " uninstall Remove a service's deb package" + echo " list List available services and install status" + echo " status Show systemd status of services" + echo " help Show this help message" + ;; + *) + echo "Unknown command: $1" + echo "Use '$0 help' for usage information." + exit 1 + ;; +esac