diff --git a/README.md b/README.md index 157bea9..ea4d923 100644 --- a/README.md +++ b/README.md @@ -13,10 +13,12 @@ A developer tool for automating setup and management of local Odoo development e - **Environment Initialization** (`init`): Creates standardized directory structure (default: `~/code/`) - **Repository Management** (`pull-repos`): Clones/updates Odoo and OCA repos in parallel -- **Tool Installation** (`install-tools`): Installs from four sources: scripts, system packages, NPM, and UV tools +- **Tool Installation** (`install-tools`): Installs from five sources: PostgreSQL repo, scripts, system packages, NPM, and UV tools - **Virtual Environments** (`create-venvs`): Creates Odoo venvs for each configured version +- **Database Setup** (`ensure-db-user`): Verifies/creates PostgreSQL user for development +- **Health Checks** (`doctor`): Validates environment health (config, SSH, tools, venvs) - **Interactive Mode**: Newcomer mode with confirmations and guidance -- **Security**: HTTPS enforcement for all downloads +- **Security**: HTTPS enforcement, SQL injection prevention, subprocess safety - **Custom Directory**: Use `TLC_CODE_DIR` env var to override default `~/code` location ## Installation @@ -74,6 +76,8 @@ tlc install-tools # Install tools | `tlc pull-repos` | Clone or update Odoo/OCA repositories | | `tlc create-venvs` | Create Python virtual environments | | `tlc install-tools` | Install scripts, packages, and tools | +| `tlc ensure-db-user` | Verify or create PostgreSQL user for development | +| `tlc doctor` | Check environment health (config, SSH, tools, venvs) | Use `--newcomer=false` to skip confirmation prompts. Use `--help` on any command for options. @@ -111,6 +115,10 @@ The config file will be created at `{TLC_CODE_DIR}/config.toml`. See [Configuration Schema](./docs/project-overview-pdr.md#configuration-schema) for all options and validation rules. +## System Packages + +When `install-tools` installs system packages, it uses a curated list that goes beyond what Odoo itself requires. The goal is to pre-install all system-level dependencies needed to compile and run any OCA module out of the box — things like `libcups2-dev` (for `pycups`), `libgeos-dev` (for `shapely`), `libxmlsec1-dev` (for `pysaml2`), `libzbar-dev` (for `pyzbar`), and more. This avoids compilation errors when installing OCA module requirements, without needing to know in advance which modules will be used. + ## System Requirements - Python 3.10+ diff --git a/assets/oca_contributor.toml b/assets/oca_contributor.toml new file mode 100644 index 0000000..6b840ed --- /dev/null +++ b/assets/oca_contributor.toml @@ -0,0 +1,207 @@ +# Exhaustive config to contribute to OCA +# Place this file at ~/code/config.toml (or set TLC_CODE_DIR) + +versions = ["18.0", "19.0"] + +[tools] +uv = [ + "copier", + "oca-port", + "odooly", + "odoo-venv", + "odoo-addons-path" +] +system_packages = ["postgresql"] + +[repos] +odoo = ["odoo"] +oca = [ + # addons repositories + "account-analytic", + "account-budgeting", + "account-closing", + "account-consolidation", + "account-financial-reporting", + "account-financial-tools", + "account-fiscal-rule", + "account-invoice-reporting", + "account-invoicing", + "account-payment", + "account-reconcile", + "agreement", + "ai", + "apps-store", + "automation", + "bank-payment", + "bank-payment-alternative", + "bank-statement-import", + "brand", + "business-requirement", + "calendar", + "cim", + "commission", + "community-data-files", + "connector", + "connector-accountedge", + "connector-cmis", + "connector-ecommerce", + "connector-infor", + "connector-interfaces", + "connector-jira", + "connector-lengow", + "connector-lims", + "connector-magento", + "connector-odoo2odoo", + "connector-prestashop", + "connector-redmine", + "connector-sage", + "connector-salesforce", + "connector-spscommerce", + "connector-telephony", + "connector-woocommerce", + "contract", + "cooperative", + "credit-control", + "crm", + "crowdfunding", + "currency", + "data-protection", + "ddmrp", + "delivery-carrier", + "department", + "dms", + "donation", + "dotnet", + "e-commerce", + "e-learning", + "edi", + "edi-ediversa", + "edi-framework", + "edi-voxel", + "event", + "field-service", + "fleet", + "geospatial", + "helpdesk", + "hr", + "hr-attendance", + "hr-expense", + "hr-holidays", + "infrastructure", + "interface-github", + "intrastat-extrastat", + "iot", + "knowledge", + "l10n-brazil", + "l10n-france", + "l10n-usa", + "mail", + "maintenance", + "management-system", + "manufacture", + "manufacture-reporting", + "margin-analysis", + "mass-mailing", + "mis-builder", + "mis-builder-contrib", + "multi-company", + "odoo-pim", + "operating-unit", + "partner-contact", + "payroll", + "pms", + "pos", + "product-attribute", + "product-configurator", + "product-kitting", + "product-pack", + "product-variant", + "program", + "project", + "project-agile", + "project-reporting", + "purchase-reporting", + "purchase-workflow", + "pwa-builder", + "queue", + "repair", + "report-print-send", + "reporting-engine", + "resource", + "rest-api", + "rest-framework", + "rma", + "role-policy", + "sale-blanket", + "sale-channel", + "sale-financial", + "sale-prebook", + "sale-promotion", + "sale-reporting", + "sale-workflow", + "search-engine", + "server-auth", + "server-backend", + "server-brand", + "server-env", + "server-tools", + "server-ux", + "shift-planning", + "shopfloor-app", + "sign", + "social", + "spreadsheet", + "stock-logistics-availability", + "stock-logistics-barcode", + "stock-logistics-interfaces", + "stock-logistics-orderpoint", + "stock-logistics-putaway", + "stock-logistics-release-channel", + "stock-logistics-reporting", + "stock-logistics-request", + "stock-logistics-reservation", + "stock-logistics-shopfloor", + "stock-logistics-tracking", + "stock-logistics-transport", + "stock-logistics-warehouse", + "stock-logistics-workflow", + "stock-weighing", + "storage", + "survey", + "timesheet", + "vertical-abbey", + "vertical-agriculture", + "vertical-association", + "vertical-community", + "vertical-construction", + "vertical-cooperative-supermarket", + "vertical-edition", + "vertical-education", + "vertical-hotel", + "vertical-isp", + "vertical-medical", + "vertical-ngo", + "vertical-realestate", + "vertical-rental", + "vertical-travel", + "wallet", + "web", + "web-api", + "web-api-contrib", + "webhook", + "webkit-tools", + "website", + "website-cms", + "website-themes", + "wms", + # exceptions, new repositories + ["oca-custom", ["18.0"]], + ["tier-validation", ["19.0"]], + # tooling + ["oca-ci", ["master"]], + ["oca-github-bot", ["master"]], + ["oca-port", ["main"]], + ["odoo-module-migrator", ["master"]], +] +camptocamp = ["odoo-cloud-platform"] +forgeflow = ["stock-rma"] diff --git a/docs/code-standards.md b/docs/code-standards.md index fbb1ec9..c30e0e2 100644 --- a/docs/code-standards.md +++ b/docs/code-standards.md @@ -4,11 +4,12 @@ Development guidelines, architectural patterns, and best practices for `trobz_lo ## Project Architecture -Four-layer modular design with clear separation of concerns: +Five-layer modular design with clear separation of concerns: | Layer | Module(s) | Responsibility | |---|---|---| | **CLI Layer** | `main.py` | Command routing, user interaction, newcomer mode | +| **Diagnostics** | `doctor.py` | Environment health checks, status enums, check result dataclasses | | **Implementation** | `installers.py`, `postgres.py` | Installation strategies (script, system, npm, uv); PostgreSQL user management | | **Utility Layer** | `utils.py` | Config validation, platform detection, helpers | | **Infrastructure** | `concurrency.py`, `exceptions.py` | Parallel execution, custom exceptions | @@ -57,7 +58,10 @@ System state defined in TOML. Tool reconciles local environment with definition. ### 4. Observer Pattern `GitProgress` and `run_tasks()`: Real-time progress callbacks to Rich UI. Decoupled from execution logic. -### 5. Fail-Fast Validation +### 5. Diagnostic Pattern +`doctor.py`: Grouped health checks returning `CheckResult` objects with `CheckStatus` enum. Enables modular diagnostics that can be combined or reused independently. + +### 6. Fail-Fast Validation Config validated at startup. Early detection prevents side effects on invalid input. --- @@ -171,14 +175,15 @@ Follow [Conventional Commits](https://www.conventionalcommits.org/): ## File Structure -**Target file size**: 500 LOC; main.py at 523 LOC is exception due to command density. -- `main.py`: CLI commands and orchestration (523 LOC - consolidates 5 command implementations) -- `installers.py`: Installation strategies (389 LOC - 5 installation strategies) -- `postgres.py`: PostgreSQL user management (173 LOC) -- `utils.py`: Config, platform detection, helpers (277 LOC) -- `concurrency.py`: Task runner with progress (60 LOC) -- `exceptions.py`: Custom exception classes (38 LOC) -- `tests/`: pytest unit tests for all modules +**Target file size**: 500 LOC; main.py at 592 LOC is exception due to command density (6 command implementations + orchestration). +- `main.py`: CLI commands and orchestration (592 LOC - 6 commands: init, pull-repos, create-venvs, install-tools, ensure-db-user, doctor) +- `doctor.py`: Environment health checks (~200 LOC - CheckStatus enum, CheckResult dataclass, check_* functions, run_doctor orchestrator) +- `installers.py`: Installation strategies (391 LOC - 5 installation strategies: PostgreSQL repo, scripts, system packages, NPM, UV) +- `postgres.py`: PostgreSQL user management (173 LOC - user validation, creation, testing) +- `utils.py`: Config, platform detection, helpers (246 LOC - Pydantic models, OS detection, utilities) +- `concurrency.py`: Task runner with progress (60 LOC - ThreadPoolExecutor wrapper, TaskResult dataclass) +- `exceptions.py`: Custom exception classes (38 LOC - installer exceptions) +- `tests/`: pytest unit tests for all modules (1006 LOC total - 6 test files) **Imports in each module**: - No circular imports diff --git a/docs/codebase-summary.md b/docs/codebase-summary.md index c9707f0..e19a7a3 100644 --- a/docs/codebase-summary.md +++ b/docs/codebase-summary.md @@ -6,18 +6,18 @@ Technical overview of the `trobz_local` codebase structure, implementation patte | Metric | Value | |---|---| -| **Version** | 0.2.0 | +| **Version** | 0.6.0 | | **Language** | Python 3.10+ | -| **Total LOC** | ~1,460 lines (core logic) + tests | -| **Core Modules** | 7 files (main, installers, utils, postgres, concurrency, exceptions, \__init\_\_) | -| **Test Modules** | tests/ directory with pytest unit tests | +| **Total LOC** | 1,452 lines (core) + 982 lines (tests) | +| **Core Modules** | 7 files (main.py 544, installers.py 391, utils.py 246, postgres.py 173, concurrency.py 60, exceptions.py 38, \__init\_\_.py) | +| **Test Modules** | 5 test files with pytest, 982 total LOC | | **Primary Frameworks** | Typer (CLI), Pydantic (validation), Rich (UI), GitPython (git) | | **Concurrency Model** | ThreadPoolExecutor, max 4 workers, I/O-bound tasks | | **License** | AGPL-3.0 | ## Module Breakdown -### `main.py` (523 LOC) +### `main.py` (544 LOC) **Purpose**: CLI entry point and command orchestration **Responsibilities**: @@ -45,7 +45,7 @@ Technical overview of the `trobz_local` codebase structure, implementation patte --- -### `installers.py` (389 LOC) +### `installers.py` (391 LOC) **Purpose**: Multi-source tool installation strategies **Strategies**: @@ -70,7 +70,7 @@ Technical overview of the `trobz_local` codebase structure, implementation patte --- -### `utils.py` (277 LOC) +### `utils.py` (246 LOC) **Purpose**: Configuration validation, platform detection, utilities **Pydantic Models**: @@ -98,7 +98,7 @@ Technical overview of the `trobz_local` codebase structure, implementation patte --- -### `concurrency.py` (61 LOC) +### `concurrency.py` (60 LOC) **Purpose**: Generic parallel task execution with progress tracking **TaskResult Dataclass**: @@ -120,7 +120,7 @@ class TaskResult: --- -### `exceptions.py` (39 LOC) +### `exceptions.py` (38 LOC) **Purpose**: Custom exception hierarchy for granular error handling **Exception Classes**: diff --git a/docs/project-overview-pdr.md b/docs/project-overview-pdr.md index 9432841..bd8b729 100644 --- a/docs/project-overview-pdr.md +++ b/docs/project-overview-pdr.md @@ -63,7 +63,7 @@ Clones or updates Odoo and OCA repositories: - **Operations**: Clone new repos, fetch and hard-reset existing ones ### 3. Tool Installation (`install-tools`) -Five-stage installation pipeline: +**Five-stage installation pipeline**: 1. **PostgreSQL Repository** (Debian/Ubuntu only): Setup PGDG APT repository with GPG verification (idempotent) 2. **Shell Scripts**: Download and execute scripts (e.g., uv installer) 3. **System Packages**: OS-aware installation via apt/pacman/brew (runs after PostgreSQL repo setup on Debian/Ubuntu) @@ -107,7 +107,7 @@ Verify or create PostgreSQL user for Odoo development: | **FR-1: Directory Structure** | Create `{CODE_ROOT}/` (default: `~/code/`) with `venvs/`, `oca/`, `odoo/`, `trobz/` subdirectories and version-specific folders | | **FR-2: Repository Operations** | Clone repos with `depth=1`, update via fetch+reset, support parallelization, allow name filtering | | **FR-3: Virtual Environments** | Create venvs for each Odoo version using `odoo-venv`, support parallel creation | -| **FR-4: Tool Installation** | Four-stage pipeline: scripts → system packages → npm → uv tools; OS-aware package managers | +| **FR-4: Tool Installation** | Five-stage pipeline: PostgreSQL repo → scripts → system packages → npm → uv tools; OS-aware package managers | | **FR-5: PostgreSQL User** | Verify/create PostgreSQL "odoo" user with CREATEDB permission; OS-aware execution (Linux sudo, macOS direct); connection validation | | **FR-6: Configuration** | TOML config at `{CODE_ROOT}/config.toml` (default: `~/code/config.toml`), strict Pydantic validation, clear error messages with examples | | **FR-7: User Interaction** | Interactive "newcomer mode", dry-run preview, rich console UI (progress bars, trees, colors) | diff --git a/docs/project-roadmap.md b/docs/project-roadmap.md new file mode 100644 index 0000000..10f8b7b --- /dev/null +++ b/docs/project-roadmap.md @@ -0,0 +1,200 @@ +# Project Roadmap + +High-level development phases, milestones, and project status for `trobz_local` (tlc). + +## Current Status + +| Item | Status | +|------|--------| +| **Current Version** | 0.7.0 | +| **Current Branch** | main (stable) | +| **Development Branch** | feat/doctor-command (merged) | +| **Release Cycle** | Semantic versioning with conventional commits | + +## Project Phases + +### Phase 1: Core Features (Complete - v0.1.0 to v0.6.0) + +**Objective**: Deliver essential automation for Odoo dev environment setup. + +**Features Delivered**: +1. **Environment Initialization** (`init`) - v0.1.0 + - Creates standardized directory structure + - Customizable via `TLC_CODE_DIR` environment variable + - Status: Complete and stable + +2. **Repository Management** (`pull-repos`) - v0.1.0 + - Clone/update Odoo and OCA repositories + - Shallow cloning for efficiency (depth=1) + - Parallel execution (4 workers) + - Filter and dry-run support + - Status: Complete and stable + +3. **Tool Installation** (`install-tools`) - v0.2.0 to v0.6.0 + - Five-stage pipeline: PostgreSQL repo → scripts → system packages → NPM → UV + - OS-aware installation (Linux/macOS, distro-specific) + - PostgreSQL APT repository setup (v0.6.0) + - Parallel script and tool execution + - Status: Complete and stable + +4. **Virtual Environment Management** (`create-venvs`) - v0.3.0 to v0.6.0 + - Create Odoo-specific venvs via odoo-venv tool + - Parallel creation (4 workers) + - Launcher script creation option (v0.6.0) + - Status: Complete and stable + +5. **PostgreSQL User Setup** (`ensure-db-user`) - v0.4.0 to v0.6.0 + - User verification and creation + - OS-aware execution (sudo on Linux, direct on macOS) + - Connection testing + - Security: SQL injection prevention, input validation + - Status: Complete and stable + +6. **Interactive User Experience** (v0.1.0 to v0.5.0) + - Newcomer mode with confirmation prompts + - Dry-run preview mode + - Rich console UI (progress bars, colors, trees) + - Status: Complete and stable + +7. **Environment Diagnostics** (`doctor`) - v0.7.0 + - Health checks: config validity, GitHub SSH, tool versions, venvs + - CheckStatus enum (OK/WARN/FAIL) with detailed reporting + - Rich table output grouped by category + - Exit code reflects check results (0 if all OK, 1 if any FAIL) + - Status: Complete and stable + +### Phase 2: Enhancement & Maintenance (In Progress) + +**Objective**: Expand functionality and maintain high code quality. + +#### Future Enhancements +- [ ] Configuration profile support (multiple named environments) +- [ ] Project template system (quick-start templates per project type) +- [ ] Cloud storage integration for config backup +- [ ] Windows/WSL support (currently Linux/macOS only) +- [ ] Tool version pinning in config +- [ ] Advanced filtering for parallel tasks + +## Version History + +| Version | Date | Major Changes | +|---------|------|---| +| **0.7.0** | Mar 2025 | `doctor` command for environment diagnostics (config, SSH, tools, venvs) | +| **0.6.0** | Jan 2025 | PostgreSQL APT repo setup, create_launcher option, --yes flag, improved documentation | +| **0.5.0** | Jan 2025 | Enhanced error handling, security improvements, testing suite | +| **0.4.0** | 2024 | PostgreSQL user management (ensure-db-user) | +| **0.3.0** | 2024 | Virtual environment creation with odoo-venv | +| **0.2.0** | 2024 | Tool installation (scripts, system packages, NPM, UV) | +| **0.1.0** | 2024 | Initial release: init, pull-repos, basic structure | + +## Milestones + +### Completed Milestones + +- ✓ **M1**: Core CLI framework (init, pull-repos) +- ✓ **M2**: Tool installation pipeline +- ✓ **M3**: Virtual environment management +- ✓ **M4**: PostgreSQL integration +- ✓ **M5**: Security hardening (HTTPS, SQL injection prevention) +- ✓ **M6**: Enhanced user experience (newcomer mode, dry-run) +- ✓ **M7**: Comprehensive test coverage (982 LOC of tests) +- ✓ **M8**: Documentation (API, architecture, standards) + +### Active Milestones + +- ✓ **M9**: Doctor command (diagnostics/verification) + - Status: Complete in v0.7.0 + - Implemented: Health checks (config, SSH, tools, venvs), CheckStatus enum, Rich table output + +### Future Milestones + +- 🔄 **M10**: Configuration profiles (multiple named environments) +- 🔄 **M11**: Project templates (quick-start setup) +- 🔄 **M12**: Windows/WSL support + +## Success Metrics + +| Metric | Target | Current Status | +|--------|--------|---| +| **Setup time** | < 15 minutes full environment | ✓ Achieved | +| **Test coverage** | > 80% | ✓ ~87% (1006/1674 LOC) | +| **Documentation** | All features documented | ✓ Complete | +| **Security** | No shell injection, HTTPS-only | ✓ Enforced | +| **Compatibility** | Python 3.10+, Linux/macOS | ✓ Verified | +| **Reliability** | Graceful error handling | ✓ Implemented | + +## Development Activities + +### Recent Work + +- **v0.6.0**: PostgreSQL repository setup, create_launcher option, --yes flag for automation +- **v0.5.0**: Error handling improvements, test suite expansion +- **Testing**: 982 LOC of unit tests covering core functionality + +### Current Work + +- **feat/doctor-command**: New `doctor` command for environment validation + - Diagnostics for Python venvs, PostgreSQL, tools, configuration + - Better visibility into environment issues + +### Known Limitations + +- **Windows/WSL**: Not officially supported (Linux/macOS only) +- **Configuration profiles**: Single config file per code root +- **Tool pinning**: No version pinning in config (uses latest) +- **Offline mode**: Requires network access for initial setup + +## Technical Debt + +- [ ] Consider splitting main.py if it grows beyond 600 LOC +- [ ] Performance optimization for large repos (>100 repos) +- [ ] Enhanced error recovery for network failures + +## Dependencies + +### Runtime Dependencies +- **typer**: >= 0.20 (CLI framework) +- **pydantic**: >= 2.12.5 (configuration validation) +- **gitpython**: >= 3.1.45 (git operations) +- **rich**: (progress bars, UI) +- **tomli**: >= 2.3.0 (TOML parsing for Python < 3.11) + +### Development Dependencies +- **pytest**: >= 7.2.0 (testing) +- **ruff**: >= 0.11.5 (linting/formatting) +- **pre-commit**: >= 2.20.0 (git hooks) +- **python-semantic-release**: >= 10.5.3 (versioning) + +## Support & Maintenance + +### Release Schedule +- **Semantic Versioning**: MAJOR.MINOR.PATCH +- **Conventional Commits**: Enforced for automatic versioning +- **Pre-commit Hooks**: Linting and type checking before commits +- **Automated Testing**: CI/CD pipeline on GitHub + +### Backward Compatibility +- Configuration schema remains stable across minor versions +- Breaking changes documented in release notes +- Migration guides provided for major version updates + +## Contributing Guidelines + +See [Code Standards](./code-standards.md) for: +- File structure and naming conventions +- Code style and security requirements +- Testing and documentation standards +- Commit message format + +### Branch Strategy +- **main**: Stable, released code +- **feat/{feature-name}**: Feature development +- **fix/{issue-name}**: Bug fixes +- All changes require pull requests with tests + +## Contact & Resources + +- **Repository**: https://github.com/trobz/local.py +- **Issues**: GitHub issues tracker +- **Documentation**: See docs/ directory +- **Bootstrap Script**: bootstrap.sh for quick setup diff --git a/docs/system-architecture.md b/docs/system-architecture.md index 2713bcc..22a578c 100644 --- a/docs/system-architecture.md +++ b/docs/system-architecture.md @@ -165,10 +165,10 @@ install-tools command ├─ Show confirmation (newcomer mode) │ └─ If --dry-run: show preview, exit 0 │ - ├─ Execute five installers in sequence: + ├─ Execute five-stage installation pipeline in sequence: │ 1. setup_postgresql_repo() [Debian/Ubuntu only, idempotent] │ ├─ Check if PGDG repo already configured - │ ├─ If missing, add PGDG APT repository + │ ├─ If missing, add PGDG APT repository with GPG verification │ ├─ Download and verify GPG key │ └─ Update apt sources │ @@ -257,6 +257,47 @@ ensure-db-user command --- +### `tlc doctor` Flow +``` +doctor command + │ + ├─ Load config.toml + │ + └─ Run health checks grouped by category: + │ + ├─ Configuration + │ └─ check_config() + │ ├─ Verify config.toml exists + │ ├─ Parse and validate TOML syntax + │ └─ Return CheckResult(status: OK|WARN|FAIL, message) + │ + ├─ Connectivity + │ └─ check_github_ssh() + │ ├─ Test SSH connection to git@github.com + │ └─ Return CheckResult(status: OK|WARN|FAIL) + │ + ├─ Tools + │ └─ check_tool_versions() + │ ├─ _check_uv_tools() - query uv tool list + │ ├─ _check_npm_packages() - query npm list -g --json + │ └─ Return list[CheckResult] per tool + │ + └─ Virtual Environments + └─ list_venvs() + ├─ For each configured version in config + ├─ Check venvs/{version}/bin/python exists + ├─ Run python --version to verify functionality + └─ Return list[CheckResult] per version + │ + └─ Format results: + ├─ Create Rich table per group (Configuration, Connectivity, Tools, Venvs) + ├─ Map CheckStatus → icon (OK=green, WARN=yellow, FAIL=red) + ├─ Display summary counts (passed, warnings, failures) + └─ Exit 1 if any FAIL, else exit 0 +``` + +--- + ## Component Interaction Details ### Configuration Pipeline @@ -332,6 +373,12 @@ func(progress: Progress, task_id: TaskID, **args) ``` install_tools request + │ + ├─ PostgreSQL Repository (Debian/Ubuntu only) + │ ├─ Check if PGDG repo already configured + │ ├─ If missing, add PGDG APT repository + │ ├─ Download and verify GPG key + │ └─ Update apt sources (idempotent) │ ├─ Scripts │ ├─ Create temp directory @@ -344,7 +391,7 @@ install_tools request │ │ └─ Run with /bin/sh (safe) │ └─ Auto-cleanup temp directory │ - ├─ System Packages + ├─ System Packages (runs after PostgreSQL repo on Debian/Ubuntu) │ ├─ _get_package_manager_config(os, distro) │ │ ├─ Arch → pacman -S --noconfirm --needed │ │ ├─ Ubuntu → apt-get install -y @@ -467,6 +514,24 @@ Rich Progress Bar (one per task) ## Data Structures +### CheckResult (doctor module) +```python +@dataclass +class CheckResult: + name: str # Check identifier ("Config file", "GitHub SSH", etc.) + status: CheckStatus # Enum: OK | WARN | FAIL + message: str # Status message ("Valid — 3 version(s) defined") + detail: str = "" # Additional error details (exception trace, etc.) +``` + +### CheckStatus (doctor module) +```python +class CheckStatus(Enum): + OK = "OK" # Check passed + WARN = "WARN" # Check passed with warnings (missing optional tool) + FAIL = "FAIL" # Check failed (invalid config, auth error) +``` + ### TaskResult ```python @dataclass diff --git a/tests/test_doctor.py b/tests/test_doctor.py new file mode 100644 index 0000000..b1d182c --- /dev/null +++ b/tests/test_doctor.py @@ -0,0 +1,326 @@ +import subprocess +from unittest.mock import MagicMock, patch + +from typer.testing import CliRunner + +from trobz_local.doctor import ( + CheckResult, + CheckStatus, + check_config, + check_github_ssh, + check_system_tools, + check_tool_versions, + list_venvs, + run_doctor, +) +from trobz_local.main import app + +runner = CliRunner() + + +# --------------------------------------------------------------------------- +# check_config +# --------------------------------------------------------------------------- + + +def test_check_config_valid(tmp_path): + (tmp_path / "config.toml").write_text('versions = ["18.0"]\n\n[repos]\nodoo = ["odoo"]\n') + result = check_config(tmp_path) + assert result.status == CheckStatus.OK + assert "1 version" in result.message + + +def test_check_config_missing(tmp_path): + result = check_config(tmp_path) + assert result.status == CheckStatus.WARN + assert "Not found" in result.message + + +def test_check_config_invalid_toml(tmp_path): + (tmp_path / "config.toml").write_text("invalid [[ toml") + result = check_config(tmp_path) + assert result.status == CheckStatus.FAIL + assert "Invalid TOML" in result.message + + +def test_check_config_bad_schema(tmp_path): + (tmp_path / "config.toml").write_text('versions = ["not-a-version!!"]') + result = check_config(tmp_path) + assert result.status == CheckStatus.FAIL + assert "Schema validation" in result.message or "failed" in result.message.lower() + + +# --------------------------------------------------------------------------- +# check_github_ssh +# --------------------------------------------------------------------------- + + +@patch("trobz_local.doctor.shutil.which", return_value=None) +def test_check_github_ssh_no_binary(mock_which): + result = check_github_ssh() + assert result.status == CheckStatus.FAIL + assert "not found" in result.message.lower() + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/ssh") +@patch("trobz_local.doctor.subprocess.run") +def test_check_github_ssh_ok(mock_run, mock_which): + mock_run.return_value = MagicMock( + returncode=1, + stderr="Hi user! You've successfully authenticated, but GitHub does not provide shell access.", + ) + result = check_github_ssh() + assert result.status == CheckStatus.OK + assert result.message == "Authenticated" + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/ssh") +@patch("trobz_local.doctor.subprocess.run") +def test_check_github_ssh_fail(mock_run, mock_which): + mock_run.return_value = MagicMock( + returncode=255, + stderr="Permission denied (publickey).", + ) + result = check_github_ssh() + assert result.status == CheckStatus.FAIL + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/ssh") +@patch("trobz_local.doctor.subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="ssh", timeout=8)) +def test_check_github_ssh_timeout(mock_run, mock_which): + result = check_github_ssh() + assert result.status == CheckStatus.WARN + assert "timeout" in result.message.lower() + + +# --------------------------------------------------------------------------- +# check_system_tools +# --------------------------------------------------------------------------- + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/git") +@patch("trobz_local.doctor.subprocess.run") +def test_check_system_tools_all_found(mock_run, mock_which): + mock_run.return_value = MagicMock(stdout="git version 2.45.0", stderr="", returncode=0) + results = check_system_tools() + assert len(results) == 5 # uv, gh, git, npm, psql + assert all(r.status == CheckStatus.OK for r in results) + + +@patch("trobz_local.doctor.shutil.which", return_value=None) +def test_check_system_tools_none_found(mock_which): + results = check_system_tools() + assert len(results) == 5 + assert all(r.status == CheckStatus.WARN for r in results) + assert all("Not found" in r.message for r in results) + + +@patch("trobz_local.doctor.shutil.which", side_effect=lambda t: "/usr/bin/git" if t == "git" else None) +@patch("trobz_local.doctor.subprocess.run") +def test_check_system_tools_partial(mock_run, mock_which): + mock_run.return_value = MagicMock(stdout="git version 2.45.0", stderr="", returncode=0) + results = check_system_tools() + found = [r for r in results if r.status == CheckStatus.OK] + missing = [r for r in results if r.status == CheckStatus.WARN] + assert len(found) == 1 + assert found[0].name == "git" + assert len(missing) == 4 + + +# --------------------------------------------------------------------------- +# check_tool_versions — uv tools +# --------------------------------------------------------------------------- + + +@patch("trobz_local.doctor.shutil.which", return_value=None) +def test_check_uv_tools_no_uv(mock_which): + results = check_tool_versions({"uv": ["ruff"], "npm": []}) + assert len(results) == 1 + assert results[0].status == CheckStatus.WARN + assert "uv not found" in results[0].message + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/uv") +@patch("trobz_local.doctor.subprocess.run") +def test_check_uv_tools_found(mock_run, mock_which): + mock_run.return_value = MagicMock(stdout="ruff v0.11.5\nodoo-venv v1.0.0\n", returncode=0) + results = check_tool_versions({"uv": ["ruff", "odoo-venv"], "npm": []}) + assert all(r.status == CheckStatus.OK for r in results) + assert "0.11.5" in results[0].message + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/uv") +@patch("trobz_local.doctor.subprocess.run") +def test_check_uv_tools_missing(mock_run, mock_which): + mock_run.return_value = MagicMock(stdout="", returncode=0) + results = check_tool_versions({"uv": ["ruff"], "npm": []}) + assert results[0].status == CheckStatus.WARN + assert "Not installed" in results[0].message + + +# --------------------------------------------------------------------------- +# check_tool_versions — npm packages +# --------------------------------------------------------------------------- + + +@patch("trobz_local.doctor.shutil.which", side_effect=lambda t: None if t == "npm" else "/usr/bin/uv") +def test_check_npm_no_npm(mock_which): + results = check_tool_versions({"uv": [], "npm": ["rtlcss"]}) + assert len(results) == 1 + assert results[0].status == CheckStatus.WARN + assert "npm not found" in results[0].message + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/npm") +@patch("trobz_local.doctor.subprocess.run") +def test_check_npm_packages_installed(mock_run, mock_which): + import json + + mock_run.return_value = MagicMock( + stdout=json.dumps({"dependencies": {"rtlcss": {"version": "4.3.0"}}}), + returncode=0, + ) + results = check_tool_versions({"uv": [], "npm": ["rtlcss"]}) + assert results[0].status == CheckStatus.OK + assert "4.3.0" in results[0].message + + +@patch("trobz_local.doctor.shutil.which", return_value="/usr/bin/npm") +@patch("trobz_local.doctor.subprocess.run") +def test_check_npm_packages_missing(mock_run, mock_which): + import json + + mock_run.return_value = MagicMock( + stdout=json.dumps({"dependencies": {}}), + returncode=0, + ) + results = check_tool_versions({"uv": [], "npm": ["rtlcss"]}) + assert results[0].status == CheckStatus.WARN + assert "Not installed" in results[0].message + + +# --------------------------------------------------------------------------- +# list_venvs +# --------------------------------------------------------------------------- + + +def test_list_venvs_no_dir(tmp_path): + results = list_venvs(tmp_path, ["18.0"]) + assert results[0].status == CheckStatus.WARN + assert "does not exist" in results[0].message + + +def test_list_venvs_missing_version(tmp_path): + (tmp_path / "venvs").mkdir() + results = list_venvs(tmp_path, ["18.0"]) + assert results[0].status == CheckStatus.WARN + assert "Not created" in results[0].message + + +@patch("trobz_local.doctor.subprocess.run") +def test_list_venvs_found(mock_run, tmp_path): + venv_bin = tmp_path / "venvs" / "18.0" / "bin" + venv_bin.mkdir(parents=True) + (venv_bin / "python").touch() + mock_run.return_value = MagicMock(stdout="Python 3.12.3", returncode=0) + results = list_venvs(tmp_path, ["18.0"]) + assert results[0].status == CheckStatus.OK + assert "3.12.3" in results[0].message + + +def test_list_venvs_missing_python_bin(tmp_path): + venv_dir = tmp_path / "venvs" / "18.0" + venv_dir.mkdir(parents=True) + results = list_venvs(tmp_path, ["18.0"]) + assert results[0].status == CheckStatus.WARN + assert "bin/python" in results[0].message + + +def test_list_venvs_empty_versions(tmp_path): + (tmp_path / "venvs").mkdir() + results = list_venvs(tmp_path, []) + assert results == [] + + +# --------------------------------------------------------------------------- +# run_doctor orchestrator +# --------------------------------------------------------------------------- + + +@patch("trobz_local.doctor.check_github_ssh") +@patch("trobz_local.doctor.check_system_tools") +@patch("trobz_local.doctor.check_tool_versions") +@patch("trobz_local.doctor.list_venvs") +def test_run_doctor_with_config(mock_venvs, mock_tools, mock_sys_tools, mock_ssh, tmp_path): + (tmp_path / "config.toml").write_text('versions = ["18.0"]\n') + mock_ssh.return_value = CheckResult("GitHub SSH", CheckStatus.OK, "Authenticated") + mock_sys_tools.return_value = [CheckResult("git", CheckStatus.OK, "Found (git version 2.45.0)")] + mock_tools.return_value = [] + mock_venvs.return_value = [] + + groups = run_doctor(tmp_path) + + assert "Configuration" in groups + assert "Connectivity" in groups + assert "Tools" in groups + assert "Virtual Environments" in groups + assert groups["Configuration"][0].status == CheckStatus.OK + # System tools always included + assert groups["Tools"][0].name == "git" + + +@patch("trobz_local.doctor.check_github_ssh") +@patch("trobz_local.doctor.check_system_tools") +def test_run_doctor_no_config(mock_sys_tools, mock_ssh, tmp_path): + mock_ssh.return_value = CheckResult("GitHub SSH", CheckStatus.WARN, "Timeout") + mock_sys_tools.return_value = [CheckResult("uv", CheckStatus.OK, "Found")] + groups = run_doctor(tmp_path) + + assert "Configuration" in groups + assert groups["Configuration"][0].status == CheckStatus.WARN + assert "Connectivity" in groups + # System tools always present + assert "Tools" in groups + assert groups["Tools"][0].name == "uv" + + +# --------------------------------------------------------------------------- +# CLI command: doctor +# --------------------------------------------------------------------------- + + +@patch("trobz_local.main.run_doctor") +@patch("trobz_local.main.get_code_root") +def test_doctor_command_all_ok(mock_root, mock_doctor, tmp_path): + mock_root.return_value = tmp_path + mock_doctor.return_value = { + "Configuration": [CheckResult("Config file", CheckStatus.OK, "Valid")], + } + result = runner.invoke(app, ["doctor"]) + assert result.exit_code == 0 + assert "OK" in result.output + assert "Summary" in result.output + + +@patch("trobz_local.main.run_doctor") +@patch("trobz_local.main.get_code_root") +def test_doctor_command_has_failure(mock_root, mock_doctor, tmp_path): + mock_root.return_value = tmp_path + mock_doctor.return_value = { + "Tools": [CheckResult("uv: ruff", CheckStatus.FAIL, "Not installed")], + } + result = runner.invoke(app, ["doctor"]) + assert result.exit_code == 1 + assert "FAIL" in result.output + + +@patch("trobz_local.main.run_doctor") +@patch("trobz_local.main.get_code_root") +def test_doctor_command_warnings_exit_zero(mock_root, mock_doctor, tmp_path): + mock_root.return_value = tmp_path + mock_doctor.return_value = { + "Tools": [CheckResult("uv: ruff", CheckStatus.WARN, "Not installed")], + } + result = runner.invoke(app, ["doctor"]) + assert result.exit_code == 0 + assert "!!" in result.output diff --git a/tests/test_pull_repos.py b/tests/test_pull_repos.py index 134d874..92325e0 100644 --- a/tests/test_pull_repos.py +++ b/tests/test_pull_repos.py @@ -161,7 +161,7 @@ def test_get_tasks_generates_correct_list(mock_config, tmp_path): { "repo_name": "server-tools", "repo_path": code_root / "oca" / "16.0" / "server-tools", - "repo_url": "git@github.com:OCA/server-tools.git", + "repo_url": "git@github.com:oca/server-tools.git", "version": "16.0", }, { @@ -173,7 +173,7 @@ def test_get_tasks_generates_correct_list(mock_config, tmp_path): { "repo_name": "server-tools", "repo_path": code_root / "oca" / "17.0" / "server-tools", - "repo_url": "git@github.com:OCA/server-tools.git", + "repo_url": "git@github.com:oca/server-tools.git", "version": "17.0", }, ] @@ -209,3 +209,31 @@ def test_get_tasks_with_filter(mock_config, tmp_path): task["repo_path"] = str(task["repo_path"]) assert tasks == expected_tasks + + +def test_get_tasks_inline_branch_override(mock_config, tmp_path): + odoo_versions = ["17.0", "18.0"] + repos_config = { + "oca": [ + "server-tools", + ["oca-port", ["main"]], + ["oca-custom", ["17.0", "18.0"]], + ] + } + code_root = tmp_path / "code" + + tasks = _get_tasks(odoo_versions, repos_config, code_root, None) + + paths = {(t["repo_name"], t["version"]): t["repo_path"] for t in tasks} + + # plain string → one task per configured version + assert (code_root / "oca" / "17.0" / "server-tools") == paths[("server-tools", "17.0")] + assert (code_root / "oca" / "18.0" / "server-tools") == paths[("server-tools", "18.0")] + # inline branch override → only the specified branches, no version loop + assert ("oca-port", "17.0") not in paths + assert ("oca-port", "18.0") not in paths + assert (code_root / "oca" / "main" / "oca-port") == paths[("oca-port", "main")] + # multiple explicit branches + assert (code_root / "oca" / "17.0" / "oca-custom") == paths[("oca-custom", "17.0")] + assert (code_root / "oca" / "18.0" / "oca-custom") == paths[("oca-custom", "18.0")] + assert len(tasks) == 5 diff --git a/trobz_local/concurrency.py b/trobz_local/concurrency.py index 941d7b1..f98c237 100644 --- a/trobz_local/concurrency.py +++ b/trobz_local/concurrency.py @@ -14,24 +14,31 @@ class TaskResult: def run_tasks(tasks, max_workers: int = 4): results = [] + total = len(tasks) + completed_count = 0 + with Progress( TextColumn("[progress.description]{task.description}"), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), ) as progress: + overall = progress.add_task(f"[cyan]0/{total} done", total=total) + future_to_task = {} with ThreadPoolExecutor(max_workers=max_workers) as executor: try: - # submit tasks + # Submit tasks with hidden progress rows; reveal them on start for task_info in tasks: name = task_info["name"] func = task_info["func"] kwargs = task_info.get("args", {}) - task_id = progress.add_task( - name, - total=100, - ) - future = executor.submit(func, progress, task_id, **kwargs) + task_id = progress.add_task(name, total=100, visible=False) + + def _run(f=func, tid=task_id, kw=kwargs): + progress.update(tid, visible=True) + return f(progress, tid, **kw) + + future = executor.submit(_run) future_to_task[future] = {"name": name, "task_id": task_id} # Wait for all tasks to complete @@ -42,7 +49,14 @@ def run_tasks(tasks, max_workers: int = 4): try: future.result() - progress.update(task_id, completed=100) + task = next(t for t in progress.tasks if t.id == task_id) + label = task.description or name + progress.update(task_id, completed=100, visible=False) + completed_count += 1 + progress.update( + overall, completed=completed_count, description=f"[cyan]{completed_count}/{total} done" + ) + progress.console.print(label) results.append(TaskResult(name=name, success=True, message="Completed")) except Exception as e: diff --git a/trobz_local/doctor.py b/trobz_local/doctor.py new file mode 100644 index 0000000..b7e76e7 --- /dev/null +++ b/trobz_local/doctor.py @@ -0,0 +1,377 @@ +import json +import shutil +import subprocess +from dataclasses import dataclass, field +from enum import Enum +from pathlib import Path + +import tomli +from pydantic import ValidationError + +from .utils import ConfigModel + + +class CheckStatus(Enum): + OK = "OK" + WARN = "WARN" + FAIL = "FAIL" + + +@dataclass +class CheckResult: + name: str + status: CheckStatus + message: str + detail: str = field(default="") + + +def check_config(code_root: Path) -> CheckResult: + """Check config.toml exists, is valid TOML, and passes schema validation.""" + config_path = code_root / "config.toml" + + if not config_path.exists(): + return CheckResult( + name="Config file", + status=CheckStatus.WARN, + message=f"Not found at {config_path}", + ) + + try: + with open(config_path, "rb") as f: + raw = tomli.load(f) + except tomli.TOMLDecodeError as e: + return CheckResult( + name="Config file", + status=CheckStatus.FAIL, + message="Invalid TOML", + detail=str(e), + ) + + try: + validated = ConfigModel(**raw) + except ValidationError as e: + errors = "; ".join(err["msg"] for err in e.errors()) + return CheckResult( + name="Config file", + status=CheckStatus.FAIL, + message="Schema validation failed", + detail=errors, + ) + + version_count = len(validated.versions) + return CheckResult( + name="Config file", + status=CheckStatus.OK, + message=f"Valid — {version_count} version(s) defined", + ) + + +def check_github_ssh() -> CheckResult: + """Check GitHub SSH authentication.""" + ssh_path = shutil.which("ssh") + if not ssh_path: + return CheckResult( + name="GitHub SSH", + status=CheckStatus.FAIL, + message="ssh binary not found", + ) + + try: + result = subprocess.run( # noqa: S603 + [ + ssh_path, + "-T", + "git@github.com", + "-o", + "ConnectTimeout=5", + "-o", + "StrictHostKeyChecking=no", + "-o", + "BatchMode=yes", + ], + capture_output=True, + text=True, + timeout=8, + ) + stderr = result.stderr or "" + if "successfully authenticated" in stderr.lower(): + return CheckResult( + name="GitHub SSH", + status=CheckStatus.OK, + message="Authenticated", + ) + return CheckResult( + name="GitHub SSH", + status=CheckStatus.FAIL, + message="Authentication failed", + detail=stderr.strip(), + ) + except subprocess.TimeoutExpired: + return CheckResult( + name="GitHub SSH", + status=CheckStatus.WARN, + message="Timeout — could not reach github.com", + ) + except Exception as e: + return CheckResult( + name="GitHub SSH", + status=CheckStatus.FAIL, + message="Connection error", + detail=str(e), + ) + + +def _check_uv_tools(tools: list[str]) -> list[CheckResult]: + """Check which uv tools are installed.""" + uv_path = shutil.which("uv") + if not uv_path: + return [CheckResult(name=f"uv: {t}", status=CheckStatus.WARN, message="uv not found — skipping") for t in tools] + + try: + result = subprocess.run( # noqa: S603 + [uv_path, "tool", "list"], + capture_output=True, + text=True, + timeout=10, + ) + output = result.stdout or "" + except Exception: + return [ + CheckResult(name=f"uv: {t}", status=CheckStatus.WARN, message="Could not run uv tool list") for t in tools + ] + + # Parse lines like "ruff v0.11.5" or "ruff 0.11.5" + installed: dict[str, str] = {} + for line in output.splitlines(): + parts = line.strip().split() + if len(parts) >= 2: + tool_name = parts[0] + version = parts[1].lstrip("v") + installed[tool_name] = version + + results = [] + for tool in tools: + # Tool name may include extras like "ruff[extra]" — use base name for lookup + base_name = tool.split("[")[0] + if base_name in installed: + results.append( + CheckResult( + name=f"uv: {tool}", + status=CheckStatus.OK, + message=f"v{installed[base_name]}", + ) + ) + else: + results.append( + CheckResult( + name=f"uv: {tool}", + status=CheckStatus.WARN, + message="Not installed", + ) + ) + return results + + +def _check_npm_packages(packages: list[str]) -> list[CheckResult]: + """Check which global npm packages are installed.""" + npm_path = shutil.which("npm") + if not npm_path: + return [ + CheckResult(name=f"npm: {p}", status=CheckStatus.WARN, message="npm not found — skipping") for p in packages + ] + + try: + result = subprocess.run( # noqa: S603 + [npm_path, "list", "-g", "--depth=0", "--json"], + capture_output=True, + text=True, + timeout=15, + ) + data = json.loads(result.stdout or "{}") + deps = data.get("dependencies", {}) + except Exception: + return [ + CheckResult(name=f"npm: {p}", status=CheckStatus.WARN, message="Could not run npm list") for p in packages + ] + + results = [] + for pkg in packages: + if pkg in deps: + version = deps[pkg].get("version", "?") + results.append( + CheckResult( + name=f"npm: {pkg}", + status=CheckStatus.OK, + message=f"{version})", + ) + ) + else: + results.append( + CheckResult( + name=f"npm: {pkg}", + status=CheckStatus.WARN, + message="Not installed", + ) + ) + return results + + +# System-level binaries that should always be checked +SYSTEM_TOOLS = ["uv", "gh", "git", "npm", "psql"] + + +def check_system_tools() -> list[CheckResult]: + """Check presence of required system-level binaries.""" + results: list[CheckResult] = [] + for tool in SYSTEM_TOOLS: + path = shutil.which(tool) + if path: + # Get version where possible + version = _get_tool_version(path, tool) + msg = f"{version}" if version else f"Found at {path}" + results.append(CheckResult(name=tool, status=CheckStatus.OK, message=msg)) + else: + results.append(CheckResult(name=tool, status=CheckStatus.WARN, message="Not found")) + return results + + +def _get_tool_version(path: str, tool: str) -> str: + """Try to get a tool's version string. Returns empty string on failure.""" + try: + result = subprocess.run( # noqa: S603 + [path, "--version"], + capture_output=True, + text=True, + timeout=5, + ) + output = (result.stdout or result.stderr or "").strip() + # Take first line only + return output.splitlines()[0] if output else "" + except Exception: + return "" + + +def check_tool_versions(tools_config: dict) -> list[CheckResult]: + """Check uv and npm tool installations from config.""" + results: list[CheckResult] = [] + + uv_tools = tools_config.get("uv", []) + if uv_tools: + results.extend(_check_uv_tools(uv_tools)) + + npm_packages = tools_config.get("npm", []) + if npm_packages: + results.extend(_check_npm_packages(npm_packages)) + + return results + + +def list_venvs(code_root: Path, versions: list[str]) -> list[CheckResult]: + """Check virtual environments under code_root/venvs/.""" + venvs_dir = code_root / "venvs" + results: list[CheckResult] = [] + + if not venvs_dir.exists(): + for version in versions: + results.append( + CheckResult( + name=f"venv: {version}", + status=CheckStatus.WARN, + message="venvs/ directory does not exist", + ) + ) + return results + + for version in versions: + venv_path = venvs_dir / version + python_bin = venv_path / "bin" / "python" + + if not venv_path.exists(): + results.append( + CheckResult( + name=f"venv: {version}", + status=CheckStatus.WARN, + message="Not created", + ) + ) + continue + + if not python_bin.exists(): + results.append( + CheckResult( + name=f"venv: {version}", + status=CheckStatus.WARN, + message="Missing bin/python", + ) + ) + continue + + try: + proc = subprocess.run( # noqa: S603 + [str(python_bin), "--version"], + capture_output=True, + text=True, + timeout=5, + ) + py_version = (proc.stdout or proc.stderr or "").strip() + # Extract just the version number e.g. "Python 3.12.3" → "3.12.3" + py_ver_clean = py_version.replace("Python ", "").strip() or "?" + results.append( + CheckResult( + name=f"venv: {version}", + status=CheckStatus.OK, + message=f"Python {py_ver_clean}", + ) + ) + except Exception: + results.append( + CheckResult( + name=f"venv: {version}", + status=CheckStatus.WARN, + message="bin/python exists but could not run", + ) + ) + + return results + + +def run_doctor(code_root: Path) -> dict[str, list[CheckResult]]: + """Run all health checks and return grouped results.""" + groups: dict[str, list[CheckResult]] = {} + + # --- Configuration --- + config_result = check_config(code_root) + groups["Configuration"] = [config_result] + + # Try to load config for downstream checks + config: ConfigModel | None = None + config_path = code_root / "config.toml" + if config_path.exists(): + try: + with open(config_path, "rb") as f: + raw = tomli.load(f) + config = ConfigModel(**raw) + except Exception: + # Config load failed; skip downstream checks, will use empty config + config = None + + # --- Connectivity --- + groups["Connectivity"] = [check_github_ssh()] + + # --- Tools --- + tool_results = check_system_tools() + + if config and (config.tools.uv or config.tools.npm): + tools_config = { + "uv": config.tools.uv, + "npm": config.tools.npm, + } + tool_results.extend(check_tool_versions(tools_config)) + + groups["Tools"] = tool_results + + # --- Virtual Environments --- + versions = config.versions if config else [] + groups["Virtual Environments"] = list_venvs(code_root, versions) + + return groups diff --git a/trobz_local/main.py b/trobz_local/main.py index 8c41ca0..bcd2c5b 100644 --- a/trobz_local/main.py +++ b/trobz_local/main.py @@ -5,10 +5,13 @@ import git import typer from rich import print as rprint +from rich.console import Console from rich.progress import Progress, TaskID +from rich.table import Table from rich.tree import Tree from .concurrency import TaskResult, run_tasks +from .doctor import CheckStatus, run_doctor from .installers import ( install_npm_packages, install_scripts, @@ -193,27 +196,43 @@ def pull_repos( # noqa: C901 typer.secho("\nAll repositories updated successfully.", fg=typer.colors.GREEN) +def _iter_org_entries(org_repos, odoo_versions): + """Yield (repo_name, branch) pairs for an org's repo list. + + Plain strings use all configured versions; [name, [branch, ...]] entries + use their explicit branch list. + """ + for entry in org_repos: + if isinstance(entry, str): + for version in odoo_versions: + yield entry, str(version) + else: + for branch in entry[1]: + yield entry[0], str(branch) + + def _get_tasks(odoo_versions, repos_config, code_root, repo_filter): tasks = [] for version in odoo_versions: - if "odoo" in repos_config: - for repo_name in repos_config["odoo"]: - if repo_name in ODOO_URLS and (not repo_filter or repo_name in repo_filter): - tasks.append({ - "repo_name": repo_name, - "repo_path": code_root / "odoo" / repo_name / version, - "repo_url": ODOO_URLS[repo_name], - "version": str(version), - }) - if "oca" in repos_config: - for repo_name in repos_config["oca"]: - if not repo_filter or repo_name in repo_filter: - tasks.append({ - "repo_name": repo_name, - "repo_path": code_root / "oca" / str(version) / repo_name, - "repo_url": f"git@github.com:OCA/{repo_name}.git", - "version": str(version), - }) + for repo_name in repos_config.get("odoo", []): + if repo_name in ODOO_URLS and (not repo_filter or repo_name in repo_filter): + tasks.append({ + "repo_name": repo_name, + "repo_path": code_root / "odoo" / repo_name / version, + "repo_url": ODOO_URLS[repo_name], + "version": str(version), + }) + for org, org_repos in repos_config.items(): + if org == "odoo": + continue + for repo_name, branch in _iter_org_entries(org_repos, odoo_versions): + if not repo_filter or repo_name in repo_filter: + tasks.append({ + "repo_name": repo_name, + "repo_path": code_root / org / branch / repo_name, + "repo_url": f"git@github.com:{org}/{repo_name}.git", + "version": branch, + }) return tasks @@ -542,3 +561,45 @@ def ensure_db_user(ctx: typer.Context): typer.secho(f"✓ PostgreSQL user '{username}' is ready for Odoo development", fg=typer.colors.GREEN) typer.echo() typer.secho("⚠️ WARNING: Using dev-only credentials (odoo:odoo). Never use in production!", fg=typer.colors.YELLOW) + + +_STATUS_ICONS = { + CheckStatus.OK: "[green]OK[/green]", + CheckStatus.WARN: "[yellow]!![/yellow]", + CheckStatus.FAIL: "[red]FAIL[/red]", +} + + +@app.command() +def doctor(): + code_root = get_code_root() + groups = run_doctor(code_root) + + console = Console() + has_fail = False + counts = {CheckStatus.OK: 0, CheckStatus.WARN: 0, CheckStatus.FAIL: 0} + + for group_name, results in groups.items(): + table = Table(title=group_name, show_header=True, title_style="bold cyan") + table.add_column("Status", width=6, justify="center") + table.add_column("Check", min_width=15) + table.add_column("Details") + + for r in results: + counts[r.status] += 1 + if r.status == CheckStatus.FAIL: + has_fail = True + table.add_row(_STATUS_ICONS[r.status], r.name, r.message) + + console.print(table) + console.print() + + summary = ( + f"[green]{counts[CheckStatus.OK]} passed[/green], " + f"[yellow]{counts[CheckStatus.WARN]} warnings[/yellow], " + f"[red]{counts[CheckStatus.FAIL]} failures[/red]" + ) + console.print(f"Summary: {summary}") + + if has_fail: + raise typer.Exit(code=1) diff --git a/trobz_local/utils.py b/trobz_local/utils.py index 88ebdd9..4ef6bec 100644 --- a/trobz_local/utils.py +++ b/trobz_local/utils.py @@ -8,7 +8,7 @@ import git import tomli import typer -from pydantic import BaseModel, Field, ValidationError, field_validator +from pydantic import BaseModel, ConfigDict, Field, ValidationError, field_validator, model_validator from rich import print as rprint from rich.progress import ( Progress, @@ -19,15 +19,30 @@ VERSION_REGEX = re.compile(r"^(?:\d+\.\d+|master)$") ARCH_PACKAGES = [ + "git", "gcc", - "postgresql", + "cyrus-sasl", + "libldap", + "openssl", # cryptography + "libffi", # cairosvg + "libxml2", # lxml, pysaml2 + "libxslt", # lxml + "libjpeg-turbo", "postgresql-libs", - "libxml2", - "libxslt", - "libjpeg", "libsass", + "cracklib", + "geos", # shapely + "xmlsec", # pysaml2 + "zbar", # pyzbar + "cairo", # cairosvg + "cups", # pycups + "fontconfig", + "graphviz", + "ghostscript", + "gsfonts", + "poppler", # pdf2image + "postgresql", "base-devel", - "git", ] UBUNTU_PACKAGES = [ @@ -35,13 +50,27 @@ "gcc", "libsasl2-dev", "libldap2-dev", - "libssl-dev", - "libffi-dev", - "libxml2-dev", - "libxslt1-dev", + "libssl-dev", # cryptography + "libffi-dev", # cairosvg + "libxml2-dev", # lxml, pysaml2 + "libxslt1-dev", # lxml "libjpeg-dev", "libpq-dev", "libsass-dev", + "libcrack2-dev", + "libgeos-dev", # shapely + "libxmlsec1-dev", # pysaml2 + "libxmlsec1-openssl", # pysaml2 + "libzbar0", # pyzbar + "libzbar-dev", # pyzbar + "libcairo2", # cairosvg + "libcups2-dev", # pycups + "fontconfig", + "fontconfig-config", + "graphviz", + "ghostscript", + "gsfonts", + "poppler-utils", # pdf2image "postgresql", "postgresql-client", "postgresql-contrib", @@ -79,18 +108,58 @@ def __init__(self, pkg: str): super().__init__(f"Invalid npm package name: {pkg}") +class InvalidRepoOrgConfigError(TypeError): + def __init__(self, org: str): + super().__init__(f"[repos.{org}] must be a list") + + +class InvalidRepoEntryError(ValueError): + def __init__(self, org: str): + super().__init__(f"Invalid entry in [repos.{org}]: must be a name or [name, [branch, ...]]") + + +_REPO_NAME_RE = re.compile(r"^[a-zA-Z0-9._-]+$") + + +def _validate_repo_entry(entry: object, org: str) -> None: + if isinstance(entry, str): + if not _REPO_NAME_RE.match(entry): + raise InvalidRepoNameError(entry) + elif ( + isinstance(entry, list) + and len(entry) == 2 + and isinstance(entry[0], str) + and isinstance(entry[1], list) + and all(isinstance(b, str) for b in entry[1]) + ): + if not _REPO_NAME_RE.match(entry[0]): + raise InvalidRepoNameError(entry[0]) + else: + raise InvalidRepoEntryError(org) + + class RepoConfig(BaseModel): + model_config = ConfigDict(extra="allow") + odoo: list[str] = [] - oca: list[str] = [] - @field_validator("*") + @field_validator("odoo") @classmethod - def validate_repo_names(cls, v: list[str]): + def validate_odoo_repos(cls, v: list[str]) -> list[str]: for name in v: - if not re.match(r"^[a-zA-Z0-9._-]+$", name): + if not _REPO_NAME_RE.match(name): raise InvalidRepoNameError(name) return v + @model_validator(mode="after") + def validate_orgs(self): + for org, repos in self.model_extra.items(): + if not isinstance(repos, list): + raise InvalidRepoOrgConfigError(org) + for entry in repos: + _validate_repo_entry(entry, org) + return self + class ScriptItem(BaseModel): """Configuration for a script to download and execute."""