From ec08cb1fac1975271fa7c05d65a7ecc042fe6e96 Mon Sep 17 00:00:00 2001 From: Prince Roshan Date: Sat, 2 May 2026 01:37:40 +0530 Subject: [PATCH 1/3] fix(cli): reorganize internal CLI packages --- AGENTS.md | 4 +- cmd/mcp-runtime/main.go | 9 +- docs/cluster-readiness.md | 2 +- docs/internals/README.md | 15 +- docs/internals/cmd-mcp-runtime.md | 33 +- docs/internals/go-package-reference.md | 2735 +++++++++++------ docs/internals/internal-cli.md | 105 +- docs/internals/pkg-metadata.md | 2 +- docs/scripts/generate_go_package_reference.py | 16 +- internal/cli/access/access.go | 53 +- internal/cli/{access.go => access/manager.go} | 141 +- .../manager_test.go} | 71 +- internal/cli/access/validation.go | 7 + internal/cli/access/validation_test.go | 29 + internal/cli/auth/auth.go | 13 +- internal/cli/auth/auth_test.go | 6 +- internal/cli/bootstrap/bootstrap.go | 76 +- internal/cli/build.go | 261 -- internal/cli/build_test.go | 789 ----- internal/cli/cert.go | 202 -- internal/cli/cert_letsencrypt_test.go | 16 - .../letsencrypt.go} | 133 +- internal/cli/certmanager/letsencrypt_test.go | 49 + internal/cli/certmanager/manager.go | 260 ++ .../manager_test.go} | 258 +- internal/cli/cluster/cert.go | 22 +- internal/cli/cluster/cluster.go | 12 +- internal/cli/{ => cluster}/cluster_test.go | 219 +- internal/cli/cluster/doctor.go | 8 +- .../doctor_impl.go} | 293 +- .../doctor_impl_test.go} | 541 ++-- internal/cli/cluster/ingress.go | 9 + .../cli/{cluster.go => cluster/manager.go} | 287 +- internal/cli/{ => core}/client.go | 19 +- internal/cli/{ => core}/config.go | 5 +- .../config_env_test.go} | 42 +- internal/cli/{ => core}/config_test.go | 20 +- internal/cli/{ => core}/constants.go | 5 +- internal/cli/core/doc.go | 6 + internal/cli/{ => core}/errors.go | 2 +- internal/cli/{ => core}/exec.go | 21 +- internal/cli/{ => core}/exec_test.go | 2 +- internal/cli/{ => core}/kubectl_runner.go | 2 +- internal/cli/{ => core}/main_test.go | 2 +- internal/cli/{ => core}/printer.go | 2 +- 
internal/cli/{ => core}/printer_test.go | 2 +- internal/cli/core/runtime.go | 54 + internal/cli/core/test_helpers_test.go | 15 + internal/cli/{ => core}/testing.go | 13 +- internal/cli/core/validation.go | 41 + internal/cli/core/validation_test.go | 65 + internal/cli/kube/file.go | 48 + internal/cli/kube/file_test.go | 55 + internal/cli/kube/manifest.go | 128 + internal/cli/kube/manifest_test.go | 129 + internal/cli/kube/patch.go | 106 + internal/cli/kube/patch_test.go | 14 + internal/cli/kubeerr/kubeerr.go | 39 + internal/cli/kubeerr/kubeerr_test.go | 33 + internal/cli/pipeline/command.go | 42 + internal/cli/pipeline/deploy.go | 119 + internal/cli/pipeline/generate.go | 86 + internal/cli/pipeline/pipeline.go | 219 -- internal/cli/pipeline/pipeline_test.go | 30 +- internal/cli/platform_ingress_test.go | 102 - .../baseurl.go} | 8 +- internal/cli/platformapi/baseurl_test.go | 19 + .../client.go} | 121 +- .../client_test.go} | 8 +- internal/cli/platformstatus/kubectl.go | 31 + internal/cli/platformstatus/workloads.go | 124 + internal/cli/registry/config/config.go | 141 + internal/cli/registry/config/config_test.go | 161 + internal/cli/registry/defaults.go | 36 + .../cli/{registry.go => registry/manager.go} | 536 ++-- .../manager_test.go} | 549 +--- internal/cli/registry/ref/ref.go | 27 + internal/cli/registry/ref/ref_test.go | 47 + internal/cli/registry/registry.go | 18 +- internal/cli/registry/resolve/resolver.go | 112 + .../cli/registry/resolve/resolver_test.go | 177 ++ internal/cli/resource_helpers.go | 166 - internal/cli/resource_helpers_test.go | 120 - internal/cli/root/commands.go | 9 +- internal/cli/root/doc.go | 4 +- internal/cli/runtime.go | 59 - .../cli/{sentinel.go => sentinel/manager.go} | 96 +- .../manager_test.go} | 38 +- internal/cli/sentinel/sentinel.go | 10 +- internal/cli/server/build.go | 4 +- internal/cli/server/build_image.go | 192 ++ internal/cli/server/build_image_test.go | 425 +++ internal/cli/{server.go => server/manager.go} | 277 +- 
.../manager_config_test.go} | 16 +- .../manager_test.go} | 224 +- internal/cli/server/server.go | 34 +- .../cli/server/server_status_print_test.go | 148 + internal/cli/server/validation.go | 11 + internal/cli/server/validation_test.go | 57 + .../assetpath/paths.go} | 22 +- .../assetpath/paths_test.go} | 22 +- internal/cli/setup/config_plan_test.go | 30 + internal/cli/setup/flow.go | 151 + .../helpers_test.go} | 437 +-- .../ingressmanifest/render.go} | 42 +- .../cli/setup/ingressmanifest/render_test.go | 73 + .../cli/{setup_plan.go => setup/plan/plan.go} | 31 +- .../plan_flow_test.go} | 257 +- internal/cli/{setup.go => setup/platform.go} | 1247 ++++---- internal/cli/setup/setup.go | 28 +- .../cli/{setup_steps.go => setup/steps.go} | 20 +- .../steps_test.go} | 50 +- internal/cli/setup/tls_flags_test.go | 45 + internal/cli/status.go | 278 -- .../platform_status_test.go} | 259 +- internal/cli/status/status.go | 115 +- 116 files changed, 8561 insertions(+), 6695 deletions(-) rename internal/cli/{access.go => access/manager.go} (53%) rename internal/cli/{access_test.go => access/manager_test.go} (52%) create mode 100644 internal/cli/access/validation.go create mode 100644 internal/cli/access/validation_test.go delete mode 100644 internal/cli/build.go delete mode 100644 internal/cli/build_test.go delete mode 100644 internal/cli/cert.go delete mode 100644 internal/cli/cert_letsencrypt_test.go rename internal/cli/{cert_letsencrypt.go => certmanager/letsencrypt.go} (62%) create mode 100644 internal/cli/certmanager/letsencrypt_test.go create mode 100644 internal/cli/certmanager/manager.go rename internal/cli/{cert_test.go => certmanager/manager_test.go} (59%) rename internal/cli/{ => cluster}/cluster_test.go (85%) rename internal/cli/{cluster_doctor.go => cluster/doctor_impl.go} (87%) rename internal/cli/{cluster_doctor_test.go => cluster/doctor_impl_test.go} (65%) create mode 100644 internal/cli/cluster/ingress.go rename internal/cli/{cluster.go => cluster/manager.go} (51%) 
rename internal/cli/{ => core}/client.go (82%) rename internal/cli/{ => core}/config.go (97%) rename internal/cli/{setup_test.go => core/config_env_test.go} (81%) rename internal/cli/{ => core}/config_test.go (88%) rename internal/cli/{ => core}/constants.go (93%) create mode 100644 internal/cli/core/doc.go rename internal/cli/{ => core}/errors.go (99%) rename internal/cli/{ => core}/exec.go (82%) rename internal/cli/{ => core}/exec_test.go (99%) rename internal/cli/{ => core}/kubectl_runner.go (96%) rename internal/cli/{ => core}/main_test.go (98%) rename internal/cli/{ => core}/printer.go (99%) rename internal/cli/{ => core}/printer_test.go (99%) create mode 100644 internal/cli/core/runtime.go create mode 100644 internal/cli/core/test_helpers_test.go rename internal/cli/{ => core}/testing.go (82%) create mode 100644 internal/cli/core/validation.go create mode 100644 internal/cli/core/validation_test.go create mode 100644 internal/cli/kube/file.go create mode 100644 internal/cli/kube/file_test.go create mode 100644 internal/cli/kube/manifest.go create mode 100644 internal/cli/kube/manifest_test.go create mode 100644 internal/cli/kube/patch.go create mode 100644 internal/cli/kube/patch_test.go create mode 100644 internal/cli/kubeerr/kubeerr.go create mode 100644 internal/cli/kubeerr/kubeerr_test.go create mode 100644 internal/cli/pipeline/command.go create mode 100644 internal/cli/pipeline/deploy.go create mode 100644 internal/cli/pipeline/generate.go delete mode 100644 internal/cli/pipeline/pipeline.go delete mode 100644 internal/cli/platform_ingress_test.go rename internal/cli/{platform_url.go => platformapi/baseurl.go} (53%) create mode 100644 internal/cli/platformapi/baseurl_test.go rename internal/cli/{platform_client.go => platformapi/client.go} (83%) rename internal/cli/{platform_client_test.go => platformapi/client_test.go} (91%) create mode 100644 internal/cli/platformstatus/kubectl.go create mode 100644 internal/cli/platformstatus/workloads.go create mode 
100644 internal/cli/registry/config/config.go create mode 100644 internal/cli/registry/config/config_test.go create mode 100644 internal/cli/registry/defaults.go rename internal/cli/{registry.go => registry/manager.go} (59%) rename internal/cli/{registry_test.go => registry/manager_test.go} (57%) create mode 100644 internal/cli/registry/ref/ref.go create mode 100644 internal/cli/registry/ref/ref_test.go create mode 100644 internal/cli/registry/resolve/resolver.go create mode 100644 internal/cli/registry/resolve/resolver_test.go delete mode 100644 internal/cli/resource_helpers.go delete mode 100644 internal/cli/resource_helpers_test.go delete mode 100644 internal/cli/runtime.go rename internal/cli/{sentinel.go => sentinel/manager.go} (52%) rename internal/cli/{sentinel_test.go => sentinel/manager_test.go} (52%) create mode 100644 internal/cli/server/build_image.go create mode 100644 internal/cli/server/build_image_test.go rename internal/cli/{server.go => server/manager.go} (67%) rename internal/cli/{server_config_test.go => server/manager_config_test.go} (92%) rename internal/cli/{server_test.go => server/manager_test.go} (74%) create mode 100644 internal/cli/server/server_status_print_test.go create mode 100644 internal/cli/server/validation.go create mode 100644 internal/cli/server/validation_test.go rename internal/cli/{asset_paths.go => setup/assetpath/paths.go} (63%) rename internal/cli/{asset_paths_test.go => setup/assetpath/paths_test.go} (78%) create mode 100644 internal/cli/setup/config_plan_test.go create mode 100644 internal/cli/setup/flow.go rename internal/cli/{setup_helpers_test.go => setup/helpers_test.go} (80%) rename internal/cli/{platform_ingress.go => setup/ingressmanifest/render.go} (69%) create mode 100644 internal/cli/setup/ingressmanifest/render_test.go rename internal/cli/{setup_plan.go => setup/plan/plan.go} (78%) rename internal/cli/{setup_plan_test.go => setup/plan_flow_test.go} (76%) rename internal/cli/{setup.go => setup/platform.go} 
(56%) rename internal/cli/{setup_steps.go => setup/steps.go} (90%) rename internal/cli/{setup_steps_test.go => setup/steps_test.go} (88%) create mode 100644 internal/cli/setup/tls_flags_test.go delete mode 100644 internal/cli/status.go rename internal/cli/{status_test.go => status/platform_status_test.go} (65%) diff --git a/AGENTS.md b/AGENTS.md index 2c3f937..6f100d3 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -8,7 +8,7 @@ If instructions conflict, prefer **this repo** (`README`, CRDs, `v1alpha1` types | Area | Path | Notes | |------|------|--------| -| User-facing CLI | `cmd/mcp-runtime/`, `internal/cli/root/`, `internal/cli/` | Entrypoint, foldered Cobra command routing, and command behavior for `setup`, `status`, `registry`, `server`, `access`, … | +| User-facing CLI | `cmd/mcp-runtime/`, `internal/cli/root/`, `internal/cli/<command>/`, `internal/cli/core/` | Entrypoint, foldered Cobra command routing, command-owned behavior for `setup`, `status`, `registry`, `server`, `access`, …, and shared CLI kernel code | | Operator (controller) | `cmd/operator/`, `internal/operator/` | `MCPServer` reconciliation, ingress, gateway wiring | | API & CRD types | `api/v1alpha1/` | Source of truth for object shapes; CRD YAML in `config/crd/bases/` | | Access control (shared) | `pkg/access/` | Grants, sessions, policy pieces used by API and gateway | @@ -21,7 +21,7 @@ If instructions conflict, prefer **this repo** (`README`, CRDs, `v1alpha1` types | E2E | `test/e2e/`, `test/integration/` | Kind script and envtest-based integration tests | | Agent tool config | `.claude/`, `.codex/skills/` | `.claude/skills` should symlink to `../.codex/skills` so Claude Desktop and the Codex CLI use the same local skills | -**Patterns worth mirroring:** search for similar packages before adding new abstractions; keep CLI errors consistent with `internal/cli/errors.go` and `pkg/errx/`.
+**Patterns worth mirroring:** search for similar packages before adding new abstractions; keep CLI errors consistent with `internal/cli/core/errors.go` and `pkg/errx/`. ## Build, test, and quality (before you push) diff --git a/cmd/mcp-runtime/main.go b/cmd/mcp-runtime/main.go index 4de73fa..c9c41da 100644 --- a/cmd/mcp-runtime/main.go +++ b/cmd/mcp-runtime/main.go @@ -8,7 +8,7 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" cliroot "mcp-runtime/internal/cli/root" ) @@ -44,9 +44,14 @@ var rootCmd = &cobra.Command{ - MCP server deployments - Platform configuration`, Version: fmt.Sprintf("%s (commit: %s, built: %s)", version, commit, date), + // Runtime errors should not trigger Cobra's usage/help dump; flag/arg + // validation errors still do (those happen before RunE). main() prints + // the error itself, so silence Cobra's own error print to avoid duplicates. + SilenceUsage: true, + SilenceErrors: true, PersistentPreRun: func(cmd *cobra.Command, args []string) { // Set debug mode globally so logStructuredError can check it - cli.SetDebugMode(debug) + core.SetDebugMode(debug) }, } diff --git a/docs/cluster-readiness.md b/docs/cluster-readiness.md index fec7607..b236bed 100644 --- a/docs/cluster-readiness.md +++ b/docs/cluster-readiness.md @@ -424,7 +424,7 @@ Missing pieces are warnings, not errors — the command surfaces them so you can `./bin/mcp-runtime cluster doctor` runs post-install diagnostics: - Detects your distribution (k3s / kind / minikube / docker-desktop / generic). -- Checks the installed MCP Runtime namespaces, CRDs, operator, Traefik ingress, registry, Sentinel, and MCPServer reconciliation path, including readiness of the temporary smoke deployment. +- Checks the installed MCP Runtime namespaces, CRDs, operator, Traefik ingress, registry, Sentinel, and MCPServer reconciliation path. 
The MCPServer smoke uses an existing ready app image when available; otherwise it falls back to `registry.k8s.io/pause:3.9` and validates deployment/service/ingress reconciliation plus pod scheduling without a TCP readiness wait. - Prefers k3s' bundled Traefik in `kube-system/traefik` when the active cluster is k3s, then falls back to the repo-managed `traefik/traefik` install. - Verifies registry reachability, registry image-pull smoke behavior, and common pod image-pull failures. - Reports `http: server gave HTTP response to HTTPS client` when kubelet/containerd tried HTTPS against the HTTP dev registry, including the affected pod and image where possible. diff --git a/docs/internals/README.md b/docs/internals/README.md index 954e53a..e76b4bf 100644 --- a/docs/internals/README.md +++ b/docs/internals/README.md @@ -102,11 +102,12 @@ Governance-related changes usually span `api/v1alpha1/access_types.go`, `pkg/acc ```mermaid flowchart TB Cmd[cmd/mcp-runtime] --> CLIRoot[internal/cli/root] - CLIRoot --> InternalCLI[internal/cli] - InternalCLI --> Metadata[pkg/metadata] - InternalCLI --> Manifest[pkg/manifest] - InternalCLI --> K8sClient[pkg/k8sclient] - InternalCLI --> Access[pkg/access] + CLIRoot --> CLICommands[internal/cli/] + CLICommands --> CLICore[internal/cli/core] + CLICommands --> Metadata[pkg/metadata] + CLICommands --> Manifest[pkg/manifest] + CLICommands --> K8sClient[pkg/k8sclient] + CLICommands --> Access[pkg/access] CmdOp[cmd/operator] --> Operator[internal/operator] Operator --> API[api/v1alpha1] Operator --> K8sClient @@ -118,7 +119,7 @@ flowchart TB Metadata --> API ``` -Keep shared behavior in `pkg/` only when multiple binaries or services need it. CLI top-level command routing belongs in `internal/cli/root` and `internal/cli/`; CLI-only behavior belongs in `internal/cli`; reconciliation behavior belongs in `internal/operator`; HTTP service glue belongs near the service that owns the endpoint. 
+Keep shared behavior in `pkg/` only when multiple binaries or services need it. CLI top-level command routing belongs in `internal/cli/root` and `internal/cli/<command>/`; CLI-only shared infrastructure belongs in `internal/cli/core`; reconciliation behavior belongs in `internal/operator`; HTTP service glue belongs near the service that owns the endpoint. ## Learning path @@ -148,7 +149,7 @@ workflows. | Change | Read first | Verify with | |---|---|---| -| Add or change a CLI flag | `internal/cli/root`, `internal/cli`, `cmd/mcp-runtime`, golden CLI tests | `go test ./internal/cli/... ./test/golden/... -count=1` | +| Add or change a CLI flag | `internal/cli/root`, `internal/cli/<command>/`, `internal/cli/core`, `cmd/mcp-runtime`, golden CLI tests | `go test ./internal/cli/... ./test/golden/... -count=1` | | Change a CRD field | `api/v1alpha1`, CRD YAML, operator reconciliation, docs/API reference | `go test ./api/v1alpha1/... ./internal/operator/... -count=1` | | Change generated manifests | `pkg/metadata`, `pkg/manifest`, `config/`, examples | targeted package tests plus manifest diff review | | Change reconciliation behavior | `internal/operator`, API types, k8s helpers | `go test ./internal/operator/... -race -count=1` | diff --git a/docs/internals/cmd-mcp-runtime.md b/docs/internals/cmd-mcp-runtime.md index 4bc5975..f0f19d1 100644 --- a/docs/internals/cmd-mcp-runtime.md +++ b/docs/internals/cmd-mcp-runtime.md @@ -21,8 +21,9 @@ go doc -cmd ./cmd/mcp-runtime The entrypoint should not contain business logic for setup, registry, server, access, or Sentinel behavior. Route top-level commands through -`internal/cli/root`, and keep command behavior in `internal/cli` until a focused -migration moves that domain into its own command package. +`internal/cli/root`. Command folders should own Cobra wiring and, where already +migrated, package-local managers; shared CLI-only infrastructure lives in +`internal/cli/core`.
## Command Tree @@ -30,16 +31,16 @@ The root command wires these internal command groups: | Command | Routing package | Behavior files | |---|---|---| -| `bootstrap` | `internal/cli/bootstrap` | `internal/cli/bootstrap.go` | -| `cluster` | `internal/cli/cluster` | `internal/cli/cluster.go` and `cluster_doctor.go` | -| `setup` | `internal/cli/setup` | `internal/cli/setup.go`, `setup_plan.go`, `setup_steps.go` | -| `status` | `internal/cli/status` | `internal/cli/status.go` | -| `registry` | `internal/cli/registry` | `internal/cli/registry.go` | -| `server` | `internal/cli/server` | `internal/cli/server.go`, `build.go` | -| `pipeline` | `internal/cli/pipeline` | `internal/cli/pipeline.go` | -| `access` | `internal/cli/access` | `internal/cli/access.go` | -| `auth` | `internal/cli/auth` | `internal/cli/auth.go` | -| `sentinel` | `internal/cli/sentinel` | `internal/cli/sentinel.go` | +| `bootstrap` | `internal/cli/bootstrap` | `bootstrap.go` | +| `cluster` | `internal/cli/cluster` | `cluster.go`, `manager.go`, `doctor.go`, `doctor_impl.go`, `register.go`, … | +| `setup` | `internal/cli/setup` | `setup.go`, `platform.go`, `flow.go`, `steps.go`, `providers.go`, setup-owned helpers under `internal/cli/setup/` | +| `status` | `internal/cli/status` | `status.go`, shared workload/probe helpers in `internal/cli/platformstatus` | +| `registry` | `internal/cli/registry` | `registry.go`, `manager.go`, `defaults.go`, registry-owned helpers under `internal/cli/registry/` | +| `server` | `internal/cli/server` | `server.go`, `manager.go`, `validation.go`, `build.go`, `build_image.go`, server-owned helpers under `internal/cli/server/` | +| `pipeline` | `internal/cli/pipeline` | `command.go`, `generate.go`, `deploy.go` | +| `access` | `internal/cli/access` | `access.go`, `manager.go`, `validation.go` | +| `auth` | `internal/cli/auth` | `auth.go` | +| `sentinel` | `internal/cli/sentinel` | `sentinel.go`, `manager.go`, shared workload/probe helpers in `internal/cli/platformstatus` | 
When adding a command, wire it here only after the implementation has focused package tests and help text is ready for golden snapshots. @@ -53,10 +54,10 @@ CLI UX changes should preserve these expectations: - Logs are readable in terminals and CI. - Global flags stay minimal; feature-specific flags belong on their command. - Commands that shell out to external tools are testable through runner - abstractions in `internal/cli`. -- Top-level command folders under `internal/cli/` should stay thin - while they delegate to `internal/cli`; move behavior there only as a focused - follow-up with package-local tests. + abstractions in `internal/cli/core`. +- Top-level command folders under `internal/cli/` should keep Cobra + wiring thin and delegate to package-local managers or explicit shared + services. Before changing this package, run: diff --git a/docs/internals/go-package-reference.md b/docs/internals/go-package-reference.md index d935a2e..16bbf10 100644 --- a/docs/internals/go-package-reference.md +++ b/docs/internals/go-package-reference.md @@ -15,7 +15,21 @@ python3 docs/scripts/generate_go_package_reference.py - [Metadata helpers](#metadata-helpers) `mcp-runtime/pkg/metadata` - [Operator internals](#operator-internals) `mcp-runtime/internal/operator` - [CLI command routing](#cli-command-routing) `mcp-runtime/internal/cli/root` -- [CLI internals](#cli-internals) `mcp-runtime/internal/cli` +- [CLI core](#cli-core) `mcp-runtime/internal/cli/core` +- [CLI Kubernetes helpers](#cli-kubernetes-helpers) `mcp-runtime/internal/cli/kube` +- [CLI Kubernetes errors](#cli-kubernetes-errors) `mcp-runtime/internal/cli/kubeerr` +- [CLI cluster](#cli-cluster) `mcp-runtime/internal/cli/cluster` +- [CLI cert-manager](#cli-cert-manager) `mcp-runtime/internal/cli/certmanager` +- [CLI platform API](#cli-platform-api) `mcp-runtime/internal/cli/platformapi` +- [CLI platform status](#cli-platform-status) `mcp-runtime/internal/cli/platformstatus` +- [CLI registry](#cli-registry) 
`mcp-runtime/internal/cli/registry` +- [CLI registry config](#cli-registry-config) `mcp-runtime/internal/cli/registry/config` +- [CLI registry references](#cli-registry-references) `mcp-runtime/internal/cli/registry/ref` +- [CLI registry resolution](#cli-registry-resolution) `mcp-runtime/internal/cli/registry/resolve` +- [CLI server](#cli-server) `mcp-runtime/internal/cli/server` +- [CLI setup asset paths](#cli-setup-asset-paths) `mcp-runtime/internal/cli/setup/assetpath` +- [CLI setup ingress manifests](#cli-setup-ingress-manifests) `mcp-runtime/internal/cli/setup/ingressmanifest` +- [CLI setup plan](#cli-setup-plan) `mcp-runtime/internal/cli/setup/plan` - [CLI binary](#cli-binary) `mcp-runtime/cmd/mcp-runtime` - [Operator binary](#operator-binary) `mcp-runtime/cmd/operator` @@ -2015,8 +2029,8 @@ go doc -all ./internal/cli/root Package root provides the foldered CLI command routing layer for the mcp-runtime binary. -Each subpackage owns one top-level Cobra command boundary and delegates behavior -to the shared internal/cli implementation package. +Each subpackage owns one top-level Cobra command boundary and uses +internal/cli/core for shared CLI infrastructure. ### Jump To @@ -2038,237 +2052,156 @@ func AddCommands(root *cobra.Command, logger *zap.Logger) AddCommands registers every top-level mcp-runtime command on root. ``` -<a id="cli-internals"></a> -## CLI internals +<a id="cli-core"></a> +## CLI core -Package: `cli` -Import path: `mcp-runtime/internal/cli` +Package: `core` +Import path: `mcp-runtime/internal/cli/core` Source command: ```bash -go doc -all ./internal/cli +go doc -all ./internal/cli/core ``` -<a id="cli-internals-overview"></a> +<a id="cli-core-overview"></a> ### Overview -_No package overview is documented._ +Package cli contains shared CLI infrastructure used by command packages. + +Command-specific behavior belongs in internal/cli/<command>; this package is +limited to config, constants, errors, runtime composition, process execution, +kubectl clients, terminal output, and test doubles.
### Jump To -- [Overview](#cli-internals-overview) -- [Index](#cli-internals-index) -- [Constants](#cli-internals-constants) -- [Variables](#cli-internals-variables) -- [Functions](#cli-internals-functions) -- [Types](#cli-internals-types) +- [Overview](#cli-core-overview) +- [Index](#cli-core-index) +- [Constants](#cli-core-constants) +- [Variables](#cli-core-variables) +- [Functions](#cli-core-functions) +- [Types](#cli-core-types) - + ### Index -- [`Constants`](#cli-internals-constants) -- [`Variables`](#cli-internals-variables) -- [`func ApplyManifestContentWithNamespace(kubectl KubectlRunner, manifest, namespace string) error`](#cli-internals-func-applymanifestcontentwithnamespace-kubectl-kubectlrunner-manifest-namespace-string-error) -- [`func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error`](#cli-internals-func-buildimage-logger-zap-logger-servername-dockerfile-metadatafile-metadatadir-registryurl-tag-context-string-error) -- [`func BuildOperatorArgs(metricsAddr, probeAddr string, leaderElect, leaderElectChanged bool) []string`](#cli-internals-func-buildoperatorargs-metricsaddr-probeaddr-string-leaderelect-leaderelectchanged-bool-string) -- [`func ClusterIssuerNameForACME(staging bool) string`](#cli-internals-func-clusterissuernameforacme-staging-bool-string) -- [`func Cyan(msg string) string`](#cli-internals-func-cyan-msg-string-string) -- [`func Error(msg string)`](#cli-internals-func-error-msg-string) -- [`func GetAnalyticsIngestURLOverride() string`](#cli-internals-func-getanalyticsingesturloverride-string) -- [`func GetCertTimeout() time.Duration`](#cli-internals-func-getcerttimeout-time-duration) -- [`func GetClusterName() string`](#cli-internals-func-getclustername-string) -- [`func GetDefaultServerPort() int`](#cli-internals-func-getdefaultserverport-int) -- [`func GetDeploymentTimeout() time.Duration`](#cli-internals-func-getdeploymenttimeout-time-duration) -- [`func 
GetGatewayProxyImageOverride() string`](#cli-internals-func-getgatewayproxyimageoverride-string) -- [`func GetHelperPodTimeout() time.Duration`](#cli-internals-func-gethelperpodtimeout-time-duration) -- [`func GetMcpIngressHost() string`](#cli-internals-func-getmcpingresshost-string) -- [`func GetOperatorImageOverride() string`](#cli-internals-func-getoperatorimageoverride-string) -- [`func GetPlatformIngressHost() string`](#cli-internals-func-getplatformingresshost-string) -- [`func GetRegistryClusterIssuerName() string`](#cli-internals-func-getregistryclusterissuername-string) -- [`func GetRegistryEndpoint() string`](#cli-internals-func-getregistryendpoint-string) -- [`func GetRegistryIngressHost() string`](#cli-internals-func-getregistryingresshost-string) -- [`func GetRegistryPort() int`](#cli-internals-func-getregistryport-int) -- [`func GetSkopeoImage() string`](#cli-internals-func-getskopeoimage-string) -- [`func Green(msg string) string`](#cli-internals-func-green-msg-string-string) -- [`func HasPlatformClient() bool`](#cli-internals-func-hasplatformclient-bool) -- [`func Header(title string)`](#cli-internals-func-header-title-string) -- [`func Info(msg string)`](#cli-internals-func-info-msg-string) -- [`func IsDebugMode() bool`](#cli-internals-func-isdebugmode-bool) -- [`func LogStructuredError(logger *zap.Logger, err error, msg string)`](#cli-internals-func-logstructurederror-logger-zap-logger-err-error-msg-string) -- [`func NewSetupStepFailedError() error`](#cli-internals-func-newsetupstepfailederror-error) -- [`func NewWithSentinel(base error, msg string) error`](#cli-internals-func-newwithsentinel-base-error-msg-string-error) -- [`func NormalizePlatformAPIBaseURL(raw string) string`](#cli-internals-func-normalizeplatformapibaseurl-raw-string-string) -- [`func PrintDoctorReport(r DoctorReport)`](#cli-internals-func-printdoctorreport-r-doctorreport) -- [`func ReadFileAtPath(path string) ([]byte, 
error)`](#cli-internals-func-readfileatpath-path-string-byte-error) -- [`func Red(msg string) string`](#cli-internals-func-red-msg-string-string) -- [`func ResolveRegularFilePath(file string) (string, error)`](#cli-internals-func-resolveregularfilepath-file-string-string-error) -- [`func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string) error`](#cli-internals-func-runregistryprovision-mgr-registrymanager-url-username-password-operatorimage-string-error) -- [`func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helperNamespace string) error`](#cli-internals-func-runregistrypush-mgr-registrymanager-image-registryurl-name-mode-helpernamespace-string-error) -- [`func Section(title string)`](#cli-internals-func-section-title-string) -- [`func SentinelComponentKeys() []string`](#cli-internals-func-sentinelcomponentkeys-string) -- [`func SetDebugMode(enabled bool)`](#cli-internals-func-setdebugmode-enabled-bool) -- [`func SetupPlatform(logger *zap.Logger, plan SetupPlan) error`](#cli-internals-func-setupplatform-logger-zap-logger-plan-setupplan-error) -- [`func ShowPlatformStatus(logger *zap.Logger) error`](#cli-internals-func-showplatformstatus-logger-zap-logger-error) -- [`func SpinnerStart(msg string) func(success bool, finalMsg string)`](#cli-internals-func-spinnerstart-msg-string-func-success-bool-finalmsg-string) -- [`func Step(title string)`](#cli-internals-func-step-title-string) -- [`func Success(msg string)`](#cli-internals-func-success-msg-string) -- [`func Table(data [][]string)`](#cli-internals-func-table-data-string) -- [`func TableBoxed(data [][]string)`](#cli-internals-func-tableboxed-data-string) -- [`func ValidateStorageMode(mode string) error`](#cli-internals-func-validatestoragemode-mode-string-error) -- [`func ValidateTLSSetupCLIFlags(`](#cli-internals-func-validatetlssetupcliflags) -- [`func Warn(msg string)`](#cli-internals-func-warn-msg-string) -- [`func WrapWithSentinel(base, cause 
error, msg string) error`](#cli-internals-func-wrapwithsentinel-base-cause-error-msg-string-error) -- [`func WrapWithSentinelAndContext(base, cause error, msg string, context map[string]any) error`](#cli-internals-func-wrapwithsentinelandcontext-base-cause-error-msg-string-context-map-string-any-error) -- [`func Yellow(msg string) string`](#cli-internals-func-yellow-msg-string-string) -- [`type AccessManager struct`](#cli-internals-type-accessmanager-struct) -- [`func DefaultAccessManager(logger *zap.Logger) *AccessManager`](#cli-internals-func-defaultaccessmanager-logger-zap-logger-accessmanager) -- [`func NewAccessManager(kubectl *KubectlClient, logger *zap.Logger) *AccessManager`](#cli-internals-func-newaccessmanager-kubectl-kubectlclient-logger-zap-logger-accessmanager) -- [`func (m *AccessManager) ApplyAccessResource(file string) error`](#cli-internals-func-m-accessmanager-applyaccessresource-file-string-error) -- [`func (m *AccessManager) BindUseKubeFlag(cmd *cobra.Command)`](#cli-internals-func-m-accessmanager-bindusekubeflag-cmd-cobra-command) -- [`func (m *AccessManager) DeleteAccessResource(resource, name, namespace string) error`](#cli-internals-func-m-accessmanager-deleteaccessresource-resource-name-namespace-string-error) -- [`func (m *AccessManager) GetAccessResource(resource, name, namespace string) error`](#cli-internals-func-m-accessmanager-getaccessresource-resource-name-namespace-string-error) -- [`func (m *AccessManager) ListAccessResources(resource, namespace string, allNamespaces bool) error`](#cli-internals-func-m-accessmanager-listaccessresources-resource-namespace-string-allnamespaces-bool-error) -- [`func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, value bool) error`](#cli-internals-func-m-accessmanager-toggleaccessresource-resource-name-namespace-string-value-bool-error) -- [`type AnalyticsImageSet struct`](#cli-internals-type-analyticsimageset-struct) -- [`type CLIConfig 
struct`](#cli-internals-type-cliconfig-struct) -- [`func LoadCLIConfig() *CLIConfig`](#cli-internals-func-loadcliconfig-cliconfig) -- [`type CertManager struct`](#cli-internals-type-certmanager-struct) -- [`func NewCertManager(kubectl KubectlRunner, logger *zap.Logger) *CertManager`](#cli-internals-func-newcertmanager-kubectl-kubectlrunner-logger-zap-logger-certmanager) -- [`func (m *CertManager) Apply() error`](#cli-internals-func-m-certmanager-apply-error) -- [`func (m *CertManager) Status() error`](#cli-internals-func-m-certmanager-status-error) -- [`func (m *CertManager) Wait(timeout time.Duration) error`](#cli-internals-func-m-certmanager-wait-timeout-time-duration-error) -- [`type ClusterManager struct`](#cli-internals-type-clustermanager-struct) -- [`func DefaultClusterManager(logger *zap.Logger) *ClusterManager`](#cli-internals-func-defaultclustermanager-logger-zap-logger-clustermanager) -- [`func NewClusterManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *ClusterManager`](#cli-internals-func-newclustermanager-kubectl-kubectlclient-exec-executor-logger-zap-logger-clustermanager) -- [`func (m *ClusterManager) CheckClusterStatus() error`](#cli-internals-func-m-clustermanager-checkclusterstatus-error) -- [`func (m *ClusterManager) ConfigureCluster(ingress ingressOptions) error`](#cli-internals-func-m-clustermanager-configurecluster-ingress-ingressoptions-error) -- [`func (m *ClusterManager) ConfigureClusterWithValues(mode, manifest string, force bool) error`](#cli-internals-func-m-clustermanager-configureclusterwithvalues-mode-manifest-string-force-bool-error) -- [`func (m *ClusterManager) ConfigureKubeconfig(kubeconfig, context string) error`](#cli-internals-func-m-clustermanager-configurekubeconfig-kubeconfig-context-string-error) -- [`func (m *ClusterManager) ConfigureKubeconfigFromProvider(provider, region, clusterName, resourceGroup, project, zone, kubeconfig string) 
error`](#cli-internals-func-m-clustermanager-configurekubeconfigfromprovider-provider-region-clustername-resourcegroup-project-zone-kubeconfig-string-error) -- [`func (m *ClusterManager) EnsureNamespace(name string) error`](#cli-internals-func-m-clustermanager-ensurenamespace-name-string-error) -- [`func (m *ClusterManager) InitCluster(kubeconfig, context string) error`](#cli-internals-func-m-clustermanager-initcluster-kubeconfig-context-string-error) -- [`func (m *ClusterManager) KubectlRunner() KubectlRunner`](#cli-internals-func-m-clustermanager-kubectlrunner-kubectlrunner) -- [`func (m *ClusterManager) Logger() *zap.Logger`](#cli-internals-func-m-clustermanager-logger-zap-logger) -- [`func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string) error`](#cli-internals-func-m-clustermanager-provisioncluster-provider-region-string-nodecount-int-clustername-string-error) -- [`type ClusterManagerAPI interface`](#cli-internals-type-clustermanagerapi-interface) -- [`type Command interface`](#cli-internals-type-command-interface) -- [`type Distribution string`](#cli-internals-type-distribution-string) -- [`func DetectDistribution(kubectl KubectlRunner) Distribution`](#cli-internals-func-detectdistribution-kubectl-kubectlrunner-distribution) -- [`type DoctorCheck struct`](#cli-internals-type-doctorcheck-struct) -- [`type DoctorCheckProgress func(DoctorCheckProgressEvent) func(DoctorCheck)`](#cli-internals-type-doctorcheckprogress-func-doctorcheckprogressevent-func-doctorcheck) -- [`type DoctorCheckProgressEvent struct`](#cli-internals-type-doctorcheckprogressevent-struct) -- [`type DoctorReport struct`](#cli-internals-type-doctorreport-struct) -- [`func RunDoctor(kubectl KubectlRunner) DoctorReport`](#cli-internals-func-rundoctor-kubectl-kubectlrunner-doctorreport) -- [`func RunDoctorAndPrint(kubectl KubectlRunner) DoctorReport`](#cli-internals-func-rundoctorandprint-kubectl-kubectlrunner-doctorreport) -- [`func 
RunDoctorWithProgress(kubectl KubectlRunner, progress DoctorCheckProgress) DoctorReport`](#cli-internals-func-rundoctorwithprogress-kubectl-kubectlrunner-progress-doctorcheckprogress-doctorreport) -- [`func (r DoctorReport) AllOK() bool`](#cli-internals-func-r-doctorreport-allok-bool) -- [`type ExecSpec struct`](#cli-internals-type-execspec-struct) -- [`type ExecValidator func(ExecSpec) error`](#cli-internals-type-execvalidator-func-execspec-error) -- [`func AllowlistBins(allowed ...string) ExecValidator`](#cli-internals-func-allowlistbins-allowed-string-execvalidator) -- [`func NoControlChars() ExecValidator`](#cli-internals-func-nocontrolchars-execvalidator) -- [`func NoShellMeta() ExecValidator`](#cli-internals-func-noshellmeta-execvalidator) -- [`func PathUnder(root string) ExecValidator`](#cli-internals-func-pathunder-root-string-execvalidator) -- [`type Executor interface`](#cli-internals-type-executor-interface) -- [`type ExternalRegistryConfig struct`](#cli-internals-type-externalregistryconfig-struct) -- [`type KubectlClient struct`](#cli-internals-type-kubectlclient-struct) -- [`func NewKubectlClient(exec Executor) (*KubectlClient, error)`](#cli-internals-func-newkubectlclient-exec-executor-kubectlclient-error) -- [`func (c *KubectlClient) CombinedOutput(args []string) ([]byte, error)`](#cli-internals-func-c-kubectlclient-combinedoutput-args-string-byte-error) -- [`func (c *KubectlClient) CommandArgs(args []string) (Command, error)`](#cli-internals-func-c-kubectlclient-commandargs-args-string-command-error) -- [`func (c *KubectlClient) Output(args []string) ([]byte, error)`](#cli-internals-func-c-kubectlclient-output-args-string-byte-error) -- [`func (c *KubectlClient) Run(args []string) error`](#cli-internals-func-c-kubectlclient-run-args-string-error) -- [`func (c *KubectlClient) RunWithOutput(args []string, stdout, stderr io.Writer) error`](#cli-internals-func-c-kubectlclient-runwithoutput-args-string-stdout-stderr-io-writer-error) -- [`type 
KubectlRunner interface`](#cli-internals-type-kubectlrunner-interface) -- [`func DefaultKubectlRunner() KubectlRunner`](#cli-internals-func-defaultkubectlrunner-kubectlrunner) -- [`type MockCommand struct`](#cli-internals-type-mockcommand-struct) -- [`func (m *MockCommand) CombinedOutput() ([]byte, error)`](#cli-internals-func-m-mockcommand-combinedoutput-byte-error) -- [`func (m *MockCommand) Output() ([]byte, error)`](#cli-internals-func-m-mockcommand-output-byte-error) -- [`func (m *MockCommand) Run() error`](#cli-internals-func-m-mockcommand-run-error) -- [`func (m *MockCommand) SetStderr(w io.Writer)`](#cli-internals-func-m-mockcommand-setstderr-w-io-writer) -- [`func (m *MockCommand) SetStdin(r io.Reader)`](#cli-internals-func-m-mockcommand-setstdin-r-io-reader) -- [`func (m *MockCommand) SetStdout(w io.Writer)`](#cli-internals-func-m-mockcommand-setstdout-w-io-writer) -- [`type MockExecutor struct`](#cli-internals-type-mockexecutor-struct) -- [`func (m *MockExecutor) Command(name string, args []string, validators ...ExecValidator) (Command, error)`](#cli-internals-func-m-mockexecutor-command-name-string-args-string-validators-execvalidator-command-error) -- [`func (m *MockExecutor) HasCommand(name string) bool`](#cli-internals-func-m-mockexecutor-hascommand-name-string-bool) -- [`func (m *MockExecutor) LastCommand() ExecSpec`](#cli-internals-func-m-mockexecutor-lastcommand-execspec) -- [`func (m *MockExecutor) Reset()`](#cli-internals-func-m-mockexecutor-reset) -- [`type Printer struct`](#cli-internals-type-printer-struct) -- [`func (p *Printer) Cyan(msg string) string`](#cli-internals-func-p-printer-cyan-msg-string-string) -- [`func (p *Printer) Error(msg string)`](#cli-internals-func-p-printer-error-msg-string) -- [`func (p *Printer) Green(msg string) string`](#cli-internals-func-p-printer-green-msg-string-string) -- [`func (p *Printer) Header(title string)`](#cli-internals-func-p-printer-header-title-string) -- [`func (p *Printer) Info(msg 
string)`](#cli-internals-func-p-printer-info-msg-string) -- [`func (p *Printer) Printf(format string, a ...interface`](#cli-internals-func-p-printer-printf-format-string-a-interface) -- [`func (p *Printer) Println(a ...interface`](#cli-internals-func-p-printer-println-a-interface) -- [`func (p *Printer) Red(msg string) string`](#cli-internals-func-p-printer-red-msg-string-string) -- [`func (p *Printer) Section(title string)`](#cli-internals-func-p-printer-section-title-string) -- [`func (p *Printer) SpinnerStart(msg string) func(success bool, finalMsg string)`](#cli-internals-func-p-printer-spinnerstart-msg-string-func-success-bool-finalmsg-string) -- [`func (p *Printer) Step(title string)`](#cli-internals-func-p-printer-step-title-string) -- [`func (p *Printer) Success(msg string)`](#cli-internals-func-p-printer-success-msg-string) -- [`func (p *Printer) Table(data [][]string)`](#cli-internals-func-p-printer-table-data-string) -- [`func (p *Printer) TableBoxed(data [][]string)`](#cli-internals-func-p-printer-tableboxed-data-string) -- [`func (p *Printer) Warn(msg string)`](#cli-internals-func-p-printer-warn-msg-string) -- [`func (p *Printer) Yellow(msg string) string`](#cli-internals-func-p-printer-yellow-msg-string-string) -- [`type RegistryManager struct`](#cli-internals-type-registrymanager-struct) -- [`func DefaultRegistryManager(logger *zap.Logger) *RegistryManager`](#cli-internals-func-defaultregistrymanager-logger-zap-logger-registrymanager) -- [`func NewRegistryManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *RegistryManager`](#cli-internals-func-newregistrymanager-kubectl-kubectlclient-exec-executor-logger-zap-logger-registrymanager) -- [`func (m *RegistryManager) CheckRegistryStatus(namespace string) error`](#cli-internals-func-m-registrymanager-checkregistrystatus-namespace-string-error) -- [`func (m *RegistryManager) LoginRegistry(registryURL, username, password string) 
error`](#cli-internals-func-m-registrymanager-loginregistry-registryurl-username-password-string-error) -- [`func (m *RegistryManager) PushDirect(source, target string) error`](#cli-internals-func-m-registrymanager-pushdirect-source-target-string-error) -- [`func (m *RegistryManager) PushInCluster(source, target, helperNS string) error`](#cli-internals-func-m-registrymanager-pushincluster-source-target-helperns-string-error) -- [`func (m *RegistryManager) ShowRegistryInfo() error`](#cli-internals-func-m-registrymanager-showregistryinfo-error) -- [`type RegistryManagerAPI interface`](#cli-internals-type-registrymanagerapi-interface) -- [`type Runtime struct`](#cli-internals-type-runtime-struct) -- [`func NewRuntime(logger *zap.Logger) *Runtime`](#cli-internals-func-newruntime-logger-zap-logger-runtime) -- [`func (r *Runtime) AccessManager() *AccessManager`](#cli-internals-func-r-runtime-accessmanager-accessmanager) -- [`func (r *Runtime) ClusterManager() *ClusterManager`](#cli-internals-func-r-runtime-clustermanager-clustermanager) -- [`func (r *Runtime) Executor() Executor`](#cli-internals-func-r-runtime-executor-executor) -- [`func (r *Runtime) KubectlClient() *KubectlClient`](#cli-internals-func-r-runtime-kubectlclient-kubectlclient) -- [`func (r *Runtime) KubectlRunner() KubectlRunner`](#cli-internals-func-r-runtime-kubectlrunner-kubectlrunner) -- [`func (r *Runtime) Logger() *zap.Logger`](#cli-internals-func-r-runtime-logger-zap-logger) -- [`func (r *Runtime) RegistryManager() *RegistryManager`](#cli-internals-func-r-runtime-registrymanager-registrymanager) -- [`func (r *Runtime) SentinelManager() *SentinelManager`](#cli-internals-func-r-runtime-sentinelmanager-sentinelmanager) -- [`func (r *Runtime) ServerManager() *ServerManager`](#cli-internals-func-r-runtime-servermanager-servermanager) -- [`type SentinelManager struct`](#cli-internals-type-sentinelmanager-struct) -- [`func DefaultSentinelManager(logger *zap.Logger) 
*SentinelManager`](#cli-internals-func-defaultsentinelmanager-logger-zap-logger-sentinelmanager) -- [`func NewSentinelManager(kubectl *KubectlClient, logger *zap.Logger) *SentinelManager`](#cli-internals-func-newsentinelmanager-kubectl-kubectlclient-logger-zap-logger-sentinelmanager) -- [`func (m *SentinelManager) PortForwardSentinelTarget(target string, localPort int, address string) error`](#cli-internals-func-m-sentinelmanager-portforwardsentineltarget-target-string-localport-int-address-string-error) -- [`func (m *SentinelManager) RestartSentinel(component string, restartAll bool) error`](#cli-internals-func-m-sentinelmanager-restartsentinel-component-string-restartall-bool-error) -- [`func (m *SentinelManager) ShowSentinelEvents() error`](#cli-internals-func-m-sentinelmanager-showsentinelevents-error) -- [`func (m *SentinelManager) ShowSentinelStatus() error`](#cli-internals-func-m-sentinelmanager-showsentinelstatus-error) -- [`func (m *SentinelManager) ViewSentinelLogs(component string, follow, previous bool, tail int, since string) error`](#cli-internals-func-m-sentinelmanager-viewsentinellogs-component-string-follow-previous-bool-tail-int-since-string-error) -- [`type ServerManager struct`](#cli-internals-type-servermanager-struct) -- [`func DefaultServerManager(logger *zap.Logger) *ServerManager`](#cli-internals-func-defaultservermanager-logger-zap-logger-servermanager) -- [`func NewServerManager(kubectl *KubectlClient, logger *zap.Logger) *ServerManager`](#cli-internals-func-newservermanager-kubectl-kubectlclient-logger-zap-logger-servermanager) -- [`func (m *ServerManager) ApplyServerFromFile(file string) error`](#cli-internals-func-m-servermanager-applyserverfromfile-file-string-error) -- [`func (m *ServerManager) BindUseKubeFlag(cmd *cobra.Command)`](#cli-internals-func-m-servermanager-bindusekubeflag-cmd-cobra-command) -- [`func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) 
error`](#cli-internals-func-m-servermanager-createserver-name-namespace-image-imagetag-string-error) -- [`func (m *ServerManager) CreateServerFromFile(file string) error`](#cli-internals-func-m-servermanager-createserverfromfile-file-string-error) -- [`func (m *ServerManager) DeleteServer(name, namespace string) error`](#cli-internals-func-m-servermanager-deleteserver-name-namespace-string-error) -- [`func (m *ServerManager) ExportServer(name, namespace, file string) error`](#cli-internals-func-m-servermanager-exportserver-name-namespace-file-string-error) -- [`func (m *ServerManager) GetServer(name, namespace string) error`](#cli-internals-func-m-servermanager-getserver-name-namespace-string-error) -- [`func (m *ServerManager) InspectServerPolicy(name, namespace string) error`](#cli-internals-func-m-servermanager-inspectserverpolicy-name-namespace-string-error) -- [`func (m *ServerManager) ListServers(namespace string) error`](#cli-internals-func-m-servermanager-listservers-namespace-string-error) -- [`func (m *ServerManager) Logger() *zap.Logger`](#cli-internals-func-m-servermanager-logger-zap-logger) -- [`func (m *ServerManager) PatchServer(name, namespace, patchType, patch, patchFile string) error`](#cli-internals-func-m-servermanager-patchserver-name-namespace-patchtype-patch-patchfile-string-error) -- [`func (m *ServerManager) ServerStatus(namespace string) error`](#cli-internals-func-m-servermanager-serverstatus-namespace-string-error) -- [`func (m *ServerManager) ViewServerLogs(name, namespace string, follow bool) error`](#cli-internals-func-m-servermanager-viewserverlogs-name-namespace-string-follow-bool-error) -- [`type SetupContext struct`](#cli-internals-type-setupcontext-struct) -- [`type SetupDeps struct`](#cli-internals-type-setupdeps-struct) -- [`type SetupPipeline struct`](#cli-internals-type-setuppipeline-struct) -- [`func NewSetupPipeline() *SetupPipeline`](#cli-internals-func-newsetuppipeline-setuppipeline) -- [`func (p *SetupPipeline) Build() 
[]SetupStep`](#cli-internals-func-p-setuppipeline-build-setupstep) -- [`func (p *SetupPipeline) With(step SetupStep) *SetupPipeline`](#cli-internals-func-p-setuppipeline-with-step-setupstep-setuppipeline) -- [`func (p *SetupPipeline) WithIf(condition bool, step SetupStep) *SetupPipeline`](#cli-internals-func-p-setuppipeline-withif-condition-bool-step-setupstep-setuppipeline) -- [`type SetupPlan struct`](#cli-internals-type-setupplan-struct) -- [`func BuildSetupPlan(input SetupPlanInput) SetupPlan`](#cli-internals-func-buildsetupplan-input-setupplaninput-setupplan) -- [`type SetupPlanInput struct`](#cli-internals-type-setupplaninput-struct) -- [`type SetupStep interface`](#cli-internals-type-setupstep-interface) - - +- [`Constants`](#cli-core-constants) +- [`Variables`](#cli-core-variables) +- [`func Cyan(msg string) string`](#cli-core-func-cyan-msg-string-string) +- [`func Error(msg string)`](#cli-core-func-error-msg-string) +- [`func GetAnalyticsIngestURLOverride() string`](#cli-core-func-getanalyticsingesturloverride-string) +- [`func GetCertTimeout() time.Duration`](#cli-core-func-getcerttimeout-time-duration) +- [`func GetClusterName() string`](#cli-core-func-getclustername-string) +- [`func GetDefaultServerPort() int`](#cli-core-func-getdefaultserverport-int) +- [`func GetDeploymentTimeout() time.Duration`](#cli-core-func-getdeploymenttimeout-time-duration) +- [`func GetGatewayProxyImageOverride() string`](#cli-core-func-getgatewayproxyimageoverride-string) +- [`func GetHelperPodTimeout() time.Duration`](#cli-core-func-gethelperpodtimeout-time-duration) +- [`func GetMcpIngressHost() string`](#cli-core-func-getmcpingresshost-string) +- [`func GetOperatorImageOverride() string`](#cli-core-func-getoperatorimageoverride-string) +- [`func GetPlatformIngressHost() string`](#cli-core-func-getplatformingresshost-string) +- [`func GetRegistryClusterIssuerName() string`](#cli-core-func-getregistryclusterissuername-string) +- [`func GetRegistryEndpoint() 
string`](#cli-core-func-getregistryendpoint-string) +- [`func GetRegistryIngressHost() string`](#cli-core-func-getregistryingresshost-string) +- [`func GetRegistryPort() int`](#cli-core-func-getregistryport-int) +- [`func GetSkopeoImage() string`](#cli-core-func-getskopeoimage-string) +- [`func Green(msg string) string`](#cli-core-func-green-msg-string-string) +- [`func Header(title string)`](#cli-core-func-header-title-string) +- [`func Info(msg string)`](#cli-core-func-info-msg-string) +- [`func IsDebugMode() bool`](#cli-core-func-isdebugmode-bool) +- [`func LogStructuredError(logger *zap.Logger, err error, msg string)`](#cli-core-func-logstructurederror-logger-zap-logger-err-error-msg-string) +- [`func NewSetupStepFailedError() error`](#cli-core-func-newsetupstepfailederror-error) +- [`func NewWithSentinel(base error, msg string) error`](#cli-core-func-newwithsentinel-base-error-msg-string-error) +- [`func Red(msg string) string`](#cli-core-func-red-msg-string-string) +- [`func Section(title string)`](#cli-core-func-section-title-string) +- [`func SetDebugMode(enabled bool)`](#cli-core-func-setdebugmode-enabled-bool) +- [`func SpinnerStart(msg string) func(success bool, finalMsg string)`](#cli-core-func-spinnerstart-msg-string-func-success-bool-finalmsg-string) +- [`func Step(title string)`](#cli-core-func-step-title-string) +- [`func Success(msg string)`](#cli-core-func-success-msg-string) +- [`func SwapDefaultKubectlClient(c *KubectlClient) (restore func())`](#cli-core-func-swapdefaultkubectlclient-c-kubectlclient-restore-func) +- [`func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func())`](#cli-core-func-swapexeccommand-f-func-string-string-exec-cmd-restore-func) +- [`func SwapExecExecutor(e Executor) (restore func())`](#cli-core-func-swapexecexecutor-e-executor-restore-func) +- [`func Table(data [][]string)`](#cli-core-func-table-data-string) +- [`func TableBoxed(data [][]string)`](#cli-core-func-tableboxed-data-string) +- [`func 
ValidateK8sNameAndNamespace(nameLabel string, nameSentinel error, name, namespace string) (string, string, error)`](#cli-core-func-validatek8snameandnamespace-namelabel-string-namesentinel-error-name-namespace-string-string-string-error) +- [`func ValidateManifestField(field, value string) (string, error)`](#cli-core-func-validatemanifestfield-field-value-string-string-error) +- [`func Warn(msg string)`](#cli-core-func-warn-msg-string) +- [`func WrapWithSentinel(base, cause error, msg string) error`](#cli-core-func-wrapwithsentinel-base-cause-error-msg-string-error) +- [`func WrapWithSentinelAndContext(base, cause error, msg string, context map[string]any) error`](#cli-core-func-wrapwithsentinelandcontext-base-cause-error-msg-string-context-map-string-any-error) +- [`func Yellow(msg string) string`](#cli-core-func-yellow-msg-string-string) +- [`type CLIConfig struct`](#cli-core-type-cliconfig-struct) +- [`func LoadCLIConfig() *CLIConfig`](#cli-core-func-loadcliconfig-cliconfig) +- [`type Command interface`](#cli-core-type-command-interface) +- [`func ExecCommandWithValidators(name string, args []string, validators ...ExecValidator) (Command, error)`](#cli-core-func-execcommandwithvalidators-name-string-args-string-validators-execvalidator-command-error) +- [`type ExecSpec struct`](#cli-core-type-execspec-struct) +- [`type ExecValidator func(ExecSpec) error`](#cli-core-type-execvalidator-func-execspec-error) +- [`func AllowlistBins(allowed ...string) ExecValidator`](#cli-core-func-allowlistbins-allowed-string-execvalidator) +- [`func NoControlChars() ExecValidator`](#cli-core-func-nocontrolchars-execvalidator) +- [`func NoShellMeta() ExecValidator`](#cli-core-func-noshellmeta-execvalidator) +- [`func PathUnder(root string) ExecValidator`](#cli-core-func-pathunder-root-string-execvalidator) +- [`type Executor interface`](#cli-core-type-executor-interface) +- [`func DefaultExecutor() Executor`](#cli-core-func-defaultexecutor-executor) +- [`type KubectlClient 
struct`](#cli-core-type-kubectlclient-struct) +- [`func DefaultKubectlClient() *KubectlClient`](#cli-core-func-defaultkubectlclient-kubectlclient) +- [`func NewKubectlClient(exec Executor) (*KubectlClient, error)`](#cli-core-func-newkubectlclient-exec-executor-kubectlclient-error) +- [`func NewTestKubectlClient(exec Executor) *KubectlClient`](#cli-core-func-newtestkubectlclient-exec-executor-kubectlclient) +- [`func NewTestKubectlClientWithValidators(exec Executor, validators []ExecValidator) *KubectlClient`](#cli-core-func-newtestkubectlclientwithvalidators-exec-executor-validators-execvalidator-kubectlclient) +- [`func (c *KubectlClient) CombinedOutput(args []string) ([]byte, error)`](#cli-core-func-c-kubectlclient-combinedoutput-args-string-byte-error) +- [`func (c *KubectlClient) CommandArgs(args []string) (Command, error)`](#cli-core-func-c-kubectlclient-commandargs-args-string-command-error) +- [`func (c *KubectlClient) Output(args []string) ([]byte, error)`](#cli-core-func-c-kubectlclient-output-args-string-byte-error) +- [`func (c *KubectlClient) Run(args []string) error`](#cli-core-func-c-kubectlclient-run-args-string-error) +- [`func (c *KubectlClient) RunWithOutput(args []string, stdout, stderr io.Writer) error`](#cli-core-func-c-kubectlclient-runwithoutput-args-string-stdout-stderr-io-writer-error) +- [`type KubectlRunner interface`](#cli-core-type-kubectlrunner-interface) +- [`func DefaultKubectlRunner() KubectlRunner`](#cli-core-func-defaultkubectlrunner-kubectlrunner) +- [`type MockCommand struct`](#cli-core-type-mockcommand-struct) +- [`func (m *MockCommand) CombinedOutput() ([]byte, error)`](#cli-core-func-m-mockcommand-combinedoutput-byte-error) +- [`func (m *MockCommand) Output() ([]byte, error)`](#cli-core-func-m-mockcommand-output-byte-error) +- [`func (m *MockCommand) Run() error`](#cli-core-func-m-mockcommand-run-error) +- [`func (m *MockCommand) SetStderr(w io.Writer)`](#cli-core-func-m-mockcommand-setstderr-w-io-writer) +- [`func (m 
*MockCommand) SetStdin(r io.Reader)`](#cli-core-func-m-mockcommand-setstdin-r-io-reader) +- [`func (m *MockCommand) SetStdout(w io.Writer)`](#cli-core-func-m-mockcommand-setstdout-w-io-writer) +- [`type MockExecutor struct`](#cli-core-type-mockexecutor-struct) +- [`func (m *MockExecutor) Command(name string, args []string, validators ...ExecValidator) (Command, error)`](#cli-core-func-m-mockexecutor-command-name-string-args-string-validators-execvalidator-command-error) +- [`func (m *MockExecutor) HasCommand(name string) bool`](#cli-core-func-m-mockexecutor-hascommand-name-string-bool) +- [`func (m *MockExecutor) LastCommand() ExecSpec`](#cli-core-func-m-mockexecutor-lastcommand-execspec) +- [`func (m *MockExecutor) Reset()`](#cli-core-func-m-mockexecutor-reset) +- [`type Printer struct`](#cli-core-type-printer-struct) +- [`func (p *Printer) Cyan(msg string) string`](#cli-core-func-p-printer-cyan-msg-string-string) +- [`func (p *Printer) Error(msg string)`](#cli-core-func-p-printer-error-msg-string) +- [`func (p *Printer) Green(msg string) string`](#cli-core-func-p-printer-green-msg-string-string) +- [`func (p *Printer) Header(title string)`](#cli-core-func-p-printer-header-title-string) +- [`func (p *Printer) Info(msg string)`](#cli-core-func-p-printer-info-msg-string) +- [`func (p *Printer) Printf(format string, a ...interface{})`](#cli-core-func-p-printer-printf-format-string-a-interface) +- [`func (p *Printer) Println(a ...interface{})`](#cli-core-func-p-printer-println-a-interface) +- [`func (p *Printer) Red(msg string) string`](#cli-core-func-p-printer-red-msg-string-string) +- [`func (p *Printer) Section(title string)`](#cli-core-func-p-printer-section-title-string) +- [`func (p *Printer) SpinnerStart(msg string) func(success bool, finalMsg string)`](#cli-core-func-p-printer-spinnerstart-msg-string-func-success-bool-finalmsg-string) +- [`func (p *Printer) Step(title string)`](#cli-core-func-p-printer-step-title-string) +- [`func (p *Printer) Success(msg
string)`](#cli-core-func-p-printer-success-msg-string) +- [`func (p *Printer) Table(data [][]string)`](#cli-core-func-p-printer-table-data-string) +- [`func (p *Printer) TableBoxed(data [][]string)`](#cli-core-func-p-printer-tableboxed-data-string) +- [`func (p *Printer) Warn(msg string)`](#cli-core-func-p-printer-warn-msg-string) +- [`func (p *Printer) Yellow(msg string) string`](#cli-core-func-p-printer-yellow-msg-string-string) +- [`type Runtime struct`](#cli-core-type-runtime-struct) +- [`func NewRuntime(logger *zap.Logger) *Runtime`](#cli-core-func-newruntime-logger-zap-logger-runtime) +- [`func (r *Runtime) Config() *CLIConfig`](#cli-core-func-r-runtime-config-cliconfig) +- [`func (r *Runtime) Executor() Executor`](#cli-core-func-r-runtime-executor-executor) +- [`func (r *Runtime) KubectlClient() *KubectlClient`](#cli-core-func-r-runtime-kubectlclient-kubectlclient) +- [`func (r *Runtime) KubectlRunner() KubectlRunner`](#cli-core-func-r-runtime-kubectlrunner-kubectlrunner) +- [`func (r *Runtime) Logger() *zap.Logger`](#cli-core-func-r-runtime-logger-zap-logger) +- [`func (r *Runtime) Printer() *Printer`](#cli-core-func-r-runtime-printer-printer) + + ### Constants ```text +const ( + + // Exported aliases for tests and subpackages (same values as above). + DefaultRegistryEndpoint = defaultRegistryEndpoint + DefaultRegistryIngressHost = defaultRegistryIngressHost +) + Default values + const ( // NamespaceMCPRuntime is the namespace for the MCP runtime operator. NamespaceMCPRuntime = "mcp-runtime" @@ -2278,6 +2211,9 @@ const ( // NamespaceMCPServers is the default namespace for MCP server deployments. NamespaceMCPServers = "mcp-servers" + + // DefaultAnalyticsNamespace is the namespace for the bundled mcp-sentinel stack. 
+ DefaultAnalyticsNamespace = "mcp-sentinel" ) This file defines constants used across the CLI, including: - Kubernetes namespace names @@ -2335,14 +2271,9 @@ const ( SelectorManagedBy = "app.kubernetes.io/managed-by=mcp-runtime" ) Selector strings for kubectl queries. - -const ( - StorageModeDynamic = "dynamic" - StorageModeHostpath = "hostpath" -) ``` - + ### Variables ```text @@ -2492,52 +2423,30 @@ var DefaultCLIConfig = LoadCLIConfig() var DefaultPrinter = &Printer{} DefaultPrinter is the default printer instance used by package-level functions. -``` - - -### Functions - - -```text -func ApplyManifestContentWithNamespace(kubectl KubectlRunner, manifest, namespace string) error -``` - - -```text -func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error -``` - - -```text -func BuildOperatorArgs(metricsAddr, probeAddr string, leaderElect, leaderElectChanged bool) []string - buildOperatorArgs constructs operator command-line arguments from flags. - Only includes flags that were explicitly set. +var ValidK8sName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) + ValidK8sName matches Kubernetes resource name requirements (RFC 1123 + subdomain). ``` - -```text -func ClusterIssuerNameForACME(staging bool) string - ClusterIssuerNameForACME returns the ClusterIssuer resource name for Let's - Encrypt. - -``` + +### Functions - + ```text func Cyan(msg string) string Cyan returns cyan text. ``` - + ```text func Error(msg string) Error prints an error message. ``` - + ```text func GetAnalyticsIngestURLOverride() string GetAnalyticsIngestURLOverride returns the analytics ingest URL override, @@ -2545,35 +2454,35 @@ func GetAnalyticsIngestURLOverride() string ``` - + ```text func GetCertTimeout() time.Duration GetCertTimeout returns the certificate issuance timeout. ``` - + ```text func GetClusterName() string GetClusterName returns the cluster label attached to analytics/audit events. 
``` - + ```text func GetDefaultServerPort() int GetDefaultServerPort returns the default MCP server port. ``` - + ```text func GetDeploymentTimeout() time.Duration GetDeploymentTimeout returns the deployment wait timeout. ``` - + ```text func GetGatewayProxyImageOverride() string GetGatewayProxyImageOverride returns the gateway proxy image override, @@ -2581,7 +2490,7 @@ func GetGatewayProxyImageOverride() string ``` - + ```text func GetHelperPodTimeout() time.Duration GetHelperPodTimeout returns the helper pod ready timeout (e.g. registry @@ -2589,7 +2498,7 @@ func GetHelperPodTimeout() time.Duration ``` - + ```text func GetMcpIngressHost() string GetMcpIngressHost returns the public MCP / gateway host (mcp. when @@ -2597,7 +2506,7 @@ func GetMcpIngressHost() string ``` - + ```text func GetOperatorImageOverride() string GetOperatorImageOverride returns the operator image override, empty if not @@ -2605,7 +2514,7 @@ func GetOperatorImageOverride() string ``` - + ```text func GetPlatformIngressHost() string GetPlatformIngressHost returns the public dashboard UI host @@ -2615,7 +2524,7 @@ func GetPlatformIngressHost() string ``` - + ```text func GetRegistryClusterIssuerName() string GetRegistryClusterIssuerName returns the cluster issuer name used on the @@ -2623,7 +2532,7 @@ func GetRegistryClusterIssuerName() string ``` - + ```text func GetRegistryEndpoint() string GetRegistryEndpoint returns the configured registry endpoint for image refs @@ -2631,135 +2540,85 @@ func GetRegistryEndpoint() string ``` - + ```text func GetRegistryIngressHost() string GetRegistryIngressHost returns the configured registry ingress host. ``` - + ```text func GetRegistryPort() int GetRegistryPort returns the registry port. ``` - + ```text func GetSkopeoImage() string GetSkopeoImage returns the skopeo image for in-cluster operations. ``` - + ```text func Green(msg string) string Green returns green text. 
``` - -```text -func HasPlatformClient() bool -``` - - + ```text func Header(title string) Header prints a header banner. ``` - + ```text func Info(msg string) Info prints an info message. ``` - + ```text func IsDebugMode() bool IsDebugMode returns whether debug mode is enabled. ``` - + ```text func LogStructuredError(logger *zap.Logger, err error, msg string) ``` - + ```text func NewSetupStepFailedError() error ``` - + ```text func NewWithSentinel(base error, msg string) error ``` - -```text -func NormalizePlatformAPIBaseURL(raw string) string - NormalizePlatformAPIBaseURL trims whitespace, trailing slashes, and an - optional trailing /api suffix from a platform base URL. - -``` - - -```text -func PrintDoctorReport(r DoctorReport) - PrintDoctorReport emits a human-readable report using the standard printer. - -``` - - -```text -func ReadFileAtPath(path string) ([]byte, error) -``` - - + ```text func Red(msg string) string Red returns red text. ``` - -```text -func ResolveRegularFilePath(file string) (string, error) -``` - - -```text -func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string) error - RunRegistryProvision contains the registry provision command flow for folder - packages. - -``` - - -```text -func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helperNamespace string) error - RunRegistryPush contains the registry push command flow for folder packages. - -``` - - + ```text func Section(title string) Section prints a section header. ``` - -```text -func SentinelComponentKeys() []string -``` - - + ```text func SetDebugMode(enabled bool) SetDebugMode sets the global debug mode flag. 
When enabled, @@ -2767,176 +2626,108 @@ func SetDebugMode(enabled bool) ``` - -```text -func SetupPlatform(logger *zap.Logger, plan SetupPlan) error -``` - - -```text -func ShowPlatformStatus(logger *zap.Logger) error -``` - - + ```text func SpinnerStart(msg string) func(success bool, finalMsg string) SpinnerStart starts a spinner. ``` - + ```text func Step(title string) Step prints a step header. ``` - + ```text func Success(msg string) Success prints a success message. ``` - -```text -func Table(data [][]string) - Table prints a table. - -``` - - + ```text -func TableBoxed(data [][]string) - TableBoxed prints a boxed table. - -``` - - -```text -func ValidateStorageMode(mode string) error -``` - - -```text -func ValidateTLSSetupCLIFlags( - tlsEnabled bool, - acmeEmailResolved, tlsCIResolved string, - acmeStagingResolved, skipCertManagerInstall bool, -) error - validateTLSSetupCLIFlags enforces ACME / internal-issuer mutual exclusion - and requires --with-tls when any TLS or cert-manager-related options are - set. - -``` - - -```text -func Warn(msg string) - Warn prints a warning message. +func SwapDefaultKubectlClient(c *KubectlClient) (restore func()) + SwapDefaultKubectlClient replaces the shared kubectl client (tests only). ``` - + ```text -func WrapWithSentinel(base, cause error, msg string) error -``` - - -```text -func WrapWithSentinelAndContext(base, cause error, msg string, context map[string]any) error -``` +func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func()) + SwapExecCommand replaces the exec.Command seam used by the default executor + (tests only). - -```text -func Yellow(msg string) string - Yellow returns yellow text. ``` - -### Types - - + ```text -type AccessManager struct { - // Has unexported fields. -} +func SwapExecExecutor(e Executor) (restore func()) + SwapExecExecutor replaces the global process executor (tests only). 
``` - + ```text -func DefaultAccessManager(logger *zap.Logger) *AccessManager +func Table(data [][]string) + Table prints a table. ``` - + ```text -func NewAccessManager(kubectl *KubectlClient, logger *zap.Logger) *AccessManager +func TableBoxed(data [][]string) + TableBoxed prints a boxed table. ``` - + ```text -func (m *AccessManager) ApplyAccessResource(file string) error +func ValidateK8sNameAndNamespace(nameLabel string, nameSentinel error, name, namespace string) (string, string, error) + ValidateK8sNameAndNamespace validates a name+namespace pair against RFC-1123 + subdomain rules plus ValidateManifestField. nameLabel customizes the + invalid-name error message ("server name", "resource name"); nameSentinel + (may be nil) selects the sentinel error category. ``` - + ```text -func (m *AccessManager) BindUseKubeFlag(cmd *cobra.Command) - BindUseKubeFlag wires the shared --use-kube flag onto the command. +func ValidateManifestField(field, value string) (string, error) + ValidateManifestField rejects control characters, requires non-empty after + trimming, and returns the trimmed value. ``` - + ```text -func (m *AccessManager) DeleteAccessResource(resource, name, namespace string) error +func Warn(msg string) + Warn prints a warning message. ``` - + ```text -func (m *AccessManager) GetAccessResource(resource, name, namespace string) error - +func WrapWithSentinel(base, cause error, msg string) error ``` - + ```text -func (m *AccessManager) ListAccessResources(resource, namespace string, allNamespaces bool) error - ListAccessResources lists grants or sessions via the platform API when - configured, else kubectl. - +func WrapWithSentinelAndContext(base, cause error, msg string, context map[string]any) error ``` - + ```text -func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, value bool) error - +func Yellow(msg string) string + Yellow returns yellow text. 
``` - -```text -type AnalyticsImageSet struct { - Ingest string - API string - Processor string - UI string - Traefik string - ClickHouse string - Zookeeper string - Kafka string - Prometheus string - OTelCollector string - Tempo string - Loki string - Promtail string - Grafana string -} - -``` + +### Types - + ```text type CLIConfig struct { // Timeouts @@ -2977,225 +2768,878 @@ type CLIConfig struct { ``` - + ```text func LoadCLIConfig() *CLIConfig LoadCLIConfig loads CLI configuration from environment variables. ``` - + ```text -type CertManager struct { - // Has unexported fields. +type Command interface { + Output() ([]byte, error) + CombinedOutput() ([]byte, error) + Run() error + SetStdout(w io.Writer) + SetStderr(w io.Writer) + SetStdin(r io.Reader) } - CertManager manages cert-manager resources for the platform. + Command represents a command that can be executed. ``` - + ```text -func NewCertManager(kubectl KubectlRunner, logger *zap.Logger) *CertManager - NewCertManager creates a CertManager with the given dependencies. +func ExecCommandWithValidators(name string, args []string, validators ...ExecValidator) (Command, error) + ExecCommandWithValidators runs the named binary with args after validators + pass. ``` - + ```text -func (m *CertManager) Apply() error - Apply installs cert-manager resources required for registry TLS. +type ExecSpec struct { + Name string + Args []string +} ``` - + ```text -func (m *CertManager) Status() error - Status verifies cert-manager installation and required resources. +type ExecValidator func(ExecSpec) error ``` - + ```text -func (m *CertManager) Wait(timeout time.Duration) error - Wait blocks until the registry certificate is Ready or times out. +func AllowlistBins(allowed ...string) ExecValidator ``` - + ```text -type ClusterManager struct { - // Has unexported fields. -} - ClusterManager handles cluster operations with injected dependencies. 
+func NoControlChars() ExecValidator ``` - + ```text -func DefaultClusterManager(logger *zap.Logger) *ClusterManager - DefaultClusterManager returns a ClusterManager using default clients. +func NoShellMeta() ExecValidator ``` - + ```text -func NewClusterManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *ClusterManager - NewClusterManager creates a ClusterManager with the given dependencies. +func PathUnder(root string) ExecValidator ``` - + ```text -func (m *ClusterManager) CheckClusterStatus() error - CheckClusterStatus checks and displays cluster status. +type Executor interface { + Command(name string, args []string, validators ...ExecValidator) (Command, error) +} + Executor creates commands for execution. ``` - + ```text -func (m *ClusterManager) ConfigureCluster(ingress ingressOptions) error - ConfigureCluster configures cluster settings like ingress. +func DefaultExecutor() Executor + DefaultExecutor returns the shared process executor used by CLI commands. ``` - + ```text -func (m *ClusterManager) ConfigureClusterWithValues(mode, manifest string, force bool) error - ConfigureClusterWithValues adapts exported flag values into the internal - ingress options shape. +type KubectlClient struct { + // Has unexported fields. +} + KubectlClient wraps kubectl command execution with validation. ``` - + ```text -func (m *ClusterManager) ConfigureKubeconfig(kubeconfig, context string) error - ConfigureKubeconfig sets KUBECONFIG and optionally switches context. +func DefaultKubectlClient() *KubectlClient + DefaultKubectlClient returns the shared kubectl client used by CLI commands. ``` - + ```text -func (m *ClusterManager) ConfigureKubeconfigFromProvider(provider, region, clusterName, resourceGroup, project, zone, kubeconfig string) error - ConfigureKubeconfigFromProvider updates kubeconfig using a cloud provider - CLI. +func NewKubectlClient(exec Executor) (*KubectlClient, error) + NewKubectlClient creates a KubectlClient with default validators. 
``` - + ```text -func (m *ClusterManager) EnsureNamespace(name string) error - EnsureNamespace applies/creates a namespace idempotently. +func NewTestKubectlClient(exec Executor) *KubectlClient + NewTestKubectlClient returns a KubectlClient for tests (no path validators). ``` - + ```text -func (m *ClusterManager) InitCluster(kubeconfig, context string) error - InitCluster initializes cluster configuration. +func NewTestKubectlClientWithValidators(exec Executor, validators []ExecValidator) *KubectlClient + NewTestKubectlClientWithValidators returns a KubectlClient for tests using + the given validator list (or nil for none). ``` - + ```text -func (m *ClusterManager) KubectlRunner() KubectlRunner - KubectlRunner exposes the shared kubectl runner for foldered command - routing. +func (c *KubectlClient) CombinedOutput(args []string) ([]byte, error) + CombinedOutput runs kubectl with the given arguments and returns combined + stdout/stderr. ``` - + ```text -func (m *ClusterManager) Logger() *zap.Logger - Logger exposes the shared logger for foldered command routing. +func (c *KubectlClient) CommandArgs(args []string) (Command, error) + CommandArgs builds a kubectl command with the given arguments. Validates + arguments against configured validators before building. ``` - + ```text -func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string) error - ProvisionCluster provisions a new Kubernetes cluster. +func (c *KubectlClient) Output(args []string) ([]byte, error) + Output runs kubectl with the given arguments and returns stdout. ``` - + ```text -type ClusterManagerAPI interface { - InitCluster(kubeconfig, context string) error - ConfigureCluster(opts ingressOptions) error -} +func (c *KubectlClient) Run(args []string) error + Run runs kubectl with the given arguments. 
``` - + ```text -type Command interface { - Output() ([]byte, error) - CombinedOutput() ([]byte, error) - Run() error - SetStdout(w io.Writer) - SetStderr(w io.Writer) - SetStdin(r io.Reader) -} - Command represents a command that can be executed. +func (c *KubectlClient) RunWithOutput(args []string, stdout, stderr io.Writer) error + RunWithOutput runs kubectl with the given arguments, piping to the provided + writers. ``` - + ```text -type Distribution string - Distribution identifies a Kubernetes flavor for remediation messaging. +type KubectlRunner interface { + CommandArgs(args []string) (Command, error) + Run(args []string) error + RunWithOutput(args []string, stdout, stderr io.Writer) error +} + KubectlRunner captures the kubectl methods used by setup helpers. -const ( - DistroK3s Distribution = "k3s" - DistroKind Distribution = "kind" - DistroMinikube Distribution = "minikube" - DistroDockerDesktop Distribution = "docker-desktop" - DistroGeneric Distribution = "generic" -) ``` - + ```text -func DetectDistribution(kubectl KubectlRunner) Distribution - DetectDistribution inspects node info to guess which distribution is - running. This is best-effort: callers should treat DistroGeneric as - "probably kubeadm/unknown". +func DefaultKubectlRunner() KubectlRunner + DefaultKubectlRunner returns the shared kubectl runner used by CLI commands. ``` - + ```text -type DoctorCheck struct { - Name string - OK bool - Detail string - Remedy string // Short hint; detailed steps come from the distro checklist. +type MockCommand struct { + Args []string + OutputData []byte + OutputErr error + RunErr error + StdoutW io.Writer + StderrW io.Writer + StdinR io.Reader + RunFunc func() error } - DoctorCheck is a single preflight check result. + MockCommand is a test double for Command interface. ``` - + ```text -type DoctorCheckProgress func(DoctorCheckProgressEvent) func(DoctorCheck) - DoctorCheckProgress is called before each doctor check starts. 
It returns an - optional completion callback that receives the finished check result. +func (m *MockCommand) CombinedOutput() ([]byte, error) ``` - + ```text -type DoctorCheckProgressEvent struct { - Name string - Detail string - Index int +func (m *MockCommand) Output() ([]byte, error) + +``` + + +```text +func (m *MockCommand) Run() error + +``` + + +```text +func (m *MockCommand) SetStderr(w io.Writer) + +``` + + +```text +func (m *MockCommand) SetStdin(r io.Reader) + +``` + + +```text +func (m *MockCommand) SetStdout(w io.Writer) + +``` + + +```text +type MockExecutor struct { + // Commands records all commands that were created. + Commands []ExecSpec + // DefaultOutput is returned by commands when CommandFunc is nil. + DefaultOutput []byte + // DefaultErr is the error returned by Output/CombinedOutput. + DefaultErr error + // DefaultRunErr is the error returned by Run. + DefaultRunErr error + // CommandFunc allows custom behavior per command. + CommandFunc func(spec ExecSpec) *MockCommand +} + MockExecutor is a test double for Executor interface. + +``` + + +```text +func (m *MockExecutor) Command(name string, args []string, validators ...ExecValidator) (Command, error) + +``` + + +```text +func (m *MockExecutor) HasCommand(name string) bool + HasCommand checks if a command with the given name was executed. + +``` + + +```text +func (m *MockExecutor) LastCommand() ExecSpec + LastCommand returns the most recent command spec. + +``` + + +```text +func (m *MockExecutor) Reset() + Reset clears recorded commands. + +``` + + +```text +type Printer struct { + // Quiet suppresses non-essential output + Quiet bool + // Writer overrides the output destination when set. + Writer io.Writer +} + Printer provides formatted terminal output methods. Use the default instance + via package-level functions. + +``` + + +```text +func (p *Printer) Cyan(msg string) string + Cyan returns cyan-colored text. 
+ +``` + + +```text +func (p *Printer) Error(msg string) + Error prints an error message. Note: Errors are intentionally not suppressed + in quiet mode to ensure critical issues are always visible, even when + non-essential output is disabled. + +``` + + +```text +func (p *Printer) Green(msg string) string + Green returns green-colored text. + +``` + + +```text +func (p *Printer) Header(title string) + Header prints a full-width header banner. + +``` + + +```text +func (p *Printer) Info(msg string) + Info prints an informational message. + +``` + + +```text +func (p *Printer) Printf(format string, a ...interface{}) + Printf prints formatted text. + +``` + + +```text +func (p *Printer) Println(a ...interface{}) + Println prints a plain line. + +``` + + +```text +func (p *Printer) Red(msg string) string + Red returns red-colored text. + +``` + + +```text +func (p *Printer) Section(title string) + Section prints a prominent section header. + +``` + + +```text +func (p *Printer) SpinnerStart(msg string) func(success bool, finalMsg string) + SpinnerStart starts a spinner with the given message. Returns a stop + function. + +``` + + +```text +func (p *Printer) Step(title string) + Step prints a step indicator (e.g., "Step 1: Initialize"). + +``` + + +```text +func (p *Printer) Success(msg string) + Success prints a success message. + +``` + + +```text +func (p *Printer) Table(data [][]string) + Table prints a formatted table. First row is treated as header. + +``` + + +```text +func (p *Printer) TableBoxed(data [][]string) + TableBoxed prints a formatted table with box borders. + +``` + + +```text +func (p *Printer) Warn(msg string) + Warn prints a warning message. Note: Warnings are intentionally not + suppressed in quiet mode to ensure important notices are visible even when + non-essential output is disabled. + +``` + + +```text +func (p *Printer) Yellow(msg string) string + Yellow returns yellow-colored text. 
+ +``` + + +```text +type Runtime struct { + // Has unexported fields. +} + Runtime is the shared CLI facade for wiring common dependencies once and + handing typed managers to the foldered command packages. + +``` + + +```text +func NewRuntime(logger *zap.Logger) *Runtime + NewRuntime builds the shared CLI runtime facade. + +``` + + +```text +func (r *Runtime) Config() *CLIConfig + Config returns the loaded CLI configuration. + +``` + + +```text +func (r *Runtime) Executor() Executor + Executor returns the shared process executor. + +``` + + +```text +func (r *Runtime) KubectlClient() *KubectlClient + KubectlClient returns the shared kubectl client. + +``` + + +```text +func (r *Runtime) KubectlRunner() KubectlRunner + KubectlRunner returns the shared kubectl runner. + +``` + + +```text +func (r *Runtime) Logger() *zap.Logger + Logger returns the shared logger. + +``` + + +```text +func (r *Runtime) Printer() *Printer + Printer returns the shared terminal printer. +``` + + +## CLI Kubernetes helpers + +Package: `kube` +Import path: `mcp-runtime/internal/cli/kube` + +Source command: + +```bash +go doc -all ./internal/cli/kube +``` + + +### Overview + +Package kube contains shared kubectl-oriented helpers for CLI commands. 
+ +### Jump To + +- [Overview](#cli-kubernetes-helpers-overview) +- [Index](#cli-kubernetes-helpers-index) +- [Functions](#cli-kubernetes-helpers-functions) +- [Types](#cli-kubernetes-helpers-types) + + +### Index + +- [`func ApplyManifestContent[T Command](commandArgs func([]string) (T, error), manifest string) error`](#cli-kubernetes-helpers-func-applymanifestcontent-t-command-commandargs-func-string-t-error-manifest-string-error) +- [`func ApplyManifestContentWithNamespace[T Command](commandArgs func([]string) (T, error), manifest, namespace string) error`](#cli-kubernetes-helpers-func-applymanifestcontentwithnamespace-t-command-commandargs-func-string-t-error-manifest-namespace-string-error) +- [`func ApplyManifestFromFile[T Command](commandArgs func([]string) (T, error), file string, stdout, stderr io.Writer) error`](#cli-kubernetes-helpers-func-applymanifestfromfile-t-command-commandargs-func-string-t-error-file-string-stdout-stderr-io-writer-error) +- [`func EnsureNamespace[T Command](commandArgs func([]string) (T, error), name string) error`](#cli-kubernetes-helpers-func-ensurenamespace-t-command-commandargs-func-string-t-error-name-string-error) +- [`func NormalizePatchDocument(raw string) (string, error)`](#cli-kubernetes-helpers-func-normalizepatchdocument-raw-string-string-error) +- [`func NormalizePatchFile(file string) (string, error)`](#cli-kubernetes-helpers-func-normalizepatchfile-file-string-string-error) +- [`func ReadFileAtPath(path string) ([]byte, error)`](#cli-kubernetes-helpers-func-readfileatpath-path-string-byte-error) +- [`func ResolveRegularFilePath(file string) (string, error)`](#cli-kubernetes-helpers-func-resolveregularfilepath-file-string-string-error) +- [`func WriteOutputFile(file string, data []byte) error`](#cli-kubernetes-helpers-func-writeoutputfile-file-string-data-byte-error) +- [`type Command interface`](#cli-kubernetes-helpers-type-command-interface) + + +### Functions + + +```text +func ApplyManifestContent[T 
Command](commandArgs func([]string) (T, error), manifest string) error + ApplyManifestContent applies manifest YAML from a string via kubectl stdin. + +``` + + +```text +func ApplyManifestContentWithNamespace[T Command](commandArgs func([]string) (T, error), manifest, namespace string) error + ApplyManifestContentWithNamespace applies manifest YAML from stdin, + optionally scoped to a namespace. + +``` + + +```text +func ApplyManifestFromFile[T Command](commandArgs func([]string) (T, error), file string, stdout, stderr io.Writer) error + ApplyManifestFromFile applies a manifest file using kubectl. + +``` + + +```text +func EnsureNamespace[T Command](commandArgs func([]string) (T, error), name string) error + EnsureNamespace applies/creates a namespace idempotently. + +``` + + +```text +func NormalizePatchDocument(raw string) (string, error) + NormalizePatchDocument parses YAML or JSON patch content and returns a JSON + string suitable for kubectl patch --type=json (or merge) style inputs. + +``` + + +```text +func NormalizePatchFile(file string) (string, error) + NormalizePatchFile reads a patch file from disk and returns normalized JSON + like NormalizePatchDocument. + +``` + + +```text +func ReadFileAtPath(path string) ([]byte, error) + ReadFileAtPath reads a regular file without following symlink escapes + outside its parent directory. + +``` + + +```text +func ResolveRegularFilePath(file string) (string, error) + ResolveRegularFilePath resolves a path and rejects directories. + +``` + + +```text +func WriteOutputFile(file string, data []byte) error + WriteOutputFile writes data to a path under a resolved parent directory with + 0600 file permissions and 0750 (or tighter) directory permissions. +``` + + +### Types + + +```text +type Command interface { + SetStdin(io.Reader) + SetStdout(io.Writer) + SetStderr(io.Writer) + Run() error +} + Command is the minimal command shape needed for stdin-based kubectl apply. 
+``` + + +## CLI Kubernetes errors + +Package: `kubeerr` +Import path: `mcp-runtime/internal/cli/kubeerr` + +Source command: + +```bash +go doc -all ./internal/cli/kubeerr +``` + + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-kubernetes-errors-overview) +- [Index](#cli-kubernetes-errors-index) +- [Functions](#cli-kubernetes-errors-functions) + + +### Index + +- [`func CommandDetail(output string, fallback error) string`](#cli-kubernetes-errors-func-commanddetail-output-string-fallback-error-string) +- [`func SetupHint(detail string) (string, bool)`](#cli-kubernetes-errors-func-setuphint-detail-string-string-bool) + + +### Functions + + +```text +func CommandDetail(output string, fallback error) string + CommandDetail extracts a single-line error detail from kubectl output or the + exec error. + +``` + + +```text +func SetupHint(detail string) (string, bool) + SetupHint returns a friendlier message when the cluster has not been + provisioned yet. +``` + + +## CLI cluster + +Package: `cluster` +Import path: `mcp-runtime/internal/cli/cluster` + +Source command: + +```bash +go doc -all ./internal/cli/cluster +``` + + +### Overview + +Package cluster owns routing for the cluster top-level command. + +Cluster doctor diagnostics: distribution detection, registry and Traefik checks, +Sentinel probes, and remediation hints. See docs/cluster-readiness.md. + +Package cluster implements cluster operations for the cluster CLI command. 
+ +### Jump To + +- [Overview](#cli-cluster-overview) +- [Index](#cli-cluster-index) +- [Functions](#cli-cluster-functions) +- [Types](#cli-cluster-types) + + +### Index + +- [`func New(runtime *core.Runtime) *cobra.Command`](#cli-cluster-func-new-runtime-core-runtime-cobra-command) +- [`func NewWithManager(mgr *ClusterManager) *cobra.Command`](#cli-cluster-func-newwithmanager-mgr-clustermanager-cobra-command) +- [`func PrintDoctorReport(r DoctorReport)`](#cli-cluster-func-printdoctorreport-r-doctorreport) +- [`type ClusterManager struct`](#cli-cluster-type-clustermanager-struct) +- [`func DefaultClusterManager(logger *zap.Logger) *ClusterManager`](#cli-cluster-func-defaultclustermanager-logger-zap-logger-clustermanager) +- [`func NewClusterManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *ClusterManager`](#cli-cluster-func-newclustermanager-kubectl-core-kubectlclient-exec-core-executor-logger-zap-logger-clustermanager) +- [`func (m *ClusterManager) CheckClusterStatus() error`](#cli-cluster-func-m-clustermanager-checkclusterstatus-error) +- [`func (m *ClusterManager) ConfigureCluster(opts IngressOptions) error`](#cli-cluster-func-m-clustermanager-configurecluster-opts-ingressoptions-error) +- [`func (m *ClusterManager) ConfigureClusterWithValues(mode, manifest string, force bool) error`](#cli-cluster-func-m-clustermanager-configureclusterwithvalues-mode-manifest-string-force-bool-error) +- [`func (m *ClusterManager) ConfigureKubeconfig(kubeconfig, context string) error`](#cli-cluster-func-m-clustermanager-configurekubeconfig-kubeconfig-context-string-error) +- [`func (m *ClusterManager) ConfigureKubeconfigFromProvider(provider, region, clusterName, resourceGroup, project, zone, kubeconfig string) error`](#cli-cluster-func-m-clustermanager-configurekubeconfigfromprovider-provider-region-clustername-resourcegroup-project-zone-kubeconfig-string-error) +- [`func (m *ClusterManager) EnsureNamespace(name string) 
error`](#cli-cluster-func-m-clustermanager-ensurenamespace-name-string-error) +- [`func (m *ClusterManager) InitCluster(kubeconfig, context string) error`](#cli-cluster-func-m-clustermanager-initcluster-kubeconfig-context-string-error) +- [`func (m *ClusterManager) KubectlRunner() core.KubectlRunner`](#cli-cluster-func-m-clustermanager-kubectlrunner-core-kubectlrunner) +- [`func (m *ClusterManager) Logger() *zap.Logger`](#cli-cluster-func-m-clustermanager-logger-zap-logger) +- [`func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string, dryRun bool) error`](#cli-cluster-func-m-clustermanager-provisioncluster-provider-region-string-nodecount-int-clustername-string-dryrun-bool-error) +- [`type Distribution string`](#cli-cluster-type-distribution-string) +- [`func DetectDistribution(kubectl core.KubectlRunner) Distribution`](#cli-cluster-func-detectdistribution-kubectl-core-kubectlrunner-distribution) +- [`type DoctorCheck struct`](#cli-cluster-type-doctorcheck-struct) +- [`type DoctorCheckProgress func(DoctorCheckProgressEvent) func(DoctorCheck)`](#cli-cluster-type-doctorcheckprogress-func-doctorcheckprogressevent-func-doctorcheck) +- [`type DoctorCheckProgressEvent struct`](#cli-cluster-type-doctorcheckprogressevent-struct) +- [`type DoctorReport struct`](#cli-cluster-type-doctorreport-struct) +- [`func RunDoctor(kubectl core.KubectlRunner) DoctorReport`](#cli-cluster-func-rundoctor-kubectl-core-kubectlrunner-doctorreport) +- [`func RunDoctorAndPrint(kubectl core.KubectlRunner) DoctorReport`](#cli-cluster-func-rundoctorandprint-kubectl-core-kubectlrunner-doctorreport) +- [`func RunDoctorWithProgress(kubectl core.KubectlRunner, progress DoctorCheckProgress) DoctorReport`](#cli-cluster-func-rundoctorwithprogress-kubectl-core-kubectlrunner-progress-doctorcheckprogress-doctorreport) +- [`func (r DoctorReport) AllOK() bool`](#cli-cluster-func-r-doctorreport-allok-bool) +- [`type IngressOptions 
struct`](#cli-cluster-type-ingressoptions-struct) + + +### Functions + + +```text +func New(runtime *core.Runtime) *cobra.Command + New returns the cluster command. + +``` + + +```text +func NewWithManager(mgr *ClusterManager) *cobra.Command + NewWithManager returns the cluster command using the provided manager. + +``` + + +```text +func PrintDoctorReport(r DoctorReport) + PrintDoctorReport emits a human-readable report using the standard printer. +``` + + +### Types + + +```text +type ClusterManager struct { + // Has unexported fields. +} + ClusterManager handles cluster operations with injected dependencies. + +``` + + +```text +func DefaultClusterManager(logger *zap.Logger) *ClusterManager + DefaultClusterManager returns a ClusterManager using default clients. + +``` + + +```text +func NewClusterManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *ClusterManager + NewClusterManager creates a ClusterManager with the given dependencies. + +``` + + +```text +func (m *ClusterManager) CheckClusterStatus() error + CheckClusterStatus checks and displays cluster status. + +``` + + +```text +func (m *ClusterManager) ConfigureCluster(opts IngressOptions) error + ConfigureCluster configures cluster settings like ingress. + +``` + + +```text +func (m *ClusterManager) ConfigureClusterWithValues(mode, manifest string, force bool) error + ConfigureClusterWithValues adapts exported flag values into the internal + ingress options shape. + +``` + + +```text +func (m *ClusterManager) ConfigureKubeconfig(kubeconfig, context string) error + ConfigureKubeconfig sets KUBECONFIG and optionally switches context. + +``` + + +```text +func (m *ClusterManager) ConfigureKubeconfigFromProvider(provider, region, clusterName, resourceGroup, project, zone, kubeconfig string) error + ConfigureKubeconfigFromProvider updates kubeconfig using a cloud provider + CLI. 
+ +``` + + +```text +func (m *ClusterManager) EnsureNamespace(name string) error + EnsureNamespace applies/creates a namespace idempotently. + +``` + + +```text +func (m *ClusterManager) InitCluster(kubeconfig, context string) error + InitCluster initializes cluster configuration. + +``` + + +```text +func (m *ClusterManager) KubectlRunner() core.KubectlRunner + KubectlRunner exposes the shared kubectl runner for foldered command + routing. + +``` + + +```text +func (m *ClusterManager) Logger() *zap.Logger + Logger exposes the shared logger for foldered command routing. + +``` + + +```text +func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string, dryRun bool) error + ProvisionCluster provisions a new Kubernetes cluster. When dryRun is true, + it prints the configuration and command that would run without creating any + cluster or calling out to cloud APIs. + +``` + + +```text +type Distribution string + Distribution identifies a Kubernetes flavor for remediation messaging. + +const ( + DistroK3s Distribution = "k3s" + DistroKind Distribution = "kind" + DistroMinikube Distribution = "minikube" + DistroDockerDesktop Distribution = "docker-desktop" + DistroGeneric Distribution = "generic" +) +``` + + +```text +func DetectDistribution(kubectl core.KubectlRunner) Distribution + DetectDistribution inspects node info to guess which distribution is + running. This is best-effort: callers should treat DistroGeneric as + "probably kubeadm/unknown". + +``` + + +```text +type DoctorCheck struct { + Name string + OK bool + Detail string + Remedy string // Short hint; detailed steps come from the distro checklist. +} + DoctorCheck is a single preflight check result. + +``` + + +```text +type DoctorCheckProgress func(DoctorCheckProgressEvent) func(DoctorCheck) + DoctorCheckProgress is called before each doctor check starts. It returns an + optional completion callback that receives the finished check result. 
+ +``` + + +```text +type DoctorCheckProgressEvent struct { + Name string + Detail string + Index int Total int } DoctorCheckProgressEvent describes the check that is about to run. ``` - + ```text type DoctorReport struct { Distribution Distribution @@ -3205,393 +3649,611 @@ type DoctorReport struct { ``` - + ```text -func RunDoctor(kubectl KubectlRunner) DoctorReport +func RunDoctor(kubectl core.KubectlRunner) DoctorReport RunDoctor executes cluster diagnostics and returns a report. ``` - + ```text -func RunDoctorAndPrint(kubectl KubectlRunner) DoctorReport +func RunDoctorAndPrint(kubectl core.KubectlRunner) DoctorReport RunDoctorAndPrint streams doctor progress and results as checks execute. ``` - + ```text -func RunDoctorWithProgress(kubectl KubectlRunner, progress DoctorCheckProgress) DoctorReport +func RunDoctorWithProgress(kubectl core.KubectlRunner, progress DoctorCheckProgress) DoctorReport RunDoctorWithProgress executes cluster diagnostics and calls progress hooks before and after each check. It is useful for UIs that need live feedback. ``` - + ```text func (r DoctorReport) AllOK() bool AllOK reports whether every check passed. ``` - + ```text -type ExecSpec struct { - Name string - Args []string +type IngressOptions struct { + Mode string + Manifest string + Force bool } + IngressOptions captures ingress install settings used by both cluster + configuration and the setup command. 
+``` + + +## CLI cert-manager + +Package: `certmanager` +Import path: `mcp-runtime/internal/cli/certmanager` +Source command: + +```bash +go doc -all ./internal/cli/certmanager ``` - -```text -type ExecValidator func(ExecSpec) error + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-cert-manager-overview) +- [Index](#cli-cert-manager-index) +- [Constants](#cli-cert-manager-constants) +- [Functions](#cli-cert-manager-functions) +- [Types](#cli-cert-manager-types) + + +### Index + +- [`Constants`](#cli-cert-manager-constants) +- [`func ACMETLSDNSNames() []string`](#cli-cert-manager-func-acmetlsdnsnames-string) +- [`func ApplyClusterIssuerWithKubectl(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-applyclusterissuerwithkubectl-kubectl-core-kubectlrunner-error) +- [`func ApplyLetsEncryptClusterIssuer(kubectl core.KubectlRunner, email string, staging bool, logger *zap.Logger) error`](#cli-cert-manager-func-applyletsencryptclusterissuer-kubectl-core-kubectlrunner-email-string-staging-bool-logger-zap-logger-error) +- [`func ApplyRegistryCertificateForACME(kubectl core.KubectlRunner, dnsNames []string, issuerName string) error`](#cli-cert-manager-func-applyregistrycertificateforacme-kubectl-core-kubectlrunner-dnsnames-string-issuername-string-error) +- [`func ApplyRegistryCertificateWithKubectl(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-applyregistrycertificatewithkubectl-kubectl-core-kubectlrunner-error) +- [`func CheckCASecretWithKubectl(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-checkcasecretwithkubectl-kubectl-core-kubectlrunner-error) +- [`func CheckCertManagerInstalledWithKubectl(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-checkcertmanagerinstalledwithkubectl-kubectl-core-kubectlrunner-error) +- [`func CheckCertificateWithKubectl(kubectl core.KubectlRunner, name, namespace string) 
error`](#cli-cert-manager-func-checkcertificatewithkubectl-kubectl-core-kubectlrunner-name-namespace-string-error) +- [`func CheckClusterIssuerWithKubectl(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-checkclusterissuerwithkubectl-kubectl-core-kubectlrunner-error) +- [`func CheckNamedClusterIssuerWithKubectl(kubectl core.KubectlRunner, name string) error`](#cli-cert-manager-func-checknamedclusterissuerwithkubectl-kubectl-core-kubectlrunner-name-string-error) +- [`func ClusterIssuerNameForACME(staging bool) string`](#cli-cert-manager-func-clusterissuernameforacme-staging-bool-string) +- [`func EnsureCertManagerInstalled(kubectl core.KubectlRunner, logger *zap.Logger) error`](#cli-cert-manager-func-ensurecertmanagerinstalled-kubectl-core-kubectlrunner-logger-zap-logger-error) +- [`func PreflightACMEHostnamesPort80(dnsNames []string)`](#cli-cert-manager-func-preflightacmehostnamesport80-dnsnames-string) +- [`func ValidateACMEHostnameForPublicCA() error`](#cli-cert-manager-func-validateacmehostnameforpublicca-error) +- [`func ValidateIngressManifestForACME(ingressManifest string) error`](#cli-cert-manager-func-validateingressmanifestforacme-ingressmanifest-string-error) +- [`func WaitForCertificateReadyWithKubectl(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error`](#cli-cert-manager-func-waitforcertificatereadywithkubectl-kubectl-core-kubectlrunner-name-namespace-string-timeout-time-duration-error) +- [`func WaitForTraefikDeploymentForACME(kubectl core.KubectlRunner) error`](#cli-cert-manager-func-waitfortraefikdeploymentforacme-kubectl-core-kubectlrunner-error) +- [`type CertManager struct`](#cli-cert-manager-type-certmanager-struct) +- [`func NewCertManager(kubectl core.KubectlRunner, logger *zap.Logger) *CertManager`](#cli-cert-manager-func-newcertmanager-kubectl-core-kubectlrunner-logger-zap-logger-certmanager) +- [`func (m *CertManager) Apply(dryRun bool) 
error`](#cli-cert-manager-func-m-certmanager-apply-dryrun-bool-error) +- [`func (m *CertManager) Status() error`](#cli-cert-manager-func-m-certmanager-status-error) +- [`func (m *CertManager) Wait(timeout time.Duration) error`](#cli-cert-manager-func-m-certmanager-wait-timeout-time-duration-error) + + +### Constants +```text +const ( + CertClusterIssuerName = certClusterIssuerName + RegistryCertificateName = registryCertificateName +) ``` - + +### Functions + + ```text -func AllowlistBins(allowed ...string) ExecValidator +func ACMETLSDNSNames() []string +``` + +```text +func ApplyClusterIssuerWithKubectl(kubectl core.KubectlRunner) error ``` - + ```text -func NoControlChars() ExecValidator +func ApplyLetsEncryptClusterIssuer(kubectl core.KubectlRunner, email string, staging bool, logger *zap.Logger) error +``` + +```text +func ApplyRegistryCertificateForACME(kubectl core.KubectlRunner, dnsNames []string, issuerName string) error ``` - + ```text -func NoShellMeta() ExecValidator +func ApplyRegistryCertificateWithKubectl(kubectl core.KubectlRunner) error +``` + +```text +func CheckCASecretWithKubectl(kubectl core.KubectlRunner) error ``` - + ```text -func PathUnder(root string) ExecValidator +func CheckCertManagerInstalledWithKubectl(kubectl core.KubectlRunner) error +``` + +```text +func CheckCertificateWithKubectl(kubectl core.KubectlRunner, name, namespace string) error ``` - + ```text -type Executor interface { - Command(name string, args []string, validators ...ExecValidator) (Command, error) -} - Executor creates commands for execution. 
+func CheckClusterIssuerWithKubectl(kubectl core.KubectlRunner) error +``` + +```text +func CheckNamedClusterIssuerWithKubectl(kubectl core.KubectlRunner, name string) error ``` - + ```text -type ExternalRegistryConfig struct { - URL string `yaml:"url"` - Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` -} +func ClusterIssuerNameForACME(staging bool) string + ClusterIssuerNameForACME returns the ClusterIssuer resource name for Let's + Encrypt. ``` - + ```text -type KubectlClient struct { - // Has unexported fields. -} - KubectlClient wraps kubectl command execution with validation. +func EnsureCertManagerInstalled(kubectl core.KubectlRunner, logger *zap.Logger) error +``` + +```text +func PreflightACMEHostnamesPort80(dnsNames []string) ``` - + ```text -func NewKubectlClient(exec Executor) (*KubectlClient, error) - NewKubectlClient creates a KubectlClient with default validators. +func ValidateACMEHostnameForPublicCA() error +``` + +```text +func ValidateIngressManifestForACME(ingressManifest string) error ``` - + ```text -func (c *KubectlClient) CombinedOutput(args []string) ([]byte, error) - CombinedOutput runs kubectl with the given arguments and returns combined - stdout/stderr. +func WaitForCertificateReadyWithKubectl(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error +``` + +```text +func WaitForTraefikDeploymentForACME(kubectl core.KubectlRunner) error ``` - + +### Types + + ```text -func (c *KubectlClient) CommandArgs(args []string) (Command, error) - CommandArgs builds a kubectl command with the given arguments. Validates - arguments against configured validators before building. +type CertManager struct { + // Has unexported fields. +} + CertManager manages cert-manager resources for the platform. ``` - + ```text -func (c *KubectlClient) Output(args []string) ([]byte, error) - Output runs kubectl with the given arguments and returns stdout. 
+func NewCertManager(kubectl core.KubectlRunner, logger *zap.Logger) *CertManager + NewCertManager creates a CertManager with the given dependencies. ``` - + ```text -func (c *KubectlClient) Run(args []string) error - Run runs kubectl with the given arguments. +func (m *CertManager) Apply(dryRun bool) error + Apply installs cert-manager resources required for registry TLS. When dryRun + is true, the read-only preflight checks still run (to catch obvious problems + like missing cert-manager) but no kubectl apply is performed. ``` - + ```text -func (c *KubectlClient) RunWithOutput(args []string, stdout, stderr io.Writer) error - RunWithOutput runs kubectl with the given arguments, piping to the provided - writers. +func (m *CertManager) Status() error + Status verifies cert-manager installation and required resources. ``` - + ```text -type KubectlRunner interface { - CommandArgs(args []string) (Command, error) - Run(args []string) error - RunWithOutput(args []string, stdout, stderr io.Writer) error -} - KubectlRunner captures the kubectl methods used by setup helpers. +func (m *CertManager) Wait(timeout time.Duration) error + Wait blocks until the registry certificate is Ready or times out. 
+``` + + +## CLI platform API + +Package: `platformapi` +Import path: `mcp-runtime/internal/cli/platformapi` + +Source command: +```bash +go doc -all ./internal/cli/platformapi ``` - + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-platform-api-overview) +- [Index](#cli-platform-api-index) +- [Functions](#cli-platform-api-functions) +- [Types](#cli-platform-api-types) + + +### Index + +- [`func HasPlatformClient() bool`](#cli-platform-api-func-hasplatformclient-bool) +- [`func NormalizeBaseURL(raw string) string`](#cli-platform-api-func-normalizebaseurl-raw-string-string) +- [`type PlatformClient struct`](#cli-platform-api-type-platformclient-struct) +- [`func NewPlatformClient() (*PlatformClient, error)`](#cli-platform-api-func-newplatformclient-platformclient-error) +- [`func ResolvePlatformOrKube(useKube bool) (*PlatformClient, bool, error)`](#cli-platform-api-func-resolveplatformorkube-usekube-bool-platformclient-bool-error) +- [`func (c *PlatformClient) ApplyAccessFromYAMLFile(ctx context.Context, path string) error`](#cli-platform-api-func-c-platformclient-applyaccessfromyamlfile-ctx-context-context-path-string-error) +- [`func (c *PlatformClient) DeleteGrant(ctx context.Context, namespace, name string) error`](#cli-platform-api-func-c-platformclient-deletegrant-ctx-context-context-namespace-name-string-error) +- [`func (c *PlatformClient) DeleteSession(ctx context.Context, namespace, name string) error`](#cli-platform-api-func-c-platformclient-deletesession-ctx-context-context-namespace-name-string-error) +- [`func (c *PlatformClient) GetGrant(ctx context.Context, namespace, name string) (sentinelaccess.GrantSummary, error)`](#cli-platform-api-func-c-platformclient-getgrant-ctx-context-context-namespace-name-string-sentinelaccess-grantsummary-error) +- [`func (c *PlatformClient) GetRuntimePolicy(ctx context.Context, namespace, server string) ([]byte, 
error)`](#cli-platform-api-func-c-platformclient-getruntimepolicy-ctx-context-context-namespace-server-string-byte-error) +- [`func (c *PlatformClient) GetSession(ctx context.Context, namespace, name string) (sentinelaccess.SessionSummary, error)`](#cli-platform-api-func-c-platformclient-getsession-ctx-context-context-namespace-name-string-sentinelaccess-sessionsummary-error) +- [`func (c *PlatformClient) ListGrants(ctx context.Context, namespace string) ([]sentinelaccess.GrantSummary, error)`](#cli-platform-api-func-c-platformclient-listgrants-ctx-context-context-namespace-string-sentinelaccess-grantsummary-error) +- [`func (c *PlatformClient) ListRuntimeServers(ctx context.Context, namespace string) ([]ServerListItem, error)`](#cli-platform-api-func-c-platformclient-listruntimeservers-ctx-context-context-namespace-string-serverlistitem-error) +- [`func (c *PlatformClient) ListSessions(ctx context.Context, namespace string) ([]sentinelaccess.SessionSummary, error)`](#cli-platform-api-func-c-platformclient-listsessions-ctx-context-context-namespace-string-sentinelaccess-sessionsummary-error) +- [`func (c *PlatformClient) PostGrantToggle(ctx context.Context, namespace, name, action string) error`](#cli-platform-api-func-c-platformclient-postgranttoggle-ctx-context-context-namespace-name-action-string-error) +- [`func (c *PlatformClient) PostSessionToggle(ctx context.Context, namespace, name, action string) error`](#cli-platform-api-func-c-platformclient-postsessiontoggle-ctx-context-context-namespace-name-action-string-error) +- [`type ServerListItem struct`](#cli-platform-api-type-serverlistitem-struct) + + +### Functions + + ```text -func DefaultKubectlRunner() KubectlRunner - DefaultKubectlRunner returns the shared kubectl runner used by CLI commands. +func HasPlatformClient() bool +``` + +```text +func NormalizeBaseURL(raw string) string + NormalizeBaseURL trims whitespace, trailing slashes, and an optional + trailing /api suffix from a platform base URL. 
``` - + +### Types + + ```text -type MockCommand struct { - Args []string - OutputData []byte - OutputErr error - RunErr error - StdoutW io.Writer - StderrW io.Writer - StdinR io.Reader - RunFunc func() error +type PlatformClient struct { + // Has unexported fields. } - MockCommand is a test double for Command interface. + PlatformClient calls the mcp-sentinel API with an API key. ``` - + ```text -func (m *MockCommand) CombinedOutput() ([]byte, error) +func NewPlatformClient() (*PlatformClient, error) + NewPlatformClient returns a client when platform credentials and + API base URL are configured. If the user is not logged in, returns + authfile.ErrNotFound so the caller can fall back to kubectl. ``` - + ```text -func (m *MockCommand) Output() ([]byte, error) +func ResolvePlatformOrKube(useKube bool) (*PlatformClient, bool, error) + ResolvePlatformOrKube returns a platform API client when useKube is false + and auth resolves; otherwise useKubectl is true. ``` - + ```text -func (m *MockCommand) Run() error +func (c *PlatformClient) ApplyAccessFromYAMLFile(ctx context.Context, path string) error ``` - + ```text -func (m *MockCommand) SetStderr(w io.Writer) +func (c *PlatformClient) DeleteGrant(ctx context.Context, namespace, name string) error ``` - + ```text -func (m *MockCommand) SetStdin(r io.Reader) +func (c *PlatformClient) DeleteSession(ctx context.Context, namespace, name string) error ``` - + ```text -func (m *MockCommand) SetStdout(w io.Writer) +func (c *PlatformClient) GetGrant(ctx context.Context, namespace, name string) (sentinelaccess.GrantSummary, error) ``` - + ```text -type MockExecutor struct { - // Commands records all commands that were created. - Commands []ExecSpec - // DefaultOutput is returned by commands when CommandFunc is nil. - DefaultOutput []byte - // DefaultErr is the error returned by Output/CombinedOutput. - DefaultErr error - // DefaultRunErr is the error returned by Run. 
- DefaultRunErr error - // CommandFunc allows custom behavior per command. - CommandFunc func(spec ExecSpec) *MockCommand -} - MockExecutor is a test double for Executor interface. +func (c *PlatformClient) GetRuntimePolicy(ctx context.Context, namespace, server string) ([]byte, error) ``` - + ```text -func (m *MockExecutor) Command(name string, args []string, validators ...ExecValidator) (Command, error) +func (c *PlatformClient) GetSession(ctx context.Context, namespace, name string) (sentinelaccess.SessionSummary, error) ``` - + ```text -func (m *MockExecutor) HasCommand(name string) bool - HasCommand checks if a command with the given name was executed. +func (c *PlatformClient) ListGrants(ctx context.Context, namespace string) ([]sentinelaccess.GrantSummary, error) ``` - + ```text -func (m *MockExecutor) LastCommand() ExecSpec - LastCommand returns the most recent command spec. +func (c *PlatformClient) ListRuntimeServers(ctx context.Context, namespace string) ([]ServerListItem, error) ``` - + ```text -func (m *MockExecutor) Reset() - Reset clears recorded commands. +func (c *PlatformClient) ListSessions(ctx context.Context, namespace string) ([]sentinelaccess.SessionSummary, error) ``` - + ```text -type Printer struct { - // Quiet suppresses non-essential output - Quiet bool - // Writer overrides the output destination when set. - Writer io.Writer -} - Printer provides formatted terminal output methods. Use the default instance - via package-level functions. +func (c *PlatformClient) PostGrantToggle(ctx context.Context, namespace, name, action string) error ``` - + ```text -func (p *Printer) Cyan(msg string) string - Cyan returns cyan-colored text. +func (c *PlatformClient) PostSessionToggle(ctx context.Context, namespace, name, action string) error ``` - + ```text -func (p *Printer) Error(msg string) - Error prints an error message. 
Note: Errors are intentionally not suppressed - in quiet mode to ensure critical issues are always visible, even when - non-essential output is disabled. +type ServerListItem struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Ready string `json:"ready"` + Status string `json:"status"` + Labels map[string]string `json:"labels"` + Age string `json:"age"` +} + ServerListItem is one row from the platform API runtime servers list. +``` + + +## CLI platform status + +Package: `platformstatus` +Import path: `mcp-runtime/internal/cli/platformstatus` + +Source command: + +```bash +go doc -all ./internal/cli/platformstatus +``` + + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-platform-status-overview) +- [Index](#cli-platform-status-index) +- [Variables](#cli-platform-status-variables) +- [Functions](#cli-platform-status-functions) +- [Types](#cli-platform-status-types) + + +### Index + +- [`Variables`](#cli-platform-status-variables) +- [`func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error)`](#cli-platform-status-func-analyticsnamespaceinstalled-clusterreachable-bool-bool-error) +- [`func AnalyticsStackRow(status, details string) []string`](#cli-platform-status-func-analyticsstackrow-status-details-string-string) +- [`func CheckClusterStatusQuiet() error`](#cli-platform-status-func-checkclusterstatusquiet-error) +- [`func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string`](#cli-platform-status-func-workloadstatusrow-workload-platformworkload-clusterreachable-bool-string) +- [`type PlatformWorkload struct`](#cli-platform-status-type-platformworkload-struct) + + +### Variables +```text +var DefaultPlatformStatusWorkloads = []PlatformWorkload{ + {Component: "ClickHouse", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "clickhouse"}, + {Component: "Zookeeper", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "zookeeper"}, + 
{Component: "Kafka", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "kafka"}, + {Component: "Ingest", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ingest"}, + {Component: "Processor", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-processor"}, + {Component: "API", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-api"}, + {Component: "UI", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ui"}, + {Component: "Gateway", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-gateway"}, + {Component: "Prometheus", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "prometheus"}, + {Component: "Grafana", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "grafana"}, + {Component: "OTel Collector", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "otel-collector"}, + {Component: "Tempo", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "tempo"}, + {Component: "Loki", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "loki"}, + {Component: "Promtail", Namespace: core.DefaultAnalyticsNamespace, Kind: "daemonset", Name: "promtail"}, +} + DefaultPlatformStatusWorkloads lists bundled analytics stack workloads for + status output. ``` - + +### Functions + + ```text -func (p *Printer) Green(msg string) string - Green returns green-colored text. +func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error) + AnalyticsNamespaceInstalled reports whether the analytics namespace exists. ``` - + ```text -func (p *Printer) Header(title string) - Header prints a full-width header banner. +func AnalyticsStackRow(status, details string) []string + AnalyticsStackRow builds a table row for the analytics namespace aggregate + status. 
``` - + ```text -func (p *Printer) Info(msg string) - Info prints an informational message. +func CheckClusterStatusQuiet() error + CheckClusterStatusQuiet probes cluster connectivity without printing status. ``` - + ```text -func (p *Printer) Printf(format string, a ...interface{}) - Printf prints formatted text. +func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string + WorkloadStatusRow renders one workload row for platform status tables. +``` + + +### Types + + +```text +type PlatformWorkload struct { + Component string + Namespace string + Kind string + Name string +} + PlatformWorkload identifies a namespaced workload for status tables. +``` + + +## CLI registry + +Package: `registry` +Import path: `mcp-runtime/internal/cli/registry` + +Source command: + +```bash +go doc -all ./internal/cli/registry +``` + + +### Overview -``` +Package registry owns routing for the registry top-level command. - -```text -func (p *Printer) Println(a ...interface{}) - Println prints a plain line. +### Jump To -``` +- [Overview](#cli-registry-overview) +- [Index](#cli-registry-index) +- [Functions](#cli-registry-functions) +- [Types](#cli-registry-types) - -```text -func (p *Printer) Red(msg string) string - Red returns red-colored text. 
+ +### Index -``` +- [`func DefaultGitTag() string`](#cli-registry-func-defaultgittag-string) +- [`func DeployRegistry(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error`](#cli-registry-func-deployregistry-logger-zap-logger-namespace-string-port-int-registrytype-registrystoragesize-manifestpath-string-error) +- [`func New(runtime *core.Runtime) *cobra.Command`](#cli-registry-func-new-runtime-core-runtime-cobra-command) +- [`func NewWithManager(mgr *RegistryManager) *cobra.Command`](#cli-registry-func-newwithmanager-mgr-registrymanager-cobra-command) +- [`func ResolveExternalRegistryConfig(flagCfg *config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error)`](#cli-registry-func-resolveexternalregistryconfig-flagcfg-config-externalregistryconfig-config-externalregistryconfig-error) +- [`func ResolvePlatformRegistryURL(logger *zap.Logger) string`](#cli-registry-func-resolveplatformregistryurl-logger-zap-logger-string) +- [`func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string, dryRun bool) error`](#cli-registry-func-runregistryprovision-mgr-registrymanager-url-username-password-operatorimage-string-dryrun-bool-error) +- [`func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helperNamespace string) error`](#cli-registry-func-runregistrypush-mgr-registrymanager-image-registryurl-name-mode-helpernamespace-string-error) +- [`type RegistryManager struct`](#cli-registry-type-registrymanager-struct) +- [`func DefaultRegistryManager(logger *zap.Logger) *RegistryManager`](#cli-registry-func-defaultregistrymanager-logger-zap-logger-registrymanager) +- [`func NewRegistryManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *RegistryManager`](#cli-registry-func-newregistrymanager-kubectl-core-kubectlclient-exec-core-executor-logger-zap-logger-registrymanager) +- [`func (m *RegistryManager) CheckRegistryStatus(namespace string) 
error`](#cli-registry-func-m-registrymanager-checkregistrystatus-namespace-string-error) +- [`func (m *RegistryManager) LoginRegistry(registryURL, username, password string) error`](#cli-registry-func-m-registrymanager-loginregistry-registryurl-username-password-string-error) +- [`func (m *RegistryManager) PushDirect(source, target string) error`](#cli-registry-func-m-registrymanager-pushdirect-source-target-string-error) +- [`func (m *RegistryManager) PushInCluster(source, target, helperNS string) error`](#cli-registry-func-m-registrymanager-pushincluster-source-target-helperns-string-error) +- [`func (m *RegistryManager) ShowRegistryInfo() error`](#cli-registry-func-m-registrymanager-showregistryinfo-error) + + +### Functions - + ```text -func (p *Printer) Section(title string) - Section prints a prominent section header. - +func DefaultGitTag() string ``` - + ```text -func (p *Printer) SpinnerStart(msg string) func(success bool, finalMsg string) - SpinnerStart starts a spinner with the given message. Returns a stop - function. - +func DeployRegistry(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error ``` - + ```text -func (p *Printer) Step(title string) - Step prints a step indicator (e.g., "Step 1: Initialize"). +func New(runtime *core.Runtime) *cobra.Command + New returns the registry command. ``` - + ```text -func (p *Printer) Success(msg string) - Success prints a success message. +func NewWithManager(mgr *RegistryManager) *cobra.Command + NewWithManager returns the registry command using the provided manager. ``` - + ```text -func (p *Printer) Table(data [][]string) - Table prints a formatted table. First row is treated as header. - +func ResolveExternalRegistryConfig(flagCfg *config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) ``` - + ```text -func (p *Printer) TableBoxed(data [][]string) - TableBoxed prints a formatted table with box borders. 
- +func ResolvePlatformRegistryURL(logger *zap.Logger) string ``` - + ```text -func (p *Printer) Warn(msg string) - Warn prints a warning message. Note: Warnings are intentionally not - suppressed in quiet mode to ensure important notices are visible even when - non-essential output is disabled. +func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string, dryRun bool) error + RunRegistryProvision contains the registry provision command flow for folder + packages. ``` - + ```text -func (p *Printer) Yellow(msg string) string - Yellow returns yellow-colored text. - +func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helperNamespace string) error + RunRegistryPush contains the registry push command flow for folder packages. ``` - + +### Types + + ```text type RegistryManager struct { // Has unexported fields. @@ -3600,195 +4262,358 @@ type RegistryManager struct { ``` - + ```text func DefaultRegistryManager(logger *zap.Logger) *RegistryManager DefaultRegistryManager returns a RegistryManager using default clients. ``` - + ```text -func NewRegistryManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *RegistryManager +func NewRegistryManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *RegistryManager NewRegistryManager creates a RegistryManager with the given dependencies. ``` - + ```text func (m *RegistryManager) CheckRegistryStatus(namespace string) error CheckRegistryStatus checks and displays registry status. ``` - + ```text func (m *RegistryManager) LoginRegistry(registryURL, username, password string) error LoginRegistry logs into a container registry. ``` - + ```text func (m *RegistryManager) PushDirect(source, target string) error PushDirect pushes an image directly using docker. ``` - + ```text func (m *RegistryManager) PushInCluster(source, target, helperNS string) error PushInCluster pushes an image using an in-cluster helper pod. 
``` - + ```text func (m *RegistryManager) ShowRegistryInfo() error ShowRegistryInfo displays registry connection information. - ``` - -```text -type RegistryManagerAPI interface { - ShowRegistryInfo() error - PushInCluster(source, target, helperNS string) error -} + +## CLI registry config + +Package: `config` +Import path: `mcp-runtime/internal/cli/registry/config` + +Source command: +```bash +go doc -all ./internal/cli/registry/config ``` - + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-registry-config-overview) +- [Index](#cli-registry-config-index) +- [Variables](#cli-registry-config-variables) +- [Functions](#cli-registry-config-functions) +- [Types](#cli-registry-config-types) + + +### Index + +- [`Variables`](#cli-registry-config-variables) +- [`func Marshal(cfg *ExternalRegistryConfig) ([]byte, error)`](#cli-registry-config-func-marshal-cfg-externalregistryconfig-byte-error) +- [`func Path() (string, error)`](#cli-registry-config-func-path-string-error) +- [`func Save(cfg *ExternalRegistryConfig) error`](#cli-registry-config-func-save-cfg-externalregistryconfig-error) +- [`type Env struct`](#cli-registry-config-type-env-struct) +- [`type ExternalRegistryConfig struct`](#cli-registry-config-type-externalregistryconfig-struct) +- [`func Load() (*ExternalRegistryConfig, error)`](#cli-registry-config-func-load-externalregistryconfig-error) +- [`func Resolve(flagCfg *ExternalRegistryConfig, env Env) (*ExternalRegistryConfig, error)`](#cli-registry-config-func-resolve-flagcfg-externalregistryconfig-env-env-externalregistryconfig-error) + + +### Variables + ```text -type Runtime struct { - // Has unexported fields. -} - Runtime is the shared CLI facade for wiring common dependencies once and - handing typed managers to the foldered command packages. 
+var ( + ErrURLRequired = errors.New("registry url is required") + ErrURLMissingInConfig = errors.New("registry url missing in config") +) +``` + + +### Functions + +```text +func Marshal(cfg *ExternalRegistryConfig) ([]byte, error) ``` - + ```text -func NewRuntime(logger *zap.Logger) *Runtime - NewRuntime builds the shared CLI runtime facade. +func Path() (string, error) +``` + +```text +func Save(cfg *ExternalRegistryConfig) error ``` - + +### Types + + ```text -func (r *Runtime) AccessManager() *AccessManager - AccessManager returns the access command manager. +type Env struct { + URL string + Username string + Password string +} ``` - + ```text -func (r *Runtime) ClusterManager() *ClusterManager - ClusterManager returns the cluster command manager. +type ExternalRegistryConfig struct { + URL string `yaml:"url"` + Username string `yaml:"username,omitempty"` + Password string `yaml:"password,omitempty"` +} ``` - + ```text -func (r *Runtime) Executor() Executor - Executor returns the shared process executor. +func Load() (*ExternalRegistryConfig, error) ``` - + ```text -func (r *Runtime) KubectlClient() *KubectlClient - KubectlClient returns the shared kubectl client. +func Resolve(flagCfg *ExternalRegistryConfig, env Env) (*ExternalRegistryConfig, error) + Resolve returns external registry config using precedence: flags > env > + config file. 
+``` + + +## CLI registry references + +Package: `ref` +Import path: `mcp-runtime/internal/cli/registry/ref` + +Source command: +```bash +go doc -all ./internal/cli/registry/ref ``` - + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-registry-references-overview) +- [Index](#cli-registry-references-index) +- [Functions](#cli-registry-references-functions) + + +### Index + +- [`func DropRegistryPrefix(repo string) string`](#cli-registry-references-func-dropregistryprefix-repo-string-string) +- [`func SplitImage(image string) (string, string)`](#cli-registry-references-func-splitimage-image-string-string-string) + + +### Functions + + ```text -func (r *Runtime) KubectlRunner() KubectlRunner - KubectlRunner returns the shared kubectl runner. +func DropRegistryPrefix(repo string) string + DropRegistryPrefix removes an explicit registry host from an image + repository. ``` - + ```text -func (r *Runtime) Logger() *zap.Logger - Logger returns the shared logger. +func SplitImage(image string) (string, string) + SplitImage returns the repository/name portion and optional tag for an image + reference. 
+``` + + +## CLI registry resolution +Package: `resolve` +Import path: `mcp-runtime/internal/cli/registry/resolve` + +Source command: + +```bash +go doc -all ./internal/cli/registry/resolve ``` - + +### Overview + +_No package overview is documented._ + +### Jump To + +- [Overview](#cli-registry-resolution-overview) +- [Index](#cli-registry-resolution-index) +- [Functions](#cli-registry-resolution-functions) +- [Types](#cli-registry-resolution-types) + + +### Index + +- [`func GitTag(command CommandFactory) string`](#cli-registry-resolution-func-gittag-command-commandfactory-string) +- [`func PlatformURL(logger *zap.Logger, kubectl KubectlCommand, cfg Config) string`](#cli-registry-resolution-func-platformurl-logger-zap-logger-kubectl-kubectlcommand-cfg-config-string) +- [`type CommandFactory func(name string, args []string) (OutputCommand, error)`](#cli-registry-resolution-type-commandfactory-func-name-string-args-string-outputcommand-error) +- [`type Config struct`](#cli-registry-resolution-type-config-struct) +- [`type KubectlCommand func(args []string) (OutputCommand, error)`](#cli-registry-resolution-type-kubectlcommand-func-args-string-outputcommand-error) +- [`type OutputCommand interface`](#cli-registry-resolution-type-outputcommand-interface) + + +### Functions + + ```text -func (r *Runtime) RegistryManager() *RegistryManager - RegistryManager returns the registry command manager. +func GitTag(command CommandFactory) string + GitTag returns a short git SHA when available, otherwise "latest". ``` - + ```text -func (r *Runtime) SentinelManager() *SentinelManager - SentinelManager returns the sentinel command manager. - +func PlatformURL(logger *zap.Logger, kubectl KubectlCommand, cfg Config) string + PlatformURL resolves the registry host:port used for image names. ``` - + +### Types + + ```text -func (r *Runtime) ServerManager() *ServerManager - ServerManager returns the server command manager. 
+type CommandFactory func(name string, args []string) (OutputCommand, error) ``` - + ```text -type SentinelManager struct { - // Has unexported fields. +type Config struct { + RegistryEndpoint string + DefaultRegistryEndpoint string + RegistryPort int } ``` - + ```text -func DefaultSentinelManager(logger *zap.Logger) *SentinelManager +type KubectlCommand func(args []string) (OutputCommand, error) ``` - + ```text -func NewSentinelManager(kubectl *KubectlClient, logger *zap.Logger) *SentinelManager - +type OutputCommand interface { + Output() ([]byte, error) +} ``` - -```text -func (m *SentinelManager) PortForwardSentinelTarget(target string, localPort int, address string) error + +## CLI server -``` +Package: `server` +Import path: `mcp-runtime/internal/cli/server` - -```text -func (m *SentinelManager) RestartSentinel(component string, restartAll bool) error +Source command: +```bash +go doc -all ./internal/cli/server ``` - + +### Overview + +Package server owns routing for the server top-level command. 
+ +### Jump To + +- [Overview](#cli-server-overview) +- [Index](#cli-server-index) +- [Functions](#cli-server-functions) +- [Types](#cli-server-types) + + +### Index + +- [`func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error`](#cli-server-func-buildimage-logger-zap-logger-servername-dockerfile-metadatafile-metadatadir-registryurl-tag-context-string-error) +- [`func New(runtime *core.Runtime) *cobra.Command`](#cli-server-func-new-runtime-core-runtime-cobra-command) +- [`func NewWithManager(mgr *ServerManager) *cobra.Command`](#cli-server-func-newwithmanager-mgr-servermanager-cobra-command) +- [`type ServerManager struct`](#cli-server-type-servermanager-struct) +- [`func DefaultServerManager(logger *zap.Logger) *ServerManager`](#cli-server-func-defaultservermanager-logger-zap-logger-servermanager) +- [`func NewServerManager(kubectl *core.KubectlClient, logger *zap.Logger) *ServerManager`](#cli-server-func-newservermanager-kubectl-core-kubectlclient-logger-zap-logger-servermanager) +- [`func (m *ServerManager) ApplyServerFromFile(file string) error`](#cli-server-func-m-servermanager-applyserverfromfile-file-string-error) +- [`func (m *ServerManager) BindUseKubeFlag(cmd *cobra.Command)`](#cli-server-func-m-servermanager-bindusekubeflag-cmd-cobra-command) +- [`func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) error`](#cli-server-func-m-servermanager-createserver-name-namespace-image-imagetag-string-error) +- [`func (m *ServerManager) CreateServerFromFile(file string) error`](#cli-server-func-m-servermanager-createserverfromfile-file-string-error) +- [`func (m *ServerManager) DeleteServer(name, namespace string) error`](#cli-server-func-m-servermanager-deleteserver-name-namespace-string-error) +- [`func (m *ServerManager) ExportServer(name, namespace, file string) error`](#cli-server-func-m-servermanager-exportserver-name-namespace-file-string-error) +- [`func (m 
*ServerManager) GetServer(name, namespace string) error`](#cli-server-func-m-servermanager-getserver-name-namespace-string-error) +- [`func (m *ServerManager) InspectServerPolicy(name, namespace string) error`](#cli-server-func-m-servermanager-inspectserverpolicy-name-namespace-string-error) +- [`func (m *ServerManager) ListServers(namespace string) error`](#cli-server-func-m-servermanager-listservers-namespace-string-error) +- [`func (m *ServerManager) Logger() *zap.Logger`](#cli-server-func-m-servermanager-logger-zap-logger) +- [`func (m *ServerManager) PatchServer(name, namespace, patchType, patch, patchFile string) error`](#cli-server-func-m-servermanager-patchserver-name-namespace-patchtype-patch-patchfile-string-error) +- [`func (m *ServerManager) ServerStatus(namespace string) error`](#cli-server-func-m-servermanager-serverstatus-namespace-string-error) +- [`func (m *ServerManager) ViewServerLogs(name, namespace string, follow, previous bool, tail int, since string) error`](#cli-server-func-m-servermanager-viewserverlogs-name-namespace-string-follow-previous-bool-tail-int-since-string-error) + + +### Functions + + ```text -func (m *SentinelManager) ShowSentinelEvents() error +func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error + BuildImage builds a Docker image and updates MCP metadata for the server. ``` - + ```text -func (m *SentinelManager) ShowSentinelStatus() error +func New(runtime *core.Runtime) *cobra.Command + New returns the server command. ``` - + ```text -func (m *SentinelManager) ViewSentinelLogs(component string, follow, previous bool, tail int, since string) error - +func NewWithManager(mgr *ServerManager) *cobra.Command + NewWithManager returns the server command using the provided manager. ``` - + +### Types + + ```text type ServerManager struct { // Has unexported fields. 
@@ -3797,7 +4622,7 @@ type ServerManager struct { ``` - + ```text func DefaultServerManager(logger *zap.Logger) *ServerManager DefaultServerManager returns a ServerManager using the default kubectl @@ -3805,63 +4630,63 @@ func DefaultServerManager(logger *zap.Logger) *ServerManager ``` - + ```text -func NewServerManager(kubectl *KubectlClient, logger *zap.Logger) *ServerManager +func NewServerManager(kubectl *core.KubectlClient, logger *zap.Logger) *ServerManager NewServerManager creates a ServerManager with the given dependencies. ``` - + ```text func (m *ServerManager) ApplyServerFromFile(file string) error ApplyServerFromFile applies an MCPServer manifest from disk. ``` - + ```text func (m *ServerManager) BindUseKubeFlag(cmd *cobra.Command) BindUseKubeFlag wires the shared --use-kube flag onto the command. ``` - + ```text func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) error CreateServer creates a new MCP server with the given parameters. ``` - + ```text func (m *ServerManager) CreateServerFromFile(file string) error CreateServerFromFile creates an MCP server from a YAML file. ``` - + ```text func (m *ServerManager) DeleteServer(name, namespace string) error DeleteServer deletes an MCP server. ``` - + ```text func (m *ServerManager) ExportServer(name, namespace, file string) error ExportServer exports an MCPServer manifest to stdout or a file. ``` - + ```text func (m *ServerManager) GetServer(name, namespace string) error GetServer retrieves details for a specific MCP server. ``` - + ```text func (m *ServerManager) InspectServerPolicy(name, namespace string) error InspectServerPolicy prints the rendered gateway policy ConfigMap content for @@ -3869,21 +4694,21 @@ func (m *ServerManager) InspectServerPolicy(name, namespace string) error ``` - + ```text func (m *ServerManager) ListServers(namespace string) error ListServers lists all MCP servers in the given namespace. 
``` - + ```text func (m *ServerManager) Logger() *zap.Logger Logger exposes the manager logger to foldered command packages. ``` - + ```text func (m *ServerManager) PatchServer(name, namespace, patchType, patch, patchFile string) error PatchServer patches an existing MCPServer resource using @@ -3891,137 +4716,182 @@ func (m *ServerManager) PatchServer(name, namespace, patchType, patch, patchFile ``` - + ```text func (m *ServerManager) ServerStatus(namespace string) error ServerStatus shows the status of MCP servers in a namespace. ``` - + ```text -func (m *ServerManager) ViewServerLogs(name, namespace string, follow bool) error +func (m *ServerManager) ViewServerLogs(name, namespace string, follow, previous bool, tail int, since string) error ViewServerLogs views logs from an MCP server. - ``` - -```text -type SetupContext struct { - Plan SetupPlan - ExternalRegistry *ExternalRegistryConfig - UsingExternalRegistry bool - RegistrySecretName string - OperatorImage string - GatewayProxyImage string - AnalyticsImages AnalyticsImageSet -} - SetupContext carries state shared across setup steps. 
- -``` - - -```text -type SetupDeps struct { - ResolveExternalRegistryConfig func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) - ClusterManager ClusterManagerAPI - RegistryManager RegistryManagerAPI - LoginRegistry func(logger *zap.Logger, registryURL, username, password string) error - DeployRegistry func(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error - WaitForDeploymentAvailable func(logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error - PrintDeploymentDiagnostics func(deploy, namespace, selector string) - SetupTLS func(logger *zap.Logger, plan SetupPlan) error - BuildOperatorImage func(image string) error - PushOperatorImage func(image string) error - BuildGatewayProxyImage func(image string) error - PushGatewayProxyImage func(image string) error - BuildAnalyticsImage func(image, dockerfilePath, buildContext string) error - PushAnalyticsImage func(image string) error - EnsureNamespace func(namespace string) error - GetPlatformRegistryURL func(logger *zap.Logger) string - PushOperatorImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error - PushGatewayProxyImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error - PushAnalyticsImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error - DeployOperatorManifests func(logger *zap.Logger, operatorImage, gatewayProxyImage string, operatorArgs []string) error - DeployAnalyticsManifests func(logger *zap.Logger, images AnalyticsImageSet, storageMode string) error - ConfigureProvisionedRegistryEnv func(ext *ExternalRegistryConfig, secretName string) error - RestartDeployment func(name, namespace string) error - CheckCRDInstalled func(name string) error - GetDeploymentTimeout func() time.Duration - GetRegistryPort func() int - OperatorImageFor func(ext *ExternalRegistryConfig) string - 
GatewayProxyImageFor func(ext *ExternalRegistryConfig) string -} + +## CLI setup asset paths -``` +Package: `assetpath` +Import path: `mcp-runtime/internal/cli/setup/assetpath` - -```text -type SetupPipeline struct { - // Has unexported fields. -} - SetupPipeline provides a fluent API for building step sequences. +Source command: +```bash +go doc -all ./internal/cli/setup/assetpath ``` - + +### Overview + +Package assetpath resolves repository-relative asset paths from the current +working directory by walking upward until go.mod, services/, and k8s/ match. + +### Jump To + +- [Overview](#cli-setup-asset-paths-overview) +- [Index](#cli-setup-asset-paths-index) +- [Functions](#cli-setup-asset-paths-functions) + + +### Index + +- [`func IsRepoRoot(dir string) bool`](#cli-setup-asset-paths-func-isreporoot-dir-string-bool) +- [`func ResolveRepoAssetPath(path string) (string, error)`](#cli-setup-asset-paths-func-resolverepoassetpath-path-string-string-error) +- [`func ResolveRepoRoot() (string, error)`](#cli-setup-asset-paths-func-resolvereporoot-string-error) + + +### Functions + + ```text -func NewSetupPipeline() *SetupPipeline +func IsRepoRoot(dir string) bool + IsRepoRoot reports whether dir looks like the mcp-runtime repository root. ``` - + ```text -func (p *SetupPipeline) Build() []SetupStep +func ResolveRepoAssetPath(path string) (string, error) + ResolveRepoAssetPath finds a repo-relative path from the current working + directory by walking upward until the asset exists. The repo assumes a + flattened root layout (for example services/ and k8s/ at the top level). ``` - + ```text -func (p *SetupPipeline) With(step SetupStep) *SetupPipeline +func ResolveRepoRoot() (string, error) + ResolveRepoRoot walks upward from the working directory until IsRepoRoot + reports true. 
+``` + +## CLI setup ingress manifests + +Package: `ingressmanifest` +Import path: `mcp-runtime/internal/cli/setup/ingressmanifest` + +Source command: + +```bash +go doc -all ./internal/cli/setup/ingressmanifest ``` - -```text -func (p *SetupPipeline) WithIf(condition bool, step SetupStep) *SetupPipeline + +### Overview + +Package ingressmanifest builds YAML for the host-based Sentinel platform UI +Ingress. + +### Jump To + +- [Overview](#cli-setup-ingress-manifests-overview) +- [Index](#cli-setup-ingress-manifests-index) +- [Constants](#cli-setup-ingress-manifests-constants) +- [Functions](#cli-setup-ingress-manifests-functions) + + +### Index + +- [`Constants`](#cli-setup-ingress-manifests-constants) +- [`func RenderPlatformUIIngress(host, issuerName, analyticsNamespace string) string`](#cli-setup-ingress-manifests-func-renderplatformuiingress-host-issuername-analyticsnamespace-string-string) + + +### Constants +```text +const ( + // PlatformIngressName is the Kubernetes Ingress resource name for the dashboard. + PlatformIngressName = "mcp-sentinel-platform-ui" + // PlatformTLSSecretName is the TLS secret name used when TLS is enabled. + PlatformTLSSecretName = "mcp-sentinel-platform-tls" +) ``` - +### Functions + + ```text -type SetupPlan struct { - Kubeconfig string - Context string - RegistryType string - RegistryStorageSize string - StorageMode string - Ingress ingressOptions - RegistryManifest string - TLSEnabled bool - TestMode bool - StrictProd bool - DeployAnalytics bool - OperatorArgs []string - ACMEmail string - ACMEStaging bool - TLSClusterIssuer string - InstallCertManager bool -} - SetupPlan captures the resolved setup decisions. +func RenderPlatformUIIngress(host, issuerName, analyticsNamespace string) string + RenderPlatformUIIngress emits an Ingress that maps platform.<host> to + the dashboard UI, /api on the same UI service (which reverse-proxies to + mcp-sentinel-api via API_UPSTREAM), and the in-cluster Grafana / Prometheus + paths.
When issuerName is set, a TLS section and cert-manager annotation + are added so cert-manager's ingress-shim provisions a Certificate for + platform.<host> into the mcp-sentinel-platform-tls Secret in the same + namespace as the Ingress. +``` + + +## CLI setup plan + +Package: `plan` +Import path: `mcp-runtime/internal/cli/setup/plan` +Source command: + +```bash +go doc -all ./internal/cli/setup/plan
``` - + ```text -type SetupStep interface { - Name() string - Run(logger *zap.Logger, deps SetupDeps, ctx *SetupContext) error +type Plan struct { + Kubeconfig string + Context string + RegistryType string + RegistryStorageSize string + StorageMode string + Ingress cluster.IngressOptions + RegistryManifest string + TLSEnabled bool + TestMode bool + StrictProd bool + DeployAnalytics bool + OperatorArgs []string + ACMEmail string + ACMEStaging bool + TLSClusterIssuer string + InstallCertManager bool } - SetupStep models a single setup phase. + Plan captures the resolved setup decisions. + +``` + + +```text +func Build(input Input) Plan + Build resolves CLI inputs into a concrete setup plan. ``` diff --git a/docs/internals/internal-cli.md b/docs/internals/internal-cli.md index 7b4d49d..9472c72 100644 --- a/docs/internals/internal-cli.md +++ b/docs/internals/internal-cli.md @@ -1,15 +1,15 @@ # CLI Internals -Package `internal/cli` implements the command behavior behind the +The `internal/cli` tree implements the command behavior behind the `mcp-runtime` binary. The top-level Cobra command folders live under -`internal/cli/root` and `internal/cli/`; those packages route to this -package while command behavior is split out incrementally. Both layers are -intentionally internal so the CLI can evolve without becoming a public Go API. +`internal/cli/root` and `internal/cli/`, while shared CLI-only kernel +code lives in `internal/cli/core`. All layers are intentionally internal so the +CLI can evolve without becoming a public Go API. `go doc` is still useful for exported constructors and manager types: ```bash -go doc -all ./internal/cli +go doc -all ./internal/cli/core ``` Most command behavior is unexported and should be understood through this page, @@ -31,25 +31,40 @@ tests, and the command help snapshots. 
| File group | Responsibility | |---|---| -| `constants.go` | namespace, deployment, service, and resource names shared by commands | -| `errors.go` | sentinel error values and wrapping helpers | -| `exec.go`, `kubectl_runner.go` | external command execution and test seams | -| `printer.go`, `output.go` | terminal output formatting | -| `asset_paths.go` | locating repository and manifest assets | -| `config.go` | environment/config defaults for registry, ingress, and setup | -| `resource_helpers.go` | shared Kubernetes resource and manifest helpers | +| `core/constants.go` | namespace, deployment, service, and resource names shared by commands | +| `core/errors.go` | sentinel error values and wrapping helpers | +| `core/exec.go`, `core/kubectl_runner.go` | external command execution and test seams | +| `core/runtime.go` | composition root for shared CLI dependencies (`Config`, logger, kubectl, executor, printer) | +| `core/printer.go` | terminal output formatting | +| `kubeerr/` | shared kubectl error-detail extraction and cluster setup hints | +| `core/config.go` | environment/config defaults for registry, ingress, and setup | +| `kube/` | manifest apply, namespace, and kubectl-oriented helpers shared by command paths | +| `platformapi/` | Sentinel platform API client for auth-backed access and runtime reads | +| `platformapi/baseurl.go` | platform API base URL normalization used by auth and platform API clients | +| `platformstatus/` | shared workload catalog, readiness rows, and quiet kubectl status probes for `status` and `sentinel status` | +| `certmanager/` | cert-manager, private CA, and ACME helpers shared by setup and `cluster cert` | +| `cluster/ingress.go` | ingress configuration option structs shared by setup and cluster managers | +| `registry/` | registry manager, registry deployment, registry push, and platform registry defaults | +| `registry/config/` | provisioned external registry config file loading and precedence | +| `registry/ref/` | shared 
image reference parsing used by setup image publishing and registry push | +| `registry/resolve/` | registry URL and image tag resolution shared by setup, server build, and registry push | When adding a helper, put it near the command that owns it unless two or more commands genuinely share it. ## Setup -Setup is split across: +Setup is split across `internal/cli/setup/`: -- `setup.go`: Cobra command, setup orchestration, image publishing, manifest - application, verification, and deployment diagnostics. -- `setup_plan.go`: planning and dependency injection seams used by tests. -- `setup_steps.go`: step-level helpers used by setup orchestration. +- `setup.go`: Cobra command and flag wiring. +- `platform.go`: setup orchestration, image publishing, manifest application, + verification, and deployment diagnostics. +- `flow.go`: setup flow validation and user-facing warnings. +- `steps.go`: step-level helpers used by setup orchestration. +- `plan/`: planning and dependency injection seams used by tests. +- `setup/assetpath/`: repo-root and asset path resolution used by setup builds + and manifest rendering. +- `setup/ingressmanifest/`: platform UI ingress manifest rendering. `setup --test-mode` relaxes production guardrails but still builds and pushes the operator, gateway proxy, and Sentinel images with `latest` tags. Pull hosts @@ -68,8 +83,9 @@ Important setup contracts: - Setup verification should fail with diagnostic context instead of reporting success after partial deployment. -Tests: `setup_test.go`, `setup_helpers_test.go`, `setup_plan_test.go`, and -`setup_steps_test.go`. +Tests live with the setup package, including `helpers_test.go`, +`plan_flow_test.go`, `steps_test.go`, `config_plan_test.go`, and +`tls_flags_test.go`. ## Cluster and Doctor @@ -77,18 +93,23 @@ Tests: `setup_test.go`, `setup_helpers_test.go`, `setup_plan_test.go`, and provider-oriented provisioning helpers. 
`bootstrap.go` performs preflight checks and has the only automated apply path for k3s CoreDNS/local-path prerequisites. -`cluster_doctor.go` is post-install diagnostics. It checks CRDs, workloads, +`internal/cli/cluster/doctor_impl.go` is post-install diagnostics. It checks CRDs, workloads, registry reachability, image pull failures, ingress, and platform components. Registry protocol mismatch detection must inspect regular containers and init containers, and it must surface failed pod inspections instead of returning a false pass. -Tests: `cluster_test.go`, `cluster_doctor_test.go`, and bootstrap-related tests. +Tests: `cluster_test.go`, `doctor_impl_test.go`, and bootstrap-related tests. ## Registry -`registry.go` owns registry status, info, provisioning, login, direct pushes, and -in-cluster helper pushes. +`internal/cli/registry/` owns registry Cobra wiring, status, info, +provisioning, login, deployment, direct pushes, and in-cluster helper pushes. +Shared image reference parsing lives in +`internal/cli/registry/ref/`, registry URL/tag resolution lives in +`internal/cli/registry/resolve/`, and provisioned registry config lives in +`internal/cli/registry/config/` so setup and server build can reuse those rules +without depending on registry command internals. Registry endpoint precedence is intentionally shared with setup and metadata: @@ -101,13 +122,15 @@ The in-cluster push path uses a temporary helper workload and should clean up after itself even on failure. When editing this path, verify both success and diagnostic failure output. -Tests: `registry_test.go`, plus setup tests for runtime image publishing. +Tests live with the registry package, plus setup tests for runtime image +publishing. ## Server and Build `server.go` implements CRUD-style operations for `MCPServer` resources and status/log inspection. `build.go` supports metadata-driven image builds for the -`.mcp` workflow. +`.mcp` workflow. 
Server-specific input validation lives with the server manager +in `internal/cli/server/validation.go`. Keep these flows distinct: @@ -136,39 +159,41 @@ Tests: `pipeline_test.go` and `pkg/metadata` tests. ## Access -`access.go` provides commands for grants and sessions: +`internal/cli/access/` provides commands for grants and sessions: - `access grant list|get|apply|delete|enable|disable` - `access session list|get|apply|delete|revoke|unrevoke` The implementation patches `spec.disabled` for grants and `spec.revoked` for sessions. Input validation should prevent invalid names/namespaces before they -reach `kubectl`. +reach `kubectl`; that validation lives in `internal/cli/access/validation.go`. -Tests: `access_test.go`. +Tests: `access/manager_test.go` and `access/validation_test.go`. ## Sentinel and Platform API -`sentinel.go`, `auth.go`, `platform_client.go`, and `platform_ingress.go` provide -CLI access to Sentinel APIs, auth flows, and platform ingress resolution. These -commands should stay aligned with `services/api` routes and the public docs. +`internal/cli/sentinel/`, `internal/cli/auth/`, and `internal/cli/platformapi/` +provide CLI access to Sentinel APIs, auth flows, and platform API URL +normalization. These commands should stay aligned with `services/api` routes and +the public docs. -Tests: `sentinel_test.go`, `auth_test.go`, `platform_client_test.go`, and -`platform_ingress_test.go`. +Tests: `sentinel/*_test.go`, `auth/*_test.go`, and `platformapi/*_test.go`. ## Status -`status.go` prints high-level platform health by querying Kubernetes. It should -be quick, readable, and conservative. Deeper diagnosis belongs in -`cluster doctor`. +`internal/cli/status/` prints high-level platform health by querying Kubernetes. +It uses the shared `internal/cli/platformstatus/` workload catalog so top-level +status and `sentinel status` do not drift. Shared kubectl diagnostics live in +`internal/cli/kubeerr/`. Status should be quick, readable, and conservative. 
+Deeper diagnosis belongs in `cluster doctor`. -Tests: `status_test.go`. +Tests: `status/*_test.go` and shared printer helpers in `status_test.go`. ## Adding a Command -1. Add the command implementation in the closest existing file or a new focused - file under `internal/cli`. -2. Add or update the thin routing package under `internal/cli/`. +1. Add or update the thin Cobra routing package under `internal/cli/`. +2. Put command behavior in a focused manager/service file in that command + package unless the behavior is genuinely shared. 3. Register the top-level command from `internal/cli/root/commands.go`. 4. Add tests with mocked runners or fake dependencies. 5. Build the CLI and inspect `--help`. diff --git a/docs/internals/pkg-metadata.md b/docs/internals/pkg-metadata.md index 686a5e9..eb3b4c1 100644 --- a/docs/internals/pkg-metadata.md +++ b/docs/internals/pkg-metadata.md @@ -82,5 +82,5 @@ Run: ```bash go test ./pkg/metadata/... -count=1 -go test ./internal/cli -run 'TestPipeline|TestBuild' -count=1 +go test ./internal/cli/pipeline ./internal/cli/server -run 'TestPipeline|TestBuild' -count=1 ``` diff --git a/docs/scripts/generate_go_package_reference.py b/docs/scripts/generate_go_package_reference.py index c6373a6..4931e68 100644 --- a/docs/scripts/generate_go_package_reference.py +++ b/docs/scripts/generate_go_package_reference.py @@ -16,7 +16,21 @@ ("Metadata helpers", ["go", "doc", "-all", "./pkg/metadata"]), ("Operator internals", ["go", "doc", "-all", "./internal/operator"]), ("CLI command routing", ["go", "doc", "-all", "./internal/cli/root"]), - ("CLI internals", ["go", "doc", "-all", "./internal/cli"]), + ("CLI core", ["go", "doc", "-all", "./internal/cli/core"]), + ("CLI Kubernetes helpers", ["go", "doc", "-all", "./internal/cli/kube"]), + ("CLI Kubernetes errors", ["go", "doc", "-all", "./internal/cli/kubeerr"]), + ("CLI cluster", ["go", "doc", "-all", "./internal/cli/cluster"]), + ("CLI cert-manager", ["go", "doc", "-all", 
"./internal/cli/certmanager"]), + ("CLI platform API", ["go", "doc", "-all", "./internal/cli/platformapi"]), + ("CLI platform status", ["go", "doc", "-all", "./internal/cli/platformstatus"]), + ("CLI registry", ["go", "doc", "-all", "./internal/cli/registry"]), + ("CLI registry config", ["go", "doc", "-all", "./internal/cli/registry/config"]), + ("CLI registry references", ["go", "doc", "-all", "./internal/cli/registry/ref"]), + ("CLI registry resolution", ["go", "doc", "-all", "./internal/cli/registry/resolve"]), + ("CLI server", ["go", "doc", "-all", "./internal/cli/server"]), + ("CLI setup asset paths", ["go", "doc", "-all", "./internal/cli/setup/assetpath"]), + ("CLI setup ingress manifests", ["go", "doc", "-all", "./internal/cli/setup/ingressmanifest"]), + ("CLI setup plan", ["go", "doc", "-all", "./internal/cli/setup/plan"]), ("CLI binary", ["go", "doc", "-cmd", "./cmd/mcp-runtime"]), ("Operator binary", ["go", "doc", "-cmd", "./cmd/operator"]), ] diff --git a/internal/cli/access/access.go b/internal/cli/access/access.go index a8e70e5..23fe687 100644 --- a/internal/cli/access/access.go +++ b/internal/cli/access/access.go @@ -4,21 +4,16 @@ package access import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" -) - -const ( - grantResource = "mcpaccessgrant" - sessionResource = "mcpagentsession" + "mcp-runtime/internal/cli/core" ) // New returns the access command. -func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(runtime.AccessManager()) +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(DefaultAccessManager(runtime)) } // NewWithManager returns the access command using the provided manager. 
-func NewWithManager(mgr *cli.AccessManager) *cobra.Command { +func NewWithManager(mgr *AccessManager) *cobra.Command { cmd := &cobra.Command{ Use: "access", Short: "Manage grants and agent sessions", @@ -34,35 +29,35 @@ to target the cluster with kubectl and a kubeconfig (cluster admin path).`, return cmd } -func newGrantCmd(mgr *cli.AccessManager) *cobra.Command { +func newGrantCmd(mgr *AccessManager) *cobra.Command { cmd := &cobra.Command{ Use: "grant", Short: "Manage MCPAccessGrant resources", } - cmd.AddCommand(newListCmd(mgr, grantResource, "grants")) - cmd.AddCommand(newGetCmd(mgr, grantResource, "grant")) + cmd.AddCommand(newListCmd(mgr, GrantResource, "grants")) + cmd.AddCommand(newGetCmd(mgr, GrantResource, "grant")) cmd.AddCommand(newApplyCmd(mgr, "grant")) - cmd.AddCommand(newDeleteCmd(mgr, grantResource, "grant")) - cmd.AddCommand(newToggleCmd(mgr, grantResource, "disable", "Disable a grant", true)) - cmd.AddCommand(newToggleCmd(mgr, grantResource, "enable", "Enable a grant", false)) + cmd.AddCommand(newDeleteCmd(mgr, GrantResource, "grant")) + cmd.AddCommand(newToggleCmd(mgr, GrantResource, "disable", "Disable a grant", true)) + cmd.AddCommand(newToggleCmd(mgr, GrantResource, "enable", "Enable a grant", false)) return cmd } -func newSessionCmd(mgr *cli.AccessManager) *cobra.Command { +func newSessionCmd(mgr *AccessManager) *cobra.Command { cmd := &cobra.Command{ Use: "session", Short: "Manage MCPAgentSession resources", } - cmd.AddCommand(newListCmd(mgr, sessionResource, "sessions")) - cmd.AddCommand(newGetCmd(mgr, sessionResource, "session")) + cmd.AddCommand(newListCmd(mgr, SessionResource, "sessions")) + cmd.AddCommand(newGetCmd(mgr, SessionResource, "session")) cmd.AddCommand(newApplyCmd(mgr, "session")) - cmd.AddCommand(newDeleteCmd(mgr, sessionResource, "session")) - cmd.AddCommand(newToggleCmd(mgr, sessionResource, "revoke", "Revoke an agent session", true)) - cmd.AddCommand(newToggleCmd(mgr, sessionResource, "unrevoke", "Clear the revoked flag 
on an agent session", false)) + cmd.AddCommand(newDeleteCmd(mgr, SessionResource, "session")) + cmd.AddCommand(newToggleCmd(mgr, SessionResource, "revoke", "Revoke an agent session", true)) + cmd.AddCommand(newToggleCmd(mgr, SessionResource, "unrevoke", "Clear the revoked flag on an agent session", false)) return cmd } -func newListCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command { +func newListCmd(mgr *AccessManager, resource, label string) *cobra.Command { var namespace string var allNamespaces bool cmd := &cobra.Command{ @@ -77,7 +72,7 @@ func newListCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command { return cmd } -func newGetCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command { +func newGetCmd(mgr *AccessManager, resource, label string) *cobra.Command { var namespace string cmd := &cobra.Command{ Use: "get [name]", @@ -87,11 +82,11 @@ func newGetCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command { return mgr.GetAccessResource(resource, args[0], namespace) }, } - cmd.Flags().StringVar(&namespace, "namespace", cli.NamespaceMCPServers, "Namespace") + cmd.Flags().StringVar(&namespace, "namespace", core.NamespaceMCPServers, "Namespace") return cmd } -func newApplyCmd(mgr *cli.AccessManager, label string) *cobra.Command { +func newApplyCmd(mgr *AccessManager, label string) *cobra.Command { var file string cmd := &cobra.Command{ Use: "apply", @@ -105,7 +100,7 @@ func newApplyCmd(mgr *cli.AccessManager, label string) *cobra.Command { return cmd } -func newDeleteCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command { +func newDeleteCmd(mgr *AccessManager, resource, label string) *cobra.Command { var namespace string cmd := &cobra.Command{ Use: "delete [name]", @@ -115,11 +110,11 @@ func newDeleteCmd(mgr *cli.AccessManager, resource, label string) *cobra.Command return mgr.DeleteAccessResource(resource, args[0], namespace) }, } - cmd.Flags().StringVar(&namespace, "namespace", 
cli.NamespaceMCPServers, "Namespace") + cmd.Flags().StringVar(&namespace, "namespace", core.NamespaceMCPServers, "Namespace") return cmd } -func newToggleCmd(mgr *cli.AccessManager, resource, use, short string, value bool) *cobra.Command { +func newToggleCmd(mgr *AccessManager, resource, use, short string, value bool) *cobra.Command { var namespace string cmd := &cobra.Command{ Use: use + " [name]", @@ -129,6 +124,6 @@ func newToggleCmd(mgr *cli.AccessManager, resource, use, short string, value boo return mgr.ToggleAccessResource(resource, args[0], namespace, value) }, } - cmd.Flags().StringVar(&namespace, "namespace", cli.NamespaceMCPServers, "Namespace") + cmd.Flags().StringVar(&namespace, "namespace", core.NamespaceMCPServers, "Namespace") return cmd } diff --git a/internal/cli/access.go b/internal/cli/access/manager.go similarity index 53% rename from internal/cli/access.go rename to internal/cli/access/manager.go index 0e6d651..63d551c 100644 --- a/internal/cli/access.go +++ b/internal/cli/access/manager.go @@ -1,4 +1,4 @@ -package cli +package access import ( "context" @@ -9,26 +9,35 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + kubeapply "mcp-runtime/internal/cli/kube" + "mcp-runtime/internal/cli/platformapi" ) const ( - accessGrantResource = "mcpaccessgrant" - accessSessionResource = "mcpagentsession" + // GrantResource is the kubectl resource name for MCPAccessGrant. + GrantResource = "mcpaccessgrant" + // SessionResource is the kubectl resource name for MCPAgentSession. + SessionResource = "mcpagentsession" ) +// AccessManager handles access grant and session operations. type AccessManager struct { - kubectl *KubectlClient + kubectl *core.KubectlClient logger *zap.Logger // useKube forces kubectl; when false, the platform API is used when logged in via mcp-runtime auth. 
useKube bool } -func NewAccessManager(kubectl *KubectlClient, logger *zap.Logger) *AccessManager { +// NewAccessManager creates an AccessManager with explicit dependencies (tests and advanced wiring). +func NewAccessManager(kubectl *core.KubectlClient, logger *zap.Logger) *AccessManager { return &AccessManager{kubectl: kubectl, logger: logger} } -func DefaultAccessManager(logger *zap.Logger) *AccessManager { - return NewAccessManager(kubectlClient, logger) +// DefaultAccessManager returns an AccessManager using the shared runtime clients. +func DefaultAccessManager(runtime *core.Runtime) *AccessManager { + return NewAccessManager(runtime.KubectlClient(), runtime.Logger()) } // BindUseKubeFlag wires the shared --use-kube flag onto the command. @@ -43,13 +52,13 @@ func (m *AccessManager) accessListQueryNamespace(namespace string, allNamespaces case allNamespaces: return "" default: - return NamespaceMCPServers + return core.NamespaceMCPServers } } // ListAccessResources lists grants or sessions via the platform API when configured, else kubectl. 
func (m *AccessManager) ListAccessResources(resource, namespace string, allNamespaces bool) error { - plat, kube, err := m.platformOrKube() + plat, kube, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } @@ -68,7 +77,7 @@ func (m *AccessManager) ListAccessResources(resource, namespace string, allNames } if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to list %s resources: %v", resource, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to list %s resources: %v", resource, err), map[string]any{ "resource": resource, "namespace": namespace, "component": "access", @@ -77,12 +86,12 @@ func (m *AccessManager) ListAccessResources(resource, namespace string, allNames return nil } -func (m *AccessManager) listAccessPlatform(ctx context.Context, plat *platformClient, resource, nsFilter string) error { +func (m *AccessManager) listAccessPlatform(ctx context.Context, plat *platformapi.PlatformClient, resource, nsFilter string) error { switch resource { - case accessGrantResource: - grants, err := plat.listGrants(ctx, nsFilter) + case GrantResource: + grants, err := plat.ListGrants(ctx, nsFilter) if err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("list grants: %v", err), map[string]any{"component": "access"}) + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("list grants: %v", err), map[string]any{"component": "access"}) } tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) _, _ = fmt.Fprintln(tw, "NAME\tNAMESPACE\tSERVER\tDISABLED") @@ -91,10 +100,10 @@ func (m *AccessManager) listAccessPlatform(ctx context.Context, plat *platformCl } _ = tw.Flush() return nil - case accessSessionResource: - sessions, err := plat.listSessions(ctx, nsFilter) + case SessionResource: + sessions, err := plat.ListSessions(ctx, nsFilter) if err != nil { - return wrapWithSentinelAndContext(nil, 
err, fmt.Sprintf("list sessions: %v", err), map[string]any{"component": "access"}) + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("list sessions: %v", err), map[string]any{"component": "access"}) } tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) _, _ = fmt.Fprintln(tw, "NAME\tNAMESPACE\tSERVER\tREVOKED") @@ -104,17 +113,18 @@ func (m *AccessManager) listAccessPlatform(ctx context.Context, plat *platformCl _ = tw.Flush() return nil default: - return newWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) } } +// GetAccessResource prints one grant or session via platform API or kubectl. func (m *AccessManager) GetAccessResource(resource, name, namespace string) error { - name, namespace, err := validateAccessInput(name, namespace) + name, namespace, err := validateAccessResourceInput(name, namespace) if err != nil { return err } - plat, kube, err := m.platformOrKube() + plat, kube, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } @@ -124,7 +134,7 @@ func (m *AccessManager) GetAccessResource(resource, name, namespace string) erro args := []string{"get", resource, name, "-n", namespace, "-o", "yaml"} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to get %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to get %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -134,18 +144,18 @@ func (m *AccessManager) GetAccessResource(resource, name, namespace string) erro return nil } -func (m *AccessManager) getAccessPlatform(ctx context.Context, plat *platformClient, resource, name, namespace string) error { +func (m *AccessManager) 
getAccessPlatform(ctx context.Context, plat *platformapi.PlatformClient, resource, name, namespace string) error { switch resource { - case accessGrantResource: - grant, err := plat.getGrant(ctx, namespace, name) + case GrantResource: + grant, err := plat.GetGrant(ctx, namespace, name) if err != nil { return err } b, _ := json.MarshalIndent(grant, "", " ") _, _ = os.Stdout.Write(append(b, '\n')) return nil - case accessSessionResource: - session, err := plat.getSession(ctx, namespace, name) + case SessionResource: + session, err := plat.GetSession(ctx, namespace, name) if err != nil { return err } @@ -153,26 +163,27 @@ func (m *AccessManager) getAccessPlatform(ctx context.Context, plat *platformCli _, _ = os.Stdout.Write(append(b, '\n')) return nil default: - return newWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) } } +// ApplyAccessResource applies a grant or session manifest via platform API or kubectl. 
func (m *AccessManager) ApplyAccessResource(file string) error { - plat, kube, err := m.platformOrKube() + plat, kube, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !kube { - if err := plat.applyAccessFromYAMLFile(context.Background(), file); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("apply access resource from file %q: %v", file, err), map[string]any{ + if err := plat.ApplyAccessFromYAMLFile(context.Background(), file); err != nil { + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("apply access resource from file %q: %v", file, err), map[string]any{ "file": file, "component": "access", }) } return nil } - if err := applyManifestFromFile(m.kubectl, file, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to apply access resource from file %q: %v", file, err), map[string]any{ + if err := kubeapply.ApplyManifestFromFile(m.kubectl.CommandArgs, file, os.Stdout, os.Stderr); err != nil { + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to apply access resource from file %q: %v", file, err), map[string]any{ "file": file, "component": "access", }) @@ -180,28 +191,29 @@ func (m *AccessManager) ApplyAccessResource(file string) error { return nil } +// DeleteAccessResource deletes a grant or session via platform API or kubectl. 
func (m *AccessManager) DeleteAccessResource(resource, name, namespace string) error { - name, namespace, err := validateAccessInput(name, namespace) + name, namespace, err := validateAccessResourceInput(name, namespace) if err != nil { return err } - plat, kube, err := m.platformOrKube() + plat, kube, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !kube { ctx := context.Background() switch resource { - case accessGrantResource: - err = plat.deleteGrant(ctx, namespace, name) - case accessSessionResource: - err = plat.deleteSession(ctx, namespace, name) + case GrantResource: + err = plat.DeleteGrant(ctx, namespace, name) + case SessionResource: + err = plat.DeleteSession(ctx, namespace, name) default: - return newWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) } if err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("delete %s %q: %v", resource, name, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("delete %s %q: %v", resource, name, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -214,7 +226,7 @@ func (m *AccessManager) DeleteAccessResource(resource, name, namespace string) e args := []string{"delete", resource, name, "-n", namespace} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to delete %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to delete %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -224,36 +236,37 @@ func (m *AccessManager) DeleteAccessResource(resource, name, namespace string) e return nil } +// ToggleAccessResource 
enables/disables grants or revokes/unrevokes sessions. func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, value bool) error { - name, namespace, err := validateAccessInput(name, namespace) + name, namespace, err := validateAccessResourceInput(name, namespace) if err != nil { return err } - plat, kube, err := m.platformOrKube() + plat, kube, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !kube { ctx := context.Background() switch resource { - case accessGrantResource: + case GrantResource: if value { - err = plat.postGrantToggle(ctx, namespace, name, "disable") + err = plat.PostGrantToggle(ctx, namespace, name, "disable") } else { - err = plat.postGrantToggle(ctx, namespace, name, "enable") + err = plat.PostGrantToggle(ctx, namespace, name, "enable") } - case accessSessionResource: + case SessionResource: if value { - err = plat.postSessionToggle(ctx, namespace, name, "revoke") + err = plat.PostSessionToggle(ctx, namespace, name, "revoke") } else { - err = plat.postSessionToggle(ctx, namespace, name, "unrevoke") + err = plat.PostSessionToggle(ctx, namespace, name, "unrevoke") } default: - return newWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) } if err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("toggle %s %q: %v", resource, name, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("toggle %s %q: %v", resource, name, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -266,17 +279,17 @@ func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, v patchValue := map[string]any{"spec": map[string]any{}} switch resource { - case accessGrantResource: + case GrantResource: patchValue["spec"].(map[string]any)["disabled"] = value - case accessSessionResource: + case SessionResource: 
patchValue["spec"].(map[string]any)["revoked"] = value default: - return newWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported access resource %q", resource)) } data, err := json.Marshal(patchValue) if err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to marshal access patch for %s %q: %v", resource, name, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to marshal access patch for %s %q: %v", resource, name, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -286,7 +299,7 @@ func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, v args := []string{"patch", resource, name, "-n", namespace, "--type", "merge", "--patch", string(data)} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to patch %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to patch %s %q in namespace %q: %v", resource, name, namespace, err), map[string]any{ "resource": resource, "name": name, "namespace": namespace, @@ -295,19 +308,3 @@ func (m *AccessManager) ToggleAccessResource(resource, name, namespace string, v } return nil } - -func validateAccessInput(name, namespace string) (string, string, error) { - if !validServerName.MatchString(name) { - return "", "", newWithSentinel(nil, fmt.Sprintf("invalid resource name %q: must be lowercase alphanumeric with optional hyphens", name)) - } - - var err error - if name, err = validateManifestValue("name", name); err != nil { - return "", "", err - } - if namespace, err = validateManifestValue("namespace", namespace); err != nil { - return "", "", err - } - - return name, namespace, nil -} diff --git a/internal/cli/access_test.go 
b/internal/cli/access/manager_test.go similarity index 52% rename from internal/cli/access_test.go rename to internal/cli/access/manager_test.go index 91d8990..27058b4 100644 --- a/internal/cli/access_test.go +++ b/internal/cli/access/manager_test.go @@ -1,24 +1,37 @@ -package cli +package access_test import ( + "io" "os" "strings" "testing" "go.uber.org/zap" + + "mcp-runtime/internal/cli/access" + "mcp-runtime/internal/cli/core" ) +func contains(slice []string, val string) bool { + for _, s := range slice { + if s == val { + return true + } + } + return false +} + func TestAccessManager_ListAccessResources(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewAccessManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := access.NewAccessManager(kubectl, zap.NewNop()) - if err := mgr.ListAccessResources(accessGrantResource, "", true); err != nil { + if err := mgr.ListAccessResources(access.GrantResource, "", true); err != nil { t.Fatalf("unexpected error: %v", err) } cmd := mock.LastCommand() - for _, want := range []string{"get", accessGrantResource, "-A"} { + for _, want := range []string{"get", access.GrantResource, "-A"} { if !contains(cmd.Args, want) { t.Fatalf("expected %q in args, got %v", want, cmd.Args) } @@ -26,16 +39,16 @@ func TestAccessManager_ListAccessResources(t *testing.T) { } func TestAccessManager_GetAccessResource(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewAccessManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := access.NewAccessManager(kubectl, zap.NewNop()) - if err := mgr.GetAccessResource(accessSessionResource, "session-a", "team-a"); err != nil { + if err := mgr.GetAccessResource(access.SessionResource, "session-a", "team-a"); err != nil { t.Fatalf("unexpected error: %v", err) } cmd := 
mock.LastCommand() - for _, want := range []string{"get", accessSessionResource, "session-a", "-n", "team-a", "-o", "yaml"} { + for _, want := range []string{"get", access.SessionResource, "session-a", "-n", "team-a", "-o", "yaml"} { if !contains(cmd.Args, want) { t.Fatalf("expected %q in args, got %v", want, cmd.Args) } @@ -43,9 +56,18 @@ func TestAccessManager_GetAccessResource(t *testing.T) { } func TestAccessManager_ApplyAccessResource(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewAccessManager(kubectl, zap.NewNop()) + var applyCmd *core.MockCommand + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + applyCmd = &core.MockCommand{Args: spec.Args} + return applyCmd + }, + } + kubectl, err := core.NewKubectlClient(mock) + if err != nil { + t.Fatalf("NewKubectlClient() error = %v", err) + } + mgr := access.NewAccessManager(kubectl, zap.NewNop()) tmpFile, err := os.CreateTemp("", "access-*.yaml") if err != nil { @@ -64,8 +86,15 @@ func TestAccessManager_ApplyAccessResource(t *testing.T) { } cmd := mock.LastCommand() - if !contains(cmd.Args, "apply") || !contains(cmd.Args, "-f") { - t.Fatalf("expected apply -f args, got %v", cmd.Args) + if !contains(cmd.Args, "apply") || !contains(cmd.Args, "-f") || !contains(cmd.Args, "-") { + t.Fatalf("expected apply -f - args, got %v", cmd.Args) + } + captured, err := io.ReadAll(applyCmd.StdinR) + if err != nil { + t.Fatalf("read stdin: %v", err) + } + if string(captured) != "apiVersion: v1\nkind: ConfigMap\n" { + t.Fatalf("unexpected stdin: %q", string(captured)) } } @@ -75,15 +104,15 @@ func TestAccessManager_ToggleAccessResource(t *testing.T) { resource string wantJSON string }{ - {name: "disable grant", resource: accessGrantResource, wantJSON: `"disabled":true`}, - {name: "revoke session", resource: accessSessionResource, wantJSON: `"revoked":true`}, + {name: "disable grant", resource: access.GrantResource, wantJSON: 
`"disabled":true`}, + {name: "revoke session", resource: access.SessionResource, wantJSON: `"revoked":true`}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewAccessManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := access.NewAccessManager(kubectl, zap.NewNop()) if err := mgr.ToggleAccessResource(tt.resource, "obj-a", "team-a", true); err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/internal/cli/access/validation.go b/internal/cli/access/validation.go new file mode 100644 index 0000000..27d7a6d --- /dev/null +++ b/internal/cli/access/validation.go @@ -0,0 +1,7 @@ +package access + +import "mcp-runtime/internal/cli/core" + +func validateAccessResourceInput(name, namespace string) (string, string, error) { + return core.ValidateK8sNameAndNamespace("resource name", nil, name, namespace) +} diff --git a/internal/cli/access/validation_test.go b/internal/cli/access/validation_test.go new file mode 100644 index 0000000..3f8fcbc --- /dev/null +++ b/internal/cli/access/validation_test.go @@ -0,0 +1,29 @@ +package access + +import "testing" + +func TestValidateAccessResourceInput(t *testing.T) { + name, namespace, err := validateAccessResourceInput("grant-one", "mcp-servers") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if name != "grant-one" || namespace != "mcp-servers" { + t.Fatalf("unexpected values: name=%q namespace=%q", name, namespace) + } +} + +func TestValidateAccessResourceInputErrors(t *testing.T) { + t.Run("rejects invalid name", func(t *testing.T) { + _, _, err := validateAccessResourceInput("GrantOne", "mcp-servers") + if err == nil { + t.Fatal("expected invalid resource name error") + } + }) + + t.Run("rejects empty namespace", func(t *testing.T) { + _, _, err := validateAccessResourceInput("grant-one", " ") + if err == nil { + t.Fatal("expected empty 
namespace error") + } + }) +} diff --git a/internal/cli/auth/auth.go b/internal/cli/auth/auth.go index bf2b761..590dad5 100644 --- a/internal/cli/auth/auth.go +++ b/internal/cli/auth/auth.go @@ -18,7 +18,8 @@ import ( "go.uber.org/zap" "golang.org/x/term" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/platformapi" "mcp-runtime/pkg/authfile" ) @@ -42,12 +43,12 @@ type loginFlags struct { skipVerify bool } -func newManager(runtime *cli.Runtime) *manager { +func newManager(runtime *core.Runtime) *manager { return &manager{logger: runtime.Logger()} } // New returns the auth command. -func New(runtime *cli.Runtime) *cobra.Command { +func New(runtime *core.Runtime) *cobra.Command { m := newManager(runtime) cmd := &cobra.Command{ Use: "auth", @@ -104,7 +105,7 @@ func (m *manager) runLogin(cmd *cobra.Command, f loginFlags) error { if apiURL == "" { return fmt.Errorf("api URL is required (set --api-url or %s)", authfile.EnvAPIURL) } - apiURL = cli.NormalizePlatformAPIBaseURL(apiURL) + apiURL = platformapi.NormalizeBaseURL(apiURL) if apiURL == "" { return errors.New("api URL must include scheme and host") } @@ -259,7 +260,7 @@ func loginPlatformPassword(ctx context.Context, apiBaseURL, email, password stri } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - u := cli.NormalizePlatformAPIBaseURL(apiBaseURL) + "/api/auth/login" + u := platformapi.NormalizeBaseURL(apiBaseURL) + "/api/auth/login" req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, bytes.NewReader(body)) if err != nil { return "", "", err @@ -305,7 +306,7 @@ func fileCredentialsIfRelevant() (*authfile.Credentials, error) { } func verifyPlatformAPIToken(ctx context.Context, apiBaseURL, token string) error { - u := cli.NormalizePlatformAPIBaseURL(apiBaseURL) + "/api/auth/me" + u := platformapi.NormalizeBaseURL(apiBaseURL) + "/api/auth/me" req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return err diff 
--git a/internal/cli/auth/auth_test.go b/internal/cli/auth/auth_test.go index 6166521..c83a0d5 100644 --- a/internal/cli/auth/auth_test.go +++ b/internal/cli/auth/auth_test.go @@ -12,7 +12,7 @@ import ( "time" "go.uber.org/zap" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" "mcp-runtime/pkg/authfile" ) @@ -66,7 +66,7 @@ func TestAuthLoginSavesAndVerifies(t *testing.T) { } defer func() { httpDoHook = prevHTTPHook }() - cmd := New(cli.NewRuntime(zap.NewNop())) + cmd := New(core.NewRuntime(zap.NewNop())) var out, errb bytes.Buffer cmd.SetOut(&out) cmd.SetErr(&errb) @@ -106,7 +106,7 @@ func TestAuthLoginNormalizesTrailingAPIPath(t *testing.T) { } defer func() { apiTestHook = previousHook }() - cmd := New(cli.NewRuntime(zap.NewNop())) + cmd := New(core.NewRuntime(zap.NewNop())) cmd.SetArgs([]string{"login", "--api-url", "https://platform.example.com/api/", "--token", "good"}) if err := cmd.Execute(); err != nil { t.Fatalf("execute: %v", err) diff --git a/internal/cli/bootstrap/bootstrap.go b/internal/cli/bootstrap/bootstrap.go index 218c832..9f5e8fd 100644 --- a/internal/cli/bootstrap/bootstrap.go +++ b/internal/cli/bootstrap/bootstrap.go @@ -8,21 +8,21 @@ import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) type manager struct { - kubectl cli.KubectlRunner + kubectl core.KubectlRunner } -func newManager(runtime *cli.Runtime) *manager { +func newManager(runtime *core.Runtime) *manager { return &manager{kubectl: runtime.KubectlRunner()} } -func detectProvider(kubectl cli.KubectlRunner) (string, error) { +func detectProvider(kubectl core.KubectlRunner) (string, error) { out, err := kubectlOutput(kubectl, []string{"get", "nodes", "-o", "jsonpath={range .items[*]}{.status.nodeInfo.kubeletVersion}{\"\\n\"}{end}"}) if err != nil { - return "", cli.WrapWithSentinel(cli.ErrClusterNotAccessible, err, fmt.Sprintf("kubectl get nodes failed: %v", err)) + return "", core.WrapWithSentinel(core.ErrClusterNotAccessible, err, 
fmt.Sprintf("kubectl get nodes failed: %v", err)) } lower := strings.ToLower(string(out)) switch { @@ -35,40 +35,40 @@ func detectProvider(kubectl cli.KubectlRunner) (string, error) { } } -func runBootstrapPreflight(kubectl cli.KubectlRunner) error { - cli.Info("Preflight: kubectl connectivity") +func runBootstrapPreflight(kubectl core.KubectlRunner) error { + core.Info("Preflight: kubectl connectivity") if err := kubectl.Run([]string{"version", "--client=true"}); err != nil { - return cli.WrapWithSentinel(cli.ErrClusterNotAccessible, err, fmt.Sprintf("kubectl not available: %v", err)) + return core.WrapWithSentinel(core.ErrClusterNotAccessible, err, fmt.Sprintf("kubectl not available: %v", err)) } if err := kubectl.Run([]string{"get", "nodes"}); err != nil { - return cli.WrapWithSentinel(cli.ErrClusterNotAccessible, err, fmt.Sprintf("kubectl cannot reach cluster: %v", err)) + return core.WrapWithSentinel(core.ErrClusterNotAccessible, err, fmt.Sprintf("kubectl cannot reach cluster: %v", err)) } - cli.Info("Preflight: CoreDNS") + core.Info("Preflight: CoreDNS") if err := checkDeploymentExists(kubectl, "kube-system", "coredns"); err != nil { - cli.Warn("CoreDNS not detected (kube-system/deployment coredns). Cluster DNS must be installed for in-cluster service discovery.") + core.Warn("CoreDNS not detected (kube-system/deployment coredns). Cluster DNS must be installed for in-cluster service discovery.") } - cli.Info("Preflight: Default StorageClass") + core.Info("Preflight: Default StorageClass") if err := checkHasDefaultStorageClass(kubectl); err != nil { - cli.Warn(fmt.Sprintf("No default StorageClass detected: %v", err)) + core.Warn(fmt.Sprintf("No default StorageClass detected: %v", err)) } - cli.Info("Preflight: IngressClass traefik") + core.Info("Preflight: IngressClass traefik") if err := kubectl.Run([]string{"get", "ingressclass", "traefik"}); err != nil { - cli.Warn("IngressClass traefik not found. 
If you plan to use Traefik, install it before running setup (or let setup install it when configured).") + core.Warn("IngressClass traefik not found. If you plan to use Traefik, install it before running setup (or let setup install it when configured).") } - cli.Info("Preflight: MetalLB") + core.Info("Preflight: MetalLB") if err := kubectl.Run([]string{"get", "ns", "metallb-system"}); err != nil { - cli.Warn("MetalLB not detected (namespace metallb-system). If you need LoadBalancer services on bare metal, install MetalLB.") + core.Warn("MetalLB not detected (namespace metallb-system). If you need LoadBalancer services on bare metal, install MetalLB.") } return nil } -func bootstrapApplyK3s(kubectl cli.KubectlRunner) error { - cli.Info("Applying k3s addons: CoreDNS + local-path provisioner (if missing)") +func bootstrapApplyK3s(kubectl core.KubectlRunner) error { + core.Info("Applying k3s addons: CoreDNS + local-path provisioner (if missing)") paths := []string{ "/var/lib/rancher/k3s/server/manifests/coredns.yaml", @@ -82,37 +82,37 @@ func bootstrapApplyK3s(kubectl cli.KubectlRunner) error { } if len(missing) > 0 { msg := fmt.Sprintf("k3s manifests missing on disk (%s); bootstrap --apply expects to run on the k3s server node", strings.Join(missing, ", ")) - return cli.WrapWithSentinel(cli.ErrClusterConfigFailed, fmt.Errorf("missing manifests"), msg) + return core.WrapWithSentinel(core.ErrClusterConfigFailed, fmt.Errorf("missing manifests"), msg) } for _, p := range paths { if err := kubectl.Run([]string{"apply", "-f", p}); err != nil { - return cli.WrapWithSentinel(cli.ErrClusterConfigFailed, err, fmt.Sprintf("failed to apply %s: %v", p, err)) + return core.WrapWithSentinel(core.ErrClusterConfigFailed, err, fmt.Sprintf("failed to apply %s: %v", p, err)) } } - cli.Info("Waiting for kube-system addons to be ready") + core.Info("Waiting for kube-system addons to be ready") if err := kubectl.Run([]string{"rollout", "status", "deployment/coredns", "-n", "kube-system", 
"--timeout=180s"}); err != nil { - return cli.WrapWithSentinel(cli.ErrDeploymentTimeout, err, fmt.Sprintf("coredns rollout failed: %v", err)) + return core.WrapWithSentinel(core.ErrDeploymentTimeout, err, fmt.Sprintf("coredns rollout failed: %v", err)) } if err := kubectl.Run([]string{"rollout", "status", "deployment/local-path-provisioner", "-n", "kube-system", "--timeout=180s"}); err != nil { - return cli.WrapWithSentinel(cli.ErrDeploymentTimeout, err, fmt.Sprintf("local-path-provisioner rollout failed: %v", err)) + return core.WrapWithSentinel(core.ErrDeploymentTimeout, err, fmt.Sprintf("local-path-provisioner rollout failed: %v", err)) } - cli.Info("Node disk-pressure check") + core.Info("Node disk-pressure check") cond, err := kubectlOutput(kubectl, []string{"get", "nodes", "-o", "jsonpath={range .items[*]}{.metadata.name}{\" \"}{range .status.conditions[?(@.type==\"DiskPressure\")]}{.status}{end}{\"\\n\"}{end}"}) if err == nil { - cli.Info(strings.TrimSpace(string(cond))) + core.Info(strings.TrimSpace(string(cond))) } return nil } -func checkDeploymentExists(kubectl cli.KubectlRunner, namespace, name string) error { +func checkDeploymentExists(kubectl core.KubectlRunner, namespace, name string) error { return kubectl.Run([]string{"get", "deployment", name, "-n", namespace}) } -func checkHasDefaultStorageClass(kubectl cli.KubectlRunner) error { +func checkHasDefaultStorageClass(kubectl core.KubectlRunner) error { out, err := kubectlOutput(kubectl, []string{"get", "storageclass", "-o", "jsonpath={range .items[*]}{.metadata.name}{\" \"}{.metadata.annotations.storageclass\\.kubernetes\\.io/is-default-class}{\"\\n\"}{end}"}) if err != nil { return err @@ -126,7 +126,7 @@ func checkHasDefaultStorageClass(kubectl cli.KubectlRunner) error { return fmt.Errorf("no StorageClass annotated with storageclass.kubernetes.io/is-default-class=true") } -func kubectlOutput(kubectl cli.KubectlRunner, args []string) ([]byte, error) { +func kubectlOutput(kubectl core.KubectlRunner, 
args []string) ([]byte, error) { cmd, err := kubectl.CommandArgs(args) if err != nil { return nil, err @@ -135,7 +135,7 @@ func kubectlOutput(kubectl cli.KubectlRunner, args []string) ([]byte, error) { } // New returns the bootstrap command. -func New(runtime *cli.Runtime) *cobra.Command { +func New(runtime *core.Runtime) *cobra.Command { var apply bool var provider string mgr := newManager(runtime) @@ -150,7 +150,7 @@ Use this to prepare an existing cluster for running 'mcp-runtime setup'. Note: bootstrap --apply is automated for k3s only and must be executed on the k3s server node (it expects local manifests under /var/lib/rancher/k3s/server/manifests).`, RunE: func(cmd *cobra.Command, args []string) error { - cli.Section("MCP Runtime Bootstrap") + core.Section("MCP Runtime Bootstrap") chosenProvider := provider if chosenProvider == "" || chosenProvider == "auto" { detectedProvider, err := detectProvider(mgr.kubectl) @@ -159,15 +159,15 @@ Note: bootstrap --apply is automated for k3s only and must be executed on the k3 } chosenProvider = detectedProvider } - cli.Info(fmt.Sprintf("Provider: %s", chosenProvider)) + core.Info(fmt.Sprintf("Provider: %s", chosenProvider)) if err := runBootstrapPreflight(mgr.kubectl); err != nil { return err } if !apply { - cli.Success("Bootstrap preflight complete (no changes applied)") - cli.Info("Next: run `./bin/mcp-runtime setup` (or `./bin/mcp-runtime setup --storage-mode hostpath` for single-node dev)") + core.Success("Bootstrap preflight complete (no changes applied)") + core.Info("Next: run `./bin/mcp-runtime setup` (or `./bin/mcp-runtime setup --storage-mode hostpath` for single-node dev)") return nil } @@ -177,13 +177,13 @@ Note: bootstrap --apply is automated for k3s only and must be executed on the k3 return err } case "rke2", "kubeadm", "generic": - cli.Warn("Apply mode is currently only automated for k3s. 
For other distributions, use the preflight output and install DNS/storage/ingress/load-balancer via your standard platform tooling.") + core.Warn("Apply mode is currently only automated for k3s. For other distributions, use the preflight output and install DNS/storage/ingress/load-balancer via your standard platform tooling.") default: - cli.Warn(fmt.Sprintf("Unknown provider %q; skipping apply", chosenProvider)) + core.Warn(fmt.Sprintf("Unknown provider %q; skipping apply", chosenProvider)) } - cli.Success("Bootstrap complete") - cli.Info("Next: run `./bin/mcp-runtime setup`") + core.Success("Bootstrap complete") + core.Info("Next: run `./bin/mcp-runtime setup`") return nil }, } diff --git a/internal/cli/build.go b/internal/cli/build.go deleted file mode 100644 index 697304d..0000000 --- a/internal/cli/build.go +++ /dev/null @@ -1,261 +0,0 @@ -package cli - -// This file implements the "server build" command for building Docker images. -// It handles Docker image building, metadata file updates, and registry integration. -// -// Example usage: -// mcp-runtime server build image my-server --tag v1.0.0 -// mcp-runtime server build image my-server --dockerfile custom.Dockerfile --registry my-registry.com - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "go.uber.org/zap" - - "mcp-runtime/pkg/metadata" - - "gopkg.in/yaml.v3" -) - -// yamlMarshal is a test seam for yaml.Marshal. 
-var yamlMarshal = yaml.Marshal - -func buildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error { - // Get registry URL - if registryURL == "" { - registryURL = getPlatformRegistryURL(logger) - } - - // Get tag - if tag == "" { - tag = getGitTag() - } - - logger.Info("Building image", zap.String("server", serverName)) - - // Determine image name - imageName := fmt.Sprintf("%s/%s", registryURL, serverName) - fullImage := fmt.Sprintf("%s:%s", imageName, tag) - - // Build Docker image - // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - buildCmd, err := execCommandWithValidators("docker", []string{ - "build", - "-f", dockerfile, - "-t", fullImage, - context, - }) - if err != nil { - return err - } - buildCmd.SetStdout(os.Stdout) - buildCmd.SetStderr(os.Stderr) - - if err := buildCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrBuildImageFailed, - err, - fmt.Sprintf("failed to build image for %s: %v", serverName, err), - map[string]any{"server": serverName, "image": fullImage, "dockerfile": dockerfile, "component": "build"}, - ) - Error("Failed to build image") - logStructuredError(logger, wrappedErr, "Failed to build image") - return wrappedErr - } - - logger.Info("Image built successfully", zap.String("image", fullImage)) - - // Update metadata file (required for a successful build: CI and scripts rely on non-zero exit) - if err := updateMetadataImage(serverName, imageName, tag, metadataFile, metadataDir); err != nil { - logStructuredError(logger, err, "Image built but metadata update failed") - return err - } - - return nil -} - -func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error { - return buildImage(logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context) -} - -func updateMetadataImage(serverName, imageName, tag, metadataFile, metadataDir 
string) error { - // Find the metadata file containing this server - var targetFile string - - if metadataFile != "" { - targetFile = metadataFile - } else { - // Search in metadata directory - files, _ := filepath.Glob(filepath.Join(metadataDir, "*.yaml")) - ymlFiles, _ := filepath.Glob(filepath.Join(metadataDir, "*.yml")) - files = append(files, ymlFiles...) - - for _, file := range files { - registry, err := metadata.LoadFromFile(file) - if err != nil { - continue - } - for _, s := range registry.Servers { - if s.Name == serverName { - targetFile = file - break - } - } - if targetFile != "" { - break - } - } - } - - if targetFile == "" { - err := newWithSentinel(ErrMetadataFileNotFound, fmt.Sprintf("metadata file not found for server %s", serverName)) - Error("Metadata file not found") - // Note: No logger available in this helper function - return err - } - - // Load and update - registry, err := metadata.LoadFromFile(targetFile) - if err != nil { - wrappedErr := wrapWithSentinel(ErrLoadMetadataFailed, err, fmt.Sprintf("failed to load metadata: %v", err)) - Error("Failed to load metadata") - // Note: No logger available in this helper function - return wrappedErr - } - - // Update server image - updated := false - for i := range registry.Servers { - if registry.Servers[i].Name == serverName { - registry.Servers[i].Image = imageName - registry.Servers[i].ImageTag = tag - updated = true - break - } - } - - if !updated { - err := newWithSentinel(ErrServerNotFoundInMetadata, fmt.Sprintf("server %s not found in metadata", serverName)) - Error("Server not found in metadata") - // Note: No logger available in this helper function - return err - } - - // Write back - data, err := yamlMarshal(registry) - if err != nil { - wrappedErr := wrapWithSentinel(ErrMarshalMetadataFailed, err, fmt.Sprintf("failed to marshal metadata: %v", err)) - Error("Failed to marshal metadata") - // Note: No logger available in this helper function - return wrappedErr - } - - fileMode := 
os.FileMode(0o600) - if info, statErr := os.Stat(targetFile); statErr == nil { - fileMode = info.Mode().Perm() - if fileMode&0o200 == 0 { - writeErr := fmt.Errorf("file is not writable: %s", targetFile) - wrappedErr := wrapWithSentinel(ErrWriteMetadataFailed, writeErr, fmt.Sprintf("failed to write metadata: %v", writeErr)) - Error("Failed to write metadata") - // Note: No logger available in this helper function - return wrappedErr - } - } - - if err := os.WriteFile(targetFile, data, fileMode); err != nil { - wrappedErr := wrapWithSentinel(ErrWriteMetadataFailed, err, fmt.Sprintf("failed to write metadata: %v", err)) - Error("Failed to write metadata") - // Note: No logger available in this helper function - return wrappedErr - } - - return nil -} - -func getPlatformRegistryURL(logger *zap.Logger) string { - const registryServiceDNS = "registry.registry.svc.cluster.local" - - // Respect an explicitly configured endpoint. The implicit local default - // (registry.local) is resolved from the installed registry service below. - if endpoint := strings.TrimSpace(GetRegistryEndpoint()); endpoint != "" && - (endpoint != defaultRegistryEndpoint || registryEndpointExplicitlyConfigured()) { - return endpoint - } - - if os.Getenv("MCP_RUNTIME_TEST_MODE") == "1" { - // Kind contributor clusters configure containerd for this exact host. - // Avoid ClusterIP image refs, which change per cluster and bypass that mirror. - // #nosec G204 -- fixed arguments, no user input. 
- portCmd, portErr := kubectlClient.CommandArgs([]string{"get", "service", "registry", "-n", "registry", "-o", "jsonpath={.spec.ports[0].port}"}) - var port []byte - if portErr == nil { - port, portErr = portCmd.Output() - } - portValue := strings.TrimSpace(string(port)) - if portErr == nil && portValue != "" { - return fmt.Sprintf("%s:%s", registryServiceDNS, portValue) - } - if logger != nil { - logger.Warn("Could not detect registry service port in test mode, using default service DNS:port") - } - return fmt.Sprintf("%s:%d", registryServiceDNS, GetRegistryPort()) - } - - // Otherwise read registry service IP/port and use the concrete service endpoint, - // preserving the non-test fallback behavior for existing dev clusters. - // #nosec G204 -- fixed arguments, no user input. - ipCmd, ipErr := kubectlClient.CommandArgs([]string{"get", "service", "registry", "-n", "registry", "-o", "jsonpath={.spec.clusterIP}"}) - var clusterIP []byte - if ipErr == nil { - clusterIP, ipErr = ipCmd.Output() - } - - ip := strings.TrimSpace(string(clusterIP)) - // #nosec G204 -- fixed arguments, no user input. 
- portCmd, portErr := kubectlClient.CommandArgs([]string{"get", "service", "registry", "-n", "registry", "-o", "jsonpath={.spec.ports[0].port}"}) - var port []byte - if portErr == nil { - port, portErr = portCmd.Output() - } - portValue := strings.TrimSpace(string(port)) - if ipErr == nil && ip != "" && portErr == nil && portValue != "" { - return fmt.Sprintf("%s:%s", ip, portValue) - } - if portErr == nil && portValue != "" { - return fmt.Sprintf("%s:%s", registryServiceDNS, portValue) - } - - // Fallback to default - if logger != nil { - logger.Warn("Could not detect registry ingress host or service port, using default service DNS:port") - } - return fmt.Sprintf("%s:%d", registryServiceDNS, GetRegistryPort()) -} - -func registryEndpointExplicitlyConfigured() bool { - if value, ok := os.LookupEnv("MCP_REGISTRY_ENDPOINT"); ok && strings.TrimSpace(value) != "" { - return true - } - if value, ok := os.LookupEnv("MCP_REGISTRY_HOST"); ok && strings.TrimSpace(value) != "" { - return true - } - return false -} - -func getGitTag() string { - // Try to get git SHA - // #nosec G204 -- fixed arguments, no user input. 
- cmd, err := execCommandWithValidators("git", []string{"rev-parse", "--short", "HEAD"}) - if err == nil { - sha, execErr := cmd.Output() - if execErr == nil && len(sha) > 0 { - return strings.TrimSpace(string(sha)) - } - } - - // Fallback to latest - return "latest" -} diff --git a/internal/cli/build_test.go b/internal/cli/build_test.go deleted file mode 100644 index ae5cfab..0000000 --- a/internal/cli/build_test.go +++ /dev/null @@ -1,789 +0,0 @@ -package cli - -import ( - "errors" - "os" - "path/filepath" - "strings" - "testing" - - "go.uber.org/zap" -) - -func TestGetGitTag(t *testing.T) { - // This test runs in a git repo, so it should return a valid SHA or "latest" - tag := getGitTag() - - if tag == "" { - t.Error("getGitTag should not return empty string") - } - - // Should be either a short SHA (7-8 chars) or "latest" - if tag != "latest" && len(tag) < 7 { - t.Errorf("getGitTag returned unexpected value: %q", tag) - } -} - -func TestBuildImage(t *testing.T) { - logger := zap.NewNop() - - t.Run("builds_image_successfully", func(t *testing.T) { - // Save original executor and restore after test - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - // Create mock executor - mock := &MockExecutor{} - execExecutor = mock - - tmp := t.TempDir() - metadataFile := filepath.Join(tmp, "servers.yaml") - if err := os.WriteFile(metadataFile, []byte(`version: v1 -servers: - - name: test-server -`), 0o600); err != nil { - t.Fatalf("write metadata: %v", err) - } - - err := buildImage(logger, "test-server", "Dockerfile", metadataFile, ".", "test-registry", "test-tag", ".") - if err != nil { - t.Fatalf("failed to build image: %v", err) - } - - // Verify docker build was called - if !mock.HasCommand("docker") { - t.Error("expected docker command to be executed") - } - - // Verify the command arguments - last := mock.LastCommand() - if last.Name != "docker" { - t.Errorf("expected docker command, got %q", last.Name) - } - - // Check 
expected args - expectedArgs := []string{"build", "-f", "Dockerfile", "-t", "test-registry/test-server:test-tag", "."} - if !equalStringSlices(last.Args, expectedArgs) { - t.Errorf("docker args = %v, want %v", last.Args, expectedArgs) - } - }) - - t.Run("returns_error_after_build_when_metadata_missing", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - mock := &MockExecutor{} - execExecutor = mock - - tmp := t.TempDir() // no *.yaml: updateMetadataFile cannot find server - err := buildImage(logger, "missing-server", "Dockerfile", "", tmp, "test-registry", "test-tag", ".") - if err == nil { - t.Fatal("expected error when metadata file not found for server name") - } - if !errors.Is(err, ErrMetadataFileNotFound) { - t.Fatalf("expected ErrMetadataFileNotFound, got %v", err) - } - }) - - t.Run("returns_error_on_build_failure", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - // Mock that returns error on Run() - mock := &MockExecutor{ - DefaultRunErr: errors.New("docker build failed"), - } - execExecutor = mock - - err := buildImage(logger, "test-server", "Dockerfile", "", ".", "test-registry", "test-tag", ".") - if err == nil { - t.Error("expected error when docker build fails") - } - }) - - t.Run("uses_git_tag_when_tag_empty", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - if spec.Name == "git" { - // Return a mock git SHA - return &MockCommand{OutputData: []byte("abc1234\n")} - } - // Return success for docker - return &MockCommand{} - }, - } - execExecutor = mock - - tmp := t.TempDir() - metadataFile := filepath.Join(tmp, "servers.yaml") - if err := os.WriteFile(metadataFile, []byte(`version: v1 -servers: - - name: my-server -`), 0o600); err != nil { - t.Fatalf("write metadata: %v", err) - } - - err := 
buildImage(logger, "my-server", "Dockerfile", metadataFile, ".", "registry.io", "", ".") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Check the docker build was called with git tag - for _, cmd := range mock.Commands { - if cmd.Name == "docker" { - // Should contain the git SHA in the tag - found := false - for _, arg := range cmd.Args { - if arg == "registry.io/my-server:abc1234" { - found = true - break - } - } - if !found { - t.Errorf("expected image tag with git SHA, got args: %v", cmd.Args) - } - } - } - }) - - t.Run("uses_platform_registry_when_registry_empty", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - origConfig := DefaultCLIConfig - defer func() { DefaultCLIConfig = origConfig }() - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5000} - - // Mock kubectl to return service port so the helper falls back to service DNS. 
- kubectlMock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: kubectlMock, validators: nil} - - mock := &MockExecutor{} - execExecutor = mock - - tmp := t.TempDir() - metadataFile := filepath.Join(tmp, "servers.yaml") - if err := os.WriteFile(metadataFile, []byte(`version: v1 -servers: - - name: my-server -`), 0o600); err != nil { - t.Fatalf("write metadata: %v", err) - } - - err := buildImage(logger, "my-server", "Dockerfile", metadataFile, ".", "", "v1.0", ".") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Check that docker was called with the platform registry - for _, cmd := range mock.Commands { - if cmd.Name == "docker" { - found := false - for _, arg := range cmd.Args { - if arg == "registry.registry.svc.cluster.local:5000/my-server:v1.0" { - found = true - break - } - } - if !found { - t.Errorf("expected platform registry in image tag, got args: %v", cmd.Args) - } - } - } - }) - - t.Run("returns_error_when_command_validator_fails", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - // Create a mock executor that returns an error from Command() - mock := &MockExecutor{} - // We need to use a custom executor that fails validation - failingExecutor := &validatorFailingExecutor{err: errors.New("validator failed")} - execExecutor = failingExecutor - - err := buildImage(logger, "test-server", "Dockerfile", "", ".", "registry", "tag", ".") - if err == nil { - t.Error("expected error when command validator fails") - } - if err.Error() != "validator failed" { - t.Errorf("unexpected error: %v", err) - } - - // Restore for cleanup - execExecutor = mock - }) -} - -func equalStringSlices(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i 
:= range a { - if a[i] != b[i] { - return false - } - } - return true -} - -// validatorFailingExecutor is a test executor that always fails validation. -type validatorFailingExecutor struct { - err error -} - -func (v *validatorFailingExecutor) Command(name string, args []string, validators ...ExecValidator) (Command, error) { - return nil, v.err -} - -func TestGetGitTagWithMock(t *testing.T) { - t.Run("returns_latest_when_git_fails", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - if spec.Name == "git" { - return &MockCommand{OutputErr: errors.New("git not found")} - } - return &MockCommand{} - }, - } - execExecutor = mock - - tag := getGitTag() - if tag != "latest" { - t.Errorf("expected 'latest' when git fails, got %q", tag) - } - }) - - t.Run("returns_latest_when_output_empty", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - if spec.Name == "git" { - return &MockCommand{OutputData: []byte("")} - } - return &MockCommand{} - }, - } - execExecutor = mock - - tag := getGitTag() - if tag != "latest" { - t.Errorf("expected 'latest' when output empty, got %q", tag) - } - }) - - t.Run("returns_trimmed_sha", func(t *testing.T) { - originalExecutor := execExecutor - defer func() { execExecutor = originalExecutor }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - if spec.Name == "git" { - return &MockCommand{OutputData: []byte(" abc1234 \n")} - } - return &MockCommand{} - }, - } - execExecutor = mock - - tag := getGitTag() - if tag != "abc1234" { - t.Errorf("expected 'abc1234', got %q", tag) - } - }) -} - -func TestGetPlatformRegistryURLWithMock(t *testing.T) { - logger := zap.NewNop() - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = 
origConfig }) - - t.Run("returns_configured_registry_endpoint_when_available", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.prod.example.com", RegistryPort: 5000} - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputData: []byte("10.96.201.51")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if url != "10.43.39.164:5000" { - t.Errorf("expected configured registry endpoint, got %q", url) - } - }) - - t.Run("non_test_uses_service_ip_for_implicit_default_endpoint", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: defaultRegistryEndpoint, RegistryIngressHost: defaultRegistryIngressHost, RegistryPort: 5000} - t.Setenv("MCP_RUNTIME_TEST_MODE", "") - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputData: []byte("10.96.201.51")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if url != "10.96.201.51:5000" { - t.Errorf("expected service IP registry URL, got %q", url) - } - }) - - t.Run("test_mode_prefers_service_dns_over_cluster_ip", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: defaultRegistryEndpoint, RegistryIngressHost: 
defaultRegistryIngressHost, RegistryPort: 5000} - t.Setenv("MCP_RUNTIME_TEST_MODE", "1") - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - var clusterIPQueried bool - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - clusterIPQueried = true - return &MockCommand{OutputData: []byte("10.96.201.51")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if url != "registry.registry.svc.cluster.local:5000" { - t.Errorf("expected service DNS registry URL in test mode, got %q", url) - } - if clusterIPQueried { - t.Error("expected test mode to avoid ClusterIP lookup") - } - }) - - t.Run("respects_explicit_default_endpoint_override", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: defaultRegistryEndpoint, RegistryIngressHost: defaultRegistryIngressHost, RegistryPort: 5000} - t.Setenv("MCP_REGISTRY_ENDPOINT", defaultRegistryEndpoint) - - url := getPlatformRegistryURL(logger) - if url != defaultRegistryEndpoint { - t.Errorf("expected explicitly configured endpoint %q, got %q", defaultRegistryEndpoint, url) - } - }) - - t.Run("test_mode_respects_explicit_registry_host", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "registry.example.com:5000", RegistryIngressHost: "registry.example.com", RegistryPort: 5000} - t.Setenv("MCP_RUNTIME_TEST_MODE", "1") - t.Setenv("MCP_REGISTRY_HOST", "registry.example.com:5000") - - url := getPlatformRegistryURL(logger) - if url != "registry.example.com:5000" { - t.Errorf("expected explicit registry host, got %q", url) - } - }) - - t.Run("falls_back_to_service_dns_when_cluster_ip_missing", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", 
RegistryIngressHost: "", RegistryPort: 5000} - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputErr: errors.New("kubectl error")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if url != "registry.registry.svc.cluster.local:5000" { - t.Errorf("expected service DNS registry URL, got %q", url) - } - }) - - t.Run("returns_default_when_port_command_fails", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5000} - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputData: []byte("10.96.201.51")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputErr: errors.New("kubectl error")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if !strings.Contains(url, "registry.registry.svc.cluster.local") { - t.Errorf("expected default registry URL, got %q", url) - } - }) - - t.Run("test_mode_returns_default_when_port_command_fails", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5001} - t.Setenv("MCP_RUNTIME_TEST_MODE", "1") - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range 
spec.Args { - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputErr: errors.New("kubectl error")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if url != "registry.registry.svc.cluster.local:5001" { - t.Errorf("expected default service DNS registry URL, got %q", url) - } - }) - - t.Run("returns_service_dns_when_ip_empty", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5000} - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputData: []byte("")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("5000")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if !strings.Contains(url, "registry.registry.svc.cluster.local") { - t.Errorf("expected default registry URL, got %q", url) - } - }) - - t.Run("returns_default_when_port_empty", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5000} - originalKubectl := kubectlClient - defer func() { kubectlClient = originalKubectl }() - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - for _, arg := range spec.Args { - if arg == "jsonpath={.spec.clusterIP}" { - return &MockCommand{OutputData: []byte("10.96.201.51")} - } - if arg == "jsonpath={.spec.ports[0].port}" { - return &MockCommand{OutputData: []byte("")} - } - } - return &MockCommand{} - }, - } - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - url := getPlatformRegistryURL(logger) - if !strings.Contains(url, 
"registry.registry.svc.cluster.local") { - t.Errorf("expected default registry URL, got %q", url) - } - }) -} - -func TestUpdateMetadataImage(t *testing.T) { - t.Run("updates_with_explicit_metadata_file", func(t *testing.T) { - // Create temp directory - tmpDir := t.TempDir() - metadataFile := filepath.Join(tmpDir, "servers.yaml") - - // Write initial metadata - initialContent := `version: "1" -servers: - - name: my-server - image: old-registry/my-server - imageTag: old-tag -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write initial metadata: %v", err) - } - - err := updateMetadataImage("my-server", "new-registry/my-server", "new-tag", metadataFile, "") - if err != nil { - t.Fatalf("updateMetadataImage failed: %v", err) - } - - // Read and verify - content, err := os.ReadFile(metadataFile) - if err != nil { - t.Fatalf("failed to read updated metadata: %v", err) - } - - if !strings.Contains(string(content), "new-registry/my-server") { - t.Errorf("expected new image in metadata, got: %s", content) - } - if !strings.Contains(string(content), "new-tag") { - t.Errorf("expected new tag in metadata, got: %s", content) - } - }) - - t.Run("finds_metadata_in_directory", func(t *testing.T) { - tmpDir := t.TempDir() - metadataDir := filepath.Join(tmpDir, ".mcp") - if err := os.MkdirAll(metadataDir, 0o755); err != nil { - t.Fatalf("failed to create metadata dir: %v", err) - } - - metadataFile := filepath.Join(metadataDir, "servers.yaml") - initialContent := `version: "1" -servers: - - name: discovered-server - image: old-image - imageTag: old -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write metadata: %v", err) - } - - err := updateMetadataImage("discovered-server", "new-image", "v2.0", "", metadataDir) - if err != nil { - t.Fatalf("updateMetadataImage failed: %v", err) - } - - content, err := os.ReadFile(metadataFile) - if err != nil { - 
t.Fatalf("failed to read metadata: %v", err) - } - - if !strings.Contains(string(content), "new-image") { - t.Errorf("expected new image, got: %s", content) - } - }) - - t.Run("finds_yml_files", func(t *testing.T) { - tmpDir := t.TempDir() - metadataDir := filepath.Join(tmpDir, ".mcp") - if err := os.MkdirAll(metadataDir, 0o755); err != nil { - t.Fatalf("failed to create metadata dir: %v", err) - } - - metadataFile := filepath.Join(metadataDir, "servers.yml") - initialContent := `version: "1" -servers: - - name: yml-server - image: old-image -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write metadata: %v", err) - } - - err := updateMetadataImage("yml-server", "new-image", "v1.0", "", metadataDir) - if err != nil { - t.Fatalf("updateMetadataImage failed: %v", err) - } - }) - - t.Run("returns_error_when_file_not_found", func(t *testing.T) { - tmpDir := t.TempDir() - - err := updateMetadataImage("nonexistent-server", "image", "tag", "", tmpDir) - if err == nil { - t.Error("expected error when metadata file not found") - } - if !strings.Contains(err.Error(), "metadata file not found") { - t.Errorf("unexpected error: %v", err) - } - }) - - t.Run("returns_error_when_server_not_in_metadata", func(t *testing.T) { - tmpDir := t.TempDir() - metadataFile := filepath.Join(tmpDir, "servers.yaml") - - initialContent := `version: "1" -servers: - - name: other-server - image: some-image -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write metadata: %v", err) - } - - err := updateMetadataImage("missing-server", "image", "tag", metadataFile, "") - if err == nil { - t.Error("expected error when server not found in metadata") - } - if !strings.Contains(err.Error(), "not found in metadata") { - t.Errorf("unexpected error: %v", err) - } - }) - - t.Run("returns_error_when_metadata_file_invalid", func(t *testing.T) { - tmpDir := t.TempDir() - metadataFile := 
filepath.Join(tmpDir, "invalid.yaml") - - if err := os.WriteFile(metadataFile, []byte("not: valid: yaml: content:::"), 0o600); err != nil { - t.Fatalf("failed to write invalid metadata: %v", err) - } - - err := updateMetadataImage("server", "image", "tag", metadataFile, "") - if err == nil { - t.Error("expected error when metadata file is invalid") - } - }) - - t.Run("skips_invalid_files_in_directory_search", func(t *testing.T) { - tmpDir := t.TempDir() - metadataDir := filepath.Join(tmpDir, ".mcp") - if err := os.MkdirAll(metadataDir, 0o755); err != nil { - t.Fatalf("failed to create metadata dir: %v", err) - } - - // Write invalid file first (should be skipped) - invalidFile := filepath.Join(metadataDir, "invalid.yaml") - if err := os.WriteFile(invalidFile, []byte("not: valid: yaml:::"), 0o600); err != nil { - t.Fatalf("failed to write invalid file: %v", err) - } - - // Write valid file with our server - validFile := filepath.Join(metadataDir, "valid.yaml") - validContent := `version: "1" -servers: - - name: target-server - image: old-image -` - if err := os.WriteFile(validFile, []byte(validContent), 0o600); err != nil { - t.Fatalf("failed to write valid file: %v", err) - } - - err := updateMetadataImage("target-server", "new-image", "v1.0", "", metadataDir) - if err != nil { - t.Fatalf("updateMetadataImage should skip invalid files: %v", err) - } - }) - - t.Run("returns_error_when_file_write_fails", func(t *testing.T) { - if os.Geteuid() == 0 { - t.Skip("root can bypass read-only file mode semantics in this environment") - } - - tmpDir := t.TempDir() - metadataFile := filepath.Join(tmpDir, "servers.yaml") - - initialContent := `version: "1" -servers: - - name: my-server - image: old-image -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write metadata: %v", err) - } - - // Make the file read-only to cause write failure - if err := os.Chmod(metadataFile, 0o400); err != nil { - t.Fatalf("failed to chmod 
file: %v", err) - } - // Restore permissions for cleanup - defer func() { _ = os.Chmod(metadataFile, 0o600) }() - - err := updateMetadataImage("my-server", "new-image", "v1.0", metadataFile, "") - if err == nil { - t.Error("expected error when file write fails") - } - if !strings.Contains(err.Error(), "failed to write metadata") { - t.Errorf("unexpected error: %v", err) - } - }) - - t.Run("returns_error_when_yaml_marshal_fails", func(t *testing.T) { - tmpDir := t.TempDir() - metadataFile := filepath.Join(tmpDir, "servers.yaml") - - initialContent := `version: "1" -servers: - - name: my-server - image: old-image -` - if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { - t.Fatalf("failed to write metadata: %v", err) - } - - // Save and restore original yamlMarshal - originalMarshal := yamlMarshal - defer func() { yamlMarshal = originalMarshal }() - - // Mock yamlMarshal to return error - yamlMarshal = func(v interface{}) ([]byte, error) { - return nil, errors.New("marshal failed") - } - - err := updateMetadataImage("my-server", "new-image", "v1.0", metadataFile, "") - if err == nil { - t.Error("expected error when yaml marshal fails") - } - if !strings.Contains(err.Error(), "failed to marshal metadata") { - t.Errorf("unexpected error: %v", err) - } - }) -} diff --git a/internal/cli/cert.go b/internal/cli/cert.go deleted file mode 100644 index 07486c4..0000000 --- a/internal/cli/cert.go +++ /dev/null @@ -1,202 +0,0 @@ -package cli - -// This file implements certificate and TLS management functionality. -// It handles cert-manager integration, CA secret management, and certificate provisioning. 
- -import ( - "fmt" - "os" - "strings" - "time" - - "go.uber.org/zap" -) - -const ( - certManagerNamespace = "cert-manager" - // #nosec G101 -- This is the name of a Kubernetes secret resource, not actual credentials - certCASecretName = "mcp-runtime-ca" - certClusterIssuerName = "mcp-runtime-ca" - registryCertificateName = "registry-cert" - clusterIssuerManifestPath = "config/cert-manager/cluster-issuer.yaml" - registryCertificateManifestPath = "config/cert-manager/example-registry-certificate.yaml" -) - -// CertManager manages cert-manager resources for the platform. -type CertManager struct { - kubectl KubectlRunner - logger *zap.Logger -} - -// NewCertManager creates a CertManager with the given dependencies. -func NewCertManager(kubectl KubectlRunner, logger *zap.Logger) *CertManager { - return &CertManager{kubectl: kubectl, logger: logger} -} - -// Status verifies cert-manager installation and required resources. -func (m *CertManager) Status() error { - Info("Checking cert-manager installation") - if err := checkCertManagerInstalledWithKubectl(m.kubectl); err != nil { - err := wrapWithSentinel(ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true") - Error("Cert-manager not installed") - logStructuredError(m.logger, err, "Cert-manager not installed") - return err - } - Info("Checking CA secret") - if err := checkCASecretWithKubectl(m.kubectl); err != nil { - err := newWithSentinel(ErrCASecretNotFound, fmt.Sprintf("CA secret %q not found in cert-manager namespace. 
Create it first:\n kubectl create secret tls %s --cert=ca.crt --key=ca.key -n %s", certCASecretName, certCASecretName, certManagerNamespace)) - Error("CA secret not found") - logStructuredError(m.logger, err, "CA secret not found") - return err - } - Info("Checking ClusterIssuer") - if err := checkClusterIssuerWithKubectl(m.kubectl); err != nil { - err := newWithSentinel(ErrClusterIssuerNotFound, fmt.Sprintf("ClusterIssuer %q not found. Apply it first:\n kubectl apply -f %s", certClusterIssuerName, clusterIssuerManifestPath)) - Error("ClusterIssuer not found") - logStructuredError(m.logger, err, "ClusterIssuer not found") - return err - } - Info("Checking registry Certificate") - if err := checkCertificateWithKubectl(m.kubectl, registryCertificateName, NamespaceRegistry); err != nil { - err := newWithSentinel(ErrRegistryCertificateNotFound, fmt.Sprintf("registry Certificate not found. Apply it first:\n kubectl apply -f %s", registryCertificateManifestPath)) - Error("Registry Certificate not found") - logStructuredError(m.logger, err, "Registry Certificate not found") - return err - } - Success("Cert-manager resources are present") - return nil -} - -// Apply installs cert-manager resources required for registry TLS. -func (m *CertManager) Apply() error { - Info("Checking cert-manager installation") - if err := checkCertManagerInstalledWithKubectl(m.kubectl); err != nil { - err := wrapWithSentinel(ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true") - Error("Cert-manager not installed") - logStructuredError(m.logger, err, "Cert-manager not installed") - return err - } - Info("Checking CA secret") - if err := checkCASecretWithKubectl(m.kubectl); err != nil { - err := newWithSentinel(ErrCASecretNotFound, fmt.Sprintf("CA secret %q not found in cert-manager namespace. 
Create it first:\n kubectl create secret tls %s --cert=ca.crt --key=ca.key -n %s", certCASecretName, certCASecretName, certManagerNamespace)) - Error("CA secret not found") - logStructuredError(m.logger, err, "CA secret not found") - return err - } - - Info("Applying ClusterIssuer") - if err := applyClusterIssuerWithKubectl(m.kubectl); err != nil { - wrappedErr := wrapWithSentinel(ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply ClusterIssuer: %v", err)) - Error("Failed to apply ClusterIssuer") - logStructuredError(m.logger, wrappedErr, "Failed to apply ClusterIssuer") - return wrappedErr - } - if err := ensureNamespace(NamespaceRegistry); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateRegistryNamespaceFailed, - err, - fmt.Sprintf("failed to create registry namespace: %v", err), - map[string]any{"namespace": NamespaceRegistry, "component": "cert"}, - ) - Error("Failed to create registry namespace") - logStructuredError(m.logger, wrappedErr, "Failed to create registry namespace") - return wrappedErr - } - Info("Applying Certificate for registry") - if err := applyRegistryCertificateWithKubectl(m.kubectl); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplyCertificateFailed, - err, - fmt.Sprintf("failed to apply Certificate: %v", err), - map[string]any{"certificate": registryCertificateName, "namespace": NamespaceRegistry, "component": "cert"}, - ) - Error("Failed to apply Certificate") - logStructuredError(m.logger, wrappedErr, "Failed to apply Certificate") - return wrappedErr - } - - Success("Cert-manager resources applied") - return nil -} - -// Wait blocks until the registry certificate is Ready or times out. 
-func (m *CertManager) Wait(timeout time.Duration) error { - Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", timeout)) - if err := waitForCertificateReadyWithKubectl(m.kubectl, registryCertificateName, NamespaceRegistry, timeout); err != nil { - err := newWithSentinel(ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", timeout)) - Error("Certificate not ready") - logStructuredError(m.logger, err, "Certificate not ready") - return err - } - Success("Certificate issued successfully") - return nil -} - -func checkCertManagerInstalledWithKubectl(kubectl KubectlRunner) error { - // #nosec G204 -- fixed kubectl command to check CRD. - if err := kubectl.Run([]string{"get", "crd", CertManagerCRDName}); err != nil { - return ErrCertManagerNotInstalled - } - return nil -} - -func checkCASecretWithKubectl(kubectl KubectlRunner) error { - // #nosec G204 -- fixed kubectl command to check secret. - if err := kubectl.Run([]string{"get", "secret", certCASecretName, "-n", certManagerNamespace}); err != nil { - return ErrCASecretNotFound - } - return nil -} - -func checkClusterIssuerWithKubectl(kubectl KubectlRunner) error { - // #nosec G204 -- fixed kubectl command to check ClusterIssuer. - if err := kubectl.Run([]string{"get", "clusterissuer", certClusterIssuerName}); err != nil { - return wrapWithSentinel(ErrClusterIssuerNotFound, err, fmt.Sprintf("ClusterIssuer %q not found: %v", certClusterIssuerName, err)) - } - return nil -} - -// checkNamedClusterIssuerWithKubectl verifies a cert-manager ClusterIssuer exists -// (e.g. a company-managed CA; setup does not apply it). 
-func checkNamedClusterIssuerWithKubectl(kubectl KubectlRunner, name string) error { - name = strings.TrimSpace(name) - if name == "" { - return newWithSentinel(ErrClusterIssuerNotFound, "ClusterIssuer name is empty (set --tls-cluster-issuer or MCP_TLS_CLUSTER_ISSUER)") - } - // #nosec G204 -- issuer name is validated, fixed kubectl subresource. - if err := kubectl.Run([]string{"get", "clusterissuer", name}); err != nil { - return wrapWithSentinel(ErrClusterIssuerNotFound, err, fmt.Sprintf("ClusterIssuer %q not found. Install your org issuer first (cert-manager) or fix --tls-cluster-issuer / MCP_TLS_CLUSTER_ISSUER: %v", name, err)) - } - return nil -} - -func checkCertificateWithKubectl(kubectl KubectlRunner, name, namespace string) error { - // #nosec G204 -- fixed kubectl command to check certificate. - if err := kubectl.Run([]string{"get", "certificate", name, "-n", namespace}); err != nil { - return wrapWithSentinel(ErrRegistryCertificateNotFound, err, fmt.Sprintf("Certificate %q not found in namespace %q: %v", name, namespace, err)) - } - return nil -} - -func applyClusterIssuerWithKubectl(kubectl KubectlRunner) error { - // #nosec G204 -- fixed file path from repository. - return kubectl.RunWithOutput([]string{"apply", "-f", clusterIssuerManifestPath}, os.Stdout, os.Stderr) -} - -func applyRegistryCertificateWithKubectl(kubectl KubectlRunner) error { - content, err := os.ReadFile(registryCertificateManifestPath) - if err != nil { - return err - } - manifest := rewriteRegistryHost(string(content), GetRegistryIngressHost()) - return applyManifestContentWithNamespace(kubectl, manifest, NamespaceRegistry) -} - -func waitForCertificateReadyWithKubectl(kubectl KubectlRunner, name, namespace string, timeout time.Duration) error { - // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
- return kubectl.RunWithOutput([]string{ - "wait", "--for=condition=Ready", - "certificate/" + name, "-n", namespace, - fmt.Sprintf("--timeout=%s", timeout), - }, os.Stdout, os.Stderr) -} diff --git a/internal/cli/cert_letsencrypt_test.go b/internal/cli/cert_letsencrypt_test.go deleted file mode 100644 index e9a054c..0000000 --- a/internal/cli/cert_letsencrypt_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package cli - -import "testing" - -func TestValidateIngressManifestForACME(t *testing.T) { - t.Parallel() - if err := validateIngressManifestForACME("config/ingress/overlays/http"); err == nil { - t.Fatal("expected error for dev http overlay") - } - if err := validateIngressManifestForACME("config/ingress/overlays/prod"); err != nil { - t.Fatalf("prod overlay should be allowed: %v", err) - } - if err := validateIngressManifestForACME(""); err != nil { - t.Fatalf("empty: %v", err) - } -} diff --git a/internal/cli/cert_letsencrypt.go b/internal/cli/certmanager/letsencrypt.go similarity index 62% rename from internal/cli/cert_letsencrypt.go rename to internal/cli/certmanager/letsencrypt.go index 93c2a86..b68eeac 100644 --- a/internal/cli/cert_letsencrypt.go +++ b/internal/cli/certmanager/letsencrypt.go @@ -1,9 +1,10 @@ -package cli +package certmanager import ( "fmt" "io" "net" + "net/url" "os" "path/filepath" "strconv" @@ -11,6 +12,9 @@ import ( "time" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" ) const ( @@ -54,7 +58,7 @@ func acmeServerURL(staging bool) string { func acmeTLSDNSNames() []string { seen := make(map[string]struct{}) var out []string - for _, h := range []string{GetRegistryIngressHost(), GetMcpIngressHost()} { + for _, h := range []string{core.GetRegistryIngressHost(), core.GetMcpIngressHost()} { h = strings.TrimSpace(h) if h == "" { continue @@ -68,6 +72,10 @@ func acmeTLSDNSNames() []string { return out } +func ACMETLSDNSNames() []string { + return acmeTLSDNSNames() +} + func validateACMEHostnameForPublicCA() 
error { names := acmeTLSDNSNames() if len(names) == 0 { @@ -81,6 +89,45 @@ func validateACMEHostnameForPublicCA() error { return nil } +func ValidateACMEHostnameForPublicCA() error { + return validateACMEHostnameForPublicCA() +} + +func isDevRegistryURL(raw string) bool { + trimmed := strings.TrimSpace(strings.TrimSuffix(raw, "/")) + if trimmed == "" { + return true + } + if strings.HasPrefix(strings.ToLower(trimmed), "http://") { + return true + } + + host := trimmed + if strings.Contains(trimmed, "://") { + if parsed, err := url.Parse(trimmed); err == nil && parsed.Host != "" { + host = parsed.Host + } + } + if slash := strings.Index(host, "/"); slash >= 0 { + host = host[:slash] + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } else if idx := strings.LastIndex(host, ":"); idx >= 0 && strings.Count(host, ":") == 1 { + host = host[:idx] + } + + host = strings.ToLower(strings.Trim(host, "[]")) + switch host { + case "", "localhost", "registry.local": + return true + } + if strings.HasSuffix(host, ".local") || strings.HasSuffix(host, ".svc.cluster.local") { + return true + } + return net.ParseIP(host) != nil +} + // validateIngressManifestForACME rejects the dev "http" overlay, which does not listen on 80/443, so Let’s Encrypt HTTP-01 cannot work. func validateIngressManifestForACME(ingressManifest string) error { m := strings.TrimSpace(ingressManifest) @@ -96,16 +143,20 @@ func validateIngressManifestForACME(ingressManifest string) error { return nil } +func ValidateIngressManifestForACME(ingressManifest string) error { + return validateIngressManifestForACME(ingressManifest) +} + // waitForTraefikDeploymentForACME waits for the Traefik this repo installs in namespace "traefik". If it is missing (e.g. skipped install, or another cluster ingress), a warning is printed and we continue. 
-func waitForTraefikDeploymentForACME(kubectl KubectlRunner) error { +func waitForTraefikDeploymentForACME(kubectl core.KubectlRunner) error { if err := kubectl.RunWithOutput( []string{"get", "deployment", traefikManagedDeployment, "-n", traefikManagedNamespace}, io.Discard, io.Discard, ); err != nil { - Warn("No " + traefikManagedNamespace + "/" + traefikManagedDeployment + " deployment found; skipping Traefik wait. cert-manager still needs the Traefik ingress class to serve HTTP-01, with port 80 on your public hostnames") + core.Warn("No " + traefikManagedNamespace + "/" + traefikManagedDeployment + " deployment found; skipping Traefik wait. cert-manager still needs the Traefik ingress class to serve HTTP-01, with port 80 on your public hostnames") return nil } - Info("Waiting for " + traefikManagedNamespace + "/" + traefikManagedDeployment + " (ingress must be up before the ACME request)") + core.Info("Waiting for " + traefikManagedNamespace + "/" + traefikManagedDeployment + " (ingress must be up before the ACME request)") // #nosec G204 -- fixed resource names; timeout is fixed. if err := kubectl.RunWithOutput([]string{ "wait", "--for=condition=Available", @@ -113,10 +164,14 @@ func waitForTraefikDeploymentForACME(kubectl KubectlRunner) error { }, os.Stdout, os.Stderr); err != nil { return fmt.Errorf("traefik not ready: %w", err) } - Info(traefikManagedNamespace + "/" + traefikManagedDeployment + " is available") + core.Info(traefikManagedNamespace + "/" + traefikManagedDeployment + " is available") return nil } +func WaitForTraefikDeploymentForACME(kubectl core.KubectlRunner) error { + return waitForTraefikDeploymentForACME(kubectl) +} + // preflightACMEHostnamesPort80 tries TCP dials to host:80 from the machine running setup. Failing does not block setup (Operator may be off-node); success helps confirm DNS and a listener before a long cert wait. 
func preflightACMEHostnamesPort80(dnsNames []string) { for _, h := range dedupeHostnames(dnsNames) { @@ -126,44 +181,48 @@ func preflightACMEHostnamesPort80(dnsNames []string) { addr := net.JoinHostPort(h, "80") c, err := net.DialTimeout("tcp", addr, 5*time.Second) if err != nil { - Warn("From this host, could not open TCP to " + addr + " (" + err.Error() + "). Let's Encrypt will try from the public internet, so check DNS, firewall, and that Traefik listens on port 80. If the cluster is on another network, you can ignore this if port 80 is open publicly") + core.Warn("From this host, could not open TCP to " + addr + " (" + err.Error() + "). Let's Encrypt will try from the public internet, so check DNS, firewall, and that Traefik listens on port 80. If the cluster is on another network, you can ignore this if port 80 is open publicly") continue } _ = c.Close() - Info("TCP to " + addr + " succeeded from this host (a good sign for HTTP-01)") + core.Info("TCP to " + addr + " succeeded from this host (a good sign for HTTP-01)") } } +func PreflightACMEHostnamesPort80(dnsNames []string) { + preflightACMEHostnamesPort80(dnsNames) +} + // ensureCertManagerInstalled applies upstream cert-manager if CRDs are missing and waits for deployments. -func ensureCertManagerInstalled(kubectl KubectlRunner, logger *zap.Logger) error { +func ensureCertManagerInstalled(kubectl core.KubectlRunner, logger *zap.Logger) error { if err := checkCertManagerInstalledWithKubectl(kubectl); err == nil { - Info("cert-manager already installed") + core.Info("cert-manager already installed") return nil } - Info(fmt.Sprintf("Installing cert-manager %s", certManagerRelease)) + core.Info(fmt.Sprintf("Installing cert-manager %s", certManagerRelease)) warnMsg := "If this fails (no network), install cert-manager manually, then re-run setup with --skip-cert-manager-install" - Warn(warnMsg) + core.Warn(warnMsg) url := certManagerInstallManifestURL() // #nosec G204 -- fixed release URL. 
if err := kubectl.RunWithOutput([]string{"apply", "-f", url}, os.Stdout, os.Stderr); err != nil { - wrapped := wrapWithSentinel(ErrCertManagerInstallFailed, err, fmt.Sprintf("cert-manager install failed: %v. %s", err, warnMsg)) - Error("cert-manager install failed") + wrapped := core.WrapWithSentinel(core.ErrCertManagerInstallFailed, err, fmt.Sprintf("cert-manager install failed: %v. %s", err, warnMsg)) + core.Error("cert-manager install failed") if logger != nil { - logStructuredError(logger, wrapped, "cert-manager install failed") + core.LogStructuredError(logger, wrapped, "cert-manager install failed") } return wrapped } overall := 5 * time.Minute start := time.Now() - Info(fmt.Sprintf("Waiting for cert-manager deployments (combined timeout %s across three deployments)", overall)) + core.Info(fmt.Sprintf("Waiting for cert-manager deployments (combined timeout %s across three deployments)", overall)) for _, dep := range []string{"cert-manager", "cert-manager-cainjector", "cert-manager-webhook"} { remaining := time.Until(start.Add(overall)) if remaining <= 0 { err := fmt.Errorf("timed out waiting for cert-manager before deployment/%s", dep) - wrapped := wrapWithSentinel(ErrCertManagerInstallFailed, err, err.Error()) - Error("cert-manager did not become ready") + wrapped := core.WrapWithSentinel(core.ErrCertManagerInstallFailed, err, err.Error()) + core.Error("cert-manager did not become ready") if logger != nil { - logStructuredError(logger, wrapped, "cert-manager did not become ready") + core.LogStructuredError(logger, wrapped, "cert-manager did not become ready") } return wrapped } @@ -173,36 +232,44 @@ func ensureCertManagerInstalled(kubectl KubectlRunner, logger *zap.Logger) error "deployment/" + dep, "-n", certManagerNamespace, "--timeout=" + remaining.Round(time.Second).String(), }, os.Stdout, os.Stderr); err != nil { - wrapped := wrapWithSentinel(ErrCertManagerInstallFailed, err, fmt.Sprintf("cert-manager component %s not ready: %v", dep, err)) - 
Error("cert-manager did not become ready") + wrapped := core.WrapWithSentinel(core.ErrCertManagerInstallFailed, err, fmt.Sprintf("cert-manager component %s not ready: %v", dep, err)) + core.Error("cert-manager did not become ready") if logger != nil { - logStructuredError(logger, wrapped, "cert-manager did not become ready") + core.LogStructuredError(logger, wrapped, "cert-manager did not become ready") } return wrapped } } - Info("cert-manager is ready") + core.Info("cert-manager is ready") return nil } -func applyLetsEncryptClusterIssuer(kubectl KubectlRunner, email string, staging bool, logger *zap.Logger) error { +func EnsureCertManagerInstalled(kubectl core.KubectlRunner, logger *zap.Logger) error { + return ensureCertManagerInstalled(kubectl, logger) +} + +func applyLetsEncryptClusterIssuer(kubectl core.KubectlRunner, email string, staging bool, logger *zap.Logger) error { email = strings.TrimSpace(email) if email == "" { return fmt.Errorf("ACME email is required") } name := ClusterIssuerNameForACME(staging) manifest := renderLetsEncryptClusterIssuerManifest(name, email, acmeServerURL(staging)) - if err := applyManifestContent(kubectl, manifest); err != nil { - wrapped := wrapWithSentinel(ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply Let's Encrypt ClusterIssuer: %v", err)) - Error("Failed to apply ClusterIssuer") + if err := kube.ApplyManifestContent(kubectl.CommandArgs, manifest); err != nil { + wrapped := core.WrapWithSentinel(core.ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply Let's Encrypt ClusterIssuer: %v", err)) + core.Error("Failed to apply ClusterIssuer") if logger != nil { - logStructuredError(logger, wrapped, "Failed to apply ClusterIssuer") + core.LogStructuredError(logger, wrapped, "Failed to apply ClusterIssuer") } return wrapped } return nil } +func ApplyLetsEncryptClusterIssuer(kubectl core.KubectlRunner, email string, staging bool, logger *zap.Logger) error { + return applyLetsEncryptClusterIssuer(kubectl, 
email, staging, logger) +} + func renderLetsEncryptClusterIssuerManifest(name, email, serverURL string) string { var b strings.Builder b.WriteString("apiVersion: cert-manager.io/v1\n") @@ -230,13 +297,17 @@ func renderLetsEncryptClusterIssuerManifest(name, email, serverURL string) strin return b.String() } -func applyRegistryCertificateForACME(kubectl KubectlRunner, dnsNames []string, issuerName string) error { +func applyRegistryCertificateForACME(kubectl core.KubectlRunner, dnsNames []string, issuerName string) error { uniq := dedupeHostnames(dnsNames) if len(uniq) == 0 { return fmt.Errorf("registry TLS has no DNS names to request") } manifest := renderRegistryCertificateForACME(registryCertificateName, uniq, issuerName) - return applyManifestContent(kubectl, manifest) + return kube.ApplyManifestContent(kubectl.CommandArgs, manifest) +} + +func ApplyRegistryCertificateForACME(kubectl core.KubectlRunner, dnsNames []string, issuerName string) error { + return applyRegistryCertificateForACME(kubectl, dnsNames, issuerName) } func dedupeHostnames(hs []string) []string { @@ -266,7 +337,7 @@ func renderRegistryCertificateForACME(certName string, dnsNames []string, issuer b.WriteString(certName) b.WriteString("\n") b.WriteString(" namespace: ") - b.WriteString(NamespaceRegistry) + b.WriteString(core.NamespaceRegistry) b.WriteString("\n") b.WriteString("spec:\n") b.WriteString(" secretName: registry-tls\n") diff --git a/internal/cli/certmanager/letsencrypt_test.go b/internal/cli/certmanager/letsencrypt_test.go new file mode 100644 index 0000000..ab60a1a --- /dev/null +++ b/internal/cli/certmanager/letsencrypt_test.go @@ -0,0 +1,49 @@ +package certmanager + +import ( + "testing" + + "mcp-runtime/internal/cli/core" +) + +func TestValidateIngressManifestForACME(t *testing.T) { + t.Parallel() + if err := validateIngressManifestForACME("config/ingress/overlays/http"); err == nil { + t.Fatal("expected error for dev http overlay") + } + if err := 
validateIngressManifestForACME("config/ingress/overlays/prod"); err != nil { + t.Fatalf("prod overlay should be allowed: %v", err) + } + if err := validateIngressManifestForACME(""); err != nil { + t.Fatalf("empty: %v", err) + } +} + +// TestACMETLSDNSNamesExcludesPlatformHost asserts that the registry-cert SANs +// do NOT include the platform host. The platform Ingress in mcp-sentinel uses +// cert-manager's ingress-shim to mint its own cert; including the platform +// host in the registry-cert would cause a redundant ACME order on every +// renewal (and the secret in the registry namespace cannot be referenced from +// a different namespace by Kubernetes Ingress anyway). +func TestACMETLSDNSNamesExcludesPlatformHost(t *testing.T) { + prev := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = prev }) + core.DefaultCLIConfig = &core.CLIConfig{ + RegistryIngressHost: "registry.example.com", + McpIngressHost: "mcp.example.com", + PlatformIngressHost: "platform.example.com", + } + names := acmeTLSDNSNames() + want := map[string]bool{ + "registry.example.com": true, + "mcp.example.com": true, + } + if len(names) != len(want) { + t.Fatalf("expected %d hostnames, got %d (%v)", len(want), len(names), names) + } + for _, n := range names { + if !want[n] { + t.Fatalf("unexpected hostname %q in registry SANs (platform host should be excluded)", n) + } + } +} diff --git a/internal/cli/certmanager/manager.go b/internal/cli/certmanager/manager.go new file mode 100644 index 0000000..ba90089 --- /dev/null +++ b/internal/cli/certmanager/manager.go @@ -0,0 +1,260 @@ +package certmanager + +// This file implements certificate and TLS management functionality. +// It handles cert-manager integration, CA secret management, and certificate provisioning. 
+ +import ( + "fmt" + "os" + "strings" + "time" + + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" +) + +const ( + certManagerNamespace = "cert-manager" + // #nosec G101 -- This is the name of a Kubernetes secret resource, not actual credentials + certCASecretName = "mcp-runtime-ca" + certClusterIssuerName = "mcp-runtime-ca" + registryCertificateName = "registry-cert" + clusterIssuerManifestPath = "config/cert-manager/cluster-issuer.yaml" + registryCertificateManifestPath = "config/cert-manager/example-registry-certificate.yaml" +) + +const ( + CertClusterIssuerName = certClusterIssuerName + RegistryCertificateName = registryCertificateName +) + +// CertManager manages cert-manager resources for the platform. +type CertManager struct { + kubectl core.KubectlRunner + logger *zap.Logger +} + +// NewCertManager creates a CertManager with the given dependencies. +func NewCertManager(kubectl core.KubectlRunner, logger *zap.Logger) *CertManager { + return &CertManager{kubectl: kubectl, logger: logger} +} + +// Status verifies cert-manager installation and required resources. +func (m *CertManager) Status() error { + core.Info("Checking cert-manager installation") + if err := checkCertManagerInstalledWithKubectl(m.kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true") + core.Error("Cert-manager not installed") + core.LogStructuredError(m.logger, err, "Cert-manager not installed") + return err + } + core.Info("Checking CA secret") + if err := checkCASecretWithKubectl(m.kubectl); err != nil { + err := core.NewWithSentinel(core.ErrCASecretNotFound, fmt.Sprintf("CA secret %q not found in cert-manager namespace. 
Create it first:\n kubectl create secret tls %s --cert=ca.crt --key=ca.key -n %s", certCASecretName, certCASecretName, certManagerNamespace)) + core.Error("CA secret not found") + core.LogStructuredError(m.logger, err, "CA secret not found") + return err + } + core.Info("Checking ClusterIssuer") + if err := checkClusterIssuerWithKubectl(m.kubectl); err != nil { + err := core.NewWithSentinel(core.ErrClusterIssuerNotFound, fmt.Sprintf("ClusterIssuer %q not found. Apply it first:\n kubectl apply -f %s", certClusterIssuerName, clusterIssuerManifestPath)) + core.Error("ClusterIssuer not found") + core.LogStructuredError(m.logger, err, "ClusterIssuer not found") + return err + } + core.Info("Checking registry Certificate") + if err := checkCertificateWithKubectl(m.kubectl, registryCertificateName, core.NamespaceRegistry); err != nil { + err := core.NewWithSentinel(core.ErrRegistryCertificateNotFound, fmt.Sprintf("registry Certificate not found. Apply it first:\n kubectl apply -f %s", registryCertificateManifestPath)) + core.Error("Registry Certificate not found") + core.LogStructuredError(m.logger, err, "Registry Certificate not found") + return err + } + core.Success("Cert-manager resources are present") + return nil +} + +// Apply installs cert-manager resources required for registry TLS. When dryRun +// is true, the read-only preflight checks still run (to catch obvious problems +// like missing cert-manager) but no kubectl apply is performed. +func (m *CertManager) Apply(dryRun bool) error { + core.Info("Checking cert-manager installation") + if err := checkCertManagerInstalledWithKubectl(m.kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCertManagerNotInstalled, err, "cert-manager not installed. 
Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true") + core.Error("Cert-manager not installed") + core.LogStructuredError(m.logger, err, "Cert-manager not installed") + return err + } + core.Info("Checking CA secret") + if err := checkCASecretWithKubectl(m.kubectl); err != nil { + err := core.NewWithSentinel(core.ErrCASecretNotFound, fmt.Sprintf("CA secret %q not found in cert-manager namespace. Create it first:\n kubectl create secret tls %s --cert=ca.crt --key=ca.key -n %s", certCASecretName, certCASecretName, certManagerNamespace)) + core.Error("CA secret not found") + core.LogStructuredError(m.logger, err, "CA secret not found") + return err + } + + if dryRun { + core.Info("[dry-run] would apply ClusterIssuer") + core.Info(fmt.Sprintf("[dry-run] would ensure namespace %q exists", core.NamespaceRegistry)) + core.Info(fmt.Sprintf("[dry-run] would apply Certificate %q in namespace %q", registryCertificateName, core.NamespaceRegistry)) + core.Success("Dry-run complete; no resources applied") + return nil + } + + core.Info("Applying ClusterIssuer") + if err := applyClusterIssuerWithKubectl(m.kubectl); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply ClusterIssuer: %v", err)) + core.Error("Failed to apply ClusterIssuer") + core.LogStructuredError(m.logger, wrappedErr, "Failed to apply ClusterIssuer") + return wrappedErr + } + if err := kube.EnsureNamespace(m.kubectl.CommandArgs, core.NamespaceRegistry); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateRegistryNamespaceFailed, + err, + fmt.Sprintf("failed to create registry namespace: %v", err), + map[string]any{"namespace": core.NamespaceRegistry, "component": "cert"}, + ) + core.Error("Failed to create registry namespace") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create registry namespace") + return wrappedErr + } + 
core.Info("Applying Certificate for registry") + if err := applyRegistryCertificateWithKubectl(m.kubectl); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyCertificateFailed, + err, + fmt.Sprintf("failed to apply Certificate: %v", err), + map[string]any{"certificate": registryCertificateName, "namespace": core.NamespaceRegistry, "component": "cert"}, + ) + core.Error("Failed to apply Certificate") + core.LogStructuredError(m.logger, wrappedErr, "Failed to apply Certificate") + return wrappedErr + } + + core.Success("Cert-manager resources applied") + return nil +} + +// Wait blocks until the registry certificate is Ready or times out. +func (m *CertManager) Wait(timeout time.Duration) error { + core.Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", timeout)) + if err := waitForCertificateReadyWithKubectl(m.kubectl, registryCertificateName, core.NamespaceRegistry, timeout); err != nil { + err := core.NewWithSentinel(core.ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", timeout)) + core.Error("Certificate not ready") + core.LogStructuredError(m.logger, err, "Certificate not ready") + return err + } + core.Success("Certificate issued successfully") + return nil +} + +func checkCertManagerInstalledWithKubectl(kubectl core.KubectlRunner) error { + // #nosec G204 -- fixed kubectl command to check CRD. + if err := kubectl.Run([]string{"get", "crd", core.CertManagerCRDName}); err != nil { + return core.ErrCertManagerNotInstalled + } + return nil +} + +func CheckCertManagerInstalledWithKubectl(kubectl core.KubectlRunner) error { + return checkCertManagerInstalledWithKubectl(kubectl) +} + +func checkCASecretWithKubectl(kubectl core.KubectlRunner) error { + // #nosec G204 -- fixed kubectl command to check secret. 
+ if err := kubectl.Run([]string{"get", "secret", certCASecretName, "-n", certManagerNamespace}); err != nil { + return core.ErrCASecretNotFound + } + return nil +} + +func CheckCASecretWithKubectl(kubectl core.KubectlRunner) error { + return checkCASecretWithKubectl(kubectl) +} + +func checkClusterIssuerWithKubectl(kubectl core.KubectlRunner) error { + // #nosec G204 -- fixed kubectl command to check ClusterIssuer. + if err := kubectl.Run([]string{"get", "clusterissuer", certClusterIssuerName}); err != nil { + return core.WrapWithSentinel(core.ErrClusterIssuerNotFound, err, fmt.Sprintf("ClusterIssuer %q not found: %v", certClusterIssuerName, err)) + } + return nil +} + +func CheckClusterIssuerWithKubectl(kubectl core.KubectlRunner) error { + return checkClusterIssuerWithKubectl(kubectl) +} + +// checkNamedClusterIssuerWithKubectl verifies a cert-manager ClusterIssuer exists +// (e.g. a company-managed CA; setup does not apply it). +func checkNamedClusterIssuerWithKubectl(kubectl core.KubectlRunner, name string) error { + name = strings.TrimSpace(name) + if name == "" { + return core.NewWithSentinel(core.ErrClusterIssuerNotFound, "ClusterIssuer name is empty (set --tls-cluster-issuer or MCP_TLS_CLUSTER_ISSUER)") + } + // #nosec G204 -- issuer name is validated, fixed kubectl subresource. + if err := kubectl.Run([]string{"get", "clusterissuer", name}); err != nil { + return core.WrapWithSentinel(core.ErrClusterIssuerNotFound, err, fmt.Sprintf("ClusterIssuer %q not found. Install your org issuer first (cert-manager) or fix --tls-cluster-issuer / MCP_TLS_CLUSTER_ISSUER: %v", name, err)) + } + return nil +} + +func CheckNamedClusterIssuerWithKubectl(kubectl core.KubectlRunner, name string) error { + return checkNamedClusterIssuerWithKubectl(kubectl, name) +} + +func checkCertificateWithKubectl(kubectl core.KubectlRunner, name, namespace string) error { + // #nosec G204 -- fixed kubectl command to check certificate. 
+ if err := kubectl.Run([]string{"get", "certificate", name, "-n", namespace}); err != nil { + return core.WrapWithSentinel(core.ErrRegistryCertificateNotFound, err, fmt.Sprintf("Certificate %q not found in namespace %q: %v", name, namespace, err)) + } + return nil +} + +func CheckCertificateWithKubectl(kubectl core.KubectlRunner, name, namespace string) error { + return checkCertificateWithKubectl(kubectl, name, namespace) +} + +func applyClusterIssuerWithKubectl(kubectl core.KubectlRunner) error { + // #nosec G204 -- fixed file path from repository. + return kubectl.RunWithOutput([]string{"apply", "-f", clusterIssuerManifestPath}, os.Stdout, os.Stderr) +} + +func ApplyClusterIssuerWithKubectl(kubectl core.KubectlRunner) error { + return applyClusterIssuerWithKubectl(kubectl) +} + +func applyRegistryCertificateWithKubectl(kubectl core.KubectlRunner) error { + content, err := os.ReadFile(registryCertificateManifestPath) + if err != nil { + return err + } + manifest := rewriteRegistryHost(string(content), core.GetRegistryIngressHost()) + return kube.ApplyManifestContentWithNamespace(kubectl.CommandArgs, manifest, core.NamespaceRegistry) +} + +func ApplyRegistryCertificateWithKubectl(kubectl core.KubectlRunner) error { + return applyRegistryCertificateWithKubectl(kubectl) +} + +func rewriteRegistryHost(manifest, host string) string { + host = strings.TrimSpace(host) + if host == "" || host == "registry.local" { + return manifest + } + return strings.ReplaceAll(manifest, "registry.local", host) +} + +func waitForCertificateReadyWithKubectl(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error { + // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
+ return kubectl.RunWithOutput([]string{ + "wait", "--for=condition=Ready", + "certificate/" + name, "-n", namespace, + fmt.Sprintf("--timeout=%s", timeout), + }, os.Stdout, os.Stderr) +} + +func WaitForCertificateReadyWithKubectl(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error { + return waitForCertificateReadyWithKubectl(kubectl, name, namespace, timeout) +} diff --git a/internal/cli/cert_test.go b/internal/cli/certmanager/manager_test.go similarity index 59% rename from internal/cli/cert_test.go rename to internal/cli/certmanager/manager_test.go index 90b5b65..1235cb3 100644 --- a/internal/cli/cert_test.go +++ b/internal/cli/certmanager/manager_test.go @@ -1,20 +1,23 @@ -package cli +package certmanager import ( "bytes" "errors" "io" "os" + "path/filepath" "strings" "testing" "time" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" ) func TestCheckCertManagerInstalledWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := checkCertManagerInstalledWithKubectl(kubectl); err != nil { t.Fatalf("unexpected error: %v", err) @@ -22,23 +25,23 @@ func TestCheckCertManagerInstalledWithKubectl(t *testing.T) { if len(mock.Commands) != 1 { t.Fatalf("expected 1 kubectl command, got %d", len(mock.Commands)) } - if !commandHasArgs(mock.Commands[0], "get", "crd", CertManagerCRDName) { + if !commandHasArgs(mock.Commands[0], "get", "crd", core.CertManagerCRDName) { t.Fatalf("unexpected args: %v", mock.Commands[0].Args) } } func TestCheckCertManagerInstalledWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("missing")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("missing")} + kubectl := core.NewTestKubectlClient(mock) - if err := checkCertManagerInstalledWithKubectl(kubectl); !errors.Is(err, 
ErrCertManagerNotInstalled) { - t.Fatalf("expected ErrCertManagerNotInstalled, got %v", err) + if err := checkCertManagerInstalledWithKubectl(kubectl); !errors.Is(err, core.ErrCertManagerNotInstalled) { + t.Fatalf("expected core.ErrCertManagerNotInstalled, got %v", err) } } func TestCheckCASecretWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := checkCASecretWithKubectl(kubectl); err != nil { t.Fatalf("unexpected error: %v", err) @@ -52,17 +55,17 @@ func TestCheckCASecretWithKubectl(t *testing.T) { } func TestCheckCASecretWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("missing")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("missing")} + kubectl := core.NewTestKubectlClient(mock) - if err := checkCASecretWithKubectl(kubectl); !errors.Is(err, ErrCASecretNotFound) { - t.Fatalf("expected ErrCASecretNotFound, got %v", err) + if err := checkCASecretWithKubectl(kubectl); !errors.Is(err, core.ErrCASecretNotFound) { + t.Fatalf("expected core.ErrCASecretNotFound, got %v", err) } } func TestApplyClusterIssuerWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := applyClusterIssuerWithKubectl(kubectl); err != nil { t.Fatalf("unexpected error: %v", err) @@ -76,9 +79,9 @@ func TestApplyClusterIssuerWithKubectl(t *testing.T) { } func TestApplyRegistryCertificateWithKubectl(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.prod.example.com"} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = 
&core.CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.prod.example.com"} root := repoRootForTest(t) origDir, err := os.Getwd() @@ -92,17 +95,17 @@ func TestApplyRegistryCertificateWithKubectl(t *testing.T) { _ = os.Chdir(origDir) }) - var applyCmd *MockCommand - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + var applyCmd *core.MockCommand + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "apply", "-f", "-") { applyCmd = cmd } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := applyRegistryCertificateWithKubectl(kubectl); err != nil { t.Fatalf("unexpected error: %v", err) @@ -123,24 +126,24 @@ func TestApplyRegistryCertificateWithKubectl(t *testing.T) { } func TestWaitForCertificateReadyWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) timeout := 15 * time.Second - if err := waitForCertificateReadyWithKubectl(kubectl, registryCertificateName, NamespaceRegistry, timeout); err != nil { + if err := waitForCertificateReadyWithKubectl(kubectl, registryCertificateName, core.NamespaceRegistry, timeout); err != nil { t.Fatalf("unexpected error: %v", err) } if len(mock.Commands) != 1 { t.Fatalf("expected 1 kubectl command, got %d", len(mock.Commands)) } - if !commandHasArgs(mock.Commands[0], "wait", "--for=condition=Ready", "certificate/"+registryCertificateName, "-n", NamespaceRegistry, "--timeout=15s") { + if !commandHasArgs(mock.Commands[0], "wait", "--for=condition=Ready", "certificate/"+registryCertificateName, "-n", core.NamespaceRegistry, "--timeout=15s") { t.Fatalf("unexpected args: %v", mock.Commands[0].Args) } } func TestCertManagerStatus(t *testing.T) 
{ - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) if err := manager.Status(); err != nil { @@ -152,16 +155,16 @@ func TestCertManagerStatus(t *testing.T) { } func TestCertManagerStatusMissingCertificate(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "get", "certificate", registryCertificateName, "-n", NamespaceRegistry) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "get", "certificate", registryCertificateName, "-n", core.NamespaceRegistry) { cmd.RunErr = errors.New("missing cert") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) if err := manager.Status(); err == nil { @@ -170,99 +173,91 @@ func TestCertManagerStatusMissingCertificate(t *testing.T) { } func TestCertManagerApplyMissingCASecret(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "get", "secret", certCASecretName, "-n", certManagerNamespace) { cmd.RunErr = errors.New("missing secret") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) - if err := manager.Apply(); err == nil { + if err := manager.Apply(false); err == nil { t.Fatal("expected error") } } func TestCertManagerApplyClusterIssuerError(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: 
func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "apply", "-f", clusterIssuerManifestPath) { cmd.RunErr = errors.New("apply issuer failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) - if err := manager.Apply(); err == nil { + if err := manager.Apply(false); err == nil { t.Fatal("expected error") } } func TestCertManagerApplyEnsureNamespaceError(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "apply", "-f", "-") { cmd.RunErr = errors.New("apply namespace failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) - if err := manager.Apply(); err == nil { + if err := manager.Apply(false); err == nil { t.Fatal("expected error") } } func TestCertManagerApplyRegistryCertificateError(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - // The registry certificate is applied via `kubectl apply -f - -n registry` with the // manifest content piped over stdin, so match on those args rather than on the // on-disk manifest path. 
- mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "apply", "-f", "-", "-n", NamespaceRegistry) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "apply", "-f", "-", "-n", core.NamespaceRegistry) { cmd.RunErr = errors.New("apply cert failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) - if err := manager.Apply(); err == nil { + if err := manager.Apply(false); err == nil { t.Fatal("expected error") } } func TestCertManagerWaitFailure(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "wait", "--for=condition=Ready", "certificate/"+registryCertificateName, "-n", NamespaceRegistry) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "wait", "--for=condition=Ready", "certificate/"+registryCertificateName, "-n", core.NamespaceRegistry) { cmd.RunErr = errors.New("wait failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) if err := manager.Wait(time.Second); err == nil { @@ -271,16 +266,16 @@ func TestCertManagerWaitFailure(t *testing.T) { } func TestCertManagerStatusMissingCertManager(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "get", "crd", CertManagerCRDName) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := 
&core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "get", "crd", core.CertManagerCRDName) { cmd.RunErr = errors.New("not found") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -292,16 +287,16 @@ func TestCertManagerStatusMissingCertManager(t *testing.T) { } func TestCertManagerStatusMissingCASecret(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "get", "secret", certCASecretName, "-n", certManagerNamespace) { cmd.RunErr = errors.New("not found") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -313,16 +308,16 @@ func TestCertManagerStatusMissingCASecret(t *testing.T) { } func TestCertManagerStatusMissingClusterIssuer(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "get", "clusterissuer", certClusterIssuerName) { cmd.RunErr = errors.New("not found") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -334,29 +329,29 @@ func TestCertManagerStatusMissingClusterIssuer(t *testing.T) { } func TestCertManagerApplyMissingCertManager(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if 
commandHasArgs(spec, "get", "crd", CertManagerCRDName) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "get", "crd", core.CertManagerCRDName) { cmd.RunErr = errors.New("not found") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manager := NewCertManager(kubectl, zap.NewNop()) var buf bytes.Buffer setDefaultPrinterWriter(t, &buf) - if err := manager.Apply(); err == nil { + if err := manager.Apply(false); err == nil { t.Fatal("expected error when cert-manager not installed") } } func TestCheckClusterIssuerWithKubectlSuccess(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := checkClusterIssuerWithKubectl(kubectl); err != nil { t.Fatalf("unexpected error: %v", err) @@ -370,8 +365,8 @@ func TestCheckClusterIssuerWithKubectlSuccess(t *testing.T) { } func TestCheckClusterIssuerWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("not found")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("not found")} + kubectl := core.NewTestKubectlClient(mock) if err := checkClusterIssuerWithKubectl(kubectl); err == nil { t.Fatal("expected error when cluster issuer not found") @@ -379,8 +374,8 @@ func TestCheckClusterIssuerWithKubectlError(t *testing.T) { } func TestCheckNamedClusterIssuerWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := checkNamedClusterIssuerWithKubectl(kubectl, " company-ca "); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -390,27 +385,27 @@ func TestCheckNamedClusterIssuerWithKubectl(t *testing.T) { } 
func TestCheckNamedClusterIssuerWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("not found")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("not found")} + kubectl := core.NewTestKubectlClient(mock) if err := checkNamedClusterIssuerWithKubectl(kubectl, "missing"); err == nil { t.Fatal("expected error") } } func TestCheckNamedClusterIssuerWithKubectlEmptyName(t *testing.T) { - kubectl := &KubectlClient{exec: &MockExecutor{}, validators: nil} + kubectl := core.NewTestKubectlClient(&core.MockExecutor{}) err := checkNamedClusterIssuerWithKubectl(kubectl, " ") if err == nil { t.Fatal("expected error for empty name") } - if !errors.Is(err, ErrClusterIssuerNotFound) { - t.Fatalf("expected ErrClusterIssuerNotFound, got %v", err) + if !errors.Is(err, core.ErrClusterIssuerNotFound) { + t.Fatalf("expected core.ErrClusterIssuerNotFound, got %v", err) } } func TestCheckCertificateWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("not found")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("not found")} + kubectl := core.NewTestKubectlClient(mock) if err := checkCertificateWithKubectl(kubectl, "test-cert", "test-ns"); err == nil { t.Fatal("expected error when certificate not found") @@ -418,8 +413,8 @@ func TestCheckCertificateWithKubectlError(t *testing.T) { } func TestApplyClusterIssuerWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("apply failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("apply failed")} + kubectl := core.NewTestKubectlClient(mock) if err := applyClusterIssuerWithKubectl(kubectl); err == nil { t.Fatal("expected error when apply fails") @@ -427,8 +422,8 @@ func TestApplyClusterIssuerWithKubectlError(t *testing.T) { } func 
TestApplyRegistryCertificateWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("apply failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("apply failed")} + kubectl := core.NewTestKubectlClient(mock) if err := applyRegistryCertificateWithKubectl(kubectl); err == nil { t.Fatal("expected error when apply fails") @@ -436,10 +431,59 @@ func TestApplyRegistryCertificateWithKubectlError(t *testing.T) { } func TestWaitForCertificateReadyWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("timeout")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("timeout")} + kubectl := core.NewTestKubectlClient(mock) if err := waitForCertificateReadyWithKubectl(kubectl, "test-cert", "test-ns", time.Second); err == nil { t.Fatal("expected error when wait times out") } } + +func commandHasArgs(cmd core.ExecSpec, args ...string) bool { + if len(args) == 0 { + return true + } + for i := 0; i <= len(cmd.Args)-len(args); i++ { + matches := true + for j, arg := range args { + if cmd.Args[i+j] != arg { + matches = false + break + } + } + if matches { + return true + } + } + return false +} + +func repoRootForTest(t *testing.T) string { + t.Helper() + dir, err := os.Getwd() + if err != nil { + t.Fatalf("get working dir: %v", err) + } + for { + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + return dir + } + parent := filepath.Dir(dir) + if parent == dir { + t.Fatal("repo root not found") + } + dir = parent + } +} + +func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { + t.Helper() + prevWriter := core.DefaultPrinter.Writer + prevQuiet := core.DefaultPrinter.Quiet + core.DefaultPrinter.Writer = w + core.DefaultPrinter.Quiet = false + t.Cleanup(func() { + core.DefaultPrinter.Writer = prevWriter + core.DefaultPrinter.Quiet = prevQuiet + }) +} diff --git 
a/internal/cli/cluster/cert.go b/internal/cli/cluster/cert.go index 2f8af9e..87616b0 100644 --- a/internal/cli/cluster/cert.go +++ b/internal/cli/cluster/cert.go @@ -5,11 +5,12 @@ import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/certmanager" + "mcp-runtime/internal/cli/core" ) -func newClusterCertCmd(mgr *cli.ClusterManager) *cobra.Command { - certMgr := cli.NewCertManager(mgr.KubectlRunner(), mgr.Logger()) +func newClusterCertCmd(mgr *ClusterManager) *cobra.Command { + certMgr := certmanager.NewCertManager(mgr.KubectlRunner(), mgr.Logger()) cmd := &cobra.Command{ Use: "cert", Short: "Manage cert-manager resources", @@ -23,7 +24,7 @@ func newClusterCertCmd(mgr *cli.ClusterManager) *cobra.Command { return cmd } -func certMgrStatusCmd(mgr *cli.CertManager) *cobra.Command { +func certMgrStatusCmd(mgr *certmanager.CertManager) *cobra.Command { return &cobra.Command{ Use: "status", Short: "Check cert-manager resources", @@ -34,18 +35,21 @@ func certMgrStatusCmd(mgr *cli.CertManager) *cobra.Command { } } -func certMgrApplyCmd(mgr *cli.CertManager) *cobra.Command { - return &cobra.Command{ +func certMgrApplyCmd(mgr *certmanager.CertManager) *cobra.Command { + var dryRun bool + cmd := &cobra.Command{ Use: "apply", Short: "Apply cert-manager resources", Long: "Apply ClusterIssuer and registry Certificate manifests", RunE: func(cmd *cobra.Command, args []string) error { - return mgr.Apply() + return mgr.Apply(dryRun) }, } + cmd.Flags().BoolVar(&dryRun, "dry-run", false, "Run preflight checks and print the resources that would be applied without modifying the cluster") + return cmd } -func certMgrWaitCmd(mgr *cli.CertManager) *cobra.Command { +func certMgrWaitCmd(mgr *certmanager.CertManager) *cobra.Command { var timeoutDuration time.Duration cmd := &cobra.Command{ Use: "wait", @@ -53,7 +57,7 @@ func certMgrWaitCmd(mgr *cli.CertManager) *cobra.Command { Long: "Wait for the registry certificate to reach Ready state", RunE: func(cmd 
*cobra.Command, args []string) error { if timeoutDuration == 0 { - timeoutDuration = cli.GetCertTimeout() + timeoutDuration = core.GetCertTimeout() } return mgr.Wait(timeoutDuration) }, diff --git a/internal/cli/cluster/cluster.go b/internal/cli/cluster/cluster.go index e1025dd..8de2b0a 100644 --- a/internal/cli/cluster/cluster.go +++ b/internal/cli/cluster/cluster.go @@ -4,16 +4,16 @@ package cluster import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) // New returns the cluster command. -func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(runtime.ClusterManager()) +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(NewClusterManager(runtime.KubectlClient(), runtime.Executor(), runtime.Logger())) } // NewWithManager returns the cluster command using the provided manager. -func NewWithManager(mgr *cli.ClusterManager) *cobra.Command { +func NewWithManager(mgr *ClusterManager) *cobra.Command { cmd := &cobra.Command{ Use: "cluster", Short: "Manage Kubernetes cluster", @@ -87,18 +87,20 @@ func NewWithManager(mgr *cli.ClusterManager) *cobra.Command { var provisionRegion string var nodeCount int var provisionClusterName string + var provisionDryRun bool provisionCmd := &cobra.Command{ Use: "provision", Short: "Provision a new cluster", Long: "Provision a new Kubernetes cluster (requires cloud provider credentials)", RunE: func(cmd *cobra.Command, args []string) error { - return mgr.ProvisionCluster(provisionProvider, provisionRegion, nodeCount, provisionClusterName) + return mgr.ProvisionCluster(provisionProvider, provisionRegion, nodeCount, provisionClusterName, provisionDryRun) }, } provisionCmd.Flags().StringVar(&provisionProvider, "provider", "kind", "Cloud provider (kind, gke, eks, aks)") provisionCmd.Flags().StringVar(&provisionRegion, "region", "us-west-1", "Region for cluster") provisionCmd.Flags().IntVar(&nodeCount, "nodes", 3, "Number of nodes") 
provisionCmd.Flags().StringVar(&provisionClusterName, "name", "mcp-runtime", "Cluster name (used by supported providers)") + provisionCmd.Flags().BoolVar(&provisionDryRun, "dry-run", false, "Print the cluster config and command that would run without creating any cluster") cmd.AddCommand(initCmd) cmd.AddCommand(statusCmd) diff --git a/internal/cli/cluster_test.go b/internal/cli/cluster/cluster_test.go similarity index 85% rename from internal/cli/cluster_test.go rename to internal/cli/cluster/cluster_test.go index 37ec45c..e20fe20 100644 --- a/internal/cli/cluster_test.go +++ b/internal/cli/cluster/cluster_test.go @@ -1,4 +1,4 @@ -package cli +package cluster import ( "errors" @@ -9,14 +9,25 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" ) +func contains(slice []string, val string) bool { + for _, s := range slice { + if s == val { + return true + } + } + return false +} + func TestClusterManager_CheckClusterStatus(t *testing.T) { t.Run("calls kubectl cluster-info", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("Kubernetes control plane is running"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.CheckClusterStatus() @@ -43,9 +54,9 @@ func TestClusterManager_CheckClusterStatus(t *testing.T) { } func TestClusterConfigRunE_WithProviderAndContext(t *testing.T) { - mockExec := &MockExecutor{} - mockKubectl := &MockExecutor{} - kubectl := &KubectlClient{exec: mockKubectl, validators: nil} + mockExec := &core.MockExecutor{} + mockKubectl := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mockKubectl) mgr := NewClusterManager(kubectl, mockExec, zap.NewNop()) configCmd := findClusterSubcommand(t, newTestClusterCommand(mgr), "config") @@ -98,9 +109,9 @@ func TestClusterConfigRunE_WithProviderAndContext(t *testing.T) { } func 
TestClusterConfigRunE_UnsupportedProvider(t *testing.T) { - mockExec := &MockExecutor{} - mockKubectl := &MockExecutor{} - kubectl := &KubectlClient{exec: mockKubectl, validators: nil} + mockExec := &core.MockExecutor{} + mockKubectl := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mockKubectl) mgr := NewClusterManager(kubectl, mockExec, zap.NewNop()) configCmd := findClusterSubcommand(t, newTestClusterCommand(mgr), "config") @@ -173,7 +184,7 @@ func newTestClusterCommand(mgr *ClusterManager) *cobra.Command { return cmd } -func hasCommand(cmds []ExecSpec, name string, args ...string) bool { +func hasCommand(cmds []core.ExecSpec, name string, args ...string) bool { for _, cmd := range cmds { if cmd.Name != name { continue @@ -196,8 +207,8 @@ func containsAll(slice []string, vals []string) bool { func TestClusterManager_EnsureNamespace(t *testing.T) { t.Run("calls kubectl apply with namespace yaml via stdin", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.EnsureNamespace("test-ns") @@ -224,10 +235,10 @@ func TestClusterManager_InitCluster(t *testing.T) { t.Fatalf("failed to write kubeconfig: %v", err) } - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("Switched to context"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.InitCluster(kubeconfig, "my-context") @@ -257,10 +268,10 @@ func TestClusterManager_ConfigureKubeconfig(t *testing.T) { t.Fatalf("failed to write kubeconfig: %v", err) } - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("Switched to context"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := 
NewClusterManager(kubectl, mock, zap.NewNop()) previous := os.Getenv("KUBECONFIG") @@ -301,8 +312,8 @@ func TestClusterManager_ConfigureKubeconfig(t *testing.T) { t.Fatalf("failed to write kubeconfig: %v", err) } - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) previous := os.Getenv("KUBECONFIG") @@ -322,8 +333,8 @@ func TestClusterManager_ConfigureKubeconfig(t *testing.T) { }) t.Run("errors when kubeconfig is missing", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) if err := mgr.ConfigureKubeconfig("/path/does/not/exist", ""); err == nil { @@ -334,8 +345,8 @@ func TestClusterManager_ConfigureKubeconfig(t *testing.T) { func TestClusterManager_ConfigureKubeconfigFromProvider(t *testing.T) { t.Run("dispatches to eks config", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.ConfigureKubeconfigFromProvider("EKS", "us-west-2", "my-eks", "", "", "", "/tmp/kubeconfig") @@ -362,8 +373,8 @@ func TestClusterManager_ConfigureKubeconfigFromProvider(t *testing.T) { }) t.Run("errors on unsupported provider", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) if err := mgr.ConfigureKubeconfigFromProvider("digitalocean", "us-west-2", "cluster", "", "", "", ""); err == nil { @@ -376,8 +387,8 @@ func TestClusterManager_ConfigureKubeconfigFromProvider(t *testing.T) { 
func TestProvisionEKSCluster(t *testing.T) { t.Run("uses eksctl with args", func(t *testing.T) { - mock := &MockExecutor{} - err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 3, "my-eks") + mock := &core.MockExecutor{} + err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 3, "my-eks", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -401,8 +412,8 @@ func TestProvisionEKSCluster(t *testing.T) { }) t.Run("defaults cluster name when empty", func(t *testing.T) { - mock := &MockExecutor{} - err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 2, "") + mock := &core.MockExecutor{} + err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 2, "", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -416,7 +427,7 @@ func TestProvisionEKSCluster(t *testing.T) { func TestConfigureEKSKubeconfig(t *testing.T) { t.Run("uses aws eks update-kubeconfig", func(t *testing.T) { - mock := &MockExecutor{} + mock := &core.MockExecutor{} err := configureEKSKubeconfig(mock, "us-west-2", "my-eks", "/tmp/kubeconfig") if err != nil { t.Fatalf("unexpected error: %v", err) @@ -441,7 +452,7 @@ func TestConfigureEKSKubeconfig(t *testing.T) { }) t.Run("defaults cluster name when empty", func(t *testing.T) { - mock := &MockExecutor{} + mock := &core.MockExecutor{} err := configureEKSKubeconfig(mock, "us-west-2", "", "") if err != nil { t.Fatalf("unexpected error: %v", err) @@ -454,7 +465,7 @@ func TestConfigureEKSKubeconfig(t *testing.T) { }) t.Run("returns error when aws command fails", func(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("aws failed")} + mock := &core.MockExecutor{DefaultRunErr: errors.New("aws failed")} err := configureEKSKubeconfig(mock, "us-west-2", "my-eks", "") if err == nil { t.Fatal("expected error when aws fails") @@ -464,11 +475,11 @@ func TestConfigureEKSKubeconfig(t *testing.T) { func TestProvisionCluster(t *testing.T) { t.Run("dispatches to kind", func(t *testing.T) { - mock := 
&MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ProvisionCluster("kind", "us-west-2", 3, "test-cluster") + err := mgr.ProvisionCluster("kind", "us-west-2", 3, "test-cluster", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -478,11 +489,11 @@ func TestProvisionCluster(t *testing.T) { }) t.Run("returns gke not implemented error", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ProvisionCluster("gke", "us-west-2", 3, "test-cluster") + err := mgr.ProvisionCluster("gke", "us-west-2", 3, "test-cluster", false) if err == nil { t.Fatal("expected error for gke") } @@ -492,11 +503,11 @@ func TestProvisionCluster(t *testing.T) { }) t.Run("returns aks not implemented error", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ProvisionCluster("aks", "us-west-2", 3, "test-cluster") + err := mgr.ProvisionCluster("aks", "us-west-2", 3, "test-cluster", false) if err == nil { t.Fatal("expected error for aks") } @@ -506,11 +517,11 @@ func TestProvisionCluster(t *testing.T) { }) t.Run("returns error for unsupported provider", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ProvisionCluster("unknown", "us-west-2", 3, "test-cluster") + err := mgr.ProvisionCluster("unknown", "us-west-2", 3, "test-cluster", false) if err == 
nil { t.Fatal("expected error for unknown provider") } @@ -520,11 +531,11 @@ func TestProvisionCluster(t *testing.T) { }) t.Run("eks provisioning", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ProvisionCluster("eks", "us-west-2", 3, "test-cluster") + err := mgr.ProvisionCluster("eks", "us-west-2", 3, "test-cluster", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -536,11 +547,11 @@ func TestProvisionCluster(t *testing.T) { func TestProvisionKindCluster(t *testing.T) { t.Run("creates cluster with default name", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.provisionKindCluster(3, "") + err := mgr.provisionKindCluster(3, "", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -555,11 +566,11 @@ func TestProvisionKindCluster(t *testing.T) { }) t.Run("creates cluster with custom name", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.provisionKindCluster(2, "my-cluster") + err := mgr.provisionKindCluster(2, "my-cluster", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -571,11 +582,11 @@ func TestProvisionKindCluster(t *testing.T) { }) t.Run("returns error when kind fails", func(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("kind failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("kind failed")} + kubectl := core.NewTestKubectlClient(mock) 
mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.provisionKindCluster(1, "test") + err := mgr.provisionKindCluster(1, "test", false) if err == nil { t.Fatal("expected error when kind fails") } @@ -584,7 +595,7 @@ func TestProvisionKindCluster(t *testing.T) { func TestProvisionGKECluster(t *testing.T) { t.Run("defaults cluster name", func(t *testing.T) { - err := provisionGKECluster(zap.NewNop(), "us-west-2", 3, "") + err := provisionGKECluster(zap.NewNop(), "us-west-2", 3, "", false) if err == nil { t.Fatal("expected not implemented error") } @@ -596,7 +607,7 @@ func TestProvisionGKECluster(t *testing.T) { func TestProvisionAKSCluster(t *testing.T) { t.Run("defaults cluster name", func(t *testing.T) { - err := provisionAKSCluster(zap.NewNop(), "us-west-2", 3, "") + err := provisionAKSCluster(zap.NewNop(), "us-west-2", 3, "", false) if err == nil { t.Fatal("expected not implemented error") } @@ -607,18 +618,18 @@ func TestProvisionAKSCluster(t *testing.T) { } func TestProvisionEKSClusterError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("eksctl failed")} - err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 3, "test") + mock := &core.MockExecutor{DefaultRunErr: errors.New("eksctl failed")} + err := provisionEKSCluster(zap.NewNop(), mock, "us-west-2", 3, "test", false) if err == nil { t.Fatal("expected error when eksctl fails") } } func TestCheckClusterStatusError(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultErr: errors.New("cluster not accessible"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.CheckClusterStatus() @@ -629,11 +640,11 @@ func TestCheckClusterStatusError(t *testing.T) { func TestConfigureCluster(t *testing.T) { t.Run("skips ingress when mode is none", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: 
nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "none"}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "none"}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -646,11 +657,11 @@ func TestConfigureCluster(t *testing.T) { }) t.Run("returns error for unsupported ingress", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "nginx"}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "nginx"}) if err == nil { t.Fatal("expected error for unsupported ingress") } @@ -660,19 +671,19 @@ func TestConfigureCluster(t *testing.T) { }) t.Run("skips install when ingress already present", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "ingressclass") { cmd.OutputData = []byte("ingressclass.networking.k8s.io/traefik\n") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "traefik"}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "traefik"}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -691,19 +702,19 @@ func TestConfigureCluster(t *testing.T) { t.Fatal(err) } - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := 
&core.MockCommand{Args: spec.Args} if contains(spec.Args, "ingressclass") { cmd.OutputData = []byte("ingressclass.networking.k8s.io/traefik\n") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "traefik", force: true, manifest: manifestPath}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "traefik", Force: true, Manifest: manifestPath}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -730,11 +741,11 @@ func TestConfigureCluster(t *testing.T) { t.Fatal(err) } - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "traefik", manifest: manifestDir}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "traefik", Manifest: manifestDir}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -762,11 +773,11 @@ func TestConfigureCluster(t *testing.T) { t.Fatal(err) } - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "traefik", manifest: kustomizePath}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "traefik", Manifest: kustomizePath}) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -790,19 +801,19 @@ func TestConfigureCluster(t *testing.T) { t.Fatal(err) } - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "apply") { cmd.RunErr = 
errors.New("apply failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) - err := mgr.ConfigureCluster(ingressOptions{mode: "traefik", manifest: manifestPath}) + err := mgr.ConfigureCluster(IngressOptions{Mode: "traefik", Manifest: manifestPath}) if err == nil { t.Fatal("expected error when apply fails") } @@ -817,9 +828,9 @@ func TestInitClusterErrors(t *testing.T) { t.Fatal(err) } - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "apply") && contains(spec.Args, "-f") && contains(spec.Args, "config/crd/bases/mcpruntime.org_mcpservers.yaml") { @@ -828,7 +839,7 @@ func TestInitClusterErrors(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.InitCluster(kubeconfig, "") @@ -845,10 +856,10 @@ func TestInitClusterErrors(t *testing.T) { } callCount := 0 - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { callCount++ - cmd := &MockCommand{Args: spec.Args} + cmd := &core.MockCommand{Args: spec.Args} // Fail on second apply (namespace creation) if contains(spec.Args, "apply") && callCount > 1 { cmd.RunErr = errors.New("namespace creation failed") @@ -856,7 +867,7 @@ func TestInitClusterErrors(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.InitCluster(kubeconfig, "") @@ -867,8 +878,8 @@ func TestInitClusterErrors(t *testing.T) 
{ } func TestEnsureNamespaceError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("apply failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("apply failed")} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.EnsureNamespace("test-ns") @@ -878,8 +889,8 @@ func TestEnsureNamespaceError(t *testing.T) { } func TestConfigureKubeconfigFromProviderAKS(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.ConfigureKubeconfigFromProvider("aks", "us-west-2", "cluster", "rg", "", "", "") @@ -892,8 +903,8 @@ func TestConfigureKubeconfigFromProviderAKS(t *testing.T) { } func TestConfigureKubeconfigFromProviderGKE(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) err := mgr.ConfigureKubeconfigFromProvider("gke", "us-west-2", "cluster", "", "project", "zone", "") @@ -932,8 +943,8 @@ func TestResolveKubeconfigPath(t *testing.T) { } func TestConfigureClusterConfigCmdFlags(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewClusterManager(kubectl, mock, zap.NewNop()) cmd := findClusterSubcommand(t, newTestClusterCommand(mgr), "config") diff --git a/internal/cli/cluster/doctor.go b/internal/cli/cluster/doctor.go index 2606ecc..9cf015d 100644 --- a/internal/cli/cluster/doctor.go +++ b/internal/cli/cluster/doctor.go @@ -3,10 +3,10 @@ package cluster import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) -func 
newClusterDoctorCmd(mgr *cli.ClusterManager) *cobra.Command { +func newClusterDoctorCmd(mgr *ClusterManager) *cobra.Command { return &cobra.Command{ Use: "doctor", Short: "Diagnose MCP Runtime cluster readiness and installed components", @@ -14,9 +14,9 @@ func newClusterDoctorCmd(mgr *cli.ClusterManager) *cobra.Command { "operator/CRD prerequisites, ingress (Traefik) wiring, image pulls, Sentinel, and MCPServer reconciliation are healthy. Prints remediation steps for your distribution " + "when something is missing. See docs/cluster-readiness.md for the full per-distribution checklist.", RunE: func(cmd *cobra.Command, args []string) error { - report := cli.RunDoctorAndPrint(mgr.KubectlRunner()) + report := RunDoctorAndPrint(mgr.KubectlRunner()) if !report.AllOK() { - return cli.NewSetupStepFailedError() + return core.NewSetupStepFailedError() } return nil }, diff --git a/internal/cli/cluster_doctor.go b/internal/cli/cluster/doctor_impl.go similarity index 87% rename from internal/cli/cluster_doctor.go rename to internal/cli/cluster/doctor_impl.go index 598acfe..6686f18 100644 --- a/internal/cli/cluster_doctor.go +++ b/internal/cli/cluster/doctor_impl.go @@ -1,10 +1,6 @@ -package cli - -// This file implements the "cluster doctor" diagnostics command. -// It detects the Kubernetes distribution, checks installed MCP Runtime -// components and registry image-pull health, and prints distribution-specific -// remediation when something is wrong. See docs/cluster-readiness.md for the -// full list of per-distribution prerequisites. +// Cluster doctor diagnostics: distribution detection, registry and Traefik +// checks, Sentinel probes, and remediation hints. See docs/cluster-readiness.md. +package cluster import ( "bufio" @@ -14,6 +10,8 @@ import ( "strconv" "strings" "time" + + "mcp-runtime/internal/cli/core" ) // Distribution identifies a Kubernetes flavor for remediation messaging. 
@@ -96,6 +94,19 @@ type doctorServicePort struct { NodePort string } +type doctorSmokeTarget struct { + Image string + Port int32 + Source string + WaitForReady bool +} + +type doctorIngressRoute struct { + Name string + Host string + Path string +} + // AllOK reports whether every check passed. func (r DoctorReport) AllOK() bool { for _, c := range r.Checks { @@ -107,31 +118,31 @@ func (r DoctorReport) AllOK() bool { } // RunDoctor executes cluster diagnostics and returns a report. -func RunDoctor(kubectl KubectlRunner) DoctorReport { +func RunDoctor(kubectl core.KubectlRunner) DoctorReport { distro := DetectDistribution(kubectl) return runDoctorChecks(kubectl, distro, nil) } // RunDoctorWithProgress executes cluster diagnostics and calls progress hooks // before and after each check. It is useful for UIs that need live feedback. -func RunDoctorWithProgress(kubectl KubectlRunner, progress DoctorCheckProgress) DoctorReport { +func RunDoctorWithProgress(kubectl core.KubectlRunner, progress DoctorCheckProgress) DoctorReport { distro := DetectDistribution(kubectl) return runDoctorChecks(kubectl, distro, progress) } // RunDoctorAndPrint streams doctor progress and results as checks execute. 
-func RunDoctorAndPrint(kubectl KubectlRunner) DoctorReport { - Section("Cluster Doctor") - Info("Detecting Kubernetes distribution — reading node kubelet versions, node names, and current context") +func RunDoctorAndPrint(kubectl core.KubectlRunner) DoctorReport { + core.Section("Cluster Doctor") + core.Info("Detecting Kubernetes distribution — reading node kubelet versions, node names, and current context") distro := DetectDistribution(kubectl) - Info(fmt.Sprintf("Distribution: %s", distro)) + core.Info(fmt.Sprintf("Distribution: %s", distro)) report := runDoctorChecks(kubectl, distro, printDoctorCheckProgress) printDoctorReportFooter(report) return report } -func runDoctorChecks(kubectl KubectlRunner, distro Distribution, progress DoctorCheckProgress) DoctorReport { +func runDoctorChecks(kubectl core.KubectlRunner, distro Distribution, progress DoctorCheckProgress) DoctorReport { specs := doctorCheckSpecs(kubectl, distro) checks := make([]DoctorCheck, 0, len(specs)) for i, spec := range specs { @@ -160,7 +171,7 @@ func runDoctorChecks(kubectl KubectlRunner, distro Distribution, progress Doctor } } -func doctorCheckSpecs(kubectl KubectlRunner, distro Distribution) []doctorCheckSpec { +func doctorCheckSpecs(kubectl core.KubectlRunner, distro Distribution) []doctorCheckSpec { return []doctorCheckSpec{ { Name: fmt.Sprintf("namespace %s", doctorMCPServersNamespace), @@ -222,7 +233,7 @@ func doctorCheckSpecs(kubectl KubectlRunner, distro Distribution) []doctorCheckS // DetectDistribution inspects node info to guess which distribution is running. // This is best-effort: callers should treat DistroGeneric as "probably kubeadm/unknown". 
-func DetectDistribution(kubectl KubectlRunner) Distribution { +func DetectDistribution(kubectl core.KubectlRunner) Distribution { cmd, err := kubectl.CommandArgs([]string{"get", "nodes", "-o", "jsonpath={.items[*].status.nodeInfo.kubeletVersion}"}) if err == nil { if out, err := cmd.Output(); err == nil { @@ -266,7 +277,7 @@ func DetectDistribution(kubectl KubectlRunner) Distribution { return DistroGeneric } -func checkRegistryService(kubectl KubectlRunner) DoctorCheck { +func checkRegistryService(kubectl core.KubectlRunner) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "svc", "-n", "registry", "registry", "-o", "jsonpath={.spec.ports[0].nodePort}"}) if err != nil { return DoctorCheck{Name: "registry Service", OK: false, Detail: fmt.Sprintf("kubectl error: %v", err), Remedy: "run `./bin/mcp-runtime setup` to install the registry, or check cluster connectivity"} @@ -288,7 +299,7 @@ func checkRegistryService(kubectl KubectlRunner) DoctorCheck { } } -func checkNamespaceExists(kubectl KubectlRunner, namespace string) DoctorCheck { +func checkNamespaceExists(kubectl core.KubectlRunner, namespace string) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "namespace", namespace, "-o", "jsonpath={.metadata.name}"}) if err != nil { return DoctorCheck{ @@ -315,7 +326,7 @@ func checkNamespaceExists(kubectl KubectlRunner, namespace string) DoctorCheck { } } -func checkNamespaceDefaultServiceAccount(kubectl KubectlRunner, namespace string) DoctorCheck { +func checkNamespaceDefaultServiceAccount(kubectl core.KubectlRunner, namespace string) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "serviceaccount", "default", "-n", namespace, "-o", "jsonpath={.metadata.name}"}) if err != nil { return DoctorCheck{ @@ -342,7 +353,7 @@ func checkNamespaceDefaultServiceAccount(kubectl KubectlRunner, namespace string } } -func checkNamespacePolicyGuardrails(kubectl KubectlRunner, namespace string) DoctorCheck { +func 
checkNamespacePolicyGuardrails(kubectl core.KubectlRunner, namespace string) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "resourcequota,limitrange", "-n", namespace, "--no-headers", "-o", "name"}) if err != nil { return DoctorCheck{ @@ -377,7 +388,7 @@ func checkNamespacePolicyGuardrails(kubectl KubectlRunner, namespace string) Doc } } -func checkNamespacePodAdmission(kubectl KubectlRunner, namespace string) DoctorCheck { +func checkNamespacePodAdmission(kubectl core.KubectlRunner, namespace string) DoctorCheck { podName := fmt.Sprintf("doctor-admission-%d", time.Now().UnixNano()) manifest := fmt.Sprintf(`apiVersion: v1 kind: Pod @@ -416,7 +427,7 @@ spec: } } -func checkMCPServerCRD(kubectl KubectlRunner) DoctorCheck { +func checkMCPServerCRD(kubectl core.KubectlRunner) DoctorCheck { crd := "mcpservers.mcpruntime.org" cmd, err := kubectl.CommandArgs([]string{"get", "crd", crd, "-o", "jsonpath={.metadata.name}"}) if err != nil { @@ -444,7 +455,7 @@ func checkMCPServerCRD(kubectl KubectlRunner) DoctorCheck { } } -func checkOperatorReady(kubectl KubectlRunner) DoctorCheck { +func checkOperatorReady(kubectl core.KubectlRunner) DoctorCheck { deployName := "mcp-runtime-operator-controller-manager" ns := "mcp-runtime" cmd, err := kubectl.CommandArgs([]string{"get", "deploy", "-n", ns, deployName, "-o", "jsonpath={.status.readyReplicas}/{.spec.replicas}"}) @@ -500,7 +511,7 @@ func checkOperatorReady(kubectl KubectlRunner) DoctorCheck { } } -func checkOperatorRecentReconcileErrors(kubectl KubectlRunner) DoctorCheck { +func checkOperatorRecentReconcileErrors(kubectl core.KubectlRunner) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"logs", "-n", "mcp-runtime", "deploy/mcp-runtime-operator-controller-manager", "--since=10m"}) if err != nil { return DoctorCheck{ @@ -554,7 +565,7 @@ func checkOperatorRecentReconcileErrors(kubectl KubectlRunner) DoctorCheck { } } -func checkTraefikIngressClass(kubectl KubectlRunner) DoctorCheck { +func 
checkTraefikIngressClass(kubectl core.KubectlRunner) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "ingressclass", "traefik", "-o", "jsonpath={.metadata.name}"}) if err != nil { return DoctorCheck{ @@ -619,7 +630,7 @@ func traefikRemedy(distro Distribution) string { return "install Traefik deployment/service in namespace `traefik`, or run setup with the repo ingress overlay" } -func checkTraefikDeploymentReady(kubectl KubectlRunner, distro Distribution) DoctorCheck { +func checkTraefikDeploymentReady(kubectl core.KubectlRunner, distro Distribution) DoctorCheck { failures := make([]string, 0, len(doctorTraefikEndpoints(distro))) for _, endpoint := range doctorTraefikEndpoints(distro) { check := checkTraefikDeploymentReadyAt(kubectl, endpoint) @@ -636,7 +647,7 @@ func checkTraefikDeploymentReady(kubectl KubectlRunner, distro Distribution) Doc } } -func checkTraefikDeploymentReadyAt(kubectl KubectlRunner, endpoint doctorTraefikEndpoint) DoctorCheck { +func checkTraefikDeploymentReadyAt(kubectl core.KubectlRunner, endpoint doctorTraefikEndpoint) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "deploy", "-n", endpoint.Namespace, endpoint.Name, "-o", "jsonpath={.status.readyReplicas}/{.spec.replicas}"}) if err != nil { return DoctorCheck{ @@ -678,7 +689,7 @@ func checkTraefikDeploymentReadyAt(kubectl KubectlRunner, endpoint doctorTraefik } } -func checkTraefikWebEntrypoint(kubectl KubectlRunner, distro Distribution) DoctorCheck { +func checkTraefikWebEntrypoint(kubectl core.KubectlRunner, distro Distribution) DoctorCheck { endpoint, ports, ok := resolveDoctorTraefikWebEndpoint(kubectl, distro) if ok { return DoctorCheck{ @@ -695,7 +706,7 @@ func checkTraefikWebEntrypoint(kubectl KubectlRunner, distro Distribution) Docto } } -func resolveDoctorTraefikWebEndpoint(kubectl KubectlRunner, distro Distribution) (doctorTraefikEndpoint, string, bool) { +func resolveDoctorTraefikWebEndpoint(kubectl core.KubectlRunner, distro Distribution) 
(doctorTraefikEndpoint, string, bool) { failures := make([]string, 0, len(doctorTraefikEndpoints(distro))) for _, endpoint := range doctorTraefikEndpoints(distro) { ports, err := readTraefikServicePorts(kubectl, endpoint) @@ -714,7 +725,7 @@ func resolveDoctorTraefikWebEndpoint(kubectl KubectlRunner, distro Distribution) return doctorTraefikEndpoint{}, strings.Join(failures, "; "), false } -func readTraefikServicePorts(kubectl KubectlRunner, endpoint doctorTraefikEndpoint) (string, error) { +func readTraefikServicePorts(kubectl core.KubectlRunner, endpoint doctorTraefikEndpoint) (string, error) { cmd, err := kubectl.CommandArgs([]string{"get", "svc", "-n", endpoint.Namespace, endpoint.Name, "-o", "jsonpath={range .spec.ports[*]}{.name}:{.port}:{.nodePort}{\"\\n\"}{end}"}) if err != nil { return "", fmt.Errorf("kubectl error: %v", err) @@ -726,7 +737,7 @@ func readTraefikServicePorts(kubectl KubectlRunner, endpoint doctorTraefikEndpoi return strings.TrimSpace(string(out)), nil } -func checkTraefikServiceExposure(kubectl KubectlRunner, distro Distribution) DoctorCheck { +func checkTraefikServiceExposure(kubectl core.KubectlRunner, distro Distribution) DoctorCheck { failures := make([]string, 0, len(doctorTraefikEndpoints(distro))) for _, endpoint := range doctorTraefikEndpoints(distro) { check := checkTraefikServiceExposureAt(kubectl, endpoint) @@ -743,7 +754,7 @@ func checkTraefikServiceExposure(kubectl KubectlRunner, distro Distribution) Doc } } -func checkTraefikServiceExposureAt(kubectl KubectlRunner, endpoint doctorTraefikEndpoint) DoctorCheck { +func checkTraefikServiceExposureAt(kubectl core.KubectlRunner, endpoint doctorTraefikEndpoint) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "svc", "-n", endpoint.Namespace, endpoint.Name, "-o", "jsonpath={.spec.type}|{.status.loadBalancer.ingress[0].ip}|{.status.loadBalancer.ingress[0].hostname}|{range .spec.ports[*]}{.name}:{.port}:{.nodePort}{\",\"}{end}"}) if err != nil { return DoctorCheck{ @@ 
-805,7 +816,7 @@ func checkTraefikServiceExposureAt(kubectl KubectlRunner, endpoint doctorTraefik } } -func checkMCPServersDNSAndNetwork(kubectl KubectlRunner) DoctorCheck { +func checkMCPServersDNSAndNetwork(kubectl core.KubectlRunner) DoctorCheck { podName := fmt.Sprintf("mcp-runtime-doctor-dns-%d", time.Now().UnixNano()) args := []string{ "run", "-n", doctorMCPServersNamespace, @@ -850,8 +861,8 @@ func checkMCPServersDNSAndNetwork(kubectl KubectlRunner) DoctorCheck { } } -func checkIngressRouteProbe(kubectl KubectlRunner, namespace string, distro Distribution) DoctorCheck { - ingressName, err := readKubectlOutput(kubectl, []string{"get", "ingress", "-n", namespace, "-o", "jsonpath={.items[0].metadata.name}"}) +func checkIngressRouteProbe(kubectl core.KubectlRunner, namespace string, distro Distribution) DoctorCheck { + route, err := resolveIngressRouteProbeTarget(kubectl, namespace) if err != nil { return DoctorCheck{ Name: "ingress route probe", @@ -859,34 +870,15 @@ func checkIngressRouteProbe(kubectl KubectlRunner, namespace string, distro Dist Detail: "no ingress resources found in mcp-servers; skipping live route probe", } } - ingressName = strings.TrimSpace(ingressName) - if ingressName == "" { + if route.Name == "" { return DoctorCheck{ Name: "ingress route probe", OK: true, Detail: "no ingress resources found in mcp-servers; skipping live route probe", } } - host, hostErr := readKubectlOutput(kubectl, []string{"get", "ingress", ingressName, "-n", namespace, "-o", "jsonpath={.spec.rules[0].host}"}) - if hostErr != nil { - return DoctorCheck{ - Name: "ingress route probe", - OK: false, - Detail: fmt.Sprintf("failed reading ingress host: %v", hostErr), - Remedy: "inspect ingress rule structure", - } - } - path, pathErr := readKubectlOutput(kubectl, []string{"get", "ingress", ingressName, "-n", namespace, "-o", "jsonpath={.spec.rules[0].http.paths[0].path}"}) - if pathErr != nil { - return DoctorCheck{ - Name: "ingress route probe", - OK: false, - Detail: 
fmt.Sprintf("failed reading ingress path: %v", pathErr), - Remedy: "inspect ingress rule structure", - } - } - host = strings.TrimSpace(host) - path = doctorNormalizePath(strings.TrimSpace(path)) + host := strings.TrimSpace(route.Host) + path := doctorNormalizePath(strings.TrimSpace(route.Path)) if path == "" { path = "/" } @@ -954,15 +946,41 @@ func checkIngressRouteProbe(kubectl KubectlRunner, namespace string, distro Dist return DoctorCheck{ Name: "ingress route probe", OK: false, - Detail: fmt.Sprintf("ingress %s returned HTTP 404 for path %s", ingressName, path), + Detail: fmt.Sprintf("ingress %s returned HTTP 404 for path %s", route.Name, path), Remedy: "confirm MCPServer ingress path/host matches the public route", } } return DoctorCheck{ Name: "ingress route probe", OK: true, - Detail: fmt.Sprintf("ingress %s returned HTTP %s for %s via %s/%s", ingressName, status, path, traefik.Namespace, traefik.Name), + Detail: fmt.Sprintf("ingress %s returned HTTP %s for %s via %s/%s", route.Name, status, path, traefik.Namespace, traefik.Name), + } +} + +func resolveIngressRouteProbeTarget(kubectl core.KubectlRunner, namespace string) (doctorIngressRoute, error) { + out, err := readKubectlOutput(kubectl, []string{"get", "ingress", "-n", namespace, "-o", "jsonpath={range .items[*]}{.metadata.name}|{.spec.rules[0].host}|{.spec.rules[0].http.paths[0].path}{\"\\n\"}{end}"}) + if err != nil { + return doctorIngressRoute{}, err + } + for _, line := range filterNonEmptyLines(out) { + parts := strings.SplitN(line, "|", 3) + if len(parts) == 0 { + continue + } + name := strings.TrimSpace(parts[0]) + if name == "" || strings.HasPrefix(name, "doctor-smoke-") { + continue + } + route := doctorIngressRoute{Name: name} + if len(parts) > 1 { + route.Host = strings.TrimSpace(parts[1]) + } + if len(parts) > 2 { + route.Path = strings.TrimSpace(parts[2]) + } + return route, nil } + return doctorIngressRoute{}, nil } // checkRegistryReachableFromCluster verifies that an in-cluster pod can 
talk to @@ -972,7 +990,7 @@ func checkIngressRouteProbe(kubectl KubectlRunner, namespace string, distro Dist // with registries.yaml mirrors) is distribution-specific and surfaced via the // remediation hint, not as a pass/fail check — we can't reach into kubelet // non-destructively. -func checkRegistryReachableFromCluster(kubectl KubectlRunner) DoctorCheck { +func checkRegistryReachableFromCluster(kubectl core.KubectlRunner) DoctorCheck { podName := fmt.Sprintf("mcp-runtime-doctor-curl-%d", time.Now().UnixNano()) args := []string{ "run", "-n", "registry", @@ -1018,7 +1036,7 @@ func checkRegistryReachableFromCluster(kubectl KubectlRunner) DoctorCheck { } } -func checkMCPServersImagePullSecrets(kubectl KubectlRunner, namespace string) DoctorCheck { +func checkMCPServersImagePullSecrets(kubectl core.KubectlRunner, namespace string) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"get", "serviceaccount", "default", "-n", namespace, "-o", "jsonpath={range .imagePullSecrets[*]}{.name}{\"\\n\"}{end}"}) if err != nil { return DoctorCheck{ @@ -1067,7 +1085,7 @@ func checkMCPServersImagePullSecrets(kubectl KubectlRunner, namespace string) Do } } -func checkMCPServersImagePullSmoke(kubectl KubectlRunner, namespace string) DoctorCheck { +func checkMCPServersImagePullSmoke(kubectl core.KubectlRunner, namespace string) DoctorCheck { image, imageSource := resolveDoctorSmokeImage(kubectl, namespace) podName := fmt.Sprintf("doctor-pull-%d", time.Now().UnixNano()) defer func() { @@ -1107,7 +1125,7 @@ func checkMCPServersImagePullSmoke(kubectl KubectlRunner, namespace string) Doct } } -func checkSentinelSecrets(kubectl KubectlRunner) DoctorCheck { +func checkSentinelSecrets(kubectl core.KubectlRunner) DoctorCheck { if _, err := readKubectlOutput(kubectl, []string{"get", "namespace", doctorSentinelNamespace, "-o", "jsonpath={.metadata.name}"}); err != nil { return DoctorCheck{ Name: "sentinel secrets", @@ -1161,7 +1179,7 @@ func checkSentinelSecrets(kubectl KubectlRunner) 
DoctorCheck { } } -func checkSentinelAPIAuthProbe(kubectl KubectlRunner) DoctorCheck { +func checkSentinelAPIAuthProbe(kubectl core.KubectlRunner) DoctorCheck { if _, err := readKubectlOutput(kubectl, []string{"get", "namespace", doctorSentinelNamespace, "-o", "jsonpath={.metadata.name}"}); err != nil { return DoctorCheck{ Name: "sentinel API auth probe", @@ -1237,7 +1255,7 @@ func checkSentinelAPIAuthProbe(kubectl KubectlRunner) DoctorCheck { } } -func checkNodeCapacity(kubectl KubectlRunner) DoctorCheck { +func checkNodeCapacity(kubectl core.KubectlRunner) DoctorCheck { cmd, err := kubectl.CommandArgs([]string{"top", "nodes", "--no-headers"}) if err == nil { out, topErr := cmd.CombinedOutput() @@ -1301,7 +1319,7 @@ func checkNodeCapacity(kubectl KubectlRunner) DoctorCheck { } } -func checkPendingPodsByNamespace(kubectl KubectlRunner) DoctorCheck { +func checkPendingPodsByNamespace(kubectl core.KubectlRunner) DoctorCheck { out, err := readKubectlOutput(kubectl, []string{"get", "pods", "-A", "--field-selector=status.phase=Pending", "-o", "custom-columns=NS:.metadata.namespace,NAME:.metadata.name", "--no-headers"}) if err != nil { return DoctorCheck{ @@ -1347,7 +1365,7 @@ type imagePullPodCandidate struct { Messages []string } -func checkRegistryHTTPPullMismatch(kubectl KubectlRunner) DoctorCheck { +func checkRegistryHTTPPullMismatch(kubectl core.KubectlRunner) DoctorCheck { out, err := readKubectlOutput(kubectl, []string{"get", "pods", "-A", "-o", buildImagePullJSONPath()}) if err != nil { return DoctorCheck{ @@ -1503,9 +1521,8 @@ func hasRegistryHTTPPullMismatchMessage(messages []string) bool { return false } -func checkMCPServerReconcileSmoke(kubectl KubectlRunner, namespace string) DoctorCheck { - image := "registry.k8s.io/pause:3.9" - imageSource := "fixed smoke image registry.k8s.io/pause:3.9" +func checkMCPServerReconcileSmoke(kubectl core.KubectlRunner, namespace string) DoctorCheck { + target := resolveDoctorSmokeTarget(kubectl, namespace) name := 
fmt.Sprintf("doctor-smoke-%d", time.Now().UnixNano()%1_000_000) manifest := fmt.Sprintf(`apiVersion: mcpruntime.org/v1alpha1 kind: MCPServer @@ -1514,13 +1531,13 @@ metadata: namespace: %s spec: image: %s - port: 8088 + port: %d servicePort: 80 publicPathPrefix: %s ingressClass: traefik ingressAnnotations: traefik.ingress.kubernetes.io/router.entrypoints: web -`, name, namespace, strings.TrimSpace(image), name) +`, name, namespace, strings.TrimSpace(target.Image), target.Port, name) cleanup := func() { _ = kubectl.Run([]string{"delete", "mcpserver", name, "-n", namespace, "--ignore-not-found"}) _ = kubectl.Run([]string{"delete", "deploy", name, "-n", namespace, "--ignore-not-found"}) @@ -1556,12 +1573,24 @@ spec: Remedy: "inspect operator reconcile errors and MCPServer status", } } - if err := waitForDoctorDeploymentReady(kubectl, name, namespace, 150*time.Second); err != nil { - return DoctorCheck{ - Name: "MCPServer reconcile smoke", - OK: false, - Detail: fmt.Sprintf("deployment did not become ready: %v", err), - Remedy: "inspect operator reconcile and smoke deployment events", + if target.WaitForReady { + if err := waitForDoctorDeploymentReady(kubectl, name, namespace, 150*time.Second); err != nil { + return DoctorCheck{ + Name: "MCPServer reconcile smoke", + OK: false, + Detail: fmt.Sprintf("deployment did not become ready: %v", err), + Remedy: "inspect operator reconcile and smoke deployment events", + } + } + } + if !target.WaitForReady { + if err := waitForDoctorPodsScheduled(kubectl, name, namespace, 30*time.Second); err != nil { + return DoctorCheck{ + Name: "MCPServer reconcile smoke", + OK: false, + Detail: fmt.Sprintf("deployment pod was not scheduled: %v", err), + Remedy: "inspect operator reconcile and smoke deployment events", + } } } if err := waitForDoctorResource(kubectl, "svc", name, namespace, 150*time.Second); err != nil { @@ -1580,14 +1609,21 @@ spec: Remedy: "inspect operator ingress reconciliation", } } + if target.WaitForReady { + return 
DoctorCheck{ + Name: "MCPServer reconcile smoke", + OK: true, + Detail: fmt.Sprintf("temporary MCPServer %s reconciled ready deployment/service/ingress using %s", name, target.Source), + } + } return DoctorCheck{ Name: "MCPServer reconcile smoke", OK: true, - Detail: fmt.Sprintf("temporary MCPServer %s reconciled ready deployment/service/ingress using %s", name, imageSource), + Detail: fmt.Sprintf("temporary MCPServer %s reconciled deployment/service/ingress using %s; skipped readiness because the fallback image does not expose the MCP port", name, target.Source), } } -func waitForDoctorResource(kubectl KubectlRunner, resource, name, namespace string, timeout time.Duration) error { +func waitForDoctorResource(kubectl core.KubectlRunner, resource, name, namespace string, timeout time.Duration) error { timeoutTimer := time.NewTimer(timeout) defer timeoutTimer.Stop() ticker := time.NewTicker(2 * time.Second) @@ -1611,7 +1647,7 @@ func waitForDoctorResource(kubectl KubectlRunner, resource, name, namespace stri } } -func waitForDoctorDeploymentReady(kubectl KubectlRunner, name, namespace string, timeout time.Duration) error { +func waitForDoctorDeploymentReady(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error { cmd, err := kubectl.CommandArgs([]string{"rollout", "status", "deployment/" + name, "-n", namespace, "--timeout=" + timeout.String()}) if err != nil { return err @@ -1627,6 +1663,25 @@ func waitForDoctorDeploymentReady(kubectl KubectlRunner, name, namespace string, return fmt.Errorf("%w: %s", runErr, detail) } +func waitForDoctorPodsScheduled(kubectl core.KubectlRunner, name, namespace string, timeout time.Duration) error { + timeoutTimer := time.NewTimer(timeout) + defer timeoutTimer.Stop() + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + for { + out, err := readKubectlOutput(kubectl, []string{"get", "pods", "-n", namespace, "-l", "app=" + name, "-o", "jsonpath={.items[0].spec.nodeName}"}) + if err == nil && 
strings.TrimSpace(out) != "" { + return nil + } + select { + case <-timeoutTimer.C: + return fmt.Errorf("no scheduled pod found for deployment %s before timeout", name) + case <-ticker.C: + } + } +} + func hasHTTP200Status(body string) bool { for _, line := range strings.Split(body, "\n") { line = strings.TrimSpace(line) @@ -1639,7 +1694,7 @@ func hasHTTP200Status(body string) bool { return false } -func readKubectlOutput(kubectl KubectlRunner, args []string) (string, error) { +func readKubectlOutput(kubectl core.KubectlRunner, args []string) (string, error) { cmd, err := kubectl.CommandArgs(args) if err != nil { return "", err @@ -1765,33 +1820,51 @@ func doctorNormalizePath(value string) string { return trimmed } -func resolveDoctorSmokeImage(kubectl KubectlRunner, preferredNamespace string) (string, string) { - image, err := readKubectlOutput(kubectl, []string{"get", "deploy", "-n", preferredNamespace, "-o", "jsonpath={.items[0].spec.template.spec.containers[0].image}"}) - if err == nil && strings.TrimSpace(image) != "" { - return strings.TrimSpace(image), fmt.Sprintf("deployment in %s", preferredNamespace) - } - all, allErr := readKubectlOutput(kubectl, []string{"get", "deploy", "-A", "-o", "jsonpath={range .items[*]}{.metadata.namespace}|{.metadata.name}|{.spec.template.spec.containers[0].image}{\"\\n\"}{end}"}) - if allErr != nil { - return "registry.k8s.io/pause:3.9", "default fallback image registry.k8s.io/pause:3.9" - } - for _, line := range filterNonEmptyLines(all) { - parts := strings.SplitN(line, "|", 3) - if len(parts) != 3 { - continue - } - img := strings.TrimSpace(parts[2]) - if img == "" { - continue +func resolveDoctorSmokeImage(kubectl core.KubectlRunner, preferredNamespace string) (string, string) { + target := resolveDoctorSmokeTarget(kubectl, preferredNamespace) + return target.Image, target.Source +} + +func resolveDoctorSmokeTarget(kubectl core.KubectlRunner, preferredNamespace string) doctorSmokeTarget { + out, err := 
readKubectlOutput(kubectl, []string{"get", "deploy", "-n", preferredNamespace, "-o", "jsonpath={range .items[*]}{.metadata.name}|{.status.readyReplicas}|{.spec.template.spec.containers[0].image}|{.spec.template.spec.containers[0].ports[0].containerPort}{\"\\n\"}{end}"}) + if err == nil { + for _, line := range filterNonEmptyLines(out) { + parts := strings.SplitN(line, "|", 4) + if len(parts) < 3 { + continue + } + name := strings.TrimSpace(parts[0]) + ready := strings.TrimSpace(parts[1]) + image := strings.TrimSpace(parts[2]) + if name == "" || strings.HasPrefix(name, "doctor-smoke-") || ready == "" || ready == "0" || image == "" { + continue + } + port := int32(8088) + if len(parts) == 4 { + if parsed, parseErr := strconv.Atoi(strings.TrimSpace(parts[3])); parseErr == nil && parsed > 0 { + port = int32(parsed) + } + } + return doctorSmokeTarget{ + Image: image, + Port: port, + Source: fmt.Sprintf("ready deployment %s/%s", preferredNamespace, name), + WaitForReady: true, + } } - return img, fmt.Sprintf("deployment %s/%s", strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])) } - return "registry.k8s.io/pause:3.9", "default fallback image registry.k8s.io/pause:3.9" + return doctorSmokeTarget{ + Image: "registry.k8s.io/pause:3.9", + Port: 8088, + Source: "fallback image registry.k8s.io/pause:3.9", + WaitForReady: false, + } } // PrintDoctorReport emits a human-readable report using the standard printer. 
func PrintDoctorReport(r DoctorReport) { - Section("Cluster Doctor") - Info(fmt.Sprintf("Distribution: %s", r.Distribution)) + core.Section("Cluster Doctor") + core.Info(fmt.Sprintf("Distribution: %s", r.Distribution)) for _, c := range r.Checks { printDoctorCheckResult(c) } @@ -1799,7 +1872,7 @@ func PrintDoctorReport(r DoctorReport) { } func printDoctorCheckProgress(event DoctorCheckProgressEvent) func(DoctorCheck) { - Info(doctorCheckProgressMessage(event)) + core.Info(doctorCheckProgressMessage(event)) return func(c DoctorCheck) { printDoctorCheckResult(c) } @@ -1818,12 +1891,12 @@ func doctorCheckProgressMessage(event DoctorCheckProgressEvent) string { func printDoctorCheckResult(c DoctorCheck) { if c.OK { - Success(doctorCheckMessage(c)) + core.Success(doctorCheckMessage(c)) return } - Error(doctorCheckMessage(c)) + core.Error(doctorCheckMessage(c)) if c.Remedy != "" { - Info(" Remedy: " + c.Remedy) + core.Info(" Remedy: " + c.Remedy) } } @@ -1833,10 +1906,10 @@ func doctorCheckMessage(c DoctorCheck) string { func printDoctorReportFooter(r DoctorReport) { if !r.AllOK() { - Info("") - Info("Full remediation steps per distribution are in docs/cluster-readiness.md.") + core.Info("") + core.Info("Full remediation steps per distribution are in docs/cluster-readiness.md.") if reportHasRegistryOrPullFailure(r) { - Info(remediationHint(r.Distribution)) + core.Info(remediationHint(r.Distribution)) } } } diff --git a/internal/cli/cluster_doctor_test.go b/internal/cli/cluster/doctor_impl_test.go similarity index 65% rename from internal/cli/cluster_doctor_test.go rename to internal/cli/cluster/doctor_impl_test.go index 27e8ebf..76d890b 100644 --- a/internal/cli/cluster_doctor_test.go +++ b/internal/cli/cluster/doctor_impl_test.go @@ -1,10 +1,12 @@ -package cli +package cluster import ( "errors" "fmt" "strings" "testing" + + "mcp-runtime/internal/cli/core" ) func TestDetectDistribution(t *testing.T) { @@ -53,20 +55,20 @@ func TestDetectDistribution(t *testing.T) { for _, 
tc := range cases { t.Run(tc.name, func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "jsonpath={.items[*].status.nodeInfo.kubeletVersion}"): - return &MockCommand{OutputData: []byte(tc.kubelet)} + return &core.MockCommand{OutputData: []byte(tc.kubelet)} case contains(spec.Args, "jsonpath={.items[*].metadata.name}"): - return &MockCommand{OutputData: []byte(tc.names)} + return &core.MockCommand{OutputData: []byte(tc.names)} case contains(spec.Args, "current-context"): - return &MockCommand{OutputData: []byte(tc.context)} + return &core.MockCommand{OutputData: []byte(tc.context)} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) got := DetectDistribution(kubectl) if got != tc.want { t.Fatalf("DetectDistribution() = %q, want %q", got, tc.want) @@ -77,12 +79,12 @@ func TestDetectDistribution(t *testing.T) { func TestCheckRegistryService(t *testing.T) { t.Run("ok with nodeport", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("32000")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("32000")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryService(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -93,12 +95,12 @@ func TestCheckRegistryService(t *testing.T) { }) t.Run("fails when service missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("not found")} + mock := &core.MockExecutor{ + CommandFunc: func(spec 
core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("not found")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryService(kubectl) if check.OK { t.Fatal("expected failure when service missing") @@ -111,12 +113,12 @@ func TestCheckRegistryService(t *testing.T) { func TestCheckNamespaceExists(t *testing.T) { t.Run("ok when namespace exists", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("mcp-servers")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("mcp-servers")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNamespaceExists(kubectl, "mcp-servers") if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -124,12 +126,12 @@ func TestCheckNamespaceExists(t *testing.T) { }) t.Run("fails when namespace missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("not found")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("not found")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNamespaceExists(kubectl, "mcp-servers") if check.OK { t.Fatal("expected failure when namespace is missing") @@ -139,12 +141,12 @@ func TestCheckNamespaceExists(t *testing.T) { func TestCheckMCPServerCRD(t *testing.T) { t.Run("ok when CRD exists", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} + mock := &core.MockExecutor{ + CommandFunc: func(spec 
core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServerCRD(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -152,12 +154,12 @@ func TestCheckMCPServerCRD(t *testing.T) { }) t.Run("fails when CRD missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("not found")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("not found")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServerCRD(kubectl) if check.OK { t.Fatal("expected failure when CRD is missing") @@ -167,12 +169,12 @@ func TestCheckMCPServerCRD(t *testing.T) { func TestCheckOperatorReady(t *testing.T) { t.Run("ok when desired replicas are ready", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("1/1")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("1/1")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkOperatorReady(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -180,12 +182,12 @@ func TestCheckOperatorReady(t *testing.T) { }) t.Run("fails when not enough replicas are ready", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("0/1")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("0/1")} }, 
} - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkOperatorReady(kubectl) if check.OK { t.Fatal("expected failure for 0/1 ready replicas") @@ -195,12 +197,12 @@ func TestCheckOperatorReady(t *testing.T) { func TestCheckTraefikIngressClass(t *testing.T) { t.Run("ok when ingressClass exists", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("traefik")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("traefik")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikIngressClass(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -208,12 +210,12 @@ func TestCheckTraefikIngressClass(t *testing.T) { }) t.Run("fails when ingressClass is missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("not found")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("not found")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikIngressClass(kubectl) if check.OK { t.Fatal("expected failure when ingressClass missing") @@ -223,12 +225,12 @@ func TestCheckTraefikIngressClass(t *testing.T) { func TestCheckTraefikWebEntrypoint(t *testing.T) { t.Run("ok when service exposes named web entrypoint", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("web:8000:32080\nwebsecure:8443:32443\n")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return 
&core.MockCommand{OutputData: []byte("web:8000:32080\nwebsecure:8443:32443\n")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikWebEntrypoint(kubectl, DistroGeneric) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -236,18 +238,18 @@ func TestCheckTraefikWebEntrypoint(t *testing.T) { }) t.Run("ok with k3s bundled traefik service", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "kube-system"): - return &MockCommand{OutputData: []byte("web:80:0\nwebsecure:443:0\n")} + return &core.MockCommand{OutputData: []byte("web:80:0\nwebsecure:443:0\n")} case contains(spec.Args, "traefik"): - return &MockCommand{OutputErr: errors.New("not found")} + return &core.MockCommand{OutputErr: errors.New("not found")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikWebEntrypoint(kubectl, DistroK3s) if !check.OK { t.Fatalf("expected OK for k3s bundled Traefik, got detail=%q", check.Detail) @@ -258,12 +260,12 @@ func TestCheckTraefikWebEntrypoint(t *testing.T) { }) t.Run("fails when service does not expose web entrypoint", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("admin:9000:32090\n")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("admin:9000:32090\n")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikWebEntrypoint(kubectl, DistroGeneric) if check.OK { t.Fatal("expected failure when web entrypoint is not exposed") @@ -273,12 
+275,12 @@ func TestCheckTraefikWebEntrypoint(t *testing.T) { func TestCheckRegistryReachableFromCluster(t *testing.T) { t.Run("ok on HTTP 200", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("HTTP/1.1 200 OK\nDocker-Distribution-Api-Version: registry/2.0\n")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("HTTP/1.1 200 OK\nDocker-Distribution-Api-Version: registry/2.0\n")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryReachableFromCluster(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -286,12 +288,12 @@ func TestCheckRegistryReachableFromCluster(t *testing.T) { }) t.Run("fails on non-200", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryReachableFromCluster(kubectl) if check.OK { t.Fatal("expected failure for non-200") @@ -299,12 +301,12 @@ func TestCheckRegistryReachableFromCluster(t *testing.T) { }) t.Run("does not false-pass when body includes non-status 200", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("diagnostic: 200 retries\nHTTP/1.1 503 Service Unavailable\n")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("diagnostic: 200 retries\nHTTP/1.1 503 Service 
Unavailable\n")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryReachableFromCluster(kubectl) if check.OK { t.Fatal("expected failure when HTTP status line is not 200") @@ -312,12 +314,12 @@ func TestCheckRegistryReachableFromCluster(t *testing.T) { }) t.Run("fails when helper pod errors", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("pod failed"), RunErr: errors.New("pod failed")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("pod failed"), RunErr: errors.New("pod failed")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryReachableFromCluster(kubectl) if check.OK { t.Fatal("expected failure when helper pod errors") @@ -382,19 +384,19 @@ Events: ---- ------ ---- ---- ------- Warning Failed 31s kubelet Failed to pull image "10.96.64.95:5000/mcp-sentinel-api:latest": failed to resolve reference "10.96.64.95:5000/mcp-sentinel-api:latest": failed to do request: Head "https://10.96.64.95:5000/v2/mcp-sentinel-api/manifests/latest": http: server gave HTTP response to HTTPS client ` - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "pods") && contains(spec.Args, "-A"): - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} case contains(spec.Args, "describe"): - return &MockCommand{OutputData: []byte(describe)} + return &core.MockCommand{OutputData: []byte(describe)} default: - return &MockCommand{} + return &core.MockCommand{} } }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) 
check := checkRegistryHTTPPullMismatch(kubectl) if check.OK { t.Fatal("expected registry HTTP mismatch to fail") @@ -423,19 +425,19 @@ Events: ---- ------ ---- ---- ------- Warning Failed 31s kubelet Failed to pull image "registry.local/bootstrap:latest": failed to do request: Head "https://registry.local/v2/bootstrap/manifests/latest": http: server gave HTTP response to HTTPS client ` - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "pods") && contains(spec.Args, "-A"): - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} case contains(spec.Args, "describe"): - return &MockCommand{OutputData: []byte(describe)} + return &core.MockCommand{OutputData: []byte(describe)} default: - return &MockCommand{} + return &core.MockCommand{} } }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryHTTPPullMismatch(kubectl) if check.OK { t.Fatal("expected registry HTTP mismatch for init container to fail") @@ -453,15 +455,15 @@ Events: // include a reason in parentheses. 
msg := `failed to pull image "registry.local/bootstrap:latest": failed to do request: Head "https://registry.local/v2/bootstrap/manifests/latest": http: server gave HTTP response to HTTPS client` pods := "mcp-servers|demo-status-abc|registry.local/bootstrap:latest" + sep + "registry.local/demo:latest" + sep + "||" + msg + sep - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "describe") { t.Fatalf("did not expect describe call when waiting status message has mismatch") } - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryHTTPPullMismatch(kubectl) if check.OK { t.Fatal("expected registry HTTP mismatch from status message to fail") @@ -481,15 +483,15 @@ Events: describe := `Events: Warning Failed kubelet Failed to pull image "registry.local/demo:latest": not found ` - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "describe") { - return &MockCommand{OutputData: []byte(describe)} + return &core.MockCommand{OutputData: []byte(describe)} } - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryHTTPPullMismatch(kubectl) if !check.OK { t.Fatalf("expected OK when no HTTP mismatch event exists, got detail=%q", check.Detail) @@ -501,15 +503,15 @@ Events: "mcp-servers|demo-a|registry.local/demo-a:latest" + sep + "|ErrImagePull" + sep + "|", "mcp-servers|demo-b|registry.local/demo-b:latest" + sep + "|ImagePullBackOff" + sep + "|", }, "\n") - mock := 
&MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "describe") { - return &MockCommand{OutputErr: errors.New("pods is forbidden")} + return &core.MockCommand{OutputErr: errors.New("pods is forbidden")} } - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryHTTPPullMismatch(kubectl) if check.OK { t.Fatal("expected registry HTTP mismatch check to fail when pod inspection fails") @@ -534,16 +536,16 @@ Events: } pods := strings.Join(lines, "\n") describeCalls := 0 - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "describe") { describeCalls++ - return &MockCommand{OutputData: []byte("Events:\n Warning Failed kubelet Failed to pull image: not found\n")} + return &core.MockCommand{OutputData: []byte("Events:\n Warning Failed kubelet Failed to pull image: not found\n")} } - return &MockCommand{OutputData: []byte(pods)} + return &core.MockCommand{OutputData: []byte(pods)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkRegistryHTTPPullMismatch(kubectl) if !check.OK { t.Fatalf("expected OK when no candidates have HTTP mismatch, got detail=%q", check.Detail) @@ -558,30 +560,30 @@ Events: } func TestRunDoctorAggregates(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "jsonpath={.items[*].status.nodeInfo.kubeletVersion}"): - return &MockCommand{OutputData: []byte("v1.34.6+k3s1")} + return 
&core.MockCommand{OutputData: []byte("v1.34.6+k3s1")} case contains(spec.Args, "namespace mcp-servers"): - return &MockCommand{OutputData: []byte("mcp-servers")} + return &core.MockCommand{OutputData: []byte("mcp-servers")} case contains(spec.Args, "crd mcpservers.mcpruntime.org"): - return &MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} + return &core.MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} case contains(spec.Args, "mcp-runtime-operator-controller-manager"): - return &MockCommand{OutputData: []byte("1/1")} + return &core.MockCommand{OutputData: []byte("1/1")} case contains(spec.Args, "ingressclass traefik"): - return &MockCommand{OutputData: []byte("traefik")} + return &core.MockCommand{OutputData: []byte("traefik")} case contains(spec.Args, "svc -n traefik traefik"): - return &MockCommand{OutputData: []byte("web:8000:32080\n")} + return &core.MockCommand{OutputData: []byte("web:8000:32080\n")} case contains(spec.Args, "jsonpath={.spec.ports[0].nodePort}"): - return &MockCommand{OutputData: []byte("32000")} + return &core.MockCommand{OutputData: []byte("32000")} case contains(spec.Args, "curl"): - return &MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} + return &core.MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) report := RunDoctor(kubectl) if report.Distribution != DistroK3s { t.Fatalf("expected DistroK3s, got %q", report.Distribution) @@ -595,30 +597,30 @@ func TestRunDoctorAggregates(t *testing.T) { } func TestRunDoctorWithProgressReportsEachCheck(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "jsonpath={.items[*].status.nodeInfo.kubeletVersion}"): - return 
&MockCommand{OutputData: []byte("v1.34.6+k3s1")} + return &core.MockCommand{OutputData: []byte("v1.34.6+k3s1")} case contains(spec.Args, "namespace mcp-servers"): - return &MockCommand{OutputData: []byte("mcp-servers")} + return &core.MockCommand{OutputData: []byte("mcp-servers")} case contains(spec.Args, "crd mcpservers.mcpruntime.org"): - return &MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} + return &core.MockCommand{OutputData: []byte("mcpservers.mcpruntime.org")} case contains(spec.Args, "mcp-runtime-operator-controller-manager"): - return &MockCommand{OutputData: []byte("1/1")} + return &core.MockCommand{OutputData: []byte("1/1")} case contains(spec.Args, "ingressclass traefik"): - return &MockCommand{OutputData: []byte("traefik")} + return &core.MockCommand{OutputData: []byte("traefik")} case contains(spec.Args, "svc -n traefik traefik"): - return &MockCommand{OutputData: []byte("web:8000:32080\n")} + return &core.MockCommand{OutputData: []byte("web:8000:32080\n")} case contains(spec.Args, "jsonpath={.spec.ports[0].nodePort}"): - return &MockCommand{OutputData: []byte("32000")} + return &core.MockCommand{OutputData: []byte("32000")} case contains(spec.Args, "curl"): - return &MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} + return &core.MockCommand{OutputData: []byte("HTTP/1.1 503 Service Unavailable\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) var events []string report := RunDoctorWithProgress(kubectl, func(event DoctorCheckProgressEvent) func(DoctorCheck) { if event.Index <= 0 || event.Total <= 0 { @@ -652,31 +654,27 @@ func TestRunDoctorWithProgressReportsEachCheck(t *testing.T) { } func TestDoctorCurlProbesPassPathValidator(t *testing.T) { - validators := []ExecValidator{NoControlChars(), PathUnder("/workspace")} + validators := []core.ExecValidator{core.NoControlChars(), 
core.PathUnder("/workspace")} t.Run("ingress route probe", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { - case contains(spec.Args, "jsonpath={.items[0].metadata.name}"): - return &MockCommand{OutputData: []byte("demo")} - case contains(spec.Args, "jsonpath={.spec.rules[0].host}"): - return &MockCommand{} - case contains(spec.Args, "jsonpath={.spec.rules[0].http.paths[0].path}"): - return &MockCommand{OutputData: []byte("/demo/mcp")} + case contains(spec.Args, "jsonpath={range .items[*]}{.metadata.name}|{.spec.rules[0].host}|{.spec.rules[0].http.paths[0].path}"): + return &core.MockCommand{OutputData: []byte("doctor-smoke-old||/doctor-smoke-old/mcp\ndemo||/demo/mcp\n")} case contains(spec.Args, "svc"): - return &MockCommand{OutputData: []byte("web:8000:32080\n")} + return &core.MockCommand{OutputData: []byte("web:8000:32080\n")} case contains(spec.Args, "curl"): if contains(spec.Args, "/dev/null") { t.Fatal("doctor curl helper should not pass /dev/null through kubectl validators") } - return &MockCommand{OutputData: []byte("200")} + return &core.MockCommand{OutputData: []byte("200")} default: - return &MockCommand{} + return &core.MockCommand{} } }, } - kubectl := &KubectlClient{exec: mock, validators: validators} + kubectl := core.NewTestKubectlClientWithValidators(mock, validators) check := checkIngressRouteProbe(kubectl, "mcp-servers", DistroGeneric) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -684,24 +682,24 @@ func TestDoctorCurlProbesPassPathValidator(t *testing.T) { }) t.Run("sentinel API auth probe", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "namespace"): - return &MockCommand{OutputData: 
[]byte(doctorSentinelNamespace)} + return &core.MockCommand{OutputData: []byte(doctorSentinelNamespace)} case contains(spec.Args, "jsonpath={.data.UI_API_KEY}"): - return &MockCommand{OutputData: []byte("dGVzdA==")} + return &core.MockCommand{OutputData: []byte("dGVzdA==")} case contains(spec.Args, "curl"): if contains(spec.Args, "/dev/null") { t.Fatal("doctor curl helper should not pass /dev/null through kubectl validators") } - return &MockCommand{OutputData: []byte("200")} + return &core.MockCommand{OutputData: []byte("200")} default: - return &MockCommand{} + return &core.MockCommand{} } }, } - kubectl := &KubectlClient{exec: mock, validators: validators} + kubectl := core.NewTestKubectlClientWithValidators(mock, validators) check := checkSentinelAPIAuthProbe(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -729,12 +727,12 @@ func TestReportHasRegistryOrPullFailure(t *testing.T) { func TestCheckNamespacePodAdmission(t *testing.T) { t.Run("ok on dry-run success", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("pod/doctor-admission created (dry run)")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("pod/doctor-admission created (dry run)")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNamespacePodAdmission(kubectl, "mcp-servers") if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -742,15 +740,15 @@ func TestCheckNamespacePodAdmission(t *testing.T) { }) t.Run("fails when admission rejects", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{ + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{ OutputData: []byte("pods 
\"doctor-admission\" is forbidden: exceeds quota"), OutputErr: errors.New("exit status 1"), } }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNamespacePodAdmission(kubectl, "mcp-servers") if check.OK { t.Fatalf("expected failure when dry-run rejected; detail=%q", check.Detail) @@ -781,12 +779,12 @@ func TestCheckTraefikDeploymentReady(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte(tc.output), OutputErr: tc.outErr} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte(tc.output), OutputErr: tc.outErr} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikDeploymentReady(kubectl, DistroGeneric) if check.OK != tc.wantOK { t.Fatalf("OK=%v want %v; detail=%q", check.OK, tc.wantOK, check.Detail) @@ -795,18 +793,18 @@ func TestCheckTraefikDeploymentReady(t *testing.T) { } t.Run("ok with k3s bundled traefik deployment", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "kube-system"): - return &MockCommand{OutputData: []byte("1/1")} + return &core.MockCommand{OutputData: []byte("1/1")} case contains(spec.Args, "traefik"): - return &MockCommand{OutputErr: errors.New("not found")} + return &core.MockCommand{OutputErr: errors.New("not found")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikDeploymentReady(kubectl, DistroK3s) if !check.OK { t.Fatalf("expected OK for k3s bundled Traefik, got detail=%q", 
check.Detail) @@ -859,12 +857,12 @@ func TestCheckTraefikServiceExposure(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte(tc.output)} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte(tc.output)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikServiceExposure(kubectl, DistroGeneric) if check.OK != tc.wantOK { t.Fatalf("OK=%v want %v; detail=%q", check.OK, tc.wantOK, check.Detail) @@ -876,18 +874,18 @@ func TestCheckTraefikServiceExposure(t *testing.T) { } t.Run("ok with k3s bundled traefik service", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "kube-system"): - return &MockCommand{OutputData: []byte("LoadBalancer|10.1.2.3||web:80:0,websecure:443:0,")} + return &core.MockCommand{OutputData: []byte("LoadBalancer|10.1.2.3||web:80:0,websecure:443:0,")} case contains(spec.Args, "traefik"): - return &MockCommand{OutputErr: errors.New("not found")} + return &core.MockCommand{OutputErr: errors.New("not found")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkTraefikServiceExposure(kubectl, DistroK3s) if !check.OK { t.Fatalf("expected OK for k3s bundled Traefik, got detail=%q", check.Detail) @@ -916,12 +914,12 @@ func TestCheckOperatorRecentReconcileErrors(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte(tc.logs), 
OutputErr: tc.outErr} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte(tc.logs), OutputErr: tc.outErr} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkOperatorRecentReconcileErrors(kubectl) if check.OK != tc.wantOK { t.Fatalf("OK=%v want %v; detail=%q", check.OK, tc.wantOK, check.Detail) @@ -933,26 +931,28 @@ func TestCheckOperatorRecentReconcileErrors(t *testing.T) { func TestCheckMCPServerReconcileSmoke(t *testing.T) { t.Run("waits for deployment rollout readiness", func(t *testing.T) { sawRollout := false - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { + case argContains(spec.Args, "readyReplicas") && argContains(spec.Args, "containerPort"): + return &core.MockCommand{OutputData: []byte("go-example|1|registry.local/go-example:dev|8088\n")} case contains(spec.Args, "apply"): - return &MockCommand{} + return &core.MockCommand{} case contains(spec.Args, "rollout"): sawRollout = true if !contains(spec.Args, "--timeout=2m30s") { t.Fatalf("rollout status args %v missing timeout", spec.Args) } - return &MockCommand{} + return &core.MockCommand{} case contains(spec.Args, "get"): - return &MockCommand{OutputData: []byte("doctor-smoke")} + return &core.MockCommand{OutputData: []byte("doctor-smoke")} case contains(spec.Args, "delete"): - return &MockCommand{} + return &core.MockCommand{} } - return &MockCommand{OutputErr: fmt.Errorf("unexpected command: %v", spec.Args)} + return &core.MockCommand{OutputErr: fmt.Errorf("unexpected command: %v", spec.Args)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServerReconcileSmoke(kubectl, "mcp-servers") if !check.OK { @@ -967,25 +967,27 @@ func 
TestCheckMCPServerReconcileSmoke(t *testing.T) { }) t.Run("fails when deployment rollout does not become ready", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { + case argContains(spec.Args, "readyReplicas") && argContains(spec.Args, "containerPort"): + return &core.MockCommand{OutputData: []byte("go-example|1|registry.local/go-example:dev|8088\n")} case contains(spec.Args, "apply"): - return &MockCommand{} + return &core.MockCommand{} case contains(spec.Args, "rollout"): - return &MockCommand{ + return &core.MockCommand{ OutputData: []byte("deployment \"doctor-smoke\" exceeded its progress deadline"), OutputErr: errors.New("rollout timed out"), } case contains(spec.Args, "get"): - return &MockCommand{OutputData: []byte("doctor-smoke")} + return &core.MockCommand{OutputData: []byte("doctor-smoke")} case contains(spec.Args, "delete"): - return &MockCommand{} + return &core.MockCommand{} } - return &MockCommand{OutputErr: fmt.Errorf("unexpected command: %v", spec.Args)} + return &core.MockCommand{OutputErr: fmt.Errorf("unexpected command: %v", spec.Args)} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServerReconcileSmoke(kubectl, "mcp-servers") if check.OK { @@ -998,19 +1000,64 @@ func TestCheckMCPServerReconcileSmoke(t *testing.T) { t.Fatalf("detail should include rollout output, got %q", check.Detail) } }) + + t.Run("skips impossible rollout wait for fallback pause image", func(t *testing.T) { + sawRollout := false + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + switch { + case argContains(spec.Args, "readyReplicas") && argContains(spec.Args, "containerPort"): + return &core.MockCommand{} + case contains(spec.Args, "apply"): + return &core.MockCommand{} + case contains(spec.Args, "rollout"): + 
sawRollout = true + return &core.MockCommand{OutputErr: errors.New("rollout should not be called")} + case contains(spec.Args, "get") && contains(spec.Args, "pods"): + return &core.MockCommand{OutputData: []byte("node-a")} + case contains(spec.Args, "get"): + return &core.MockCommand{OutputData: []byte("doctor-smoke")} + case contains(spec.Args, "delete"): + return &core.MockCommand{} + } + return &core.MockCommand{OutputErr: fmt.Errorf("unexpected command: %v", spec.Args)} + }, + } + kubectl := core.NewTestKubectlClient(mock) + + check := checkMCPServerReconcileSmoke(kubectl, "mcp-servers") + if !check.OK { + t.Fatalf("expected OK, got detail=%q remedy=%q", check.Detail, check.Remedy) + } + if sawRollout { + t.Fatal("fallback pause smoke should not wait for rollout readiness") + } + if !strings.Contains(check.Detail, "skipped readiness") { + t.Fatalf("detail should mention skipped readiness, got %q", check.Detail) + } + }) +} + +func argContains(args []string, value string) bool { + for _, arg := range args { + if strings.Contains(arg, value) { + return true + } + } + return false } func TestCheckNodeCapacity(t *testing.T) { t.Run("metrics-server healthy", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "top") { - return &MockCommand{OutputData: []byte("node-a 200m 10% 1Gi 20%\nnode-b 400m 20% 2Gi 40%\n")} + return &core.MockCommand{OutputData: []byte("node-a 200m 10% 1Gi 20%\nnode-b 400m 20% 2Gi 40%\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNodeCapacity(kubectl) if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -1021,15 +1068,15 @@ func TestCheckNodeCapacity(t *testing.T) { }) t.Run("flags hot node at CPU>=95%%", func(t *testing.T) { - mock := 
&MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "top") { - return &MockCommand{OutputData: []byte("node-a 3800m 96% 7Gi 80%\n")} + return &core.MockCommand{OutputData: []byte("node-a 3800m 96% 7Gi 80%\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNodeCapacity(kubectl) if check.OK { t.Fatal("expected failure for 96% CPU") @@ -1040,15 +1087,15 @@ func TestCheckNodeCapacity(t *testing.T) { }) t.Run("flags hot node at memory>=95%%", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "top") { - return &MockCommand{OutputData: []byte("node-a 100m 10% 8Gi 97%\n")} + return &core.MockCommand{OutputData: []byte("node-a 100m 10% 8Gi 97%\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNodeCapacity(kubectl) if check.OK { t.Fatal("expected failure for 97% memory") @@ -1056,18 +1103,18 @@ func TestCheckNodeCapacity(t *testing.T) { }) t.Run("falls back to allocatable when metrics-server missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "top"): - return &MockCommand{OutputData: []byte("error: Metrics API not available"), OutputErr: errors.New("exit status 1")} + return &core.MockCommand{OutputData: []byte("error: Metrics API not available"), OutputErr: errors.New("exit status 1")} case contains(spec.Args, "nodes"): - return &MockCommand{OutputData: 
[]byte("node-a 4 16Gi\nnode-b 4 16Gi\n")} + return &core.MockCommand{OutputData: []byte("node-a 4 16Gi\nnode-b 4 16Gi\n")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNodeCapacity(kubectl) if !check.OK { t.Fatalf("expected OK fallback, got detail=%q", check.Detail) @@ -1078,12 +1125,12 @@ func TestCheckNodeCapacity(t *testing.T) { }) t.Run("fails when both metrics and allocatable are unavailable", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("cluster unreachable")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("cluster unreachable")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkNodeCapacity(kubectl) if check.OK { t.Fatal("expected failure when both paths fail") @@ -1093,12 +1140,12 @@ func TestCheckNodeCapacity(t *testing.T) { func TestCheckMCPServersImagePullSecrets(t *testing.T) { t.Run("ok when no imagePullSecrets configured", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputData: []byte("")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputData: []byte("")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServersImagePullSecrets(kubectl, "mcp-servers") if !check.OK { t.Fatalf("expected OK when no secrets configured, got detail=%q", check.Detail) @@ -1106,19 +1153,19 @@ func TestCheckMCPServersImagePullSecrets(t *testing.T) { }) t.Run("ok when all referenced secrets exist", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) 
*MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "serviceaccount"): - return &MockCommand{OutputData: []byte("reg-creds\ngcr-creds\n")} + return &core.MockCommand{OutputData: []byte("reg-creds\ngcr-creds\n")} case contains(spec.Args, "secret"): // both secret lookups succeed - return &MockCommand{OutputData: []byte("reg-creds")} + return &core.MockCommand{OutputData: []byte("reg-creds")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServersImagePullSecrets(kubectl, "mcp-servers") if !check.OK { t.Fatalf("expected OK, got detail=%q", check.Detail) @@ -1126,20 +1173,20 @@ func TestCheckMCPServersImagePullSecrets(t *testing.T) { }) t.Run("fails when a referenced secret is missing", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "serviceaccount"): - return &MockCommand{OutputData: []byte("reg-creds\nmissing-creds\n")} + return &core.MockCommand{OutputData: []byte("reg-creds\nmissing-creds\n")} case contains(spec.Args, "missing-creds"): - return &MockCommand{OutputErr: errors.New("secrets \"missing-creds\" not found")} + return &core.MockCommand{OutputErr: errors.New("secrets \"missing-creds\" not found")} case contains(spec.Args, "secret"): - return &MockCommand{OutputData: []byte("reg-creds")} + return &core.MockCommand{OutputData: []byte("reg-creds")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServersImagePullSecrets(kubectl, "mcp-servers") if check.OK { t.Fatalf("expected failure when a pull secret is missing; detail=%q", check.Detail) @@ 
-1150,12 +1197,12 @@ func TestCheckMCPServersImagePullSecrets(t *testing.T) { }) t.Run("fails when serviceaccount lookup errors", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{OutputErr: errors.New("serviceaccount default not found")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{OutputErr: errors.New("serviceaccount default not found")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) check := checkMCPServersImagePullSecrets(kubectl, "mcp-servers") if check.OK { t.Fatal("expected failure when serviceaccount lookup fails") diff --git a/internal/cli/cluster/ingress.go b/internal/cli/cluster/ingress.go new file mode 100644 index 0000000..6bf2d17 --- /dev/null +++ b/internal/cli/cluster/ingress.go @@ -0,0 +1,9 @@ +package cluster + +// IngressOptions captures ingress install settings used by both cluster +// configuration and the setup command. +type IngressOptions struct { + Mode string + Manifest string + Force bool +} diff --git a/internal/cli/cluster.go b/internal/cli/cluster/manager.go similarity index 51% rename from internal/cli/cluster.go rename to internal/cli/cluster/manager.go index 754325e..2563914 100644 --- a/internal/cli/cluster.go +++ b/internal/cli/cluster/manager.go @@ -1,7 +1,5 @@ -package cli - -// This file implements the "cluster" command for managing Kubernetes cluster operations. -// It handles cluster initialization, status checks, configuration, provisioning, and certificate management. +// Package cluster implements cluster operations for the cluster CLI command. 
+package cluster import ( "errors" @@ -11,25 +9,22 @@ import ( "strings" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kubeerr" ) const defaultClusterName = "mcp-runtime" -type ingressOptions struct { - mode string - manifest string - force bool -} - // ClusterManager handles cluster operations with injected dependencies. type ClusterManager struct { - kubectl *KubectlClient - exec Executor + kubectl *core.KubectlClient + exec core.Executor logger *zap.Logger } // NewClusterManager creates a ClusterManager with the given dependencies. -func NewClusterManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *ClusterManager { +func NewClusterManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *ClusterManager { return &ClusterManager{ kubectl: kubectl, exec: exec, @@ -39,11 +34,11 @@ func NewClusterManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger // DefaultClusterManager returns a ClusterManager using default clients. func DefaultClusterManager(logger *zap.Logger) *ClusterManager { - return NewClusterManager(kubectlClient, execExecutor, logger) + return NewClusterManager(core.DefaultKubectlClient(), core.DefaultExecutor(), logger) } // KubectlRunner exposes the shared kubectl runner for foldered command routing. -func (m *ClusterManager) KubectlRunner() KubectlRunner { +func (m *ClusterManager) KubectlRunner() core.KubectlRunner { return m.kubectl } @@ -64,36 +59,36 @@ func (m *ClusterManager) InitCluster(kubeconfig, context string) error { m.logger.Info("Installing CRD") // #nosec G204 -- fixed file path from repository. 
if err := m.kubectl.Run([]string{"apply", "--validate=false", "-f", "config/crd/bases/mcpruntime.org_mcpservers.yaml"}); err != nil { - wrappedErr := wrapWithSentinel(ErrInstallCRDFailed, err, fmt.Sprintf("failed to install CRD: %v", err)) - Error("Failed to install CRD") - logStructuredError(m.logger, wrappedErr, "Failed to install CRD") + wrappedErr := core.WrapWithSentinel(core.ErrInstallCRDFailed, err, fmt.Sprintf("failed to install CRD: %v", err)) + core.Error("Failed to install CRD") + core.LogStructuredError(m.logger, wrappedErr, "Failed to install CRD") return wrappedErr } // Create namespace m.logger.Info("Creating mcp-runtime namespace") - if err := m.EnsureNamespace(NamespaceMCPRuntime); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureRuntimeNamespaceFailed, + if err := m.EnsureNamespace(core.NamespaceMCPRuntime); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureRuntimeNamespaceFailed, err, fmt.Sprintf("failed to ensure mcp-runtime namespace: %v", err), - map[string]any{"namespace": NamespaceMCPRuntime, "component": "cluster"}, + map[string]any{"namespace": core.NamespaceMCPRuntime, "component": "cluster"}, ) - Error("Failed to ensure mcp-runtime namespace") - logStructuredError(m.logger, wrappedErr, "Failed to ensure mcp-runtime namespace") + core.Error("Failed to ensure mcp-runtime namespace") + core.LogStructuredError(m.logger, wrappedErr, "Failed to ensure mcp-runtime namespace") return wrappedErr } m.logger.Info("Creating mcp-servers namespace") - if err := m.EnsureNamespace(NamespaceMCPServers); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureServersNamespaceFailed, + if err := m.EnsureNamespace(core.NamespaceMCPServers); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureServersNamespaceFailed, err, fmt.Sprintf("failed to ensure mcp-servers namespace: %v", err), - map[string]any{"namespace": NamespaceMCPServers, "component": "cluster"}, + 
map[string]any{"namespace": core.NamespaceMCPServers, "component": "cluster"}, ) - Error("Failed to ensure mcp-servers namespace") - logStructuredError(m.logger, wrappedErr, "Failed to ensure mcp-servers namespace") + core.Error("Failed to ensure mcp-servers namespace") + core.LogStructuredError(m.logger, wrappedErr, "Failed to ensure mcp-servers namespace") return wrappedErr } @@ -107,8 +102,8 @@ func resolveKubeconfigPath(kubeconfig string) (string, error) { } home, err := os.UserHomeDir() if err != nil { - wrappedErr := wrapWithSentinel(ErrGetHomeDirectoryFailed, err, fmt.Sprintf("failed to get home directory: %v", err)) - Error("Failed to get home directory") + wrappedErr := core.WrapWithSentinel(core.ErrGetHomeDirectoryFailed, err, fmt.Sprintf("failed to get home directory: %v", err)) + core.Error("Failed to get home directory") // Note: No logger available in this helper function return "", wrappedErr } @@ -123,43 +118,43 @@ func (m *ClusterManager) ConfigureKubeconfig(kubeconfig, context string) error { } if _, err := os.Stat(path); err != nil { msg := fmt.Sprintf("kubeconfig %q not found or not readable: %v", path, err) - if hint, handled := clusterSetupHint(err.Error()); handled { + if hint, handled := kubeerr.SetupHint(err.Error()); handled { msg = hint } - wrappedErr := wrapWithSentinelAndContext( - ErrKubeconfigNotReadable, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrKubeconfigNotReadable, err, msg, map[string]any{"kubeconfig": path, "component": "cluster"}, ) - Error("Kubeconfig not readable") - logStructuredError(m.logger, wrappedErr, "Kubeconfig not readable") + core.Error("Kubeconfig not readable") + core.LogStructuredError(m.logger, wrappedErr, "Kubeconfig not readable") return wrappedErr } if err := os.Setenv("KUBECONFIG", path); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrSetKubeconfigFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrSetKubeconfigFailed, err, fmt.Sprintf("failed to set KUBECONFIG: 
%v", err), map[string]any{"kubeconfig": path, "component": "cluster"}, ) - Error("Failed to set KUBECONFIG") - logStructuredError(m.logger, wrappedErr, "Failed to set KUBECONFIG") + core.Error("Failed to set KUBECONFIG") + core.LogStructuredError(m.logger, wrappedErr, "Failed to set KUBECONFIG") return wrappedErr } if context != "" { // #nosec G204 -- context from CLI flag, kubectl validates context names. if err := m.kubectl.Run([]string{"config", "use-context", context}); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrSetContextFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrSetContextFailed, err, fmt.Sprintf("failed to set context: %v", err), map[string]any{"context": context, "component": "cluster"}, ) - Error("Failed to set context") - logStructuredError(m.logger, wrappedErr, "Failed to set context") + core.Error("Failed to set context") + core.LogStructuredError(m.logger, wrappedErr, "Failed to set context") return wrappedErr } } @@ -172,24 +167,24 @@ func (m *ClusterManager) ConfigureKubeconfigFromProvider(provider, region, clust case "eks": return configureEKSKubeconfig(m.exec, region, clusterName, kubeconfig) case "aks": - err := newWithSentinel(ErrAKSKubeconfigNotImplemented, "AKS kubeconfig not yet implemented; planned support (use `az aks get-credentials --name --resource-group `)") - Error("AKS kubeconfig not implemented") - logStructuredError(m.logger, err, "AKS kubeconfig not implemented") + err := core.NewWithSentinel(core.ErrAKSKubeconfigNotImplemented, "AKS kubeconfig not yet implemented; planned support (use `az aks get-credentials --name --resource-group `)") + core.Error("AKS kubeconfig not implemented") + core.LogStructuredError(m.logger, err, "AKS kubeconfig not implemented") return err case "gke": - err := newWithSentinel(ErrGKEKubeconfigNotImplemented, "GKE kubeconfig not yet implemented; planned support (use `gcloud container clusters get-credentials --region --project `)") - Error("GKE kubeconfig not 
implemented") - logStructuredError(m.logger, err, "GKE kubeconfig not implemented") + err := core.NewWithSentinel(core.ErrGKEKubeconfigNotImplemented, "GKE kubeconfig not yet implemented; planned support (use `gcloud container clusters get-credentials --region --project `)") + core.Error("GKE kubeconfig not implemented") + core.LogStructuredError(m.logger, err, "GKE kubeconfig not implemented") return err default: - err := newWithSentinel(ErrUnsupportedProvider, fmt.Sprintf("unsupported provider: %s", provider)) - Error("Unsupported provider") - logStructuredError(m.logger, err, "Unsupported provider") + err := core.NewWithSentinel(core.ErrUnsupportedProvider, fmt.Sprintf("unsupported provider: %s", provider)) + core.Error("Unsupported provider") + core.LogStructuredError(m.logger, err, "Unsupported provider") return err } } -func configureEKSKubeconfig(exec Executor, region, clusterName, kubeconfig string) error { +func configureEKSKubeconfig(exec core.Executor, region, clusterName, kubeconfig string) error { if clusterName == "" { clusterName = defaultClusterName } @@ -203,7 +198,7 @@ func configureEKSKubeconfig(exec Executor, region, clusterName, kubeconfig strin args = append(args, "--kubeconfig", kubeconfig) } // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - cmd, err := exec.Command("aws", args, AllowlistBins("aws"), NoShellMeta(), NoControlChars()) + cmd, err := exec.Command("aws", args, core.AllowlistBins("aws"), core.NoShellMeta(), core.NoControlChars()) if err != nil { return err } @@ -220,51 +215,51 @@ func (m *ClusterManager) CheckClusterStatus() error { // #nosec G204 -- fixed kubectl command. 
output, err := m.kubectl.CombinedOutput([]string{"cluster-info"}) if err != nil { - wrappedErr := wrapWithSentinel(ErrClusterNotAccessible, err, fmt.Sprintf("cluster not accessible: %v", err)) - Error("Cluster not accessible") - logStructuredError(m.logger, wrappedErr, "Cluster not accessible") + wrappedErr := core.WrapWithSentinel(core.ErrClusterNotAccessible, err, fmt.Sprintf("cluster not accessible: %v", err)) + core.Error("Cluster not accessible") + core.LogStructuredError(m.logger, wrappedErr, "Cluster not accessible") return wrappedErr } - DefaultPrinter.Println(string(output)) + core.DefaultPrinter.Println(string(output)) // Check nodes - Section("Nodes") + core.Section("Nodes") // #nosec G204 -- fixed kubectl command. if err := m.kubectl.RunWithOutput([]string{"get", "nodes"}, os.Stdout, os.Stderr); err != nil { - Warn(fmt.Sprintf("Failed to get nodes: %v", err)) + core.Warn(fmt.Sprintf("Failed to get nodes: %v", err)) } // Check CRD - Section("MCP CRD") + core.Section("MCP CRD") // #nosec G204 -- fixed kubectl command. - if err := m.kubectl.RunWithOutput([]string{"get", "crd", MCPServerCRDName}, os.Stdout, os.Stderr); err != nil { - Warn(fmt.Sprintf("Failed to get MCP CRD: %v", err)) + if err := m.kubectl.RunWithOutput([]string{"get", "crd", core.MCPServerCRDName}, os.Stdout, os.Stderr); err != nil { + core.Warn(fmt.Sprintf("Failed to get MCP CRD: %v", err)) } // Check operator - Section("Operator") + core.Section("Operator") // #nosec G204 -- fixed kubectl command with hardcoded namespace. - if err := m.kubectl.RunWithOutput([]string{"get", "pods", "-n", NamespaceMCPRuntime}, os.Stdout, os.Stderr); err != nil { - Warn(fmt.Sprintf("Failed to get operator pods: %v", err)) + if err := m.kubectl.RunWithOutput([]string{"get", "pods", "-n", core.NamespaceMCPRuntime}, os.Stdout, os.Stderr); err != nil { + core.Warn(fmt.Sprintf("Failed to get operator pods: %v", err)) } return nil } // ConfigureCluster configures cluster settings like ingress. 
-func (m *ClusterManager) ConfigureCluster(ingress ingressOptions) error { - m.logger.Info("Configuring cluster", zap.String("ingress", ingress.mode)) +func (m *ClusterManager) ConfigureCluster(opts IngressOptions) error { + m.logger.Info("Configuring cluster", zap.String("ingress", opts.Mode)) - mode := strings.ToLower(ingress.mode) + mode := strings.ToLower(opts.Mode) switch mode { case "none": m.logger.Info("Skipping ingress controller install (ingress=none)") return nil case "traefik": default: - err := newWithSentinel(ErrUnsupportedIngressController, fmt.Sprintf("unsupported ingress controller: %s", ingress.mode)) - Error("Unsupported ingress controller") - logStructuredError(m.logger, err, "Unsupported ingress controller") + err := core.NewWithSentinel(core.ErrUnsupportedIngressController, fmt.Sprintf("unsupported ingress controller: %s", opts.Mode)) + core.Error("Unsupported ingress controller") + core.LogStructuredError(m.logger, err, "Unsupported ingress controller") return err } @@ -275,17 +270,17 @@ func (m *ClusterManager) ConfigureCluster(ingress ingressOptions) error { if err == nil && strings.TrimSpace(string(out)) != "" { hasIngress = true } - if hasIngress && !ingress.force { - m.logger.Info("Ingress controller already present; skipping install", zap.String("ingress", ingress.mode)) + if hasIngress && !opts.Force { + m.logger.Info("Ingress controller already present; skipping install", zap.String("ingress", opts.Mode)) return nil } - manifest := ingress.manifest + manifest := opts.Manifest if manifest == "" { manifest = "config/ingress/overlays/prod" } - m.logger.Info("Installing ingress controller", zap.String("ingress", ingress.mode), zap.String("manifest", manifest)) + m.logger.Info("Installing ingress controller", zap.String("ingress", opts.Mode), zap.String("manifest", manifest)) useKustomize := false manifestArg := manifest @@ -307,53 +302,55 @@ func (m *ClusterManager) ConfigureCluster(ingress ingressOptions) error { // #nosec G204 -- 
manifest path from internal config or CLI flag with file validation. if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrInstallIngressControllerFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrInstallIngressControllerFailed, err, - fmt.Sprintf("failed to install ingress controller (%s): %v", ingress.mode, err), - map[string]any{"ingress_mode": ingress.mode, "manifest": manifest, "component": "cluster"}, + fmt.Sprintf("failed to install ingress controller (%s): %v", opts.Mode, err), + map[string]any{"ingress_mode": opts.Mode, "manifest": manifest, "component": "cluster"}, ) - Error("Failed to install ingress controller") - logStructuredError(m.logger, wrappedErr, "Failed to install ingress controller") + core.Error("Failed to install ingress controller") + core.LogStructuredError(m.logger, wrappedErr, "Failed to install ingress controller") return wrappedErr } - m.logger.Info("Ingress controller installed successfully", zap.String("ingress", ingress.mode)) + m.logger.Info("Ingress controller installed successfully", zap.String("ingress", opts.Mode)) m.logger.Info("Cluster configuration complete") return nil } // ConfigureClusterWithValues adapts exported flag values into the internal ingress options shape. func (m *ClusterManager) ConfigureClusterWithValues(mode, manifest string, force bool) error { - return m.ConfigureCluster(ingressOptions{ - mode: mode, - manifest: manifest, - force: force, + return m.ConfigureCluster(IngressOptions{ + Mode: mode, + Manifest: manifest, + Force: force, }) } -// ProvisionCluster provisions a new Kubernetes cluster. -func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string) error { - m.logger.Info("Provisioning cluster", zap.String("provider", provider), zap.String("region", region), zap.String("name", clusterName)) +// ProvisionCluster provisions a new Kubernetes cluster. 
When dryRun is true, +// it prints the configuration and command that would run without creating +// any cluster or calling out to cloud APIs. +func (m *ClusterManager) ProvisionCluster(provider, region string, nodeCount int, clusterName string, dryRun bool) error { + m.logger.Info("Provisioning cluster", zap.String("provider", provider), zap.String("region", region), zap.String("name", clusterName), zap.Bool("dry_run", dryRun)) switch provider { case "kind": - return m.provisionKindCluster(nodeCount, clusterName) + return m.provisionKindCluster(nodeCount, clusterName, dryRun) case "gke": - return provisionGKECluster(m.logger, region, nodeCount, clusterName) + return provisionGKECluster(m.logger, region, nodeCount, clusterName, dryRun) case "eks": - return provisionEKSCluster(m.logger, m.exec, region, nodeCount, clusterName) + return provisionEKSCluster(m.logger, m.exec, region, nodeCount, clusterName, dryRun) case "aks": - return provisionAKSCluster(m.logger, region, nodeCount, clusterName) + return provisionAKSCluster(m.logger, region, nodeCount, clusterName, dryRun) default: - err := newWithSentinel(ErrUnsupportedProvider, fmt.Sprintf("unsupported provider: %s", provider)) - Error("Unsupported provider") - logStructuredError(m.logger, err, "Unsupported provider") + err := core.NewWithSentinel(core.ErrUnsupportedProvider, fmt.Sprintf("unsupported provider: %s", provider)) + core.Error("Unsupported provider") + core.LogStructuredError(m.logger, err, "Unsupported provider") return err } } -func (m *ClusterManager) provisionKindCluster(nodeCount int, name string) error { +func (m *ClusterManager) provisionKindCluster(nodeCount int, name string, dryRun bool) error { m.logger.Info("Provisioning Kind cluster") clusterName := name @@ -370,31 +367,39 @@ nodes: config += "- role: worker\n" } + if dryRun { + core.Info(fmt.Sprintf("[dry-run] would write kind config and run: kind create cluster --name %s --config ", clusterName)) + core.Info("[dry-run] kind config that would 
be written:") + fmt.Print(config) + core.Success("Dry-run complete; no cluster created") + return nil + } + // Write config to temp file tmp, err := os.CreateTemp("", "mcp-kind-config-*.yaml") if err != nil { - wrappedErr := wrapWithSentinel(ErrCreateKindConfigFailed, err, fmt.Sprintf("failed to create temp kind config: %v", err)) - Error("Failed to create kind config") - logStructuredError(m.logger, wrappedErr, "Failed to create kind config") + wrappedErr := core.WrapWithSentinel(core.ErrCreateKindConfigFailed, err, fmt.Sprintf("failed to create temp kind config: %v", err)) + core.Error("Failed to create kind config") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create kind config") return wrappedErr } defer os.Remove(tmp.Name()) if _, err := tmp.WriteString(config); err != nil { if closeErr := tmp.Close(); closeErr != nil { - wrappedErr := wrapWithSentinel(ErrCloseKindConfigFailed, errors.Join(err, closeErr), fmt.Sprintf("failed to close kind config after write error: %v", closeErr)) - Error("Failed to close kind config") - logStructuredError(m.logger, wrappedErr, "Failed to close kind config") + wrappedErr := core.WrapWithSentinel(core.ErrCloseKindConfigFailed, errors.Join(err, closeErr), fmt.Sprintf("failed to close kind config after write error: %v", closeErr)) + core.Error("Failed to close kind config") + core.LogStructuredError(m.logger, wrappedErr, "Failed to close kind config") return wrappedErr } - wrappedErr := wrapWithSentinel(ErrWriteKindConfigFailed, err, fmt.Sprintf("failed to write kind config: %v", err)) - Error("Failed to write kind config") - logStructuredError(m.logger, wrappedErr, "Failed to write kind config") + wrappedErr := core.WrapWithSentinel(core.ErrWriteKindConfigFailed, err, fmt.Sprintf("failed to write kind config: %v", err)) + core.Error("Failed to write kind config") + core.LogStructuredError(m.logger, wrappedErr, "Failed to write kind config") return wrappedErr } if err := tmp.Close(); err != nil { - wrappedErr := 
wrapWithSentinel(ErrCloseKindConfigFailed, err, fmt.Sprintf("failed to close kind config: %v", err)) - Error("Failed to close kind config") - logStructuredError(m.logger, wrappedErr, "Failed to close kind config") + wrappedErr := core.WrapWithSentinel(core.ErrCloseKindConfigFailed, err, fmt.Sprintf("failed to close kind config: %v", err)) + core.Error("Failed to close kind config") + core.LogStructuredError(m.logger, wrappedErr, "Failed to close kind config") return wrappedErr } @@ -407,14 +412,14 @@ nodes: cmd.SetStderr(os.Stderr) if err := cmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateKindClusterFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateKindClusterFailed, err, fmt.Sprintf("failed to create kind cluster: %v", err), map[string]any{"cluster_name": clusterName, "node_count": nodeCount, "component": "cluster"}, ) - Error("Failed to create kind cluster") - logStructuredError(m.logger, wrappedErr, "Failed to create kind cluster") + core.Error("Failed to create kind cluster") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create kind cluster") return wrappedErr } @@ -422,17 +427,22 @@ nodes: return nil } -func provisionGKECluster(logger *zap.Logger, region string, nodeCount int, clusterName string) error { +func provisionGKECluster(logger *zap.Logger, region string, nodeCount int, clusterName string, dryRun bool) error { if clusterName == "" { clusterName = defaultClusterName } - err := newWithSentinel(ErrGKEProvisioningNotImplemented, fmt.Sprintf("GKE provisioning not yet implemented; create the cluster with gcloud, e.g. 
`gcloud container clusters create %s --region %s --num-nodes %d`", clusterName, region, nodeCount)) - Error("GKE provisioning not implemented") - logStructuredError(logger, err, "GKE provisioning not implemented") + if dryRun { + core.Info(fmt.Sprintf("[dry-run] would run: gcloud container clusters create %s --region %s --num-nodes %d", clusterName, region, nodeCount)) + core.Success("Dry-run complete; no GKE call made") + return nil + } + err := core.NewWithSentinel(core.ErrGKEProvisioningNotImplemented, fmt.Sprintf("GKE provisioning not yet implemented; create the cluster with gcloud, e.g. `gcloud container clusters create %s --region %s --num-nodes %d`", clusterName, region, nodeCount)) + core.Error("GKE provisioning not implemented") + core.LogStructuredError(logger, err, "GKE provisioning not implemented") return err } -func provisionEKSCluster(logger *zap.Logger, exec Executor, region string, nodeCount int, clusterName string) error { +func provisionEKSCluster(logger *zap.Logger, exec core.Executor, region string, nodeCount int, clusterName string, dryRun bool) error { if clusterName == "" { clusterName = defaultClusterName } @@ -444,8 +454,13 @@ func provisionEKSCluster(logger *zap.Logger, exec Executor, region string, nodeC "--region", region, "--nodes", fmt.Sprintf("%d", nodeCount), } + if dryRun { + core.Info(fmt.Sprintf("[dry-run] would run: eksctl %s", strings.Join(args, " "))) + core.Success("Dry-run complete; no EKS call made") + return nil + } // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
- cmd, err := exec.Command("eksctl", args, AllowlistBins("eksctl"), NoShellMeta(), NoControlChars()) + cmd, err := exec.Command("eksctl", args, core.AllowlistBins("eksctl"), core.NoShellMeta(), core.NoControlChars()) if err != nil { return err } @@ -454,27 +469,32 @@ func provisionEKSCluster(logger *zap.Logger, exec Executor, region string, nodeC logger.Info("Provisioning EKS cluster with eksctl", zap.String("name", clusterName), zap.String("region", region), zap.Int("nodes", nodeCount)) if err := cmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrProvisionEKSFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrProvisionEKSFailed, err, fmt.Sprintf("failed to provision EKS cluster: %v", err), map[string]any{"cluster_name": clusterName, "region": region, "node_count": nodeCount, "component": "cluster"}, ) - Error("Failed to provision EKS cluster") - logStructuredError(logger, wrappedErr, "Failed to provision EKS cluster") + core.Error("Failed to provision EKS cluster") + core.LogStructuredError(logger, wrappedErr, "Failed to provision EKS cluster") return wrappedErr } logger.Info("EKS cluster provisioned successfully", zap.String("name", clusterName)) return nil } -func provisionAKSCluster(logger *zap.Logger, region string, nodeCount int, clusterName string) error { +func provisionAKSCluster(logger *zap.Logger, region string, nodeCount int, clusterName string, dryRun bool) error { if clusterName == "" { clusterName = defaultClusterName } - err := newWithSentinel(ErrAKSProvisioningNotImplemented, fmt.Sprintf("AKS provisioning not yet implemented; create the cluster with az, e.g. 
`az aks create --name %s --resource-group --location %s --node-count %d`", clusterName, region, nodeCount)) - Error("AKS provisioning not implemented") - logStructuredError(logger, err, "AKS provisioning not implemented") + if dryRun { + core.Info(fmt.Sprintf("[dry-run] would run: az aks create --name %s --resource-group --location %s --node-count %d", clusterName, region, nodeCount)) + core.Success("Dry-run complete; no AKS call made") + return nil + } + err := core.NewWithSentinel(core.ErrAKSProvisioningNotImplemented, fmt.Sprintf("AKS provisioning not yet implemented; create the cluster with az, e.g. `az aks create --name %s --resource-group --location %s --node-count %d`", clusterName, region, nodeCount)) + core.Error("AKS provisioning not implemented") + core.LogStructuredError(logger, err, "AKS provisioning not implemented") return err } @@ -495,10 +515,3 @@ metadata: cmd.SetStderr(os.Stderr) return cmd.Run() } - -// ensureNamespace is a package-level helper that uses the default kubectl client. -// Used by other modules that don't have a ClusterManager instance. -func ensureNamespace(name string) error { - mgr := DefaultClusterManager(zap.NewNop()) - return mgr.EnsureNamespace(name) -} diff --git a/internal/cli/client.go b/internal/cli/core/client.go similarity index 82% rename from internal/cli/client.go rename to internal/cli/core/client.go index a6c9cdf..978be62 100644 --- a/internal/cli/client.go +++ b/internal/cli/core/client.go @@ -1,4 +1,4 @@ -package cli +package core // This file implements KubectlClient, a wrapper around kubectl command execution. // It provides validation, security checks, and a clean interface for running kubectl commands. @@ -81,6 +81,16 @@ func DefaultKubectlRunner() KubectlRunner { return kubectlClient } +// DefaultKubectlClient returns the shared kubectl client used by CLI commands. 
+func DefaultKubectlClient() *KubectlClient { + return kubectlClient +} + +// DefaultExecutor returns the shared process executor used by CLI commands. +func DefaultExecutor() Executor { + return execExecutor +} + func mustNewKubectlClient() *KubectlClient { client, err := NewKubectlClient(execExecutor) if err != nil { @@ -88,3 +98,10 @@ func mustNewKubectlClient() *KubectlClient { } return client } + +// SwapDefaultKubectlClient replaces the shared kubectl client (tests only). +func SwapDefaultKubectlClient(c *KubectlClient) (restore func()) { + prev := kubectlClient + kubectlClient = c + return func() { kubectlClient = prev } +} diff --git a/internal/cli/config.go b/internal/cli/core/config.go similarity index 97% rename from internal/cli/config.go rename to internal/cli/core/config.go index 8a625ca..712e263 100644 --- a/internal/cli/config.go +++ b/internal/cli/core/config.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines CLI configuration loading from environment variables. // CLIConfig holds all CLI settings including timeouts, registry settings, and server defaults. @@ -56,6 +56,9 @@ const ( defaultRegistryPort = 5000 defaultRegistryEndpoint = "registry.local" // used by build paths; same default as metadata.DefaultRegistryHost defaultRegistryIngressHost = "registry.local" + // Exported aliases for tests and subpackages (same values as above). 
+ DefaultRegistryEndpoint = defaultRegistryEndpoint + DefaultRegistryIngressHost = defaultRegistryIngressHost defaultSkopeoImage = "quay.io/skopeo/stable:v1.14" defaultServerPort = 8088 ) diff --git a/internal/cli/setup_test.go b/internal/cli/core/config_env_test.go similarity index 81% rename from internal/cli/setup_test.go rename to internal/cli/core/config_env_test.go index 704cd5d..d8f1090 100644 --- a/internal/cli/setup_test.go +++ b/internal/cli/core/config_env_test.go @@ -1,7 +1,6 @@ -package cli +package core import ( - "errors" "os" "testing" "time" @@ -166,42 +165,3 @@ func assertCLIConfig(t *testing.T, cfg CLIConfig, want cliConfigExpectation) { t.Errorf("DefaultServerPort = %v, want %v", cfg.DefaultServerPort, want.defaultServerPort) } } - -func TestValidateTLSSetupCLIFlags(t *testing.T) { - t.Parallel() - cases := []struct { - name string - tls bool - acme, tlsCI string - staging bool - skipCM bool - wantErr bool - wantIsField bool - }{ - {"ok disabled", false, "", "", false, false, false, false}, - {"ok with-tls acme", true, "a@b.com", "", false, false, false, false}, - {"mutual exclusivity", false, "a@b.com", "issuer", false, false, true, true}, - {"acme without with-tls", false, "a@b.com", "", false, false, true, true}, - {"tls-cluster-issuer without with-tls", false, "", "issuer", false, false, true, true}, - {"staging without with-tls", false, "", "", true, false, true, true}, - {"skip-cm without with-tls", false, "", "", false, true, true, true}, - {"with-tls staging no email", true, "", "", true, true, false, false}, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - err := validateTLSSetupCLIFlags(tc.tls, tc.acme, tc.tlsCI, tc.staging, tc.skipCM) - if tc.wantErr { - if err == nil { - t.Fatal("expected error") - } - if tc.wantIsField && !errors.Is(err, ErrFieldRequired) { - t.Fatalf("expected ErrFieldRequired, got %v", err) - } - } else { - if err != nil { - t.Fatalf("unexpected: %v", err) - } - } - }) - } -} diff --git 
a/internal/cli/config_test.go b/internal/cli/core/config_test.go similarity index 88% rename from internal/cli/config_test.go rename to internal/cli/core/config_test.go index 4badefd..8cd3c59 100644 --- a/internal/cli/config_test.go +++ b/internal/cli/core/config_test.go @@ -1,4 +1,4 @@ -package cli +package core import ( "testing" @@ -192,21 +192,3 @@ func TestConfigAccessors(t *testing.T) { t.Fatalf("GetDefaultServerPort mismatch") } } - -func TestApplySetupPlanToCLIConfig_TLSClusterIssuer(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{RegistryClusterIssuerName: "unset"} - applySetupPlanToCLIConfig(SetupPlan{TLSEnabled: true, TLSClusterIssuer: "internal-ca", ACMEmail: ""}) - if GetRegistryClusterIssuerName() != "internal-ca" { - t.Fatalf("expected custom issuer, got %q", GetRegistryClusterIssuerName()) - } - applySetupPlanToCLIConfig(SetupPlan{TLSEnabled: true, TLSClusterIssuer: "ignored", ACMEmail: "ops@mcpruntime.com"}) - if want := ClusterIssuerNameForACME(false); GetRegistryClusterIssuerName() != want { - t.Fatalf("expected ACME issuer to take precedence, got %q", GetRegistryClusterIssuerName()) - } - applySetupPlanToCLIConfig(SetupPlan{TLSEnabled: false}) - if GetRegistryClusterIssuerName() != "" { - t.Fatalf("expected cleared when TLS off, got %q", GetRegistryClusterIssuerName()) - } -} diff --git a/internal/cli/constants.go b/internal/cli/core/constants.go similarity index 93% rename from internal/cli/constants.go rename to internal/cli/core/constants.go index 4d4f896..8d6ab4f 100644 --- a/internal/cli/constants.go +++ b/internal/cli/core/constants.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines constants used across the CLI, including: // - Kubernetes namespace names @@ -14,6 +14,9 @@ const ( // NamespaceMCPServers is the default namespace for MCP server deployments. 
NamespaceMCPServers = "mcp-servers" + + // DefaultAnalyticsNamespace is the namespace for the bundled mcp-sentinel stack. + DefaultAnalyticsNamespace = "mcp-sentinel" ) // Deployment and resource names. diff --git a/internal/cli/core/doc.go b/internal/cli/core/doc.go new file mode 100644 index 0000000..cebeeaa --- /dev/null +++ b/internal/cli/core/doc.go @@ -0,0 +1,6 @@ +// Package cli contains shared CLI infrastructure used by command packages. +// +// Command-specific behavior belongs in internal/cli/; this package is +// limited to config, constants, errors, runtime composition, process execution, +// kubectl clients, terminal output, and test doubles. +package core diff --git a/internal/cli/errors.go b/internal/cli/core/errors.go similarity index 99% rename from internal/cli/errors.go rename to internal/cli/core/errors.go index dcddd1e..017f194 100644 --- a/internal/cli/errors.go +++ b/internal/cli/core/errors.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines error handling utilities for the CLI, including: // - Sentinel errors for different error categories (CLI, Cluster, Registry, etc.) diff --git a/internal/cli/exec.go b/internal/cli/core/exec.go similarity index 82% rename from internal/cli/exec.go rename to internal/cli/core/exec.go index 452f4fa..a0f331f 100644 --- a/internal/cli/exec.go +++ b/internal/cli/core/exec.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines interfaces and implementations for command execution. // It provides abstractions for running shell commands with validation and testability. @@ -14,6 +14,13 @@ import ( // execCommand is a test seam for stubbing command creation in tests. var execCommand = exec.Command +// SwapExecCommand replaces the exec.Command seam used by the default executor (tests only). 
+func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func()) { + prev := execCommand + execCommand = f + return func() { execCommand = prev } +} + // Command represents a command that can be executed. type Command interface { Output() ([]byte, error) @@ -67,6 +74,18 @@ func execCommandWithValidators(name string, args []string, validators ...ExecVal return execExecutor.Command(name, args, validators...) } +// ExecCommandWithValidators runs the named binary with args after validators pass. +func ExecCommandWithValidators(name string, args []string, validators ...ExecValidator) (Command, error) { + return execCommandWithValidators(name, args, validators...) +} + +// SwapExecExecutor replaces the global process executor (tests only). +func SwapExecExecutor(e Executor) (restore func()) { + prev := execExecutor + execExecutor = e + return func() { execExecutor = prev } +} + func AllowlistBins(allowed ...string) ExecValidator { set := make(map[string]struct{}, len(allowed)) for _, name := range allowed { diff --git a/internal/cli/exec_test.go b/internal/cli/core/exec_test.go similarity index 99% rename from internal/cli/exec_test.go rename to internal/cli/core/exec_test.go index 9a3ac39..8797add 100644 --- a/internal/cli/exec_test.go +++ b/internal/cli/core/exec_test.go @@ -1,4 +1,4 @@ -package cli +package core import ( "os" diff --git a/internal/cli/kubectl_runner.go b/internal/cli/core/kubectl_runner.go similarity index 96% rename from internal/cli/kubectl_runner.go rename to internal/cli/core/kubectl_runner.go index cb9261c..98d8ee9 100644 --- a/internal/cli/kubectl_runner.go +++ b/internal/cli/core/kubectl_runner.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines the KubectlRunner interface for kubectl operations. // This interface is used by setup helpers to abstract kubectl command execution. 
diff --git a/internal/cli/main_test.go b/internal/cli/core/main_test.go similarity index 98% rename from internal/cli/main_test.go rename to internal/cli/core/main_test.go index 7fa4cdd..d886650 100644 --- a/internal/cli/main_test.go +++ b/internal/cli/core/main_test.go @@ -1,4 +1,4 @@ -package cli +package core import ( "fmt" diff --git a/internal/cli/printer.go b/internal/cli/core/printer.go similarity index 99% rename from internal/cli/printer.go rename to internal/cli/core/printer.go index 5f7d7c0..522d4a1 100644 --- a/internal/cli/printer.go +++ b/internal/cli/core/printer.go @@ -1,4 +1,4 @@ -package cli +package core // This file defines the Printer type and its implementation for formatted terminal output. // All terminal formatting is centralized here to abstract the underlying library (pterm). diff --git a/internal/cli/printer_test.go b/internal/cli/core/printer_test.go similarity index 99% rename from internal/cli/printer_test.go rename to internal/cli/core/printer_test.go index 1582340..da37fd9 100644 --- a/internal/cli/printer_test.go +++ b/internal/cli/core/printer_test.go @@ -1,4 +1,4 @@ -package cli +package core import ( "bytes" diff --git a/internal/cli/core/runtime.go b/internal/cli/core/runtime.go new file mode 100644 index 0000000..569f7b1 --- /dev/null +++ b/internal/cli/core/runtime.go @@ -0,0 +1,54 @@ +package core + +import "go.uber.org/zap" + +// Runtime is the shared CLI facade for wiring common dependencies once and +// handing typed managers to the foldered command packages. +type Runtime struct { + logger *zap.Logger + config *CLIConfig + kubectl *KubectlClient + executor Executor + printer *Printer +} + +// NewRuntime builds the shared CLI runtime facade. +func NewRuntime(logger *zap.Logger) *Runtime { + return &Runtime{ + logger: logger, + config: DefaultCLIConfig, + kubectl: kubectlClient, + executor: execExecutor, + printer: DefaultPrinter, + } +} + +// Logger returns the shared logger. 
+func (r *Runtime) Logger() *zap.Logger { + return r.logger +} + +// Config returns the loaded CLI configuration. +func (r *Runtime) Config() *CLIConfig { + return r.config +} + +// KubectlRunner returns the shared kubectl runner. +func (r *Runtime) KubectlRunner() KubectlRunner { + return r.kubectl +} + +// KubectlClient returns the shared kubectl client. +func (r *Runtime) KubectlClient() *KubectlClient { + return r.kubectl +} + +// Executor returns the shared process executor. +func (r *Runtime) Executor() Executor { + return r.executor +} + +// Printer returns the shared terminal printer. +func (r *Runtime) Printer() *Printer { + return r.printer +} diff --git a/internal/cli/core/test_helpers_test.go b/internal/cli/core/test_helpers_test.go new file mode 100644 index 0000000..fc682c5 --- /dev/null +++ b/internal/cli/core/test_helpers_test.go @@ -0,0 +1,15 @@ +package core + +import ( + "bytes" + "testing" +) + +func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { + t.Helper() + orig := DefaultPrinter.Writer + DefaultPrinter.Writer = w + t.Cleanup(func() { + DefaultPrinter.Writer = orig + }) +} diff --git a/internal/cli/testing.go b/internal/cli/core/testing.go similarity index 82% rename from internal/cli/testing.go rename to internal/cli/core/testing.go index aca1da7..b6ac455 100644 --- a/internal/cli/testing.go +++ b/internal/cli/core/testing.go @@ -1,4 +1,4 @@ -package cli +package core // This file provides test doubles (mocks) for testing CLI functionality. // It includes MockCommand and MockExecutor for testing command execution. @@ -87,3 +87,14 @@ func (m *MockExecutor) HasCommand(name string) bool { } return false } + +// NewTestKubectlClient returns a KubectlClient for tests (no path validators). +func NewTestKubectlClient(exec Executor) *KubectlClient { + return &KubectlClient{exec: exec, validators: nil} +} + +// NewTestKubectlClientWithValidators returns a KubectlClient for tests using the +// given validator list (or nil for none). 
+func NewTestKubectlClientWithValidators(exec Executor, validators []ExecValidator) *KubectlClient { + return &KubectlClient{exec: exec, validators: validators} +} diff --git a/internal/cli/core/validation.go b/internal/cli/core/validation.go new file mode 100644 index 0000000..35c60e8 --- /dev/null +++ b/internal/cli/core/validation.go @@ -0,0 +1,41 @@ +package core + +import ( + "fmt" + "regexp" + "strings" +) + +// ValidK8sName matches Kubernetes resource name requirements (RFC 1123 subdomain). +var ValidK8sName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) + +// ValidateManifestField rejects control characters, requires non-empty after +// trimming, and returns the trimmed value. +func ValidateManifestField(field, value string) (string, error) { + if strings.ContainsAny(value, "\r\n\t") { + return "", NewWithSentinel(ErrControlCharsNotAllowed, fmt.Sprintf("%s must not contain control characters", field)) + } + value = strings.TrimSpace(value) + if value == "" { + return "", NewWithSentinel(ErrFieldRequired, fmt.Sprintf("%s is required", field)) + } + return value, nil +} + +// ValidateK8sNameAndNamespace validates a name+namespace pair against RFC-1123 +// subdomain rules plus ValidateManifestField. nameLabel customizes the +// invalid-name error message ("server name", "resource name"); nameSentinel +// (may be nil) selects the sentinel error category. 
+func ValidateK8sNameAndNamespace(nameLabel string, nameSentinel error, name, namespace string) (string, string, error) { + if !ValidK8sName.MatchString(name) { + return "", "", NewWithSentinel(nameSentinel, fmt.Sprintf("invalid %s %q: must be lowercase alphanumeric with optional hyphens", nameLabel, name)) + } + var err error + if name, err = ValidateManifestField("name", name); err != nil { + return "", "", err + } + if namespace, err = ValidateManifestField("namespace", namespace); err != nil { + return "", "", err + } + return name, namespace, nil +} diff --git a/internal/cli/core/validation_test.go b/internal/cli/core/validation_test.go new file mode 100644 index 0000000..6448f22 --- /dev/null +++ b/internal/cli/core/validation_test.go @@ -0,0 +1,65 @@ +package core + +import ( + "errors" + "testing" +) + +func TestValidateManifestField(t *testing.T) { + t.Run("trims whitespace", func(t *testing.T) { + got, err := ValidateManifestField("field", " value ") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != "value" { + t.Fatalf("expected trimmed value, got %q", got) + } + }) + + t.Run("rejects empty value", func(t *testing.T) { + _, err := ValidateManifestField("field", " ") + if err == nil || !errors.Is(err, ErrFieldRequired) { + t.Fatalf("expected ErrFieldRequired, got %v", err) + } + }) + + t.Run("rejects control characters", func(t *testing.T) { + _, err := ValidateManifestField("field", "bad\t") + if err == nil || !errors.Is(err, ErrControlCharsNotAllowed) { + t.Fatalf("expected ErrControlCharsNotAllowed, got %v", err) + } + }) +} + +func TestValidateK8sNameAndNamespace(t *testing.T) { + t.Run("returns sanitized values for valid input", func(t *testing.T) { + name, ns, err := ValidateK8sNameAndNamespace("server name", ErrInvalidServerName, "my-server", "test-ns") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if name != "my-server" || ns != "test-ns" { + t.Fatalf("unexpected values: name=%q namespace=%q", name, ns) + 
} + }) + + t.Run("rejects invalid name with sentinel", func(t *testing.T) { + _, _, err := ValidateK8sNameAndNamespace("server name", ErrInvalidServerName, "BadName", "test-ns") + if err == nil || !errors.Is(err, ErrInvalidServerName) { + t.Fatalf("expected ErrInvalidServerName, got %v", err) + } + }) + + t.Run("rejects invalid namespace", func(t *testing.T) { + _, _, err := ValidateK8sNameAndNamespace("server name", ErrInvalidServerName, "my-server", "bad\tns") + if err == nil { + t.Fatal("expected error for invalid namespace") + } + }) + + t.Run("accepts nil sentinel", func(t *testing.T) { + _, _, err := ValidateK8sNameAndNamespace("resource name", nil, "BadName", "ns") + if err == nil { + t.Fatal("expected error for invalid name even with nil sentinel") + } + }) +} diff --git a/internal/cli/kube/file.go b/internal/cli/kube/file.go new file mode 100644 index 0000000..c8f2716 --- /dev/null +++ b/internal/cli/kube/file.go @@ -0,0 +1,48 @@ +package kube + +import ( + "fmt" + "io" + "os" + "path/filepath" +) + +// WriteOutputFile writes data to a path under a resolved parent directory with +// 0600 file permissions and 0750 (or tighter) directory permissions. 
+func WriteOutputFile(file string, data []byte) error { + absPath, err := filepath.Abs(file) + if err != nil { + return fmt.Errorf("resolve output path: %w", err) + } + dir := filepath.Dir(absPath) + if err := os.MkdirAll(dir, 0o750); err != nil { + return fmt.Errorf("create output directory: %w", err) + } + root, err := os.OpenRoot(dir) + if err != nil { + return fmt.Errorf("open output directory: %w", err) + } + defer root.Close() + + f, err := root.OpenFile(filepath.Base(absPath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("write output file: %w", err) + } + + n, err := f.Write(data) + if err != nil { + _ = f.Close() + return fmt.Errorf("write output file: %w", err) + } + if n != len(data) { + _ = f.Close() + return fmt.Errorf("write output file: %w", io.ErrShortWrite) + } + if err := f.Close(); err != nil { + return fmt.Errorf("write output file: %w", err) + } + if err := os.Chmod(absPath, 0o600); err != nil { + return fmt.Errorf("write output file: %w", err) + } + return nil +} diff --git a/internal/cli/kube/file_test.go b/internal/cli/kube/file_test.go new file mode 100644 index 0000000..7b6cf7e --- /dev/null +++ b/internal/cli/kube/file_test.go @@ -0,0 +1,55 @@ +package kube + +import ( + "os" + "path/filepath" + "testing" +) + +func TestWriteOutputFileUsesRestrictedDirectoryPermissions(t *testing.T) { + target := filepath.Join(t.TempDir(), "nested", "exported", "server.yaml") + if err := WriteOutputFile(target, []byte("kind: Namespace\n")); err != nil { + t.Fatalf("WriteOutputFile() error = %v", err) + } + + data, err := os.ReadFile(target) + if err != nil { + t.Fatalf("ReadFile() error = %v", err) + } + if string(data) != "kind: Namespace\n" { + t.Fatalf("WriteOutputFile() content = %q", string(data)) + } + + info, err := os.Stat(filepath.Dir(target)) + if err != nil { + t.Fatalf("Stat() error = %v", err) + } + if perms := info.Mode().Perm(); perms&0o027 != 0 { + t.Fatalf("directory permissions = %o, want 0750 or 
less", perms) + } +} + +func TestWriteOutputFileTightensExistingFilePermissions(t *testing.T) { + target := filepath.Join(t.TempDir(), "exported", "server.yaml") + if err := os.MkdirAll(filepath.Dir(target), 0o750); err != nil { + t.Fatalf("MkdirAll() error = %v", err) + } + if err := os.WriteFile(target, []byte("kind: Secret\n"), 0o644); err != nil { + t.Fatalf("WriteFile() error = %v", err) + } + if err := os.Chmod(target, 0o644); err != nil { + t.Fatalf("Chmod() error = %v", err) + } + + if err := WriteOutputFile(target, []byte("kind: Namespace\n")); err != nil { + t.Fatalf("WriteOutputFile() error = %v", err) + } + + info, err := os.Stat(target) + if err != nil { + t.Fatalf("Stat() error = %v", err) + } + if perms := info.Mode().Perm(); perms != 0o600 { + t.Fatalf("file permissions = %o, want 0600", perms) + } +} diff --git a/internal/cli/kube/manifest.go b/internal/cli/kube/manifest.go new file mode 100644 index 0000000..8e317c2 --- /dev/null +++ b/internal/cli/kube/manifest.go @@ -0,0 +1,128 @@ +// Package kube contains shared kubectl-oriented helpers for CLI commands. +package kube + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +// Command is the minimal command shape needed for stdin-based kubectl apply. +type Command interface { + SetStdin(io.Reader) + SetStdout(io.Writer) + SetStderr(io.Writer) + Run() error +} + +// ResolveRegularFilePath resolves a path and rejects directories. +func ResolveRegularFilePath(file string) (string, error) { + absPath, err := filepath.Abs(file) + if err != nil { + return "", fmt.Errorf("invalid file path: %w", err) + } + + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("cannot access file %q: %w", file, err) + } + if info.IsDir() { + return "", fmt.Errorf("path %q is a directory, not a file", file) + } + + return absPath, nil +} + +// ReadFileAtPath reads a regular file without following symlink escapes outside its parent directory. 
+func ReadFileAtPath(path string) ([]byte, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("resolve file path: %w", err) + } + + root, err := os.OpenRoot(filepath.Dir(absPath)) + if err != nil { + return nil, err + } + defer root.Close() + + base := filepath.Base(absPath) + info, err := root.Stat(base) + if err != nil { + return nil, err + } + if !info.Mode().IsRegular() { + return nil, fmt.Errorf("read file %q: not a regular file", path) + } + + file, err := root.Open(base) + if err != nil { + return nil, err + } + defer file.Close() + + return io.ReadAll(file) +} + +// ApplyManifestFromFile applies a manifest file using kubectl. +func ApplyManifestFromFile[T Command](commandArgs func([]string) (T, error), file string, stdout, stderr io.Writer) error { + absPath, err := ResolveRegularFilePath(file) + if err != nil { + return err + } + + manifestBytes, err := ReadFileAtPath(absPath) + if err != nil { + return err + } + + applyCmd, err := commandArgs([]string{"apply", "-f", "-"}) + if err != nil { + return err + } + applyCmd.SetStdin(bytes.NewReader(manifestBytes)) + applyCmd.SetStdout(stdout) + applyCmd.SetStderr(stderr) + return applyCmd.Run() +} + +// ApplyManifestContent applies manifest YAML from a string via kubectl stdin. +func ApplyManifestContent[T Command](commandArgs func([]string) (T, error), manifest string) error { + return ApplyManifestContentWithNamespace(commandArgs, manifest, "") +} + +// ApplyManifestContentWithNamespace applies manifest YAML from stdin, optionally scoped to a namespace. 
+func ApplyManifestContentWithNamespace[T Command](commandArgs func([]string) (T, error), manifest, namespace string) error { + args := []string{"apply", "-f", "-"} + if strings.TrimSpace(namespace) != "" { + args = append(args, "-n", namespace) + } + applyCmd, err := commandArgs(args) + if err != nil { + return err + } + applyCmd.SetStdin(strings.NewReader(manifest)) + applyCmd.SetStdout(os.Stdout) + applyCmd.SetStderr(os.Stderr) + return applyCmd.Run() +} + +// EnsureNamespace applies/creates a namespace idempotently. +func EnsureNamespace[T Command](commandArgs func([]string) (T, error), name string) error { + nsYAML := fmt.Sprintf(`apiVersion: v1 +kind: Namespace +metadata: + name: %s +`, name) + cmd, err := commandArgs([]string{"apply", "--validate=false", "-f", "-"}) + if err != nil { + return err + } + cmd.SetStdin(strings.NewReader(nsYAML)) + cmd.SetStdout(os.Stdout) + cmd.SetStderr(os.Stderr) + return cmd.Run() +} diff --git a/internal/cli/kube/manifest_test.go b/internal/cli/kube/manifest_test.go new file mode 100644 index 0000000..b5399eb --- /dev/null +++ b/internal/cli/kube/manifest_test.go @@ -0,0 +1,129 @@ +package kube + +import ( + "io" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" +) + +type applyCommand struct { + stdin io.Reader + stdout io.Writer + stderr io.Writer + runErr error +} + +func (c *applyCommand) SetStdin(r io.Reader) { c.stdin = r } +func (c *applyCommand) SetStdout(w io.Writer) { c.stdout = w } +func (c *applyCommand) SetStderr(w io.Writer) { c.stderr = w } +func (c *applyCommand) Run() error { return c.runErr } + +type applyRunner struct { + args []string + cmd *applyCommand + err error +} + +func (r *applyRunner) CommandArgs(args []string) (Command, error) { + r.args = args + if r.err != nil { + return nil, r.err + } + r.cmd = &applyCommand{} + return r.cmd, nil +} + +func TestApplyManifestFromFile(t *testing.T) { + tmpFile, err := os.CreateTemp("", "manifest-*.yaml") + if err != nil { + 
t.Fatalf("CreateTemp() error = %v", err) + } + t.Cleanup(func() { _ = os.Remove(tmpFile.Name()) }) + if _, err := tmpFile.WriteString("apiVersion: v1\nkind: ConfigMap\n"); err != nil { + t.Fatalf("WriteString() error = %v", err) + } + if err := tmpFile.Close(); err != nil { + t.Fatalf("Close() error = %v", err) + } + + runner := &applyRunner{} + if err := ApplyManifestFromFile(runner.CommandArgs, tmpFile.Name(), io.Discard, io.Discard); err != nil { + t.Fatalf("ApplyManifestFromFile() error = %v", err) + } + if got, want := strings.Join(runner.args, " "), "apply -f -"; got != want { + t.Fatalf("kubectl args = %q, want %q", got, want) + } + captured, err := io.ReadAll(runner.cmd.stdin) + if err != nil { + t.Fatalf("ReadAll(stdin) error = %v", err) + } + if string(captured) != "apiVersion: v1\nkind: ConfigMap\n" { + t.Fatalf("stdin = %q", string(captured)) + } +} + +func TestReadFileAtPath(t *testing.T) { + t.Run("reads regular file", func(t *testing.T) { + path := filepath.Join(t.TempDir(), "manifest.yaml") + if err := os.WriteFile(path, []byte("kind: Namespace\n"), 0o600); err != nil { + t.Fatalf("WriteFile() error = %v", err) + } + + data, err := ReadFileAtPath(path) + if err != nil { + t.Fatalf("ReadFileAtPath() error = %v", err) + } + if string(data) != "kind: Namespace\n" { + t.Fatalf("ReadFileAtPath() = %q", string(data)) + } + }) + + t.Run("rejects symlink that escapes the opened root", func(t *testing.T) { + baseDir := t.TempDir() + manifestDir := filepath.Join(baseDir, "manifests") + if err := os.MkdirAll(manifestDir, 0o750); err != nil { + t.Fatalf("MkdirAll() error = %v", err) + } + + outsidePath := filepath.Join(baseDir, "outside.yaml") + if err := os.WriteFile(outsidePath, []byte("kind: Secret\n"), 0o600); err != nil { + t.Fatalf("WriteFile() error = %v", err) + } + + linkPath := filepath.Join(manifestDir, "linked.yaml") + relTarget, err := filepath.Rel(manifestDir, outsidePath) + if err != nil { + t.Fatalf("Rel() error = %v", err) + } + if err := 
os.Symlink(relTarget, linkPath); err != nil { + t.Skipf("Symlink() unavailable: %v", err) + } + + if _, err := ReadFileAtPath(linkPath); err == nil { + t.Fatal("ReadFileAtPath() error = nil, want symlink escape rejection") + } + }) + + t.Run("rejects non-regular files", func(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("named pipes are not exercised in this test on Windows") + } + + pipePath := filepath.Join(t.TempDir(), "manifest.pipe") + if err := syscall.Mkfifo(pipePath, 0o600); err != nil { + t.Skipf("Mkfifo() unavailable: %v", err) + } + + _, err := ReadFileAtPath(pipePath) + if err == nil { + t.Fatal("ReadFileAtPath() error = nil, want non-regular file rejection") + } + if !strings.Contains(err.Error(), "not a regular file") { + t.Fatalf("ReadFileAtPath() error = %v, want non-regular file rejection", err) + } + }) +} diff --git a/internal/cli/kube/patch.go b/internal/cli/kube/patch.go new file mode 100644 index 0000000..fdb5ac6 --- /dev/null +++ b/internal/cli/kube/patch.go @@ -0,0 +1,106 @@ +package kube + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "gopkg.in/yaml.v3" +) + +func normalizePatchValue(value any) any { + switch typed := value.(type) { + case map[string]any: + normalized := make(map[string]any, len(typed)) + for key, child := range typed { + normalized[key] = normalizePatchValue(child) + } + return normalized + case map[any]any: + normalized := make(map[string]any, len(typed)) + for key, child := range typed { + normalized[fmt.Sprint(key)] = normalizePatchValue(child) + } + return normalized + case []any: + normalized := make([]any, len(typed)) + for i, child := range typed { + normalized[i] = normalizePatchValue(child) + } + return normalized + default: + return value + } +} + +// NormalizePatchDocument parses YAML or JSON patch content and returns a JSON +// string suitable for kubectl patch --type=json (or merge) style inputs. 
+func NormalizePatchDocument(raw string) (string, error) { + var value any + if err := yaml.Unmarshal([]byte(raw), &value); err != nil { + return "", fmt.Errorf("parse patch document: %w", err) + } + + data, err := json.Marshal(normalizePatchValue(value)) + if err != nil { + return "", fmt.Errorf("marshal patch document: %w", err) + } + + return string(data), nil +} + +func readRegularPatchFile(path string) ([]byte, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("resolve file path: %w", err) + } + + root, err := os.OpenRoot(filepath.Dir(absPath)) + if err != nil { + return nil, err + } + defer root.Close() + + base := filepath.Base(absPath) + info, err := root.Stat(base) + if err != nil { + return nil, err + } + if !info.Mode().IsRegular() { + return nil, fmt.Errorf("read file %q: not a regular file", path) + } + + f, err := root.Open(base) + if err != nil { + return nil, err + } + defer f.Close() + + return io.ReadAll(f) +} + +// NormalizePatchFile reads a patch file from disk and returns normalized JSON +// like NormalizePatchDocument. 
+func NormalizePatchFile(file string) (string, error) { + absPath, err := filepath.Abs(file) + if err != nil { + return "", fmt.Errorf("resolve patch file path: %w", err) + } + + info, err := os.Stat(absPath) + if err != nil { + return "", fmt.Errorf("stat patch file %q: %w", file, err) + } + if info.IsDir() { + return "", fmt.Errorf("patch path %q is a directory", file) + } + + data, err := readRegularPatchFile(absPath) + if err != nil { + return "", fmt.Errorf("read patch file %q: %w", file, err) + } + + return NormalizePatchDocument(string(data)) +} diff --git a/internal/cli/kube/patch_test.go b/internal/cli/kube/patch_test.go new file mode 100644 index 0000000..46d34a4 --- /dev/null +++ b/internal/cli/kube/patch_test.go @@ -0,0 +1,14 @@ +package kube + +import "testing" + +func TestNormalizePatchDocumentYAMLMap(t *testing.T) { + raw := "foo: bar\nnested:\n x: 1\n" + out, err := NormalizePatchDocument(raw) + if err != nil { + t.Fatalf("NormalizePatchDocument: %v", err) + } + if out == "" || out[0] != '{' { + t.Fatalf("expected JSON object, got %q", out) + } +} diff --git a/internal/cli/kubeerr/kubeerr.go b/internal/cli/kubeerr/kubeerr.go new file mode 100644 index 0000000..48be213 --- /dev/null +++ b/internal/cli/kubeerr/kubeerr.go @@ -0,0 +1,39 @@ +package kubeerr + +import "strings" + +// CommandDetail extracts a single-line error detail from kubectl output or the exec error. +func CommandDetail(output string, fallback error) string { + lines := strings.Split(strings.TrimSpace(output), "\n") + for i := len(lines) - 1; i >= 0; i-- { + line := strings.TrimSpace(lines[i]) + if line != "" { + return line + } + } + if fallback != nil { + return fallback.Error() + } + return "Unknown error" +} + +// SetupHint returns a friendlier message when the cluster has not been provisioned yet. 
+func SetupHint(detail string) (string, bool) { + lower := strings.ToLower(detail) + + switch { + case strings.Contains(lower, "executable file not found"), + strings.Contains(lower, "kubectl: not found"): + return "kubectl is missing. Install kubectl and re-run the command.", true + case strings.Contains(lower, "kubeconfig"), + strings.Contains(lower, "no configuration has been provided"): + return "kubeconfig is missing or not readable. Either copy your cluster kubeconfig to ~/.kube/config, or re-run with `./bin/mcp-runtime setup --kubeconfig /etc/rancher/k3s/k3s.yaml` (for k3s) and optionally `--context `.", true + case strings.Contains(lower, "connection refused"), + strings.Contains(lower, "unable to connect to the server"), + strings.Contains(lower, "context deadline exceeded"), + strings.Contains(lower, "the connection to the server"): + return "no Kubernetes API reachable. Verify your kubeconfig/context (or pass `--kubeconfig`/`--context` to setup) and ensure the cluster control plane is reachable.", true + default: + return "", false + } +} diff --git a/internal/cli/kubeerr/kubeerr_test.go b/internal/cli/kubeerr/kubeerr_test.go new file mode 100644 index 0000000..537a66b --- /dev/null +++ b/internal/cli/kubeerr/kubeerr_test.go @@ -0,0 +1,33 @@ +package kubeerr + +import ( + "errors" + "testing" +) + +func TestCommandDetail(t *testing.T) { + if got := CommandDetail("first\n\nlast\n", errors.New("fallback")); got != "last" { + t.Fatalf("expected last output line, got %q", got) + } + if got := CommandDetail("", errors.New("fallback")); got != "fallback" { + t.Fatalf("expected fallback error, got %q", got) + } + if got := CommandDetail("", nil); got != "Unknown error" { + t.Fatalf("expected unknown error, got %q", got) + } +} + +func TestSetupHint(t *testing.T) { + if _, ok := SetupHint("kubectl: not found"); !ok { + t.Fatal("expected kubectl missing hint") + } + if _, ok := SetupHint("no configuration has been provided"); !ok { + t.Fatal("expected kubeconfig 
hint") + } + if _, ok := SetupHint("connection refused"); !ok { + t.Fatal("expected connectivity hint") + } + if _, ok := SetupHint("some other error"); ok { + t.Fatal("unexpected hint") + } +} diff --git a/internal/cli/pipeline/command.go b/internal/cli/pipeline/command.go new file mode 100644 index 0000000..a1d1d49 --- /dev/null +++ b/internal/cli/pipeline/command.go @@ -0,0 +1,42 @@ +// Package pipeline owns routing for the pipeline top-level command. +package pipeline + +import ( + "path/filepath" + + "github.com/spf13/cobra" + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" +) + +// filepathGlob is a test seam for filepath.Glob. +var filepathGlob = filepath.Glob + +type manager struct { + kubectl *core.KubectlClient + logger *zap.Logger +} + +func newManager(runtime *core.Runtime) *manager { + return &manager{ + kubectl: runtime.KubectlClient(), + logger: runtime.Logger(), + } +} + +// New returns the pipeline command. +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(newManager(runtime)) +} + +// NewWithManager returns the pipeline command using the provided manager. 
+func NewWithManager(mgr *manager) *cobra.Command { + cmd := &cobra.Command{ + Use: "pipeline", + Short: "Pipeline integration commands", + Long: "Commands for CI/CD pipeline integration to generate and deploy CRDs", + } + cmd.AddCommand(newGenerateCmd(mgr), newDeployCmd(mgr)) + return cmd +} diff --git a/internal/cli/pipeline/deploy.go b/internal/cli/pipeline/deploy.go new file mode 100644 index 0000000..f8a2a3b --- /dev/null +++ b/internal/cli/pipeline/deploy.go @@ -0,0 +1,119 @@ +package pipeline + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/cobra" + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" + "mcp-runtime/internal/cli/platformapi" +) + +func newDeployCmd(mgr *manager) *cobra.Command { + var manifestsDir string + var namespace string + cmd := &cobra.Command{ + Use: "deploy", + Short: "Deploy CRD files to cluster", + Long: `Deploy generated CRD files to the Kubernetes cluster. +This applies all CRD manifests to the cluster, which triggers +the operator to create the necessary Kubernetes resources.`, + RunE: func(cmd *cobra.Command, args []string) error { + return mgr.DeployCRDs(manifestsDir, namespace) + }, + } + cmd.Flags().StringVar(&manifestsDir, "dir", "manifests", "Directory containing CRD files") + cmd.Flags().StringVar(&namespace, "namespace", "", "Namespace to deploy to (overrides metadata)") + return cmd +} + +func (m *manager) DeployCRDs(manifestsDir, namespace string) error { + if _, kerr := m.kubectl.CombinedOutput([]string{"version", "--request-timeout=5s"}); kerr != nil { + if platformapi.HasPlatformClient() { + return core.NewWithSentinel(core.ErrApplyManifestFailed, "pipeline deploy applies YAML with kubectl and needs a working kubeconfig. mcp-runtime auth is for the platform API only, not for applying manifests. 
Run deploy from a host with cluster access, or fix KUBECONFIG, then retry.") + } + } + m.logger.Info("Deploying CRD files", zap.String("dir", manifestsDir)) + + files, err := filepathGlob(filepath.Join(manifestsDir, "*.yaml")) + if err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrListManifestFilesFailed, + err, + fmt.Sprintf("failed to list manifest files: %v", err), + map[string]any{"manifest_dir": manifestsDir, "component": "pipeline"}, + ) + core.Error("Failed to list manifest files") + core.LogStructuredError(m.logger, wrappedErr, "Failed to list manifest files") + return wrappedErr + } + + ymlFiles, err := filepathGlob(filepath.Join(manifestsDir, "*.yml")) + if err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrListManifestFilesFailed, + err, + fmt.Sprintf("failed to list manifest files: %v", err), + map[string]any{"manifest_dir": manifestsDir, "component": "pipeline"}, + ) + core.Error("Failed to list manifest files") + core.LogStructuredError(m.logger, wrappedErr, "Failed to list manifest files") + return wrappedErr + } + + files = append(files, ymlFiles...) 
+ if len(files) == 0 { + err := core.NewWithSentinel(core.ErrNoManifestFilesFound, fmt.Sprintf("no manifest files found in %s", manifestsDir)) + core.Error("No manifest files found") + core.LogStructuredError(m.logger, err, "No manifest files found") + return err + } + + for _, file := range files { + m.logger.Info("Applying manifest", zap.String("file", file)) + + absPath, err := kube.ResolveRegularFilePath(file) + if err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyManifestFailed, + err, + fmt.Sprintf("failed to resolve %s: %v", file, err), + map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, + ) + core.Error("Failed to resolve manifest file") + core.LogStructuredError(m.logger, wrappedErr, "Failed to resolve manifest file") + return wrappedErr + } + + manifestBytes, err := kube.ReadFileAtPath(absPath) + if err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyManifestFailed, + err, + fmt.Sprintf("failed to read %s: %v", absPath, err), + map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, + ) + core.Error("Failed to read manifest file") + core.LogStructuredError(m.logger, wrappedErr, "Failed to read manifest file") + return wrappedErr + } + + if err := kube.ApplyManifestContentWithNamespace(m.kubectl.CommandArgs, string(manifestBytes), namespace); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyManifestFailed, + err, + fmt.Sprintf("failed to apply %s: %v", file, err), + map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, + ) + core.Error("Failed to apply manifest") + core.LogStructuredError(m.logger, wrappedErr, "Failed to apply manifest") + return wrappedErr + } + } + + m.logger.Info("All CRD files deployed successfully") + return nil +} diff --git a/internal/cli/pipeline/generate.go b/internal/cli/pipeline/generate.go new file mode 100644 index 0000000..70c4316 --- /dev/null +++ 
b/internal/cli/pipeline/generate.go @@ -0,0 +1,86 @@ +package pipeline + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/cobra" + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/pkg/metadata" +) + +func newGenerateCmd(mgr *manager) *cobra.Command { + var metadataFile string + var metadataDir string + var outputDir string + cmd := &cobra.Command{ + Use: "generate", + Short: "Generate CRD files from metadata", + Long: `Generate Kubernetes CRD files from metadata/registry files. +This command reads server definitions and creates CRD YAML files that +the operator will use to deploy MCP servers.`, + RunE: func(cmd *cobra.Command, args []string) error { + return mgr.GenerateCRDsFromMetadata(metadataFile, metadataDir, outputDir) + }, + } + cmd.Flags().StringVar(&metadataFile, "file", "", "Path to metadata file (YAML)") + cmd.Flags().StringVar(&metadataDir, "dir", ".mcp", "Directory containing metadata files") + cmd.Flags().StringVar(&outputDir, "output", "manifests", "Output directory for CRD files") + return cmd +} + +func (m *manager) GenerateCRDsFromMetadata(metadataFile, metadataDir, outputDir string) error { + var registry *metadata.RegistryFile + var err error + + if metadataFile != "" { + m.logger.Info("Loading metadata from file", zap.String("file", metadataFile)) + registry, err = metadata.LoadFromFile(metadataFile) + } else { + m.logger.Info("Loading metadata from directory", zap.String("dir", metadataDir)) + registry, err = metadata.LoadFromDirectory(metadataDir) + } + + if err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrLoadMetadataFailed, err, fmt.Sprintf("failed to load metadata: %v", err)) + core.Error("Failed to load metadata") + core.LogStructuredError(m.logger, wrappedErr, "Failed to load metadata") + return wrappedErr + } + + if len(registry.Servers) == 0 { + err := core.ErrNoServersInMetadata + core.Error("No servers found in metadata") + core.LogStructuredError(m.logger, err, "No servers found in metadata") + 
return err + } + + if metadata.ResolveRegistryHost() == metadata.DefaultRegistryHost { + m.logger.Warn("Using default image host registry.local for generated MCPServer image refs. If cluster pulls fail, set MCP_REGISTRY_INGRESS_HOST to your registry (e.g. ClusterIP:port) and configure containerd/k3s for HTTP, or use public DNS and TLS.") + } + + m.logger.Info("Generating CRD files", zap.Int("count", len(registry.Servers)), zap.String("output", outputDir)) + + if err := metadata.GenerateCRDsFromRegistry(registry, outputDir); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrGenerateCRDsFailed, + err, + fmt.Sprintf("failed to generate CRDs: %v", err), + map[string]any{"output_dir": outputDir, "server_count": len(registry.Servers), "component": "pipeline"}, + ) + core.Error("Failed to generate CRDs") + core.LogStructuredError(m.logger, wrappedErr, "Failed to generate CRDs") + return wrappedErr + } + + m.logger.Info("CRD files generated successfully", zap.String("output", outputDir)) + + files, _ := filepath.Glob(filepath.Join(outputDir, "*.yaml")) + for _, file := range files { + core.Success(fmt.Sprintf("Generated: %s", file)) + } + + return nil +} diff --git a/internal/cli/pipeline/pipeline.go b/internal/cli/pipeline/pipeline.go deleted file mode 100644 index e9f4584..0000000 --- a/internal/cli/pipeline/pipeline.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package pipeline owns routing for the pipeline top-level command. -package pipeline - -import ( - "fmt" - "path/filepath" - - "github.com/spf13/cobra" - "go.uber.org/zap" - - "mcp-runtime/internal/cli" - "mcp-runtime/pkg/metadata" -) - -// filepathGlob is a test seam for filepath.Glob. -var filepathGlob = filepath.Glob - -type manager struct { - kubectl *cli.KubectlClient - logger *zap.Logger -} - -func newManager(runtime *cli.Runtime) *manager { - return &manager{ - kubectl: runtime.KubectlClient(), - logger: runtime.Logger(), - } -} - -// New returns the pipeline command. 
-func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(newManager(runtime)) -} - -// NewWithManager returns the pipeline command using the provided manager. -func NewWithManager(mgr *manager) *cobra.Command { - cmd := &cobra.Command{ - Use: "pipeline", - Short: "Pipeline integration commands", - Long: "Commands for CI/CD pipeline integration to generate and deploy CRDs", - } - - var metadataFile string - var metadataDir string - var outputDir string - generateCmd := &cobra.Command{ - Use: "generate", - Short: "Generate CRD files from metadata", - Long: `Generate Kubernetes CRD files from metadata/registry files. -This command reads server definitions and creates CRD YAML files that -the operator will use to deploy MCP servers.`, - RunE: func(cmd *cobra.Command, args []string) error { - return mgr.GenerateCRDsFromMetadata(metadataFile, metadataDir, outputDir) - }, - } - generateCmd.Flags().StringVar(&metadataFile, "file", "", "Path to metadata file (YAML)") - generateCmd.Flags().StringVar(&metadataDir, "dir", ".mcp", "Directory containing metadata files") - generateCmd.Flags().StringVar(&outputDir, "output", "manifests", "Output directory for CRD files") - - var manifestsDir string - var namespace string - deployCmd := &cobra.Command{ - Use: "deploy", - Short: "Deploy CRD files to cluster", - Long: `Deploy generated CRD files to the Kubernetes cluster. 
-This applies all CRD manifests to the cluster, which triggers -the operator to create the necessary Kubernetes resources.`, - RunE: func(cmd *cobra.Command, args []string) error { - return mgr.DeployCRDs(manifestsDir, namespace) - }, - } - deployCmd.Flags().StringVar(&manifestsDir, "dir", "manifests", "Directory containing CRD files") - deployCmd.Flags().StringVar(&namespace, "namespace", "", "Namespace to deploy to (overrides metadata)") - - cmd.AddCommand(generateCmd, deployCmd) - return cmd -} - -func (m *manager) GenerateCRDsFromMetadata(metadataFile, metadataDir, outputDir string) error { - var registry *metadata.RegistryFile - var err error - - if metadataFile != "" { - m.logger.Info("Loading metadata from file", zap.String("file", metadataFile)) - registry, err = metadata.LoadFromFile(metadataFile) - } else { - m.logger.Info("Loading metadata from directory", zap.String("dir", metadataDir)) - registry, err = metadata.LoadFromDirectory(metadataDir) - } - - if err != nil { - wrappedErr := cli.WrapWithSentinel(cli.ErrLoadMetadataFailed, err, fmt.Sprintf("failed to load metadata: %v", err)) - cli.Error("Failed to load metadata") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to load metadata") - return wrappedErr - } - - if len(registry.Servers) == 0 { - err := cli.ErrNoServersInMetadata - cli.Error("No servers found in metadata") - cli.LogStructuredError(m.logger, err, "No servers found in metadata") - return err - } - - if metadata.ResolveRegistryHost() == metadata.DefaultRegistryHost { - m.logger.Warn("Using default image host registry.local for generated MCPServer image refs. If cluster pulls fail, set MCP_REGISTRY_INGRESS_HOST to your registry (e.g. 
ClusterIP:port) and configure containerd/k3s for HTTP, or use public DNS and TLS.") - } - - m.logger.Info("Generating CRD files", zap.Int("count", len(registry.Servers)), zap.String("output", outputDir)) - - if err := metadata.GenerateCRDsFromRegistry(registry, outputDir); err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrGenerateCRDsFailed, - err, - fmt.Sprintf("failed to generate CRDs: %v", err), - map[string]any{"output_dir": outputDir, "server_count": len(registry.Servers), "component": "pipeline"}, - ) - cli.Error("Failed to generate CRDs") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to generate CRDs") - return wrappedErr - } - - m.logger.Info("CRD files generated successfully", zap.String("output", outputDir)) - - files, _ := filepath.Glob(filepath.Join(outputDir, "*.yaml")) - for _, file := range files { - cli.Success(fmt.Sprintf("Generated: %s", file)) - } - - return nil -} - -func (m *manager) DeployCRDs(manifestsDir, namespace string) error { - if _, kerr := m.kubectl.CombinedOutput([]string{"version", "--request-timeout=5s"}); kerr != nil { - if cli.HasPlatformClient() { - return cli.NewWithSentinel(cli.ErrApplyManifestFailed, "pipeline deploy applies YAML with kubectl and needs a working kubeconfig. mcp-runtime auth is for the platform API only, not for applying manifests. 
Run deploy from a host with cluster access, or fix KUBECONFIG, then retry.") - } - } - m.logger.Info("Deploying CRD files", zap.String("dir", manifestsDir)) - - files, err := filepathGlob(filepath.Join(manifestsDir, "*.yaml")) - if err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrListManifestFilesFailed, - err, - fmt.Sprintf("failed to list manifest files: %v", err), - map[string]any{"manifest_dir": manifestsDir, "component": "pipeline"}, - ) - cli.Error("Failed to list manifest files") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to list manifest files") - return wrappedErr - } - - ymlFiles, err := filepathGlob(filepath.Join(manifestsDir, "*.yml")) - if err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrListManifestFilesFailed, - err, - fmt.Sprintf("failed to list manifest files: %v", err), - map[string]any{"manifest_dir": manifestsDir, "component": "pipeline"}, - ) - cli.Error("Failed to list manifest files") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to list manifest files") - return wrappedErr - } - - files = append(files, ymlFiles...) 
- if len(files) == 0 { - err := cli.NewWithSentinel(cli.ErrNoManifestFilesFound, fmt.Sprintf("no manifest files found in %s", manifestsDir)) - cli.Error("No manifest files found") - cli.LogStructuredError(m.logger, err, "No manifest files found") - return err - } - - for _, file := range files { - m.logger.Info("Applying manifest", zap.String("file", file)) - - absPath, err := cli.ResolveRegularFilePath(file) - if err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrApplyManifestFailed, - err, - fmt.Sprintf("failed to resolve %s: %v", file, err), - map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, - ) - cli.Error("Failed to resolve manifest file") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to resolve manifest file") - return wrappedErr - } - - manifestBytes, err := cli.ReadFileAtPath(absPath) - if err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrApplyManifestFailed, - err, - fmt.Sprintf("failed to read %s: %v", absPath, err), - map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, - ) - cli.Error("Failed to read manifest file") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to read manifest file") - return wrappedErr - } - - if err := cli.ApplyManifestContentWithNamespace(m.kubectl, string(manifestBytes), namespace); err != nil { - wrappedErr := cli.WrapWithSentinelAndContext( - cli.ErrApplyManifestFailed, - err, - fmt.Sprintf("failed to apply %s: %v", file, err), - map[string]any{"file": file, "namespace": namespace, "component": "pipeline"}, - ) - cli.Error("Failed to apply manifest") - cli.LogStructuredError(m.logger, wrappedErr, "Failed to apply manifest") - return wrappedErr - } - } - - m.logger.Info("All CRD files deployed successfully") - return nil -} diff --git a/internal/cli/pipeline/pipeline_test.go b/internal/cli/pipeline/pipeline_test.go index baa2b72..a7a3e0c 100644 --- a/internal/cli/pipeline/pipeline_test.go +++ 
b/internal/cli/pipeline/pipeline_test.go @@ -11,13 +11,13 @@ import ( "go.uber.org/zap" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) func TestManagerDeployCRDs(t *testing.T) { t.Run("returns error when no manifests found", func(t *testing.T) { - mock := &cli.MockExecutor{} - kubectl, err := cli.NewKubectlClient(mock) + mock := &core.MockExecutor{} + kubectl, err := core.NewKubectlClient(mock) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } @@ -31,9 +31,9 @@ func TestManagerDeployCRDs(t *testing.T) { t.Run("applies each manifest file", func(t *testing.T) { var appliedManifests []string - mock := &cli.MockExecutor{ - CommandFunc: func(spec cli.ExecSpec) *cli.MockCommand { - cmd := &cli.MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} cmd.RunFunc = func() error { if cmd.StdinR != nil { data, err := io.ReadAll(cmd.StdinR) @@ -47,7 +47,7 @@ func TestManagerDeployCRDs(t *testing.T) { return cmd }, } - kubectl, err := cli.NewKubectlClient(mock) + kubectl, err := core.NewKubectlClient(mock) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } @@ -91,11 +91,11 @@ func TestManagerGenerateCRDsFromMetadata(t *testing.T) { t.Run("generates CRDs from file successfully", func(t *testing.T) { var buf bytes.Buffer - origWriter := cli.DefaultPrinter.Writer - cli.DefaultPrinter.Writer = &buf - t.Cleanup(func() { cli.DefaultPrinter.Writer = origWriter }) + origWriter := core.DefaultPrinter.Writer + core.DefaultPrinter.Writer = &buf + t.Cleanup(func() { core.DefaultPrinter.Writer = origWriter }) - mgr := &manager{kubectl: &cli.KubectlClient{}, logger: zap.NewNop()} + mgr := &manager{kubectl: &core.KubectlClient{}, logger: zap.NewNop()} tmpDir := t.TempDir() outputDir := filepath.Join(tmpDir, "output") metadataFile := filepath.Join(tmpDir, "servers.yaml") @@ -121,8 +121,8 @@ servers: func 
TestManagerDeployCRDsErrors(t *testing.T) { t.Run("apply error", func(t *testing.T) { - mock := &cli.MockExecutor{DefaultRunErr: errors.New("apply failed")} - kubectl, err := cli.NewKubectlClient(mock) + mock := &core.MockExecutor{DefaultRunErr: errors.New("apply failed")} + kubectl, err := core.NewKubectlClient(mock) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } @@ -145,8 +145,8 @@ func TestManagerDeployCRDsErrors(t *testing.T) { return nil, errors.New("glob error") } - mock := &cli.MockExecutor{} - kubectl, err := cli.NewKubectlClient(mock) + mock := &core.MockExecutor{} + kubectl, err := core.NewKubectlClient(mock) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } diff --git a/internal/cli/platform_ingress_test.go b/internal/cli/platform_ingress_test.go deleted file mode 100644 index 578cc74..0000000 --- a/internal/cli/platform_ingress_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package cli - -import ( - "strings" - "testing" -) - -func TestRenderPlatformIngressManifestNoTLS(t *testing.T) { - got := renderPlatformIngressManifest("platform.example.com", "") - mustContain := []string{ - "name: " + platformIngressName, - "namespace: " + defaultAnalyticsNamespace, - "traefik.ingress.kubernetes.io/router.entrypoints: web", - `- host: "platform.example.com"`, - "- path: /api\n", - "- path: /grafana\n", - "- path: /prometheus\n", - "- path: /\n", - "name: mcp-sentinel-ui", - "number: 8082", - "name: grafana", - "name: prometheus", - } - for _, want := range mustContain { - if !strings.Contains(got, want) { - t.Fatalf("missing %q in manifest:\n%s", want, got) - } - } - if strings.Contains(got, "tls:") { - t.Fatalf("did not expect a TLS block when issuer is empty:\n%s", got) - } - if strings.Contains(got, "cert-manager.io/cluster-issuer") { - t.Fatalf("did not expect cert-manager annotation when issuer is empty:\n%s", got) - } -} - -func TestRenderPlatformIngressManifestApiBeforeGrafana(t *testing.T) { - got := 
renderPlatformIngressManifest("platform.example.com", "") - apiIdx := strings.Index(got, "- path: /api") - grafanaIdx := strings.Index(got, "- path: /grafana") - rootIdx := strings.Index(got, "- path: /\n") - if apiIdx < 0 || grafanaIdx < 0 || rootIdx < 0 { - t.Fatalf("missing one of /api, /grafana, / paths:\n%s", got) - } - // Traefik matches longer/more-specific prefixes before /, so /api must - // appear in the manifest and be a sibling of /grafana, /prometheus. - if apiIdx > grafanaIdx { - t.Fatalf("/api must be listed before /grafana in the rule for readability:\n%s", got) - } - if grafanaIdx > rootIdx { - t.Fatalf("/grafana must be listed before / catch-all:\n%s", got) - } -} - -func TestRenderPlatformIngressManifestWithTLS(t *testing.T) { - got := renderPlatformIngressManifest("platform.mcpruntime.org", "letsencrypt-prod") - mustContain := []string{ - "traefik.ingress.kubernetes.io/router.entrypoints: websecure", - "cert-manager.io/cluster-issuer: letsencrypt-prod", - "tls:", - `- "platform.mcpruntime.org"`, - "secretName: " + platformTLSSecretName, - `- host: "platform.mcpruntime.org"`, - } - for _, want := range mustContain { - if !strings.Contains(got, want) { - t.Fatalf("missing %q in manifest:\n%s", want, got) - } - } - if strings.Contains(got, "\n traefik.ingress.kubernetes.io/router.entrypoints: web\n") { - t.Fatalf("did not expect plain web entrypoint when TLS issuer is set:\n%s", got) - } -} - -// TestACMETLSDNSNamesExcludesPlatformHost asserts that the registry-cert SANs -// do NOT include the platform host. The platform Ingress in mcp-sentinel uses -// cert-manager's ingress-shim to mint its own cert; including the platform -// host in the registry-cert would cause a redundant ACME order on every -// renewal (and the secret in the registry namespace cannot be referenced from -// a different namespace by Kubernetes Ingress anyway). 
-func TestACMETLSDNSNamesExcludesPlatformHost(t *testing.T) { - prev := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = prev }) - DefaultCLIConfig = &CLIConfig{ - RegistryIngressHost: "registry.example.com", - McpIngressHost: "mcp.example.com", - PlatformIngressHost: "platform.example.com", - } - names := acmeTLSDNSNames() - want := map[string]bool{ - "registry.example.com": true, - "mcp.example.com": true, - } - if len(names) != len(want) { - t.Fatalf("expected %d hostnames, got %d (%v)", len(want), len(names), names) - } - for _, n := range names { - if !want[n] { - t.Fatalf("unexpected hostname %q in registry SANs (platform host should be excluded)", n) - } - } -} diff --git a/internal/cli/platform_url.go b/internal/cli/platformapi/baseurl.go similarity index 53% rename from internal/cli/platform_url.go rename to internal/cli/platformapi/baseurl.go index 748508c..e651e0b 100644 --- a/internal/cli/platform_url.go +++ b/internal/cli/platformapi/baseurl.go @@ -1,10 +1,10 @@ -package cli +package platformapi import "strings" -// NormalizePlatformAPIBaseURL trims whitespace, trailing slashes, and an -// optional trailing /api suffix from a platform base URL. -func NormalizePlatformAPIBaseURL(raw string) string { +// NormalizeBaseURL trims whitespace, trailing slashes, and an optional trailing +// /api suffix from a platform base URL. 
+func NormalizeBaseURL(raw string) string { s := strings.TrimSpace(raw) s = strings.TrimRight(s, "/") if strings.HasSuffix(strings.ToLower(s), "/api") { diff --git a/internal/cli/platformapi/baseurl_test.go b/internal/cli/platformapi/baseurl_test.go new file mode 100644 index 0000000..90e3bee --- /dev/null +++ b/internal/cli/platformapi/baseurl_test.go @@ -0,0 +1,19 @@ +package platformapi + +import "testing" + +func TestNormalizeBaseURL(t *testing.T) { + cases := []struct { + in, want string + }{ + {"https://example.com/", "https://example.com"}, + {"https://example.com/api", "https://example.com"}, + {"https://example.com/api/", "https://example.com"}, + {" https://x ", "https://x"}, + } + for _, tc := range cases { + if got := NormalizeBaseURL(tc.in); got != tc.want { + t.Fatalf("NormalizeBaseURL(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} diff --git a/internal/cli/platform_client.go b/internal/cli/platformapi/client.go similarity index 83% rename from internal/cli/platform_client.go rename to internal/cli/platformapi/client.go index 443e2e4..de97094 100644 --- a/internal/cli/platform_client.go +++ b/internal/cli/platformapi/client.go @@ -1,7 +1,7 @@ // HTTP client for the Sentinel platform API using auth from authfile. // User-facing (non-kubeconfig) path for access, server list, and policy. -package cli +package platformapi import ( "bytes" @@ -12,6 +12,8 @@ import ( "io" "net/http" "net/url" + "os" + "path/filepath" "strings" "time" @@ -23,21 +25,22 @@ import ( ) const maxAPIBodyRead = 4 << 20 +const defaultMCPServersNamespace = "mcp-servers" // errPlatformNoBaseURL is returned when a token exists but the API base URL is missing. var errPlatformNoBaseURL = errors.New("set MCP_PLATFORM_API_URL or run mcp-runtime auth login with --api-url to use the platform API") -// platformClient calls the mcp-sentinel API with an API key. -type platformClient struct { +// PlatformClient calls the mcp-sentinel API with an API key. 
+type PlatformClient struct { baseURL string token string http *http.Client apiPrefix string } -// newPlatformClient returns a client when platform credentials and API base URL are configured. +// NewPlatformClient returns a client when platform credentials and API base URL are configured. // If the user is not logged in, returns [authfile.ErrNotFound] so the caller can fall back to kubectl. -func newPlatformClient() (*platformClient, error) { +func NewPlatformClient() (*PlatformClient, error) { tok, base, _, err := authfile.ResolveToken() if err != nil { return nil, err @@ -48,8 +51,8 @@ func newPlatformClient() (*platformClient, error) { } return nil, authfile.ErrNotFound } - return &platformClient{ - baseURL: NormalizePlatformAPIBaseURL(base), + return &PlatformClient{ + baseURL: NormalizeBaseURL(base), token: tok, http: &http.Client{Timeout: 2 * time.Minute}, apiPrefix: "/api", @@ -57,11 +60,11 @@ func newPlatformClient() (*platformClient, error) { } func HasPlatformClient() bool { - _, err := newPlatformClient() + _, err := NewPlatformClient() return err == nil } -func (c *platformClient) do(ctx context.Context, method, relPath, query string, body io.Reader) (*http.Response, error) { +func (c *PlatformClient) do(ctx context.Context, method, relPath, query string, body io.Reader) (*http.Response, error) { u, err := url.Parse(c.baseURL) if err != nil { return nil, err @@ -132,7 +135,7 @@ type sessionAPIBody struct { PolicyVersion string `json:"policyVersion"` } -func (c *platformClient) listGrants(ctx context.Context, namespace string) ([]sentinelaccess.GrantSummary, error) { +func (c *PlatformClient) ListGrants(ctx context.Context, namespace string) ([]sentinelaccess.GrantSummary, error) { resp, err := c.do(ctx, http.MethodGet, "/runtime/grants", listQuery(namespace), nil) if err != nil { return nil, err @@ -152,7 +155,7 @@ func (c *platformClient) listGrants(ctx context.Context, namespace string) ([]se return out.Grants, nil } -func (c *platformClient) 
listSessions(ctx context.Context, namespace string) ([]sentinelaccess.SessionSummary, error) { +func (c *PlatformClient) ListSessions(ctx context.Context, namespace string) ([]sentinelaccess.SessionSummary, error) { resp, err := c.do(ctx, http.MethodGet, "/runtime/sessions", listQuery(namespace), nil) if err != nil { return nil, err @@ -172,7 +175,7 @@ func (c *platformClient) listSessions(ctx context.Context, namespace string) ([] return out.Sessions, nil } -func (c *platformClient) getGrant(ctx context.Context, namespace, name string) (sentinelaccess.GrantSummary, error) { +func (c *PlatformClient) GetGrant(ctx context.Context, namespace, name string) (sentinelaccess.GrantSummary, error) { p := fmt.Sprintf("/runtime/grants/%s/%s", url.PathEscape(namespace), url.PathEscape(name)) resp, err := c.do(ctx, http.MethodGet, p, "", nil) if err != nil { @@ -193,7 +196,7 @@ func (c *platformClient) getGrant(ctx context.Context, namespace, name string) ( return out.Grant, nil } -func (c *platformClient) getSession(ctx context.Context, namespace, name string) (sentinelaccess.SessionSummary, error) { +func (c *PlatformClient) GetSession(ctx context.Context, namespace, name string) (sentinelaccess.SessionSummary, error) { p := fmt.Sprintf("/runtime/sessions/%s/%s", url.PathEscape(namespace), url.PathEscape(name)) resp, err := c.do(ctx, http.MethodGet, p, "", nil) if err != nil { @@ -214,7 +217,7 @@ func (c *platformClient) getSession(ctx context.Context, namespace, name string) return out.Session, nil } -func (c *platformClient) postGrant(ctx context.Context, body grantAPIBody) error { +func (c *PlatformClient) postGrant(ctx context.Context, body grantAPIBody) error { js, err := json.Marshal(body) if err != nil { return err @@ -231,7 +234,7 @@ func (c *platformClient) postGrant(ctx context.Context, body grantAPIBody) error return nil } -func (c *platformClient) postSession(ctx context.Context, body sessionAPIBody) error { +func (c *PlatformClient) postSession(ctx 
context.Context, body sessionAPIBody) error { js, err := json.Marshal(body) if err != nil { return err @@ -248,7 +251,7 @@ func (c *platformClient) postSession(ctx context.Context, body sessionAPIBody) e return nil } -func (c *platformClient) deleteGrant(ctx context.Context, namespace, name string) error { +func (c *PlatformClient) DeleteGrant(ctx context.Context, namespace, name string) error { p := fmt.Sprintf("/runtime/grants/%s/%s", url.PathEscape(namespace), url.PathEscape(name)) resp, err := c.do(ctx, http.MethodDelete, p, "", nil) if err != nil { @@ -262,7 +265,7 @@ func (c *platformClient) deleteGrant(ctx context.Context, namespace, name string return nil } -func (c *platformClient) deleteSession(ctx context.Context, namespace, name string) error { +func (c *PlatformClient) DeleteSession(ctx context.Context, namespace, name string) error { p := fmt.Sprintf("/runtime/sessions/%s/%s", url.PathEscape(namespace), url.PathEscape(name)) resp, err := c.do(ctx, http.MethodDelete, p, "", nil) if err != nil { @@ -276,7 +279,7 @@ func (c *platformClient) deleteSession(ctx context.Context, namespace, name stri return nil } -func (c *platformClient) postGrantToggle(ctx context.Context, namespace, name, action string) error { +func (c *PlatformClient) PostGrantToggle(ctx context.Context, namespace, name, action string) error { p := fmt.Sprintf("/runtime/grants/%s/%s/%s", url.PathEscape(namespace), url.PathEscape(name), action) resp, err := c.do(ctx, http.MethodPost, p, "", nil) if err != nil { @@ -290,7 +293,7 @@ func (c *platformClient) postGrantToggle(ctx context.Context, namespace, name, a return nil } -func (c *platformClient) postSessionToggle(ctx context.Context, namespace, name, action string) error { +func (c *PlatformClient) PostSessionToggle(ctx context.Context, namespace, name, action string) error { p := fmt.Sprintf("/runtime/sessions/%s/%s/%s", url.PathEscape(namespace), url.PathEscape(name), action) resp, err := c.do(ctx, http.MethodPost, p, "", nil) if err 
!= nil { @@ -304,7 +307,7 @@ func (c *platformClient) postSessionToggle(ctx context.Context, namespace, name, return nil } -func (c *platformClient) applyAccessFromYAMLFile(ctx context.Context, path string) error { +func (c *PlatformClient) ApplyAccessFromYAMLFile(ctx context.Context, path string) error { b, err := readFileAtPath(path) if err != nil { return err @@ -351,11 +354,11 @@ func (c *platformClient) applyAccessFromYAMLFile(ctx context.Context, path strin return fmt.Errorf("apply %s document %d session: %w", path, docIndex, err) } default: - return newWithSentinel(ErrFieldRequired, fmt.Sprintf("manifest document %d kind %q is not supported for platform apply (use MCPAccessGrant or MCPAgentSession)", docIndex, meta.Kind)) + return fmt.Errorf("manifest document %d kind %q is not supported for platform apply (use MCPAccessGrant or MCPAgentSession)", docIndex, meta.Kind) } } if docIndex == 0 { - return newWithSentinel(ErrFieldRequired, "manifest does not contain MCPAccessGrant or MCPAgentSession") + return errors.New("manifest does not contain MCPAccessGrant or MCPAgentSession") } return nil } @@ -425,7 +428,8 @@ func httpAPIError(status int, body []byte) error { // --- runtime / servers (GET) ------------------------------------------------ -type serverListItem struct { +// ServerListItem is one row from the platform API runtime servers list. 
+type ServerListItem struct { Name string `json:"name"` Namespace string `json:"namespace"` Ready string `json:"ready"` @@ -435,15 +439,15 @@ type serverListItem struct { } type serverListResponse struct { - Servers []serverListItem `json:"servers"` + Servers []ServerListItem `json:"servers"` } -func (c *platformClient) listRuntimeServers(ctx context.Context, namespace string) ([]serverListItem, error) { +func (c *PlatformClient) ListRuntimeServers(ctx context.Context, namespace string) ([]ServerListItem, error) { v := url.Values{} if strings.TrimSpace(namespace) != "" { v.Set("namespace", namespace) } else { - v.Set("namespace", NamespaceMCPServers) + v.Set("namespace", defaultMCPServersNamespace) } resp, err := c.do(ctx, http.MethodGet, "/runtime/servers", v.Encode(), nil) if err != nil { @@ -464,7 +468,37 @@ func (c *platformClient) listRuntimeServers(ctx context.Context, namespace strin return out.Servers, nil } -func (c *platformClient) getRuntimePolicy(ctx context.Context, namespace, server string) ([]byte, error) { +func readFileAtPath(path string) ([]byte, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("resolve file path: %w", err) + } + + root, err := os.OpenRoot(filepath.Dir(absPath)) + if err != nil { + return nil, err + } + defer root.Close() + + base := filepath.Base(absPath) + info, err := root.Stat(base) + if err != nil { + return nil, err + } + if !info.Mode().IsRegular() { + return nil, fmt.Errorf("read file %q: not a regular file", path) + } + + file, err := root.Open(base) + if err != nil { + return nil, err + } + defer file.Close() + + return io.ReadAll(file) +} + +func (c *PlatformClient) GetRuntimePolicy(ctx context.Context, namespace, server string) ([]byte, error) { v := url.Values{} v.Set("namespace", namespace) v.Set("server", server) @@ -483,11 +517,12 @@ func (c *platformClient) getRuntimePolicy(ctx context.Context, namespace, server return b, nil } -func (m *AccessManager) platformOrKube() (plat 
*platformClient, useKubectl bool, err error) { - if m.useKube { +// ResolvePlatformOrKube returns a platform API client when useKube is false and auth resolves; otherwise useKubectl is true. +func ResolvePlatformOrKube(useKube bool) (*PlatformClient, bool, error) { + if useKube { return nil, true, nil } - cl, e := newPlatformClient() + cl, e := NewPlatformClient() if e == nil { return cl, false, nil } @@ -496,29 +531,3 @@ func (m *AccessManager) platformOrKube() (plat *platformClient, useKubectl bool, } return nil, false, e } - -func (m *ServerManager) platformOrKube() (plat *platformClient, useKubectl bool, err error) { - if m.useKube { - return nil, true, nil - } - cl, e := newPlatformClient() - if e == nil { - return cl, false, nil - } - if errors.Is(e, authfile.ErrNotFound) { - return nil, true, nil - } - return nil, false, e -} - -// requireKubectlForMutation returns an error when only platform API credentials are active (no kube path). -func (m *ServerManager) requireKubectlForMutation() error { - _, useK, err := m.platformOrKube() - if err != nil { - return err - } - if !useK { - return newWithSentinel(nil, "this command requires kubectl and a cluster kubeconfig, or set --use-kube when you use kubectl alongside platform auth. 
Use mcp-runtime auth for API-backed list, status, and policy when kubeconfig is not used.") - } - return nil -} diff --git a/internal/cli/platform_client_test.go b/internal/cli/platformapi/client_test.go similarity index 91% rename from internal/cli/platform_client_test.go rename to internal/cli/platformapi/client_test.go index 4f0b804..2c8f724 100644 --- a/internal/cli/platform_client_test.go +++ b/internal/cli/platformapi/client_test.go @@ -1,4 +1,4 @@ -package cli +package platformapi import ( "context" @@ -65,14 +65,14 @@ spec: t.Fatal(err) } - client := &platformClient{ + client := &PlatformClient{ baseURL: "https://platform.example.com", token: "token-1", http: httpClient, apiPrefix: "/api", } - if err := client.applyAccessFromYAMLFile(context.Background(), manifest); err != nil { - t.Fatalf("applyAccessFromYAMLFile() error = %v", err) + if err := client.ApplyAccessFromYAMLFile(context.Background(), manifest); err != nil { + t.Fatalf("ApplyAccessFromYAMLFile() error = %v", err) } if grantCalls != 1 || sessionCalls != 1 { t.Fatalf("calls = grant:%d session:%d, want 1/1", grantCalls, sessionCalls) diff --git a/internal/cli/platformstatus/kubectl.go b/internal/cli/platformstatus/kubectl.go new file mode 100644 index 0000000..c436969 --- /dev/null +++ b/internal/cli/platformstatus/kubectl.go @@ -0,0 +1,31 @@ +package platformstatus + +import ( + "fmt" + "strings" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kubeerr" +) + +func runKubectlCombinedOutput(args []string) (string, error) { + cmd, err := core.DefaultKubectlClient().CommandArgs(args) + if err != nil { + return "", err + } + output, execErr := cmd.CombinedOutput() + return strings.TrimSpace(string(output)), execErr +} + +// CheckClusterStatusQuiet probes cluster connectivity without printing status. 
+func CheckClusterStatusQuiet() error { + output, err := runKubectlCombinedOutput([]string{"cluster-info"}) + if err == nil { + return nil + } + detail := kubeerr.CommandDetail(output, err) + if hint, handled := kubeerr.SetupHint(detail); handled { + return core.WrapWithSentinel(core.ErrClusterNotAccessible, err, hint) + } + return core.WrapWithSentinel(core.ErrClusterNotAccessible, err, fmt.Sprintf("cluster not accessible: %s", detail)) +} diff --git a/internal/cli/platformstatus/workloads.go b/internal/cli/platformstatus/workloads.go new file mode 100644 index 0000000..7fec6e7 --- /dev/null +++ b/internal/cli/platformstatus/workloads.go @@ -0,0 +1,124 @@ +package platformstatus + +import ( + "fmt" + "strconv" + "strings" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kubeerr" +) + +// PlatformWorkload identifies a namespaced workload for status tables. +type PlatformWorkload struct { + Component string + Namespace string + Kind string + Name string +} + +// DefaultPlatformStatusWorkloads lists bundled analytics stack workloads for status output. 
+var DefaultPlatformStatusWorkloads = []PlatformWorkload{ + {Component: "ClickHouse", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "clickhouse"}, + {Component: "Zookeeper", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "zookeeper"}, + {Component: "Kafka", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "kafka"}, + {Component: "Ingest", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ingest"}, + {Component: "Processor", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-processor"}, + {Component: "API", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-api"}, + {Component: "UI", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ui"}, + {Component: "Gateway", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-gateway"}, + {Component: "Prometheus", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "prometheus"}, + {Component: "Grafana", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "grafana"}, + {Component: "OTel Collector", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Name: "otel-collector"}, + {Component: "Tempo", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "tempo"}, + {Component: "Loki", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Name: "loki"}, + {Component: "Promtail", Namespace: core.DefaultAnalyticsNamespace, Kind: "daemonset", Name: "promtail"}, +} + +// AnalyticsNamespaceInstalled reports whether the analytics namespace exists. 
+func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error) { + if !clusterReachable { + return false, nil + } + + output, err := runKubectlCombinedOutput([]string{"get", "namespace", core.DefaultAnalyticsNamespace, "-o", "jsonpath={.metadata.name}"}) + if err == nil { + return strings.TrimSpace(output) == core.DefaultAnalyticsNamespace, nil + } + if strings.TrimSpace(output) == "" { + return false, fmt.Errorf("empty output from namespace probe") + } + + lower := strings.ToLower(output) + if strings.Contains(lower, "not found") || strings.Contains(lower, "notfound") { + return false, nil + } + + return false, fmt.Errorf("%s", kubeerr.CommandDetail(output, err)) +} + +// AnalyticsStackRow builds a table row for the analytics namespace aggregate status. +func AnalyticsStackRow(status, details string) []string { + ns := core.DefaultAnalyticsNamespace + return []string{"Analytics Stack", ns, "namespace/" + ns, status, details} +} + +// WorkloadStatusRow renders one workload row for platform status tables. 
+func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string { + resource := fmt.Sprintf("%s/%s", workload.Kind, workload.Name) + if !clusterReachable { + return []string{workload.Component, workload.Namespace, resource, core.Red("ERROR"), "Cluster unavailable"} + } + + st, details := workloadReadinessStatus(workload) + return []string{workload.Component, workload.Namespace, resource, st, details} +} + +func workloadReadinessStatus(workload PlatformWorkload) (string, string) { + jsonPath, err := workloadReadyJSONPath(workload.Kind) + if err != nil { + return core.Red("ERROR"), err.Error() + } + + output, cmdErr := runKubectlCombinedOutput([]string{ + "get", workload.Kind, workload.Name, + "-n", workload.Namespace, + "-o", "jsonpath=" + jsonPath, + }) + if cmdErr != nil { + return core.Red("ERROR"), kubeerr.CommandDetail(output, cmdErr) + } + + if workloadReady(output) { + return core.Green("OK"), "Ready: " + output + } + return core.Yellow("PENDING"), "Ready: " + output +} + +func workloadReadyJSONPath(kind string) (string, error) { + switch strings.ToLower(kind) { + case "deployment", "statefulset": + return "{.status.readyReplicas}/{.spec.replicas}", nil + case "daemonset": + return "{.status.numberReady}/{.status.desiredNumberScheduled}", nil + default: + return "", fmt.Errorf("unsupported workload kind %q", kind) + } +} + +func workloadReady(value string) bool { + parts := strings.Split(strings.TrimSpace(value), "/") + if len(parts) != 2 { + return false + } + + ready, err := strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return false + } + desired, err := strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return false + } + return desired > 0 && ready >= desired +} diff --git a/internal/cli/registry/config/config.go b/internal/cli/registry/config/config.go new file mode 100644 index 0000000..07bba2b --- /dev/null +++ b/internal/cli/registry/config/config.go @@ -0,0 +1,141 @@ +package config + +import ( + "errors" 
+ "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v3" +) + +var ( + ErrURLRequired = errors.New("registry url is required") + ErrURLMissingInConfig = errors.New("registry url missing in config") +) + +type ExternalRegistryConfig struct { + URL string `yaml:"url"` + Username string `yaml:"username,omitempty"` + Password string `yaml:"password,omitempty"` +} + +type Env struct { + URL string + Username string + Password string +} + +func Path() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".mcp-runtime", "registry.yaml"), nil +} + +func Save(cfg *ExternalRegistryConfig) error { + if cfg == nil || cfg.URL == "" { + return ErrURLRequired + } + path, err := Path() + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { + return err + } + data, err := Marshal(cfg) + if err != nil { + return err + } + return os.WriteFile(path, data, 0o600) +} + +func Marshal(cfg *ExternalRegistryConfig) ([]byte, error) { + data := map[string]string{ + "url": cfg.URL, + } + if cfg.Username != "" { + data["username"] = cfg.Username + } + if cfg.Password != "" { + data["password"] = cfg.Password + } + return yaml.Marshal(data) +} + +func Load() (*ExternalRegistryConfig, error) { + path, err := Path() + if err != nil { + return nil, err + } + // #nosec G304 -- path is scoped to the user's config directory. + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("read registry config: %w", err) + } + var cfg ExternalRegistryConfig + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("unmarshal registry config: %w", err) + } + if cfg.URL == "" { + return nil, ErrURLMissingInConfig + } + return &cfg, nil +} + +// Resolve returns external registry config using precedence: flags > env > config file. 
+func Resolve(flagCfg *ExternalRegistryConfig, env Env) (*ExternalRegistryConfig, error) { + var cfg ExternalRegistryConfig + sourceFound := false + + if fileCfg, err := Load(); err == nil && fileCfg != nil { + cfg = *fileCfg + if cfg.URL != "" { + sourceFound = true + } + } else if err != nil { + return nil, err + } + + if env.URL != "" { + cfg.URL = env.URL + sourceFound = true + } + if env.Username != "" { + cfg.Username = env.Username + sourceFound = true + } + if env.Password != "" { + cfg.Password = env.Password + sourceFound = true + } + + if flagCfg != nil { + if flagCfg.URL != "" { + cfg.URL = flagCfg.URL + sourceFound = true + } + if flagCfg.Username != "" { + cfg.Username = flagCfg.Username + sourceFound = true + } + if flagCfg.Password != "" { + cfg.Password = flagCfg.Password + sourceFound = true + } + } + + if cfg.URL == "" { + if sourceFound { + return nil, ErrURLRequired + } + return nil, nil + } + + return &cfg, nil +} diff --git a/internal/cli/registry/config/config_test.go b/internal/cli/registry/config/config_test.go new file mode 100644 index 0000000..45e4833 --- /dev/null +++ b/internal/cli/registry/config/config_test.go @@ -0,0 +1,161 @@ +package config + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestPath(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + path, err := Path() + if err != nil { + t.Fatalf("Path returned error: %v", err) + } + expectedSuffix := filepath.Join(".mcp-runtime", "registry.yaml") + if !strings.HasSuffix(path, expectedSuffix) { + t.Fatalf("expected path to end with %q, got %q", expectedSuffix, path) + } + if !strings.HasPrefix(path, home) { + t.Fatalf("expected path to start with home %q, got %q", home, path) + } +} + +func TestSaveAndLoad(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + cfg := &ExternalRegistryConfig{ + URL: "registry.example.com", + Username: "user", + Password: "pass", + } + if err := Save(cfg); err != nil { + t.Fatalf("Save 
returned error: %v", err) + } + + loaded, err := Load() + if err != nil { + t.Fatalf("Load returned error: %v", err) + } + if loaded == nil { + t.Fatal("expected config to be loaded") + } + if loaded.URL != cfg.URL || loaded.Username != cfg.Username || loaded.Password != cfg.Password { + t.Fatalf("loaded config mismatch: %#v", loaded) + } +} + +func TestLoadMissing(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + cfg, err := Load() + if err != nil { + t.Fatalf("Load returned error: %v", err) + } + if cfg != nil { + t.Fatalf("expected nil config when file missing, got %#v", cfg) + } +} + +func TestLoadInvalid(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + path, err := Path() + if err != nil { + t.Fatalf("Path returned error: %v", err) + } + if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(path, []byte(":::invalid\n"), 0o600); err != nil { + t.Fatal(err) + } + + if _, err := Load(); err == nil { + t.Fatal("expected error for invalid yaml") + } +} + +func TestResolvePrecedence(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + if err := Save(&ExternalRegistryConfig{URL: "file.example.com"}); err != nil { + t.Fatalf("Save returned error: %v", err) + } + + t.Run("uses file config when no overrides", func(t *testing.T) { + cfg, err := Resolve(nil, Env{}) + if err != nil { + t.Fatalf("Resolve returned error: %v", err) + } + if cfg.URL != "file.example.com" { + t.Fatalf("expected file URL, got %#v", cfg) + } + }) + + t.Run("env overrides file", func(t *testing.T) { + cfg, err := Resolve(nil, Env{URL: "env.example.com", Username: "envuser"}) + if err != nil { + t.Fatalf("Resolve returned error: %v", err) + } + if cfg.URL != "env.example.com" || cfg.Username != "envuser" { + t.Fatalf("expected env overrides, got %#v", cfg) + } + }) + + t.Run("flags override env and file", func(t *testing.T) { + cfg, err := Resolve(&ExternalRegistryConfig{URL: "flag.example.com", 
Password: "flagpass"}, Env{ + URL: "env.example.com", + Username: "envuser", + Password: "envpass", + }) + if err != nil { + t.Fatalf("Resolve returned error: %v", err) + } + if cfg.URL != "flag.example.com" || cfg.Username != "envuser" || cfg.Password != "flagpass" { + t.Fatalf("expected flag/env precedence, got %#v", cfg) + } + }) +} + +func TestSaveErrors(t *testing.T) { + if err := Save(nil); !errors.Is(err, ErrURLRequired) { + t.Fatalf("expected ErrURLRequired for nil config, got %v", err) + } + if err := Save(&ExternalRegistryConfig{}); !errors.Is(err, ErrURLRequired) { + t.Fatalf("expected ErrURLRequired for empty URL, got %v", err) + } +} + +func TestResolveErrors(t *testing.T) { + t.Run("returns nil when no source found", func(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + cfg, err := Resolve(nil, Env{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg != nil { + t.Fatalf("expected nil config, got: %#v", cfg) + } + }) + + t.Run("returns error when source found but no url", func(t *testing.T) { + home := t.TempDir() + t.Setenv("HOME", home) + + _, err := Resolve(nil, Env{Username: "user"}) + if !errors.Is(err, ErrURLRequired) { + t.Fatalf("expected ErrURLRequired, got %v", err) + } + }) +} diff --git a/internal/cli/registry/defaults.go b/internal/cli/registry/defaults.go new file mode 100644 index 0000000..59241fb --- /dev/null +++ b/internal/cli/registry/defaults.go @@ -0,0 +1,36 @@ +package registry + +import ( + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/registry/resolve" +) + +func resolvePlatformRegistryURL(logger *zap.Logger) string { + return resolve.PlatformURL(logger, func(args []string) (resolve.OutputCommand, error) { + return core.DefaultKubectlClient().CommandArgs(args) + }, registryResolveConfig()) +} + +func ResolvePlatformRegistryURL(logger *zap.Logger) string { + return resolvePlatformRegistryURL(logger) +} + +func registryResolveConfig() resolve.Config { + 
return resolve.Config{ + RegistryEndpoint: core.DefaultCLIConfig.RegistryEndpoint, + DefaultRegistryEndpoint: core.DefaultRegistryEndpoint, + RegistryPort: core.DefaultCLIConfig.RegistryPort, + } +} + +func defaultGitTag() string { + return resolve.GitTag(func(name string, args []string) (resolve.OutputCommand, error) { + return core.ExecCommandWithValidators(name, args) + }) +} + +func DefaultGitTag() string { + return defaultGitTag() +} diff --git a/internal/cli/registry.go b/internal/cli/registry/manager.go similarity index 59% rename from internal/cli/registry.go rename to internal/cli/registry/manager.go index 3a18dae..7d03338 100644 --- a/internal/cli/registry.go +++ b/internal/cli/registry/manager.go @@ -1,19 +1,23 @@ -package cli +package registry // This file implements the "registry" command for managing the container registry. // It handles registry provisioning, status checks, image pushing, and registry information display. import ( "bytes" + "errors" "fmt" "os" - "path/filepath" "strconv" "strings" "time" "go.uber.org/zap" - "gopkg.in/yaml.v3" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" + "mcp-runtime/internal/cli/registry/config" + "mcp-runtime/internal/cli/registry/ref" ) const defaultRegistryImage = "registry:2.8.3" @@ -21,13 +25,13 @@ const registryImageOverrideEnv = "MCP_RUNTIME_REGISTRY_IMAGE_OVERRIDE" // RegistryManager handles registry operations with injected dependencies. type RegistryManager struct { - kubectl *KubectlClient - exec Executor + kubectl *core.KubectlClient + exec core.Executor logger *zap.Logger } // NewRegistryManager creates a RegistryManager with the given dependencies. 
-func NewRegistryManager(kubectl *KubectlClient, exec Executor, logger *zap.Logger) *RegistryManager { +func NewRegistryManager(kubectl *core.KubectlClient, exec core.Executor, logger *zap.Logger) *RegistryManager { return &RegistryManager{ kubectl: kubectl, exec: exec, @@ -37,12 +41,12 @@ func NewRegistryManager(kubectl *KubectlClient, exec Executor, logger *zap.Logge // DefaultRegistryManager returns a RegistryManager using default clients. func DefaultRegistryManager(logger *zap.Logger) *RegistryManager { - return NewRegistryManager(kubectlClient, execExecutor, logger) + return NewRegistryManager(core.DefaultKubectlClient(), core.DefaultExecutor(), logger) } // RunRegistryProvision contains the registry provision command flow for folder packages. -func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string) error { - flagCfg := &ExternalRegistryConfig{ +func RunRegistryProvision(mgr *RegistryManager, url, username, password, operatorImage string, dryRun bool) error { + flagCfg := &config.ExternalRegistryConfig{ URL: url, Username: username, Password: password, @@ -52,15 +56,26 @@ func RunRegistryProvision(mgr *RegistryManager, url, username, password, operato return err } if cfg == nil || cfg.URL == "" { - err := newWithSentinel(ErrRegistryURLRequired, "registry url is required (flag, env PROVISIONED_REGISTRY_URL, or config file)") - Error("Registry URL required") - logStructuredError(mgr.logger, err, "Registry URL required") + err := core.NewWithSentinel(core.ErrRegistryURLRequired, "registry url is required (flag, env PROVISIONED_REGISTRY_URL, or config file)") + core.Error("Registry URL required") + core.LogStructuredError(mgr.logger, err, "Registry URL required") return err } - if err := saveExternalRegistryConfig(cfg); err != nil { - wrappedErr := wrapWithSentinel(ErrSaveRegistryConfigFailed, err, fmt.Sprintf("failed to save registry config: %v", err)) - Error("Failed to save registry config") - 
logStructuredError(mgr.logger, wrappedErr, "Failed to save registry config") + if dryRun { + core.Info(fmt.Sprintf("[dry-run] would save registry config: url=%s username=%q", cfg.URL, cfg.Username)) + if cfg.Username != "" && cfg.Password != "" { + core.Info(fmt.Sprintf("[dry-run] would docker login to %s", cfg.URL)) + } + if operatorImage != "" { + core.Info(fmt.Sprintf("[dry-run] would build and push operator image: %s", operatorImage)) + } + core.Success("Dry-run complete; no changes made") + return nil + } + if err := config.Save(cfg); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrSaveRegistryConfigFailed, err, fmt.Sprintf("failed to save registry config: %v", err)) + core.Error("Failed to save registry config") + core.LogStructuredError(mgr.logger, wrappedErr, "Failed to save registry config") return wrappedErr } if cfg.Username != "" && cfg.Password != "" { @@ -72,25 +87,25 @@ func RunRegistryProvision(mgr *RegistryManager, url, username, password, operato if operatorImage != "" { mgr.logger.Info("Building and pushing operator image to external registry", zap.String("image", operatorImage)) if err := buildOperatorImage(operatorImage); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrBuildOperatorImageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrBuildOperatorImageFailed, err, fmt.Sprintf("failed to build operator image: %v", err), map[string]any{"image": operatorImage, "component": "registry"}, ) - Error("Failed to build operator image") - logStructuredError(mgr.logger, wrappedErr, "Failed to build operator image") + core.Error("Failed to build operator image") + core.LogStructuredError(mgr.logger, wrappedErr, "Failed to build operator image") return wrappedErr } if err := pushOperatorImage(operatorImage); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushOperatorImageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushOperatorImageFailed, err, fmt.Sprintf("failed to push 
operator image: %v", err), map[string]any{"image": operatorImage, "component": "registry"}, ) - Error("Failed to push operator image") - logStructuredError(mgr.logger, wrappedErr, "Failed to push operator image") + core.Error("Failed to push operator image") + core.LogStructuredError(mgr.logger, wrappedErr, "Failed to push operator image") return wrappedErr } } @@ -102,9 +117,9 @@ func RunRegistryProvision(mgr *RegistryManager, url, username, password, operato // RunRegistryPush contains the registry push command flow for folder packages. func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helperNamespace string) error { if image == "" { - err := newWithSentinel(ErrImageRequired, "image is required (use --image)") - Error("Image required") - logStructuredError(mgr.logger, err, "Image required") + err := core.NewWithSentinel(core.ErrImageRequired, "image is required (use --image)") + core.Error("Image required") + core.LogStructuredError(mgr.logger, err, "Image required") return err } targetRegistry := registryURL @@ -114,14 +129,14 @@ func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helpe } } if targetRegistry == "" { - targetRegistry = getPlatformRegistryURL(mgr.logger) + targetRegistry = resolvePlatformRegistryURL(mgr.logger) } - repo, tag := splitImage(image) + repo, tag := ref.SplitImage(image) if name != "" { repo = name } else { - repo = dropRegistryPrefix(repo) + repo = ref.DropRegistryPrefix(repo) } target := targetRegistry + "/" + repo if tag != "" { @@ -136,141 +151,42 @@ func RunRegistryPush(mgr *RegistryManager, image, registryURL, name, mode, helpe case "in-cluster": return mgr.PushInCluster(image, target, helperNamespace) default: - err := newWithSentinel(ErrUnknownRegistryMode, fmt.Sprintf("unknown mode %q (use direct|in-cluster)", mode)) - Error("Unknown registry mode") - logStructuredError(mgr.logger, err, "Unknown registry mode") - return err - } -} - -type ExternalRegistryConfig struct { - URL 
string `yaml:"url"` - Username string `yaml:"username,omitempty"` - Password string `yaml:"password,omitempty"` -} - -func registryConfigPath() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", err - } - return filepath.Join(home, ".mcp-runtime", "registry.yaml"), nil -} - -func saveExternalRegistryConfig(cfg *ExternalRegistryConfig) error { - if cfg == nil || cfg.URL == "" { - err := newWithSentinel(ErrRegistryURLRequired, "registry url is required") - Error("Registry URL required") - // Note: No logger available in this helper function - return err - } - path, err := registryConfigPath() - if err != nil { + err := core.NewWithSentinel(core.ErrUnknownRegistryMode, fmt.Sprintf("unknown mode %q (use direct|in-cluster)", mode)) + core.Error("Unknown registry mode") + core.LogStructuredError(mgr.logger, err, "Unknown registry mode") return err } - if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { - return err - } - data, err := marshalExternalRegistryConfig(cfg) - if err != nil { - return err - } - return os.WriteFile(path, data, 0o600) -} - -func marshalExternalRegistryConfig(cfg *ExternalRegistryConfig) ([]byte, error) { - data := map[string]string{ - "url": cfg.URL, - } - if cfg.Username != "" { - data["username"] = cfg.Username - } - if cfg.Password != "" { - data["password"] = cfg.Password - } - return yaml.Marshal(data) -} - -func loadExternalRegistryConfig() (*ExternalRegistryConfig, error) { - path, err := registryConfigPath() - if err != nil { - return nil, err - } - // #nosec G304 -- path is scoped to the user's config directory. 
- data, err := os.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, wrapWithSentinel(ErrReadRegistryConfigFailed, err, fmt.Sprintf("failed to read registry config: %v", err)) - } - var cfg ExternalRegistryConfig - if err := yaml.Unmarshal(data, &cfg); err != nil { - return nil, wrapWithSentinel(ErrUnmarshalRegistryConfigFailed, err, fmt.Sprintf("failed to unmarshal registry config: %v", err)) - } - if cfg.URL == "" { - return nil, newWithSentinel(ErrRegistryURLMissingInConfig, "registry url missing in config") - } - return &cfg, nil } // resolveExternalRegistryConfig returns the external registry config using precedence: // CLI flags > environment variables (PROVISIONED_REGISTRY_*) > config file. // Returns (nil, nil) if no source provides a URL. -func resolveExternalRegistryConfig(flagCfg *ExternalRegistryConfig) (*ExternalRegistryConfig, error) { - var cfg ExternalRegistryConfig - sourceFound := false - - if fileCfg, err := loadExternalRegistryConfig(); err == nil && fileCfg != nil { - cfg = *fileCfg - if cfg.URL != "" { - sourceFound = true - } - } else if err != nil { - // os.IsNotExist is already handled in loadExternalRegistryConfig - return nil, err +func resolveExternalRegistryConfig(flagCfg *config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + cfg, err := config.Resolve(flagCfg, registryConfigEnv()) + if err == nil { + return cfg, nil } - - // Load from CLIConfig (which reads from env vars at startup) - if DefaultCLIConfig.ProvisionedRegistryURL != "" { - cfg.URL = DefaultCLIConfig.ProvisionedRegistryURL - sourceFound = true - } - if DefaultCLIConfig.ProvisionedRegistryUsername != "" { - cfg.Username = DefaultCLIConfig.ProvisionedRegistryUsername - sourceFound = true + if errors.Is(err, config.ErrURLRequired) { + wrapped := core.NewWithSentinel(core.ErrRegistryURLRequired, "registry url is required") + core.Error("Registry URL required") + return nil, wrapped } - if 
DefaultCLIConfig.ProvisionedRegistryPassword != "" { - cfg.Password = DefaultCLIConfig.ProvisionedRegistryPassword - sourceFound = true + if errors.Is(err, config.ErrURLMissingInConfig) { + return nil, core.NewWithSentinel(core.ErrRegistryURLMissingInConfig, "registry url missing in config") } + return nil, err +} - if flagCfg != nil { - if flagCfg.URL != "" { - cfg.URL = flagCfg.URL - sourceFound = true - } - if flagCfg.Username != "" { - cfg.Username = flagCfg.Username - sourceFound = true - } - if flagCfg.Password != "" { - cfg.Password = flagCfg.Password - sourceFound = true - } - } +func ResolveExternalRegistryConfig(flagCfg *config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + return resolveExternalRegistryConfig(flagCfg) +} - if cfg.URL == "" { - if sourceFound { - err := newWithSentinel(ErrRegistryURLRequired, "registry url is required") - Error("Registry URL required") - // Note: No logger available in this helper function - return nil, err - } - return nil, nil +func registryConfigEnv() config.Env { + return config.Env{ + URL: core.DefaultCLIConfig.ProvisionedRegistryURL, + Username: core.DefaultCLIConfig.ProvisionedRegistryUsername, + Password: core.DefaultCLIConfig.ProvisionedRegistryPassword, } - - return &cfg, nil } func deployRegistry(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error { @@ -284,9 +200,9 @@ func deployRegistry(logger *zap.Logger, namespace string, port int, registryType case "docker": // continue default: - err := newWithSentinel(ErrUnsupportedRegistryType, fmt.Sprintf("unsupported registry type %q (supported: docker; harbor coming soon)", registryType)) - Error("Unsupported registry type") - logStructuredError(logger, err, "Unsupported registry type") + err := core.NewWithSentinel(core.ErrUnsupportedRegistryType, fmt.Sprintf("unsupported registry type %q (supported: docker; harbor coming soon)", registryType)) + core.Error("Unsupported registry type") + 
core.LogStructuredError(logger, err, "Unsupported registry type") return err } @@ -296,33 +212,33 @@ func deployRegistry(logger *zap.Logger, namespace string, port int, registryType // Ensure Namespace if err := ensureNamespace(namespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureNamespaceFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureNamespaceFailed, err, fmt.Sprintf("failed to ensure namespace: %v", err), map[string]any{"namespace": namespace, "component": "registry"}, ) - Error("Failed to ensure namespace") - logStructuredError(logger, wrappedErr, "Failed to ensure namespace") + core.Error("Failed to ensure namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to ensure namespace") return wrappedErr } // Apply registry manifests via kustomize with namespace override logger.Info("Applying registry manifests") overrideImage := strings.TrimSpace(os.Getenv(registryImageOverrideEnv)) - manifest, err := renderKustomizeManifest(kubectlClient, manifestPath) + manifest, err := renderKustomizeManifest(core.DefaultKubectlClient(), manifestPath) if err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrDeployRegistryFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeployRegistryFailed, err, fmt.Sprintf("failed to render registry manifest %q: %v", manifestPath, err), map[string]any{"namespace": namespace, "manifest_path": manifestPath, "registry_type": registryType, "component": "registry"}, ) - Error("Failed to render registry manifest") - logStructuredError(logger, wrappedErr, "Failed to render registry manifest") + core.Error("Failed to render registry manifest") + core.LogStructuredError(logger, wrappedErr, "Failed to render registry manifest") return wrappedErr } - manifest = rewriteRegistryHost(manifest, GetRegistryIngressHost()) - issuer := GetRegistryClusterIssuerName() + manifest = rewriteRegistryHost(manifest, core.GetRegistryIngressHost()) + issuer := 
core.GetRegistryClusterIssuerName() manifest = rewriteRegistryClusterIssuerAnnotation(manifest, issuer) if s := strings.TrimSpace(issuer); s != "" && !strings.Contains(manifest, "cert-manager.io/cluster-issuer: "+s) { logger.Warn("registry manifest does not show expected cert-manager.io/cluster-issuer; ingress TLS issuer may be wrong (check overlay for cert-manager.io/cluster-issuer: mcp-runtime-ca)", @@ -333,37 +249,37 @@ func deployRegistry(logger *zap.Logger, namespace string, port int, registryType updated := strings.Replace(manifest, "image: "+defaultRegistryImage, "image: "+overrideImage, 1) if updated == manifest { err := fmt.Errorf("registry image reference %q not found in manifest", defaultRegistryImage) - wrappedErr := wrapWithSentinelAndContext( - ErrDeployRegistryFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeployRegistryFailed, err, err.Error(), map[string]any{"namespace": namespace, "manifest_path": manifestPath, "registry_type": registryType, "component": "registry"}, ) - Error("Failed to rewrite registry image") - logStructuredError(logger, wrappedErr, "Failed to rewrite registry image") + core.Error("Failed to rewrite registry image") + core.LogStructuredError(logger, wrappedErr, "Failed to rewrite registry image") return wrappedErr } - if err := applyManifestContentWithNamespace(kubectlClient, updated, namespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrDeployRegistryFailed, + if err := kube.ApplyManifestContentWithNamespace(core.DefaultKubectlClient().CommandArgs, updated, namespace); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeployRegistryFailed, err, fmt.Sprintf("failed to deploy registry with image override %q: %v", overrideImage, err), map[string]any{"namespace": namespace, "manifest_path": manifestPath, "registry_type": registryType, "component": "registry"}, ) - Error("Failed to deploy registry") - logStructuredError(logger, wrappedErr, "Failed to deploy registry") + 
core.Error("Failed to deploy registry") + core.LogStructuredError(logger, wrappedErr, "Failed to deploy registry") return wrappedErr } } else { - if err := applyManifestContentWithNamespace(kubectlClient, manifest, namespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrDeployRegistryFailed, + if err := kube.ApplyManifestContentWithNamespace(core.DefaultKubectlClient().CommandArgs, manifest, namespace); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeployRegistryFailed, err, fmt.Sprintf("failed to deploy registry: %v", err), map[string]any{"namespace": namespace, "manifest_path": manifestPath, "registry_type": registryType, "component": "registry"}, ) - Error("Failed to deploy registry") - logStructuredError(logger, wrappedErr, "Failed to deploy registry") + core.Error("Failed to deploy registry") + core.LogStructuredError(logger, wrappedErr, "Failed to deploy registry") return wrappedErr } } @@ -383,6 +299,46 @@ func deployRegistry(logger *zap.Logger, namespace string, port int, registryType return nil } +func DeployRegistry(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error { + return deployRegistry(logger, namespace, port, registryType, registryStorageSize, manifestPath) +} + +func ensureNamespace(namespace string) error { + return kube.EnsureNamespace(core.DefaultKubectlClient().CommandArgs, namespace) +} + +func buildOperatorImage(image string) error { + cmd, err := core.ExecCommandWithValidators("make", []string{"-f", "Makefile.operator", "docker-build-operator-no-test", "IMG=" + image}) + if err != nil { + return err + } + cmd.SetStdout(os.Stdout) + cmd.SetStderr(os.Stderr) + return cmd.Run() +} + +func pushOperatorImage(image string) error { + cmd, err := core.ExecCommandWithValidators("docker", []string{"push", image}) + if err != nil { + return err + } + cmd.SetStdout(os.Stdout) + cmd.SetStderr(os.Stderr) + return cmd.Run() +} + +func 
waitForDeploymentAvailable(logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error { + if logger != nil { + logger.Info("Waiting for deployment rollout", zap.String("deployment", name), zap.String("namespace", namespace), zap.String("selector", selector)) + } + return core.DefaultKubectlClient().RunWithOutput([]string{ + "rollout", "status", + "deployment/" + name, + "-n", namespace, + "--timeout=" + timeout.String(), + }, os.Stdout, os.Stderr) +} + func rewriteRegistryHost(manifest, host string) string { host = strings.TrimSpace(host) if host == "" || host == "registry.local" { @@ -406,7 +362,7 @@ func rewriteRegistryClusterIssuerAnnotation(manifest, issuerName string) string return strings.ReplaceAll(manifest, oldLine, newLine) } -func renderKustomizeManifest(kubectl KubectlRunner, manifestPath string) (string, error) { +func renderKustomizeManifest(kubectl core.KubectlRunner, manifestPath string) (string, error) { renderCmd, err := kubectl.CommandArgs([]string{"kustomize", manifestPath}) if err != nil { return "", err @@ -427,7 +383,7 @@ func ensureRegistryStorageSize(logger *zap.Logger, namespace, registryStorageSiz } // #nosec G204 -- fixed kubectl command, namespace from internal config. 
- getCmd, err := kubectlClient.CommandArgs([]string{"get", "pvc", RegistryPVCName, "-n", namespace, "-o", "jsonpath={.spec.resources.requests.storage}"}) + getCmd, err := core.DefaultKubectlClient().CommandArgs([]string{"get", "pvc", core.RegistryPVCName, "-n", namespace, "-o", "jsonpath={.spec.resources.requests.storage}"}) if err != nil { return err } @@ -435,14 +391,14 @@ func ensureRegistryStorageSize(logger *zap.Logger, namespace, registryStorageSiz getCmd.SetStdout(&stdout) getCmd.SetStderr(&stderr) if err := getCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrReadRegistryStorageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrReadRegistryStorageFailed, err, fmt.Sprintf("failed to read current registry storage size: %v (%s)", err, strings.TrimSpace(stderr.String())), - map[string]any{"namespace": namespace, "pvc": RegistryPVCName, "component": "registry"}, + map[string]any{"namespace": namespace, "pvc": core.RegistryPVCName, "component": "registry"}, ) - Error("Failed to read registry storage size") - logStructuredError(logger, wrappedErr, "Failed to read registry storage size") + core.Error("Failed to read registry storage size") + core.LogStructuredError(logger, wrappedErr, "Failed to read registry storage size") return wrappedErr } @@ -455,15 +411,15 @@ func ensureRegistryStorageSize(logger *zap.Logger, namespace, registryStorageSiz logger.Info("Updating registry storage size", zap.String("from", currentSize), zap.String("to", storageSize)) patchPayload := fmt.Sprintf(`{"spec":{"resources":{"requests":{"storage":"%s"}}}}`, storageSize) // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
- if err := kubectlClient.RunWithOutput([]string{"patch", "pvc", RegistryPVCName, "-n", namespace, "-p", patchPayload}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrUpdateRegistryStorageFailed, + if err := core.DefaultKubectlClient().RunWithOutput([]string{"patch", "pvc", core.RegistryPVCName, "-n", namespace, "-p", patchPayload}, os.Stdout, os.Stderr); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrUpdateRegistryStorageFailed, err, fmt.Sprintf("failed to update registry storage size to %s: %v", storageSize, err), - map[string]any{"namespace": namespace, "pvc": RegistryPVCName, "storage_size": storageSize, "component": "registry"}, + map[string]any{"namespace": namespace, "pvc": core.RegistryPVCName, "storage_size": storageSize, "component": "registry"}, ) - Error("Failed to update registry storage size") - logStructuredError(logger, wrappedErr, "Failed to update registry storage size") + core.Error("Failed to update registry storage size") + core.LogStructuredError(logger, wrappedErr, "Failed to update registry storage size") return wrappedErr } @@ -474,30 +430,30 @@ func ensureRegistryStorageSize(logger *zap.Logger, namespace, registryStorageSiz func (m *RegistryManager) CheckRegistryStatus(namespace string) error { m.logger.Info("Checking registry status") - Header("Registry Status") - DefaultPrinter.Println() + core.Header("Registry Status") + core.DefaultPrinter.Println() // Get deployment status // #nosec G204 -- fixed kubectl command, namespace from internal config. 
- readyOut, err := m.kubectl.Output([]string{"get", "deployment", RegistryDeploymentName, "-n", namespace, "-o", "jsonpath={.status.readyReplicas}/{.spec.replicas}"}) + readyOut, err := m.kubectl.Output([]string{"get", "deployment", core.RegistryDeploymentName, "-n", namespace, "-o", "jsonpath={.status.readyReplicas}/{.spec.replicas}"}) if err != nil { - Error("Registry deployment not found") + core.Error("Registry deployment not found") return err } // Get service IP // #nosec G204 -- fixed kubectl command, namespace from internal config. - ipOut, _ := m.kubectl.Output([]string{"get", "service", RegistryServiceName, "-n", namespace, "-o", "jsonpath={.spec.clusterIP}:{.spec.ports[0].port}"}) + ipOut, _ := m.kubectl.Output([]string{"get", "service", core.RegistryServiceName, "-n", namespace, "-o", "jsonpath={.spec.clusterIP}:{.spec.ports[0].port}"}) // Get pod status // #nosec G204 -- fixed kubectl command, namespace from internal config. - podOut, _ := m.kubectl.Output([]string{"get", "pods", "-n", namespace, "-l", SelectorRegistry, "-o", "jsonpath={.items[0].status.phase}"}) + podOut, _ := m.kubectl.Output([]string{"get", "pods", "-n", namespace, "-l", core.SelectorRegistry, "-o", "jsonpath={.items[0].status.phase}"}) // Build status table replicas := strings.TrimSpace(string(readyOut)) - status := Green("Healthy") + status := core.Green("Healthy") if replicas == "" || strings.HasPrefix(replicas, "/") || strings.HasPrefix(replicas, "0/") { - status = Yellow("Starting") + status = core.Yellow("Starting") } tableData := [][]string{ @@ -508,7 +464,7 @@ func (m *RegistryManager) CheckRegistryStatus(namespace string) error { {"Pod Phase", strings.TrimSpace(string(podOut))}, } - TableBoxed(tableData) + core.TableBoxed(tableData) return nil } @@ -527,14 +483,14 @@ func (m *RegistryManager) LoginRegistry(registryURL, username, password string) cmd.SetStderr(os.Stderr) if err := cmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrRegistryLoginFailed, + 
wrappedErr := core.WrapWithSentinelAndContext( + core.ErrRegistryLoginFailed, err, fmt.Sprintf("failed to login to registry: %v", err), map[string]any{"registry_url": registryURL, "component": "registry"}, ) - Error("Failed to login to registry") - logStructuredError(m.logger, wrappedErr, "Failed to login to registry") + core.Error("Failed to login to registry") + core.LogStructuredError(m.logger, wrappedErr, "Failed to login to registry") return wrappedErr } @@ -544,29 +500,29 @@ func (m *RegistryManager) LoginRegistry(registryURL, username, password string) // ShowRegistryInfo displays registry connection information. func (m *RegistryManager) ShowRegistryInfo() error { - ns := NamespaceRegistry + ns := core.NamespaceRegistry // #nosec G204 -- fixed kubectl command with hardcoded namespace. - ingressHost, err := m.kubectl.Output([]string{"get", "ingress", RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.rules[0].host}"}) + ingressHost, err := m.kubectl.Output([]string{"get", "ingress", core.RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.rules[0].host}"}) if err != nil { m.logger.Debug("Failed to get registry ingress host", zap.Error(err)) } // Get registry service // #nosec G204 -- fixed kubectl command with hardcoded namespace. - clusterIP, err := m.kubectl.Output([]string{"get", "service", RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.clusterIP}"}) + clusterIP, err := m.kubectl.Output([]string{"get", "service", core.RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.clusterIP}"}) if err != nil { m.logger.Debug("Failed to get registry cluster IP", zap.Error(err)) } // #nosec G204 -- fixed kubectl command with hardcoded namespace. 
- port, err := m.kubectl.Output([]string{"get", "service", RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.ports[0].port}"}) + port, err := m.kubectl.Output([]string{"get", "service", core.RegistryServiceName, "-n", ns, "-o", "jsonpath={.spec.ports[0].port}"}) if err != nil { m.logger.Debug("Failed to get registry port", zap.Error(err)) } if len(clusterIP) > 0 && len(port) > 0 { - Header("Registry Information") - DefaultPrinter.Println() + core.Header("Registry Information") + core.DefaultPrinter.Println() ip := strings.TrimSpace(string(clusterIP)) p := strings.TrimSpace(string(port)) @@ -578,60 +534,30 @@ func (m *RegistryManager) ShowRegistryInfo() error { {"Internal URL", fmt.Sprintf("%s:%s", ip, p)}, {"Service DNS", fmt.Sprintf("registry.registry.svc.cluster.local:%s", p)}, } - TableBoxed(tableData) + core.TableBoxed(tableData) - DefaultPrinter.Println() - Section("Local Access") + core.DefaultPrinter.Println() + core.Section("Local Access") if host != "" { - Info("Option 1: Use the ingress host:") - DefaultPrinter.Printf(" %s\n", host) - DefaultPrinter.Println() - Info("If running without TLS, add the ingress host to your runtime's insecure registry list.") - DefaultPrinter.Println() + core.Info("Option 1: Use the ingress host:") + core.DefaultPrinter.Printf(" %s\n", host) + core.DefaultPrinter.Println() + core.Info("If running without TLS, add the ingress host to your runtime's insecure registry list.") + core.DefaultPrinter.Println() } - Info("Option 2: Add the internal service IP to /etc/docker/daemon.json:") - DefaultPrinter.Printf(" \"insecure-registries\": [\"%s:%s\"]\n", ip, p) - DefaultPrinter.Println() - Info("Option 3: Use port-forward:") - DefaultPrinter.Printf(" kubectl port-forward -n registry svc/registry %s:%s\n", p, p) - DefaultPrinter.Printf(" Then use: localhost:%s\n", p) + core.Info("Option 2: Add the internal service IP to /etc/docker/daemon.json:") + core.DefaultPrinter.Printf(" \"insecure-registries\": [\"%s:%s\"]\n", ip, p) + 
core.DefaultPrinter.Println() + core.Info("Option 3: Use port-forward:") + core.DefaultPrinter.Printf(" kubectl port-forward -n registry svc/registry %s:%s\n", p, p) + core.DefaultPrinter.Printf(" Then use: localhost:%s\n", p) } else { - Warn("Registry not found. Deploy it with: mcp-runtime setup") + core.Warn("Registry not found. Deploy it with: mcp-runtime setup") } return nil } -// loginRegistry is a package-level helper for backward compatibility. -func loginRegistry(logger *zap.Logger, registryURL, username, password string) error { - mgr := DefaultRegistryManager(logger) - return mgr.LoginRegistry(registryURL, username, password) -} - -func splitImage(image string) (string, string) { - tag := "" - parts := strings.Split(image, ":") - if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") { - tag = parts[len(parts)-1] - image = strings.Join(parts[:len(parts)-1], ":") - } - return image, tag -} - -// dropRegistryPrefix removes registry prefix from image repository name -// Example: "registry.mcpruntime.com/my-image" -> "my-image" -func dropRegistryPrefix(repo string) string { - parts := strings.Split(repo, "/") - if len(parts) <= 1 { - return repo - } - first := parts[0] - if strings.Contains(first, ".") || strings.Contains(first, ":") || first == "localhost" { - return strings.Join(parts[1:], "/") - } - return repo -} - // PushDirect pushes an image directly using docker. func (m *RegistryManager) PushDirect(source, target string) error { // #nosec G204 -- source/target are image references from internal push logic. 
@@ -642,14 +568,14 @@ func (m *RegistryManager) PushDirect(source, target string) error { tagCmd.SetStdout(os.Stdout) tagCmd.SetStderr(os.Stderr) if err := tagCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrTagImageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrTagImageFailed, err, fmt.Sprintf("failed to tag image: %v", err), map[string]any{"source": source, "target": target, "component": "registry"}, ) - Error("Failed to tag image") - logStructuredError(m.logger, wrappedErr, "Failed to tag image") + core.Error("Failed to tag image") + core.LogStructuredError(m.logger, wrappedErr, "Failed to tag image") return wrappedErr } @@ -661,18 +587,18 @@ func (m *RegistryManager) PushDirect(source, target string) error { pushCmd.SetStdout(os.Stdout) pushCmd.SetStderr(os.Stderr) if err := pushCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushImageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushImageFailed, err, fmt.Sprintf("failed to push image: %v", err), map[string]any{"target": target, "component": "registry"}, ) - Error("Failed to push image") - logStructuredError(m.logger, wrappedErr, "Failed to push image") + core.Error("Failed to push image") + core.LogStructuredError(m.logger, wrappedErr, "Failed to push image") return wrappedErr } - Success(fmt.Sprintf("Pushed %s", target)) + core.Success(fmt.Sprintf("Pushed %s", target)) return nil } @@ -682,30 +608,30 @@ func (m *RegistryManager) PushInCluster(source, target, helperNS string) error { // #nosec G204 -- helperNS from CLI flag, kubectl validates namespace names. 
if err := m.kubectl.Run([]string{"get", "namespace", helperNS}); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrHelperNamespaceNotFound, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrHelperNamespaceNotFound, err, fmt.Sprintf("helper namespace %q not found (create it or pass --namespace): %v", helperNS, err), map[string]any{"namespace": helperNS, "component": "registry"}, ) - Error("Helper namespace not found") - logStructuredError(m.logger, wrappedErr, "Helper namespace not found") + core.Error("Helper namespace not found") + core.LogStructuredError(m.logger, wrappedErr, "Helper namespace not found") return wrappedErr } // Ensure source is saved to tar; use CWD to satisfy kubectl path validation. tmpFile, err := os.CreateTemp(".", "mcp-img-*.tar") if err != nil { - wrappedErr := wrapWithSentinel(ErrCreateTempFileFailed, err, fmt.Sprintf("failed to create temp file: %v", err)) - Error("Failed to create temp file") - logStructuredError(m.logger, wrappedErr, "Failed to create temp file") + wrappedErr := core.WrapWithSentinel(core.ErrCreateTempFileFailed, err, fmt.Sprintf("failed to create temp file: %v", err)) + core.Error("Failed to create temp file") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create temp file") return wrappedErr } tmpPath := tmpFile.Name() if err := tmpFile.Close(); err != nil { - wrappedErr := wrapWithSentinel(ErrCloseTempFileFailed, err, fmt.Sprintf("failed to close temp file: %v", err)) - Error("Failed to close temp file") - logStructuredError(m.logger, wrappedErr, "Failed to close temp file") + wrappedErr := core.WrapWithSentinel(core.ErrCloseTempFileFailed, err, fmt.Sprintf("failed to close temp file: %v", err)) + core.Error("Failed to close temp file") + core.LogStructuredError(m.logger, wrappedErr, "Failed to close temp file") return wrappedErr } defer os.Remove(tmpPath) @@ -718,28 +644,28 @@ func (m *RegistryManager) PushInCluster(source, target, helperNS string) error { 
saveCmd.SetStdout(os.Stdout) saveCmd.SetStderr(os.Stderr) if err := saveCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrSaveImageFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrSaveImageFailed, err, fmt.Sprintf("failed to save image: %v", err), map[string]any{"source": source, "component": "registry"}, ) - Error("Failed to save image") - logStructuredError(m.logger, wrappedErr, "Failed to save image") + core.Error("Failed to save image") + core.LogStructuredError(m.logger, wrappedErr, "Failed to save image") return wrappedErr } // Start helper pod with skopeo // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - if err := m.kubectl.RunWithOutput([]string{"run", helperName, "-n", helperNS, "--image=" + GetSkopeoImage(), "--restart=Never", "--command", "--", "sh", "-c", "while true; do sleep 3600; done"}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrStartHelperPodFailed, + if err := m.kubectl.RunWithOutput([]string{"run", helperName, "-n", helperNS, "--image=" + core.GetSkopeoImage(), "--restart=Never", "--command", "--", "sh", "-c", "while true; do sleep 3600; done"}, os.Stdout, os.Stderr); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrStartHelperPodFailed, err, fmt.Sprintf("failed to start helper pod: %v", err), map[string]any{"pod": helperName, "namespace": helperNS, "component": "registry"}, ) - Error("Failed to start helper pod") - logStructuredError(m.logger, wrappedErr, "Failed to start helper pod") + core.Error("Failed to start helper pod") + core.LogStructuredError(m.logger, wrappedErr, "Failed to start helper pod") return wrappedErr } defer func() { @@ -748,7 +674,7 @@ func (m *RegistryManager) PushInCluster(source, target, helperNS string) error { }() // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
- timeout := GetHelperPodTimeout() + timeout := core.GetHelperPodTimeout() if timeout <= 0 { timeout = 60 * time.Second } @@ -756,28 +682,28 @@ func (m *RegistryManager) PushInCluster(source, target, helperNS string) error { // Best-effort diagnostics for common real-cluster failures (DiskPressure, taints, quotas, etc). _ = m.kubectl.RunWithOutput([]string{"describe", "pod", helperName, "-n", helperNS, "--request-timeout=10s"}, os.Stdout, os.Stderr) _ = m.kubectl.RunWithOutput([]string{"get", "events", "-n", helperNS, "--request-timeout=10s", "--field-selector", "involvedObject.name=" + helperName, "--sort-by=.lastTimestamp"}, os.Stdout, os.Stderr) - wrappedErr := wrapWithSentinelAndContext( - ErrHelperPodNotReady, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrHelperPodNotReady, err, fmt.Sprintf("helper pod not ready: %v", err), map[string]any{"pod": helperName, "namespace": helperNS, "timeout": timeout.String(), "component": "registry"}, ) - Error("Helper pod not ready") - logStructuredError(m.logger, wrappedErr, "Helper pod not ready") + core.Error("Helper pod not ready") + core.LogStructuredError(m.logger, wrappedErr, "Helper pod not ready") return wrappedErr } // Copy tar into pod // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
if err := m.kubectl.RunWithOutput([]string{"cp", tmpPath, fmt.Sprintf("%s/%s:%s", helperNS, helperName, "/tmp/image.tar")}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCopyImageToHelperFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCopyImageToHelperFailed, err, fmt.Sprintf("failed to copy image tar to helper pod: %v", err), map[string]any{"pod": helperName, "namespace": helperNS, "component": "registry"}, ) - Error("Failed to copy image to helper pod") - logStructuredError(m.logger, wrappedErr, "Failed to copy image to helper pod") + core.Error("Failed to copy image to helper pod") + core.LogStructuredError(m.logger, wrappedErr, "Failed to copy image to helper pod") return wrappedErr } @@ -792,18 +718,18 @@ func (m *RegistryManager) PushInCluster(source, target, helperNS string) error { // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. if err := m.kubectl.RunWithOutput([]string{"exec", "-n", helperNS, helperName, "--", "skopeo", "copy", "--dest-tls-verify=false", "docker-archive:/tmp/image.tar", "docker://" + pushTarget}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushImageFromHelperFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushImageFromHelperFailed, err, fmt.Sprintf("failed to push image from helper pod: %v", err), map[string]any{"pod": helperName, "namespace": helperNS, "target": target, "push_target": pushTarget, "component": "registry"}, ) - Error("Failed to push image from helper pod") - logStructuredError(m.logger, wrappedErr, "Failed to push image from helper pod") + core.Error("Failed to push image from helper pod") + core.LogStructuredError(m.logger, wrappedErr, "Failed to push image from helper pod") return wrappedErr } - Success(fmt.Sprintf("Pushed %s via in-cluster helper", target)) + core.Success(fmt.Sprintf("Pushed %s via in-cluster helper", target)) return nil } @@ -814,7 +740,7 @@ func (m 
*RegistryManager) PushInCluster(source, target, helperNS string) error { // DNS stores the image at the same repo path, leaving the original hostname (e.g. the // ingress host) usable for subsequent pulls. Targets outside the internal registry // (e.g. a user-provided external registry) are returned unchanged. -func rewriteTargetHostForInClusterPush(target string, kubectl *KubectlClient) string { +func rewriteTargetHostForInClusterPush(target string, kubectl *core.KubectlClient) string { slash := strings.Index(target, "/") if slash <= 0 { return target @@ -833,13 +759,13 @@ func rewriteTargetHostForInClusterPush(target string, kubectl *KubectlClient) st } internal := map[string]struct{}{} - if ep := strings.ToLower(strings.TrimSpace(GetRegistryEndpoint())); ep != "" { + if ep := strings.ToLower(strings.TrimSpace(core.GetRegistryEndpoint())); ep != "" { if idx := strings.LastIndex(ep, ":"); idx >= 0 { ep = ep[:idx] } internal[ep] = struct{}{} } - if ih := strings.ToLower(strings.TrimSpace(GetRegistryIngressHost())); ih != "" { + if ih := strings.ToLower(strings.TrimSpace(core.GetRegistryIngressHost())); ih != "" { internal[ih] = struct{}{} } @@ -847,10 +773,10 @@ func rewriteTargetHostForInClusterPush(target string, kubectl *KubectlClient) st return target } - port := GetRegistryPort() + port := core.GetRegistryPort() if kubectl != nil { // #nosec G204 -- fixed arguments, no user input. 
- if portCmd, err := kubectl.CommandArgs([]string{"get", "service", RegistryServiceName, "-n", NamespaceRegistry, "-o", "jsonpath={.spec.ports[0].port}"}); err == nil { + if portCmd, err := kubectl.CommandArgs([]string{"get", "service", core.RegistryServiceName, "-n", core.NamespaceRegistry, "-o", "jsonpath={.spec.ports[0].port}"}); err == nil { if out, err := portCmd.Output(); err == nil { if p := strings.TrimSpace(string(out)); p != "" { port = parsePortOrDefault(p, port) @@ -858,7 +784,7 @@ func rewriteTargetHostForInClusterPush(target string, kubectl *KubectlClient) st } } } - return fmt.Sprintf("%s.%s.svc.cluster.local:%d%s", RegistryServiceName, NamespaceRegistry, port, rest) + return fmt.Sprintf("%s.%s.svc.cluster.local:%d%s", core.RegistryServiceName, core.NamespaceRegistry, port, rest) } func parsePortOrDefault(s string, def int) int { diff --git a/internal/cli/registry_test.go b/internal/cli/registry/manager_test.go similarity index 57% rename from internal/cli/registry_test.go rename to internal/cli/registry/manager_test.go index eff0385..0f82c53 100644 --- a/internal/cli/registry_test.go +++ b/internal/cli/registry/manager_test.go @@ -1,4 +1,4 @@ -package cli +package registry import ( "bytes" @@ -10,14 +10,16 @@ import ( "testing" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" ) func TestRegistryManager_CheckRegistryStatus(t *testing.T) { t.Run("returns error when deployment not found", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultErr: errors.New("not found"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.CheckRegistryStatus("registry") @@ -27,10 +29,10 @@ func TestRegistryManager_CheckRegistryStatus(t *testing.T) { }) t.Run("calls kubectl get deployment", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("1"), } - kubectl := 
&KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) _ = mgr.CheckRegistryStatus("registry") @@ -55,8 +57,8 @@ func TestRegistryManager_CheckRegistryStatus(t *testing.T) { func TestRegistryManager_LoginRegistry(t *testing.T) { t.Run("calls docker login", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.LoginRegistry("localhost:5000", "user", "pass") @@ -87,8 +89,8 @@ func TestRegistryManager_LoginRegistry(t *testing.T) { func TestRegistryManager_PushDirect(t *testing.T) { t.Run("calls docker tag and push", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushDirect("source:tag", "target:tag") @@ -120,187 +122,11 @@ func TestRegistryManager_PushDirect(t *testing.T) { }) } -// Helper functions for image parsing -func TestSplitImage(t *testing.T) { - tests := []struct { - image string - want string - tag string - }{ - {"registry.example.com/example-mcp-server:latest", "registry.example.com/example-mcp-server", "latest"}, - {"registry.example.com/example-mcp-server", "registry.example.com/example-mcp-server", ""}, - {"example-mcp-server:latest", "example-mcp-server", "latest"}, - {"example-mcp-server", "example-mcp-server", ""}, - } - for _, test := range tests { - image, tag := splitImage(test.image) - if image != test.want { - t.Errorf("SplitImage(%q) = %q, want %q", test.image, image, test.want) - } - if tag != test.tag { - t.Errorf("SplitImage(%q) tag = %q, want %q", test.image, tag, test.tag) - } - } -} - -func TestDropRegistryPrefix(t *testing.T) { - tests := []struct { - repo string - 
want string - }{ - {"registry.example.com/example-mcp-server", "example-mcp-server"}, - {"example-mcp-server", "example-mcp-server"}, - {"localhost:5000/my-image", "my-image"}, - {"192.168.1.1:5000/my-image", "my-image"}, - {"my-image", "my-image"}, - } - for _, test := range tests { - repo := dropRegistryPrefix(test.repo) - if repo != test.want { - t.Errorf("dropRegistryPrefix(%q) = %q, want %q", test.repo, repo, test.want) - } - } -} - -func TestRegistryConfigPath(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - path, err := registryConfigPath() - if err != nil { - t.Fatalf("registryConfigPath returned error: %v", err) - } - expectedSuffix := filepath.Join(".mcp-runtime", "registry.yaml") - if !strings.HasSuffix(path, expectedSuffix) { - t.Fatalf("expected path to end with %q, got %q", expectedSuffix, path) - } - if !strings.HasPrefix(path, home) { - t.Fatalf("expected path to start with home %q, got %q", home, path) - } -} - -func TestSaveAndLoadExternalRegistryConfig(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - cfg := &ExternalRegistryConfig{ - URL: "registry.example.com", - Username: "user", - Password: "pass", - } - if err := saveExternalRegistryConfig(cfg); err != nil { - t.Fatalf("saveExternalRegistryConfig returned error: %v", err) - } - - loaded, err := loadExternalRegistryConfig() - if err != nil { - t.Fatalf("loadExternalRegistryConfig returned error: %v", err) - } - if loaded == nil { - t.Fatal("expected config to be loaded") - } - if loaded.URL != cfg.URL || loaded.Username != cfg.Username || loaded.Password != cfg.Password { - t.Fatalf("loaded config mismatch: %#v", loaded) - } -} - -func TestLoadExternalRegistryConfigMissing(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - cfg, err := loadExternalRegistryConfig() - if err != nil { - t.Fatalf("loadExternalRegistryConfig returned error: %v", err) - } - if cfg != nil { - t.Fatalf("expected nil config when file missing, got %#v", cfg) - } -} - 
-func TestLoadExternalRegistryConfigInvalid(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - path, err := registryConfigPath() - if err != nil { - t.Fatalf("registryConfigPath returned error: %v", err) - } - if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { - t.Fatalf("failed to create config dir: %v", err) - } - if err := os.WriteFile(path, []byte("username: user\n"), 0o600); err != nil { - t.Fatalf("failed to write config file: %v", err) - } - - if _, err := loadExternalRegistryConfig(); err == nil { - t.Fatal("expected error for config missing url") - } -} - -func TestResolveExternalRegistryConfigPrecedence(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = origConfig }) - - if err := saveExternalRegistryConfig(&ExternalRegistryConfig{URL: "file.example.com"}); err != nil { - t.Fatalf("failed to save file config: %v", err) - } - - t.Run("uses file config when no overrides", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{} - cfg, err := resolveExternalRegistryConfig(nil) - if err != nil { - t.Fatalf("resolveExternalRegistryConfig returned error: %v", err) - } - if cfg == nil || cfg.URL != "file.example.com" { - t.Fatalf("expected file config, got %#v", cfg) - } - }) - - t.Run("env config overrides file", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{ - ProvisionedRegistryURL: "env.example.com", - ProvisionedRegistryUsername: "env-user", - ProvisionedRegistryPassword: "env-pass", - } - cfg, err := resolveExternalRegistryConfig(nil) - if err != nil { - t.Fatalf("resolveExternalRegistryConfig returned error: %v", err) - } - if cfg == nil || cfg.URL != "env.example.com" || cfg.Username != "env-user" || cfg.Password != "env-pass" { - t.Fatalf("expected env config, got %#v", cfg) - } - }) - - t.Run("flag config overrides env", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{ - ProvisionedRegistryURL: "env.example.com", - 
ProvisionedRegistryUsername: "env-user", - ProvisionedRegistryPassword: "env-pass", - } - cfg, err := resolveExternalRegistryConfig(&ExternalRegistryConfig{ - URL: "flag.example.com", - Username: "flag-user", - Password: "flag-pass", - }) - if err != nil { - t.Fatalf("resolveExternalRegistryConfig returned error: %v", err) - } - if cfg == nil || cfg.URL != "flag.example.com" || cfg.Username != "flag-user" || cfg.Password != "flag-pass" { - t.Fatalf("expected flag config, got %#v", cfg) - } - }) -} - func TestEnsureRegistryStorageSize(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) t.Run("skips when size empty", func(t *testing.T) { - mock := &MockExecutor{} - kubectlClient = &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + swapDefaultKubectlForTest(t, mock) if err := ensureRegistryStorageSize(zap.NewNop(), "registry", ""); err != nil { t.Fatalf("unexpected error: %v", err) @@ -311,9 +137,9 @@ func TestEnsureRegistryStorageSize(t *testing.T) { }) t.Run("no-op when size matches", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "pvc") { cmd.RunFunc = func() error { if cmd.StdoutW != nil { @@ -325,7 +151,7 @@ func TestEnsureRegistryStorageSize(t *testing.T) { return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) if err := ensureRegistryStorageSize(zap.NewNop(), "registry", "10Gi"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -339,9 +165,9 @@ func TestEnsureRegistryStorageSize(t *testing.T) { }) t.Run("patches when size differs", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: 
spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "pvc") { cmd.RunFunc = func() error { if cmd.StdoutW != nil { @@ -353,7 +179,7 @@ func TestEnsureRegistryStorageSize(t *testing.T) { return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) if err := ensureRegistryStorageSize(zap.NewNop(), "registry", "10Gi"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -374,16 +200,16 @@ func TestEnsureRegistryStorageSize(t *testing.T) { }) t.Run("returns error when get pvc fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "pvc") { cmd.RunErr = errors.New("pvc not found") } return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) err := ensureRegistryStorageSize(zap.NewNop(), "registry", "10Gi") if err == nil { @@ -392,9 +218,9 @@ func TestEnsureRegistryStorageSize(t *testing.T) { }) t.Run("returns error when patch fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "pvc") { cmd.RunFunc = func() error { if cmd.StdoutW != nil { @@ -408,7 +234,7 @@ func TestEnsureRegistryStorageSize(t *testing.T) { return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) err := ensureRegistryStorageSize(zap.NewNop(), 
"registry", "10Gi") if err == nil { @@ -419,9 +245,9 @@ func TestEnsureRegistryStorageSize(t *testing.T) { func TestShowRegistryInfo(t *testing.T) { t.Run("displays registry info when available", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "jsonpath={.spec.clusterIP}") { cmd.OutputData = []byte("10.0.0.1") } else if contains(spec.Args, "jsonpath={.spec.ports[0].port}") { @@ -430,7 +256,7 @@ func TestShowRegistryInfo(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) var buf bytes.Buffer @@ -446,10 +272,10 @@ func TestShowRegistryInfo(t *testing.T) { }) t.Run("shows warning when registry not found", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultErr: errors.New("not found"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) var buf bytes.Buffer @@ -465,30 +291,9 @@ func TestShowRegistryInfo(t *testing.T) { }) } -func TestLoginRegistryWrapper(t *testing.T) { - origExecutor := execExecutor - origKubectl := kubectlClient - t.Cleanup(func() { - execExecutor = origExecutor - kubectlClient = origKubectl - }) - - mock := &MockExecutor{} - execExecutor = mock - kubectlClient = &KubectlClient{exec: mock, validators: nil} - - err := loginRegistry(zap.NewNop(), "localhost:5000", "user", "pass") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !mock.HasCommand("docker") { - t.Error("expected docker login to be called") - } -} - func TestLoginRegistryError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("login failed")} - 
kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("login failed")} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.LoginRegistry("localhost:5000", "user", "pass") @@ -499,8 +304,8 @@ func TestLoginRegistryError(t *testing.T) { func TestPushDirectErrors(t *testing.T) { t.Run("returns error when tag fails", func(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("tag failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("tag failed")} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushDirect("source:tag", "target:tag") @@ -511,17 +316,17 @@ func TestPushDirectErrors(t *testing.T) { t.Run("returns error when push fails", func(t *testing.T) { callCount := 0 - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { callCount++ - cmd := &MockCommand{Args: spec.Args} + cmd := &core.MockCommand{Args: spec.Args} if callCount > 1 { // First call is tag, second is push cmd.RunErr = errors.New("push failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushDirect("source:tag", "target:tag") @@ -533,8 +338,8 @@ func TestPushDirectErrors(t *testing.T) { func TestPushInCluster(t *testing.T) { t.Run("returns error when namespace not found", func(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("namespace not found")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("namespace not found")} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := 
mgr.PushInCluster("source:tag", "target:tag", "missing-ns") @@ -548,17 +353,17 @@ func TestPushInCluster(t *testing.T) { t.Run("returns error when docker save fails", func(t *testing.T) { callCount := 0 - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { callCount++ - cmd := &MockCommand{Args: spec.Args} + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "docker" && contains(spec.Args, "save") { cmd.RunErr = errors.New("save failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushInCluster("source:tag", "target:tag", "registry") @@ -568,16 +373,16 @@ func TestPushInCluster(t *testing.T) { }) t.Run("returns error when run helper pod fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "run") { cmd.RunErr = errors.New("run failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushInCluster("source:tag", "target:tag", "registry") @@ -587,16 +392,16 @@ func TestPushInCluster(t *testing.T) { }) t.Run("returns error when wait fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "wait") { cmd.RunErr = errors.New("wait failed") } return 
cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushInCluster("source:tag", "target:tag", "registry") @@ -606,16 +411,16 @@ func TestPushInCluster(t *testing.T) { }) t.Run("returns error when cp fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "cp") { cmd.RunErr = errors.New("cp failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushInCluster("source:tag", "target:tag", "registry") @@ -625,16 +430,16 @@ func TestPushInCluster(t *testing.T) { }) t.Run("returns error when exec skopeo fails", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "exec") { cmd.RunErr = errors.New("exec failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) err := mgr.PushInCluster("source:tag", "target:tag", "registry") @@ -645,16 +450,16 @@ func TestPushInCluster(t *testing.T) { t.Run("succeeds and cleans up helper pod", func(t *testing.T) { deleteCalled := false - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) 
*core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "delete") { deleteCalled = true } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) var buf bytes.Buffer @@ -670,18 +475,18 @@ func TestPushInCluster(t *testing.T) { }) t.Run("rewrites registry.local push target to service DNS", func(t *testing.T) { - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = origConfig }) - DefaultCLIConfig = &CLIConfig{ + origConfig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = origConfig }) + core.DefaultCLIConfig = &core.CLIConfig{ RegistryEndpoint: "registry.local", RegistryIngressHost: "registry.local", RegistryPort: 5000, } var skopeoTarget string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if spec.Name == "kubectl" && contains(spec.Args, "jsonpath={.spec.ports[0].port}") { cmd.OutputData = []byte("5000") } @@ -696,7 +501,7 @@ func TestPushInCluster(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) var buf bytes.Buffer @@ -713,8 +518,8 @@ func TestPushInCluster(t *testing.T) { } func TestRewriteTargetHostForInClusterPush(t *testing.T) { - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = origConfig }) + origConfig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = origConfig }) cases := []struct { name string @@ -762,7 +567,7 @@ func TestRewriteTargetHostForInClusterPush(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{ + 
core.DefaultCLIConfig = &core.CLIConfig{ RegistryEndpoint: tc.endpoint, RegistryIngressHost: tc.ingress, RegistryPort: 5000, @@ -779,90 +584,13 @@ func TestRewriteTargetHostForInClusterPush(t *testing.T) { } } -func TestSaveExternalRegistryConfigErrors(t *testing.T) { - t.Run("rejects nil config", func(t *testing.T) { - err := saveExternalRegistryConfig(nil) - if err == nil { - t.Fatal("expected error for nil config") - } - }) - - t.Run("rejects empty url", func(t *testing.T) { - err := saveExternalRegistryConfig(&ExternalRegistryConfig{}) - if err == nil { - t.Fatal("expected error for empty url") - } - }) -} - -func TestLoadExternalRegistryConfigYAMLError(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - path, err := registryConfigPath() - if err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(filepath.Dir(path), 0o750); err != nil { - t.Fatal(err) - } - // Write invalid YAML - if err := os.WriteFile(path, []byte(":::invalid\n"), 0o600); err != nil { - t.Fatal(err) - } - - _, err = loadExternalRegistryConfig() - if err == nil { - t.Fatal("expected error for invalid yaml") - } -} - -func TestResolveExternalRegistryConfigErrors(t *testing.T) { - t.Run("returns nil when no source found", func(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = origConfig }) - DefaultCLIConfig = &CLIConfig{} - - cfg, err := resolveExternalRegistryConfig(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if cfg != nil { - t.Fatalf("expected nil config, got: %#v", cfg) - } - }) - - t.Run("returns error when source found but no url", func(t *testing.T) { - home := t.TempDir() - t.Setenv("HOME", home) - - origConfig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = origConfig }) - DefaultCLIConfig = &CLIConfig{ - ProvisionedRegistryUsername: "user", // Has username but no url - } - - _, err := resolveExternalRegistryConfig(nil) - if err == nil { - 
t.Fatal("expected error when source found but url missing") - } - }) -} - func TestDeployRegistry(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) t.Run("defaults to docker registry type", func(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "deployment") && contains(spec.Args, "jsonpath={.status.availableReplicas}") { @@ -871,7 +599,7 @@ func TestDeployRegistry(t *testing.T) { return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) // Create temp manifest dir tmpDir := t.TempDir() @@ -890,17 +618,15 @@ func TestDeployRegistry(t *testing.T) { }) t.Run("applies image override via rendered manifest", func(t *testing.T) { - origKubectl := kubectlClient - origConfig := DefaultCLIConfig - t.Cleanup(func() { kubectlClient = origKubectl }) - t.Cleanup(func() { DefaultCLIConfig = origConfig }) + origConfig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = origConfig }) t.Setenv(registryImageOverrideEnv, "docker.io/library/mcp-runtime-registry:latest") - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.prod.example.com"} + core.DefaultCLIConfig = &core.CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.prod.example.com"} - var applyCmd *MockCommand - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + var applyCmd *core.MockCommand + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} 
switch { case contains(spec.Args, "kustomize"): cmd.OutputData = []byte(`apiVersion: apps/v1 @@ -930,7 +656,7 @@ spec: return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) err := deployRegistry(zap.NewNop(), "registry", 5000, "docker", "", "config/registry") if err != nil { @@ -960,11 +686,9 @@ spec: }) t.Run("rejects unsupported registry type", func(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{} - kubectlClient = &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + swapDefaultKubectlForTest(t, mock) err := deployRegistry(zap.NewNop(), "registry", 5000, "harbor", "", "") if err == nil { @@ -976,11 +700,9 @@ spec: }) t.Run("returns error when ensure namespace fails", func(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{DefaultRunErr: errors.New("namespace failed")} - kubectlClient = &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("namespace failed")} + swapDefaultKubectlForTest(t, mock) err := deployRegistry(zap.NewNop(), "registry", 5000, "docker", "", "config/registry") if err == nil { @@ -989,12 +711,10 @@ spec: }) t.Run("returns error when apply fails", func(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} switch { case contains(spec.Args, "kustomize"): cmd.OutputData = []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: registry\n") @@ -1010,7 +730,7 @@ spec: return cmd }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlForTest(t, mock) err 
:= deployRegistry(zap.NewNop(), "registry", 5000, "docker", "", "config/registry") if err == nil { @@ -1020,16 +740,16 @@ spec: } func TestCheckRegistryStatusStarting(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "deployment") { cmd.OutputData = []byte("0/1") // Starting state } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) var buf bytes.Buffer @@ -1042,50 +762,31 @@ func TestCheckRegistryStatusStarting(t *testing.T) { // Should show "Starting" status for 0/1 replicas } -func TestDropRegistryPrefixMoreCases(t *testing.T) { - tests := []struct { - repo string - want string - }{ - {"user/repo", "user/repo"}, // user/repo should NOT be stripped - {"gcr.io/project/image", "project/image"}, - {"docker.io/library/nginx", "library/nginx"}, - } - for _, test := range tests { - repo := dropRegistryPrefix(test.repo) - if repo != test.want { - t.Errorf("dropRegistryPrefix(%q) = %q, want %q", test.repo, repo, test.want) - } - } -} - func TestRegistryProvisionCmdWithOperatorImage(t *testing.T) { home := t.TempDir() t.Setenv("HOME", home) - origConfig := DefaultCLIConfig - origExecutor := execExecutor + origConfig := core.DefaultCLIConfig t.Cleanup(func() { - DefaultCLIConfig = origConfig - execExecutor = origExecutor + core.DefaultCLIConfig = origConfig }) - DefaultCLIConfig = &CLIConfig{} + core.DefaultCLIConfig = &core.CLIConfig{} // Mock executor that returns error for make command (build step) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := 
&core.MockCommand{Args: spec.Args} if spec.Name == "make" { cmd.RunErr = errors.New("make build failed") } return cmd }, } - execExecutor = mock - kubectl := &KubectlClient{exec: mock, validators: nil} + t.Cleanup(core.SwapExecExecutor(mock)) + kubectl := core.NewTestKubectlClient(mock) mgr := NewRegistryManager(kubectl, mock, zap.NewNop()) - err := RunRegistryProvision(mgr, "registry.example.com", "", "", "registry.example.com/operator:latest") + err := RunRegistryProvision(mgr, "registry.example.com", "", "", "registry.example.com/operator:latest", false) if err == nil { t.Fatal("expected error when build fails") } @@ -1093,3 +794,29 @@ func TestRegistryProvisionCmdWithOperatorImage(t *testing.T) { t.Fatalf("expected build error, got: %v", err) } } + +func contains(slice []string, val string) bool { + for _, item := range slice { + if item == val { + return true + } + } + return false +} + +func swapDefaultKubectlForTest(t *testing.T, exec core.Executor) { + t.Helper() + t.Cleanup(core.SwapDefaultKubectlClient(core.NewTestKubectlClient(exec))) +} + +func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { + t.Helper() + prevWriter := core.DefaultPrinter.Writer + prevQuiet := core.DefaultPrinter.Quiet + core.DefaultPrinter.Writer = w + core.DefaultPrinter.Quiet = false + t.Cleanup(func() { + core.DefaultPrinter.Writer = prevWriter + core.DefaultPrinter.Quiet = prevQuiet + }) +} diff --git a/internal/cli/registry/ref/ref.go b/internal/cli/registry/ref/ref.go new file mode 100644 index 0000000..ad61298 --- /dev/null +++ b/internal/cli/registry/ref/ref.go @@ -0,0 +1,27 @@ +package ref + +import "strings" + +// SplitImage returns the repository/name portion and optional tag for an image reference. 
+func SplitImage(image string) (string, string) { + tag := "" + parts := strings.Split(image, ":") + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") { + tag = parts[len(parts)-1] + image = strings.Join(parts[:len(parts)-1], ":") + } + return image, tag +} + +// DropRegistryPrefix removes an explicit registry host from an image repository. +func DropRegistryPrefix(repo string) string { + parts := strings.Split(repo, "/") + if len(parts) <= 1 { + return repo + } + first := parts[0] + if strings.Contains(first, ".") || strings.Contains(first, ":") || first == "localhost" { + return strings.Join(parts[1:], "/") + } + return repo +} diff --git a/internal/cli/registry/ref/ref_test.go b/internal/cli/registry/ref/ref_test.go new file mode 100644 index 0000000..8b7ce23 --- /dev/null +++ b/internal/cli/registry/ref/ref_test.go @@ -0,0 +1,47 @@ +package ref + +import "testing" + +func TestSplitImage(t *testing.T) { + tests := []struct { + image string + want string + tag string + }{ + {"registry.example.com/example-mcp-server:latest", "registry.example.com/example-mcp-server", "latest"}, + {"registry.example.com/example-mcp-server", "registry.example.com/example-mcp-server", ""}, + {"example-mcp-server:latest", "example-mcp-server", "latest"}, + {"example-mcp-server", "example-mcp-server", ""}, + } + for _, test := range tests { + image, tag := SplitImage(test.image) + if image != test.want { + t.Errorf("SplitImage(%q) = %q, want %q", test.image, image, test.want) + } + if tag != test.tag { + t.Errorf("SplitImage(%q) tag = %q, want %q", test.image, tag, test.tag) + } + } +} + +func TestDropRegistryPrefix(t *testing.T) { + tests := []struct { + repo string + want string + }{ + {"registry.example.com/example-mcp-server", "example-mcp-server"}, + {"example-mcp-server", "example-mcp-server"}, + {"localhost:5000/my-image", "my-image"}, + {"192.168.1.1:5000/my-image", "my-image"}, + {"my-image", "my-image"}, + {"user/repo", "user/repo"}, + {"gcr.io/project/image", 
"project/image"}, + {"docker.io/library/nginx", "library/nginx"}, + } + for _, test := range tests { + repo := DropRegistryPrefix(test.repo) + if repo != test.want { + t.Errorf("DropRegistryPrefix(%q) = %q, want %q", test.repo, repo, test.want) + } + } +} diff --git a/internal/cli/registry/registry.go b/internal/cli/registry/registry.go index d90b9a1..5cc6d78 100644 --- a/internal/cli/registry/registry.go +++ b/internal/cli/registry/registry.go @@ -4,16 +4,16 @@ package registry import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) // New returns the registry command. -func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(runtime.RegistryManager()) +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(NewRegistryManager(runtime.KubectlClient(), runtime.Executor(), runtime.Logger())) } // NewWithManager returns the registry command using the provided manager. -func NewWithManager(mgr *cli.RegistryManager) *cobra.Command { +func NewWithManager(mgr *RegistryManager) *cobra.Command { cmd := &cobra.Command{ Use: "registry", Short: "Manage container registry", @@ -29,7 +29,7 @@ func NewWithManager(mgr *cli.RegistryManager) *cobra.Command { return mgr.CheckRegistryStatus(namespace) }, } - statusCmd.Flags().StringVar(&namespace, "namespace", cli.NamespaceRegistry, "Registry namespace") + statusCmd.Flags().StringVar(&namespace, "namespace", core.NamespaceRegistry, "Registry namespace") infoCmd := &cobra.Command{ Use: "info", @@ -44,18 +44,20 @@ func NewWithManager(mgr *cli.RegistryManager) *cobra.Command { var username string var password string var operatorImage string + var provisionDryRun bool provisionCmd := &cobra.Command{ Use: "provision", Short: "Configure an external registry", Long: "Configure an external registry to be used for operator/runtime images", RunE: func(cmd *cobra.Command, args []string) error { - return cli.RunRegistryProvision(mgr, url, username, password, 
operatorImage) + return RunRegistryProvision(mgr, url, username, password, operatorImage, provisionDryRun) }, } provisionCmd.Flags().StringVar(&url, "url", "", "External registry URL (e.g., registry.mcpruntime.com)") provisionCmd.Flags().StringVar(&username, "username", "", "Registry username (optional)") provisionCmd.Flags().StringVar(&password, "password", "", "Registry password (optional)") provisionCmd.Flags().StringVar(&operatorImage, "operator-image", "", "Optional: build and push operator image to this external registry (e.g., /mcp-runtime-operator:latest)") + provisionCmd.Flags().BoolVar(&provisionDryRun, "dry-run", false, "Print what would be done without saving config, logging in, or pushing images") var image string var registryURL string @@ -66,14 +68,14 @@ func NewWithManager(mgr *cli.RegistryManager) *cobra.Command { Use: "push", Short: "Retag and push an image to the platform or provisioned registry", RunE: func(cmd *cobra.Command, args []string) error { - return cli.RunRegistryPush(mgr, image, registryURL, name, mode, helperNamespace) + return RunRegistryPush(mgr, image, registryURL, name, mode, helperNamespace) }, } pushCmd.Flags().StringVar(&image, "image", "", "Local image to push (required)") pushCmd.Flags().StringVar(®istryURL, "registry", "", "Target registry (defaults to provisioned or internal)") pushCmd.Flags().StringVar(&name, "name", "", "Override target repo/name (default: source name without registry)") pushCmd.Flags().StringVar(&mode, "mode", "in-cluster", "Push mode: in-cluster (default, uses skopeo helper) or direct (docker push)") - pushCmd.Flags().StringVar(&helperNamespace, "namespace", cli.NamespaceRegistry, "Namespace to run the in-cluster helper pod") + pushCmd.Flags().StringVar(&helperNamespace, "namespace", core.NamespaceRegistry, "Namespace to run the in-cluster helper pod") cmd.AddCommand(statusCmd, infoCmd, provisionCmd, pushCmd) return cmd diff --git a/internal/cli/registry/resolve/resolver.go 
b/internal/cli/registry/resolve/resolver.go new file mode 100644 index 0000000..c9b2b27 --- /dev/null +++ b/internal/cli/registry/resolve/resolver.go @@ -0,0 +1,112 @@ +package resolve + +import ( + "fmt" + "os" + "strings" + + "go.uber.org/zap" +) + +const registryServiceDNS = "registry.registry.svc.cluster.local" + +type Config struct { + RegistryEndpoint string + DefaultRegistryEndpoint string + RegistryPort int +} + +type OutputCommand interface { + Output() ([]byte, error) +} + +type KubectlCommand func(args []string) (OutputCommand, error) +type CommandFactory func(name string, args []string) (OutputCommand, error) + +// PlatformURL resolves the registry host:port used for image names. +func PlatformURL(logger *zap.Logger, kubectl KubectlCommand, cfg Config) string { + if endpoint := strings.TrimSpace(cfg.RegistryEndpoint); endpoint != "" && + (endpoint != cfg.DefaultRegistryEndpoint || registryEndpointExplicitlyConfigured()) { + return endpoint + } + + if os.Getenv("MCP_RUNTIME_TEST_MODE") == "1" { + portValue, portErr := servicePort(kubectl) + if portErr == nil && portValue != "" { + return fmt.Sprintf("%s:%s", registryServiceDNS, portValue) + } + if logger != nil { + logger.Warn("Could not detect registry service port in test mode, using default service DNS:port") + } + return fmt.Sprintf("%s:%d", registryServiceDNS, cfg.RegistryPort) + } + + ip, ipErr := serviceClusterIP(kubectl) + portValue, portErr := servicePort(kubectl) + if ipErr == nil && ip != "" && portErr == nil && portValue != "" { + return fmt.Sprintf("%s:%s", ip, portValue) + } + if portErr == nil && portValue != "" { + return fmt.Sprintf("%s:%s", registryServiceDNS, portValue) + } + + if logger != nil { + logger.Warn("Could not detect registry ingress host or service port, using default service DNS:port") + } + return fmt.Sprintf("%s:%d", registryServiceDNS, cfg.RegistryPort) +} + +// GitTag returns a short git SHA when available, otherwise "latest". 
+func GitTag(command CommandFactory) string { + if command == nil { + return "latest" + } + cmd, err := command("git", []string{"rev-parse", "--short", "HEAD"}) + if err == nil { + sha, execErr := cmd.Output() + if execErr == nil && len(sha) > 0 { + return strings.TrimSpace(string(sha)) + } + } + return "latest" +} + +func registryEndpointExplicitlyConfigured() bool { + if value, ok := os.LookupEnv("MCP_REGISTRY_ENDPOINT"); ok && strings.TrimSpace(value) != "" { + return true + } + if value, ok := os.LookupEnv("MCP_REGISTRY_HOST"); ok && strings.TrimSpace(value) != "" { + return true + } + return false +} + +func serviceClusterIP(kubectl KubectlCommand) (string, error) { + if kubectl == nil { + return "", fmt.Errorf("kubectl is nil") + } + cmd, err := kubectl([]string{"get", "service", "registry", "-n", "registry", "-o", "jsonpath={.spec.clusterIP}"}) + if err != nil { + return "", err + } + out, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +func servicePort(kubectl KubectlCommand) (string, error) { + if kubectl == nil { + return "", fmt.Errorf("kubectl is nil") + } + cmd, err := kubectl([]string{"get", "service", "registry", "-n", "registry", "-o", "jsonpath={.spec.ports[0].port}"}) + if err != nil { + return "", err + } + out, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} diff --git a/internal/cli/registry/resolve/resolver_test.go b/internal/cli/registry/resolve/resolver_test.go new file mode 100644 index 0000000..22a304b --- /dev/null +++ b/internal/cli/registry/resolve/resolver_test.go @@ -0,0 +1,177 @@ +package resolve + +import ( + "errors" + "strings" + "testing" + + "go.uber.org/zap" +) + +func TestGitTag(t *testing.T) { + t.Run("returns_latest_when_git_fails", func(t *testing.T) { + tag := GitTag(func(name string, args []string) (OutputCommand, error) { + return fakeCommand{outputErr: errors.New("git not found")}, nil + }) + if tag 
!= "latest" { + t.Errorf("expected latest when git fails, got %q", tag) + } + }) + + t.Run("returns_latest_when_output_empty", func(t *testing.T) { + tag := GitTag(func(name string, args []string) (OutputCommand, error) { + return fakeCommand{output: []byte("")}, nil + }) + if tag != "latest" { + t.Errorf("expected latest when output empty, got %q", tag) + } + }) + + t.Run("returns_trimmed_sha", func(t *testing.T) { + tag := GitTag(func(name string, args []string) (OutputCommand, error) { + return fakeCommand{output: []byte(" abc1234 \n")}, nil + }) + if tag != "abc1234" { + t.Errorf("expected abc1234, got %q", tag) + } + }) +} + +func TestPlatformURL(t *testing.T) { + logger := zap.NewNop() + cfg := Config{RegistryEndpoint: "", DefaultRegistryEndpoint: "registry.local", RegistryPort: 5000} + + t.Run("returns_configured_registry_endpoint_when_available", func(t *testing.T) { + url := PlatformURL(logger, (&fakeKubectl{}).commandArgs, Config{ + RegistryEndpoint: "10.43.39.164:5000", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5000, + }) + if url != "10.43.39.164:5000" { + t.Errorf("expected configured registry endpoint, got %q", url) + } + }) + + t.Run("non_test_uses_service_ip_for_implicit_default_endpoint", func(t *testing.T) { + t.Setenv("MCP_RUNTIME_TEST_MODE", "") + url := PlatformURL(logger, (&fakeKubectl{clusterIP: "10.96.201.51", port: "5000"}).commandArgs, Config{ + RegistryEndpoint: "registry.local", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5000, + }) + if url != "10.96.201.51:5000" { + t.Errorf("expected service IP registry URL, got %q", url) + } + }) + + t.Run("test_mode_prefers_service_dns_over_cluster_ip", func(t *testing.T) { + t.Setenv("MCP_RUNTIME_TEST_MODE", "1") + kubectl := &fakeKubectl{clusterIP: "10.96.201.51", port: "5000"} + url := PlatformURL(logger, kubectl.commandArgs, Config{ + RegistryEndpoint: "registry.local", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5000, + }) + if url != 
"registry.registry.svc.cluster.local:5000" { + t.Errorf("expected service DNS registry URL in test mode, got %q", url) + } + if kubectl.clusterIPQueried { + t.Error("expected test mode to avoid ClusterIP lookup") + } + }) + + t.Run("respects_explicit_default_endpoint_override", func(t *testing.T) { + t.Setenv("MCP_REGISTRY_ENDPOINT", "registry.local") + url := PlatformURL(logger, (&fakeKubectl{}).commandArgs, Config{ + RegistryEndpoint: "registry.local", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5000, + }) + if url != "registry.local" { + t.Errorf("expected explicitly configured endpoint, got %q", url) + } + }) + + t.Run("test_mode_respects_explicit_registry_host", func(t *testing.T) { + t.Setenv("MCP_RUNTIME_TEST_MODE", "1") + t.Setenv("MCP_REGISTRY_HOST", "registry.example.com:5000") + url := PlatformURL(logger, (&fakeKubectl{}).commandArgs, Config{ + RegistryEndpoint: "registry.example.com:5000", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5000, + }) + if url != "registry.example.com:5000" { + t.Errorf("expected explicit registry host, got %q", url) + } + }) + + t.Run("falls_back_to_service_dns_when_cluster_ip_missing", func(t *testing.T) { + url := PlatformURL(logger, (&fakeKubectl{clusterIPErr: errors.New("kubectl error"), port: "5000"}).commandArgs, cfg) + if url != "registry.registry.svc.cluster.local:5000" { + t.Errorf("expected service DNS registry URL, got %q", url) + } + }) + + t.Run("returns_default_when_port_command_fails", func(t *testing.T) { + url := PlatformURL(logger, (&fakeKubectl{clusterIP: "10.96.201.51", portErr: errors.New("kubectl error")}).commandArgs, cfg) + if !strings.Contains(url, "registry.registry.svc.cluster.local") { + t.Errorf("expected default registry URL, got %q", url) + } + }) + + t.Run("test_mode_returns_default_when_port_command_fails", func(t *testing.T) { + t.Setenv("MCP_RUNTIME_TEST_MODE", "1") + url := PlatformURL(logger, (&fakeKubectl{portErr: errors.New("kubectl error")}).commandArgs, 
Config{ + RegistryEndpoint: "", + DefaultRegistryEndpoint: "registry.local", + RegistryPort: 5001, + }) + if url != "registry.registry.svc.cluster.local:5001" { + t.Errorf("expected default service DNS registry URL, got %q", url) + } + }) + + t.Run("returns_service_dns_when_ip_empty", func(t *testing.T) { + url := PlatformURL(logger, (&fakeKubectl{clusterIP: "", port: "5000"}).commandArgs, cfg) + if !strings.Contains(url, "registry.registry.svc.cluster.local") { + t.Errorf("expected default registry URL, got %q", url) + } + }) + + t.Run("returns_default_when_port_empty", func(t *testing.T) { + url := PlatformURL(logger, (&fakeKubectl{clusterIP: "10.96.201.51", port: ""}).commandArgs, cfg) + if !strings.Contains(url, "registry.registry.svc.cluster.local") { + t.Errorf("expected default registry URL, got %q", url) + } + }) +} + +type fakeCommand struct { + output []byte + outputErr error +} + +func (c fakeCommand) Output() ([]byte, error) { + return c.output, c.outputErr +} + +type fakeKubectl struct { + clusterIP string + clusterIPErr error + port string + portErr error + clusterIPQueried bool +} + +func (k *fakeKubectl) commandArgs(args []string) (OutputCommand, error) { + for _, arg := range args { + switch arg { + case "jsonpath={.spec.clusterIP}": + k.clusterIPQueried = true + return fakeCommand{output: []byte(k.clusterIP), outputErr: k.clusterIPErr}, nil + case "jsonpath={.spec.ports[0].port}": + return fakeCommand{output: []byte(k.port), outputErr: k.portErr}, nil + } + } + return fakeCommand{}, nil +} diff --git a/internal/cli/resource_helpers.go b/internal/cli/resource_helpers.go deleted file mode 100644 index 2297630..0000000 --- a/internal/cli/resource_helpers.go +++ /dev/null @@ -1,166 +0,0 @@ -package cli - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - - "gopkg.in/yaml.v3" -) - -func resolveRegularFilePath(file string) (string, error) { - absPath, err := filepath.Abs(file) - if err != nil { - return "", 
wrapWithSentinel(ErrInvalidFilePath, err, fmt.Sprintf("invalid file path: %v", err)) - } - - info, err := os.Stat(absPath) - if err != nil { - return "", wrapWithSentinel(ErrFileNotAccessible, err, fmt.Sprintf("cannot access file %q: %v", file, err)) - } - if info.IsDir() { - return "", newWithSentinel(ErrFileIsDirectory, fmt.Sprintf("path %q is a directory, not a file", file)) - } - - return absPath, nil -} - -func ResolveRegularFilePath(file string) (string, error) { - return resolveRegularFilePath(file) -} - -func readFileAtPath(path string) ([]byte, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("resolve file path: %w", err) - } - - root, err := os.OpenRoot(filepath.Dir(absPath)) - if err != nil { - return nil, err - } - defer root.Close() - - base := filepath.Base(absPath) - info, err := root.Stat(base) - if err != nil { - return nil, err - } - if !info.Mode().IsRegular() { - return nil, fmt.Errorf("read file %q: not a regular file", path) - } - - file, err := root.Open(base) - if err != nil { - return nil, err - } - defer file.Close() - - return io.ReadAll(file) -} - -func ReadFileAtPath(path string) ([]byte, error) { - return readFileAtPath(path) -} - -func applyManifestFromFile(kubectl *KubectlClient, file string, stdout, stderr io.Writer) error { - absPath, err := resolveRegularFilePath(file) - if err != nil { - return err - } - - return kubectl.RunWithOutput([]string{"apply", "-f", absPath}, stdout, stderr) -} - -func normalizePatchValue(value any) any { - switch typed := value.(type) { - case map[string]any: - normalized := make(map[string]any, len(typed)) - for key, child := range typed { - normalized[key] = normalizePatchValue(child) - } - return normalized - case map[any]any: - normalized := make(map[string]any, len(typed)) - for key, child := range typed { - normalized[fmt.Sprint(key)] = normalizePatchValue(child) - } - return normalized - case []any: - normalized := make([]any, len(typed)) - for i, child := 
range typed { - normalized[i] = normalizePatchValue(child) - } - return normalized - default: - return value - } -} - -func normalizePatchDocument(raw string) (string, error) { - var value any - if err := yaml.Unmarshal([]byte(raw), &value); err != nil { - return "", fmt.Errorf("parse patch document: %w", err) - } - - data, err := json.Marshal(normalizePatchValue(value)) - if err != nil { - return "", fmt.Errorf("marshal patch document: %w", err) - } - - return string(data), nil -} - -func normalizePatchFile(file string) (string, error) { - absPath, err := resolveRegularFilePath(file) - if err != nil { - return "", err - } - - data, err := readFileAtPath(absPath) - if err != nil { - return "", wrapWithSentinel(ErrFileNotAccessible, err, fmt.Sprintf("cannot read file %q: %v", file, err)) - } - - return normalizePatchDocument(string(data)) -} - -func writeOutputFile(file string, data []byte) error { - absPath, err := filepath.Abs(file) - if err != nil { - return fmt.Errorf("resolve output path: %w", err) - } - dir := filepath.Dir(absPath) - if err := os.MkdirAll(dir, 0o750); err != nil { - return fmt.Errorf("create output directory: %w", err) - } - root, err := os.OpenRoot(dir) - if err != nil { - return fmt.Errorf("open output directory: %w", err) - } - defer root.Close() - - f, err := root.OpenFile(filepath.Base(absPath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) - if err != nil { - return fmt.Errorf("write output file: %w", err) - } - - n, err := f.Write(data) - if err != nil { - _ = f.Close() - return fmt.Errorf("write output file: %w", err) - } - if n != len(data) { - _ = f.Close() - return fmt.Errorf("write output file: %w", io.ErrShortWrite) - } - if err := f.Close(); err != nil { - return fmt.Errorf("write output file: %w", err) - } - if err := os.Chmod(absPath, 0o600); err != nil { - return fmt.Errorf("write output file: %w", err) - } - return nil -} diff --git a/internal/cli/resource_helpers_test.go b/internal/cli/resource_helpers_test.go deleted file mode 
100644 index 4221453..0000000 --- a/internal/cli/resource_helpers_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package cli - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - "testing" -) - -func TestReadFileAtPath(t *testing.T) { - t.Run("reads regular file", func(t *testing.T) { - path := filepath.Join(t.TempDir(), "manifest.yaml") - if err := os.WriteFile(path, []byte("kind: Namespace\n"), 0o600); err != nil { - t.Fatalf("WriteFile() error = %v", err) - } - - data, err := readFileAtPath(path) - if err != nil { - t.Fatalf("readFileAtPath() error = %v", err) - } - if string(data) != "kind: Namespace\n" { - t.Fatalf("readFileAtPath() = %q", string(data)) - } - }) - - t.Run("rejects symlink that escapes the opened root", func(t *testing.T) { - baseDir := t.TempDir() - manifestDir := filepath.Join(baseDir, "manifests") - if err := os.MkdirAll(manifestDir, 0o750); err != nil { - t.Fatalf("MkdirAll() error = %v", err) - } - - outsidePath := filepath.Join(baseDir, "outside.yaml") - if err := os.WriteFile(outsidePath, []byte("kind: Secret\n"), 0o600); err != nil { - t.Fatalf("WriteFile() error = %v", err) - } - - linkPath := filepath.Join(manifestDir, "linked.yaml") - relTarget, err := filepath.Rel(manifestDir, outsidePath) - if err != nil { - t.Fatalf("Rel() error = %v", err) - } - if err := os.Symlink(relTarget, linkPath); err != nil { - t.Skipf("Symlink() unavailable: %v", err) - } - - if _, err := readFileAtPath(linkPath); err == nil { - t.Fatal("readFileAtPath() error = nil, want symlink escape rejection") - } - }) - - t.Run("rejects non-regular files", func(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("named pipes are not exercised in this test on Windows") - } - - pipePath := filepath.Join(t.TempDir(), "manifest.pipe") - if err := syscall.Mkfifo(pipePath, 0o600); err != nil { - t.Skipf("Mkfifo() unavailable: %v", err) - } - - _, err := readFileAtPath(pipePath) - if err == nil { - t.Fatal("readFileAtPath() error = nil, want 
non-regular file rejection") - } - if !strings.Contains(err.Error(), "not a regular file") { - t.Fatalf("readFileAtPath() error = %v, want non-regular file rejection", err) - } - }) -} - -func TestWriteOutputFileUsesRestrictedDirectoryPermissions(t *testing.T) { - target := filepath.Join(t.TempDir(), "nested", "exported", "server.yaml") - if err := writeOutputFile(target, []byte("kind: Namespace\n")); err != nil { - t.Fatalf("writeOutputFile() error = %v", err) - } - - data, err := os.ReadFile(target) - if err != nil { - t.Fatalf("ReadFile() error = %v", err) - } - if string(data) != "kind: Namespace\n" { - t.Fatalf("writeOutputFile() content = %q", string(data)) - } - - info, err := os.Stat(filepath.Dir(target)) - if err != nil { - t.Fatalf("Stat() error = %v", err) - } - if perms := info.Mode().Perm(); perms&0o027 != 0 { - t.Fatalf("directory permissions = %o, want 0750 or less", perms) - } -} - -func TestWriteOutputFileTightensExistingFilePermissions(t *testing.T) { - target := filepath.Join(t.TempDir(), "exported", "server.yaml") - if err := os.MkdirAll(filepath.Dir(target), 0o750); err != nil { - t.Fatalf("MkdirAll() error = %v", err) - } - if err := os.WriteFile(target, []byte("kind: Secret\n"), 0o644); err != nil { - t.Fatalf("WriteFile() error = %v", err) - } - if err := os.Chmod(target, 0o644); err != nil { - t.Fatalf("Chmod() error = %v", err) - } - - if err := writeOutputFile(target, []byte("kind: Namespace\n")); err != nil { - t.Fatalf("writeOutputFile() error = %v", err) - } - - info, err := os.Stat(target) - if err != nil { - t.Fatalf("Stat() error = %v", err) - } - if perms := info.Mode().Perm(); perms != 0o600 { - t.Fatalf("file permissions = %o, want 0600", perms) - } -} diff --git a/internal/cli/root/commands.go b/internal/cli/root/commands.go index b35bb14..fd956b1 100644 --- a/internal/cli/root/commands.go +++ b/internal/cli/root/commands.go @@ -4,11 +4,11 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" - "mcp-runtime/internal/cli" 
"mcp-runtime/internal/cli/access" "mcp-runtime/internal/cli/auth" "mcp-runtime/internal/cli/bootstrap" "mcp-runtime/internal/cli/cluster" + "mcp-runtime/internal/cli/core" "mcp-runtime/internal/cli/pipeline" "mcp-runtime/internal/cli/registry" "mcp-runtime/internal/cli/sentinel" @@ -19,15 +19,16 @@ import ( // AddCommands registers every top-level mcp-runtime command on root. func AddCommands(root *cobra.Command, logger *zap.Logger) { - runtime := cli.NewRuntime(logger) + runtime := core.NewRuntime(logger) + clusterMgr := cluster.DefaultClusterManager(logger) - root.AddCommand(cluster.New(runtime)) + root.AddCommand(cluster.NewWithManager(clusterMgr)) root.AddCommand(registry.New(runtime)) root.AddCommand(server.New(runtime)) root.AddCommand(access.New(runtime)) root.AddCommand(auth.New(runtime)) root.AddCommand(bootstrap.New(runtime)) - root.AddCommand(setup.New(runtime)) + root.AddCommand(setup.New(runtime, clusterMgr)) root.AddCommand(status.New(runtime)) root.AddCommand(sentinel.New(runtime)) root.AddCommand(pipeline.New(runtime)) diff --git a/internal/cli/root/doc.go b/internal/cli/root/doc.go index bc84e43..d5b5c1a 100644 --- a/internal/cli/root/doc.go +++ b/internal/cli/root/doc.go @@ -2,6 +2,6 @@ // mcp-runtime // binary. // -// Each subpackage owns one top-level Cobra command boundary and delegates -// behavior to the shared internal/cli implementation package. +// Each subpackage owns one top-level Cobra command boundary and uses +// internal/cli/core for shared CLI infrastructure. package root diff --git a/internal/cli/runtime.go b/internal/cli/runtime.go deleted file mode 100644 index 71d22bb..0000000 --- a/internal/cli/runtime.go +++ /dev/null @@ -1,59 +0,0 @@ -package cli - -import "go.uber.org/zap" - -// Runtime is the shared CLI facade for wiring common dependencies once and -// handing typed managers to the foldered command packages. -type Runtime struct { - logger *zap.Logger -} - -// NewRuntime builds the shared CLI runtime facade. 
-func NewRuntime(logger *zap.Logger) *Runtime { - return &Runtime{logger: logger} -} - -// Logger returns the shared logger. -func (r *Runtime) Logger() *zap.Logger { - return r.logger -} - -// KubectlRunner returns the shared kubectl runner. -func (r *Runtime) KubectlRunner() KubectlRunner { - return DefaultKubectlRunner() -} - -// KubectlClient returns the shared kubectl client. -func (r *Runtime) KubectlClient() *KubectlClient { - return kubectlClient -} - -// Executor returns the shared process executor. -func (r *Runtime) Executor() Executor { - return execExecutor -} - -// AccessManager returns the access command manager. -func (r *Runtime) AccessManager() *AccessManager { - return DefaultAccessManager(r.logger) -} - -// ClusterManager returns the cluster command manager. -func (r *Runtime) ClusterManager() *ClusterManager { - return DefaultClusterManager(r.logger) -} - -// RegistryManager returns the registry command manager. -func (r *Runtime) RegistryManager() *RegistryManager { - return DefaultRegistryManager(r.logger) -} - -// SentinelManager returns the sentinel command manager. -func (r *Runtime) SentinelManager() *SentinelManager { - return DefaultSentinelManager(r.logger) -} - -// ServerManager returns the server command manager. -func (r *Runtime) ServerManager() *ServerManager { - return DefaultServerManager(r.logger) -} diff --git a/internal/cli/sentinel.go b/internal/cli/sentinel/manager.go similarity index 52% rename from internal/cli/sentinel.go rename to internal/cli/sentinel/manager.go index dc25f99..72e0fc2 100644 --- a/internal/cli/sentinel.go +++ b/internal/cli/sentinel/manager.go @@ -1,4 +1,4 @@ -package cli +package sentinel import ( "fmt" @@ -8,10 +8,14 @@ import ( "strings" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/platformstatus" ) +// SentinelManager operates the bundled mcp-sentinel stack via kubectl. 
type SentinelManager struct { - kubectl *KubectlClient + kubectl *core.KubectlClient logger *zap.Logger } @@ -34,14 +38,14 @@ type sentinelPortTarget struct { } var sentinelComponents = []sentinelComponent{ - {Key: "clickhouse", Display: "ClickHouse", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Resource: "clickhouse", Label: "clickhouse"}, - {Key: "zookeeper", Display: "Zookeeper", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Resource: "zookeeper", Label: "zookeeper"}, - {Key: "kafka", Display: "Kafka", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Resource: "kafka", Label: "kafka"}, - {Key: "ingest", Display: "Ingest", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-ingest", Label: "mcp-sentinel-ingest"}, + {Key: "clickhouse", Display: "ClickHouse", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Resource: "clickhouse", Label: "clickhouse"}, + {Key: "zookeeper", Display: "Zookeeper", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "zookeeper", Label: "zookeeper"}, + {Key: "kafka", Display: "Kafka", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Resource: "kafka", Label: "kafka"}, + {Key: "ingest", Display: "Ingest", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-ingest", Label: "mcp-sentinel-ingest"}, { Key: "api", Display: "API", - Namespace: defaultAnalyticsNamespace, + Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-api", Label: "mcp-sentinel-api", @@ -52,11 +56,11 @@ var sentinelComponents = []sentinelComponent{ RemotePort: 8080, }, }, - {Key: "processor", Display: "Processor", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-processor", Label: "mcp-sentinel-processor"}, + {Key: "processor", Display: "Processor", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-processor", Label: 
"mcp-sentinel-processor"}, { Key: "ui", Display: "UI", - Namespace: defaultAnalyticsNamespace, + Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-ui", Label: "mcp-sentinel-ui", @@ -67,11 +71,11 @@ var sentinelComponents = []sentinelComponent{ RemotePort: 8082, }, }, - {Key: "gateway", Display: "Gateway", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-gateway", Label: "mcp-sentinel-gateway"}, + {Key: "gateway", Display: "Gateway", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "mcp-sentinel-gateway", Label: "mcp-sentinel-gateway"}, { Key: "prometheus", Display: "Prometheus", - Namespace: defaultAnalyticsNamespace, + Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "prometheus", Label: "prometheus", @@ -86,7 +90,7 @@ var sentinelComponents = []sentinelComponent{ { Key: "grafana", Display: "Grafana", - Namespace: defaultAnalyticsNamespace, + Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "grafana", Label: "grafana", @@ -97,21 +101,24 @@ var sentinelComponents = []sentinelComponent{ RemotePort: 3000, }, }, - {Key: "otel-collector", Display: "OTel Collector", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Resource: "otel-collector", Label: "otel-collector", Aliases: []string{"otel"}}, - {Key: "tempo", Display: "Tempo", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Resource: "tempo", Label: "tempo"}, - {Key: "loki", Display: "Loki", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Resource: "loki", Label: "loki"}, - {Key: "promtail", Display: "Promtail", Namespace: defaultAnalyticsNamespace, Kind: "daemonset", Resource: "promtail", Label: "promtail"}, + {Key: "otel-collector", Display: "OTel Collector", Namespace: core.DefaultAnalyticsNamespace, Kind: "deployment", Resource: "otel-collector", Label: "otel-collector", Aliases: []string{"otel"}}, + {Key: "tempo", Display: "Tempo", Namespace: 
core.DefaultAnalyticsNamespace, Kind: "statefulset", Resource: "tempo", Label: "tempo"}, + {Key: "loki", Display: "Loki", Namespace: core.DefaultAnalyticsNamespace, Kind: "statefulset", Resource: "loki", Label: "loki"}, + {Key: "promtail", Display: "Promtail", Namespace: core.DefaultAnalyticsNamespace, Kind: "daemonset", Resource: "promtail", Label: "promtail"}, } -func NewSentinelManager(kubectl *KubectlClient, logger *zap.Logger) *SentinelManager { +// NewSentinelManager creates a SentinelManager with explicit dependencies. +func NewSentinelManager(kubectl *core.KubectlClient, logger *zap.Logger) *SentinelManager { return &SentinelManager{kubectl: kubectl, logger: logger} } -func DefaultSentinelManager(logger *zap.Logger) *SentinelManager { - return NewSentinelManager(kubectlClient, logger) +// DefaultSentinelManager returns a SentinelManager using the shared runtime clients. +func DefaultSentinelManager(runtime *core.Runtime) *SentinelManager { + return NewSentinelManager(runtime.KubectlClient(), runtime.Logger()) } -func SentinelComponentKeys() []string { +// ComponentKeys returns sorted valid component names for cobra completion. 
+func ComponentKeys() []string { keys := make([]string, 0, len(sentinelComponents)) for _, component := range sentinelComponents { keys = append(keys, component.Key) @@ -134,7 +141,7 @@ func findSentinelComponent(name string) (*sentinelComponent, error) { } } - return nil, newWithSentinel(nil, fmt.Sprintf("unknown sentinel component %q (use one of: %s)", name, strings.Join(SentinelComponentKeys(), ", "))) + return nil, core.NewWithSentinel(nil, fmt.Sprintf("unknown sentinel component %q (use one of: %s)", name, strings.Join(ComponentKeys(), ", "))) } func findSentinelPortTarget(name string) (*sentinelPortTarget, error) { @@ -143,41 +150,43 @@ func findSentinelPortTarget(name string) (*sentinelPortTarget, error) { return nil, err } if component.PortTarget == nil { - return nil, newWithSentinel(nil, fmt.Sprintf("component %q does not expose a predefined port-forward target", name)) + return nil, core.NewWithSentinel(nil, fmt.Sprintf("component %q does not expose a predefined port-forward target", name)) } return component.PortTarget, nil } +// ShowSentinelStatus prints a status table for sentinel workloads. 
func (m *SentinelManager) ShowSentinelStatus() error { - Header("MCP Sentinel Status") - DefaultPrinter.Println() + core.Header("MCP Sentinel Status") + core.DefaultPrinter.Println() tableData := [][]string{{"Component", "Namespace", "Resource", "Status", "Details"}} clusterReachable := true - if err := checkClusterStatusQuiet(); err != nil { + if err := platformstatus.CheckClusterStatusQuiet(); err != nil { clusterReachable = false - tableData = append(tableData, analyticsStackRow(Red("ERROR"), err.Error())) - TableBoxed(tableData) + tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Red("ERROR"), err.Error())) + core.TableBoxed(tableData) return nil } - installed, err := analyticsNamespaceInstalled(clusterReachable) + installed, err := platformstatus.AnalyticsNamespaceInstalled(clusterReachable) switch { case err != nil: - tableData = append(tableData, analyticsStackRow(Red("ERROR"), err.Error())) + tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Red("ERROR"), err.Error())) case !installed: - tableData = append(tableData, analyticsStackRow(Yellow("SKIPPED"), "Namespace not found")) + tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Yellow("SKIPPED"), "Namespace not found")) default: - for _, workload := range analyticsStatusWorkloads { - tableData = append(tableData, workloadStatusRow(workload, true)) + for _, workload := range platformstatus.DefaultPlatformStatusWorkloads { + tableData = append(tableData, platformstatus.WorkloadStatusRow(workload, true)) } } - TableBoxed(tableData) + core.TableBoxed(tableData) return nil } +// ViewSentinelLogs streams logs for a sentinel component. 
func (m *SentinelManager) ViewSentinelLogs(component string, follow, previous bool, tail int, since string) error { target, err := findSentinelComponent(component) if err != nil { @@ -203,7 +212,7 @@ func (m *SentinelManager) ViewSentinelLogs(component string, follow, previous bo } if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to stream logs for sentinel component %q: %v", component, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to stream logs for sentinel component %q: %v", component, err), map[string]any{ "component": component, "namespace": target.Namespace, }) @@ -211,17 +220,19 @@ func (m *SentinelManager) ViewSentinelLogs(component string, follow, previous bo return nil } +// ShowSentinelEvents lists recent events in the analytics namespace. func (m *SentinelManager) ShowSentinelEvents() error { - args := []string{"get", "events", "-n", defaultAnalyticsNamespace, "--sort-by=.lastTimestamp"} + args := []string{"get", "events", "-n", core.DefaultAnalyticsNamespace, "--sort-by=.lastTimestamp"} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to list sentinel events: %v", err), map[string]any{ - "namespace": defaultAnalyticsNamespace, + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to list sentinel events: %v", err), map[string]any{ + "namespace": core.DefaultAnalyticsNamespace, "component": "sentinel", }) } return nil } +// PortForwardSentinelTarget runs kubectl port-forward for a known service target. 
func (m *SentinelManager) PortForwardSentinelTarget(target string, localPort int, address string) error { portTarget, err := findSentinelPortTarget(target) if err != nil { @@ -233,28 +244,29 @@ func (m *SentinelManager) PortForwardSentinelTarget(target string, localPort int args := []string{ "port-forward", - "-n", defaultAnalyticsNamespace, + "-n", core.DefaultAnalyticsNamespace, fmt.Sprintf("%s/%s", portTarget.ResourceKind, portTarget.ResourceName), fmt.Sprintf("%d:%d", localPort, portTarget.RemotePort), "--address", address, } if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to port-forward sentinel target %q: %v", target, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to port-forward sentinel target %q: %v", target, err), map[string]any{ "target": target, - "namespace": defaultAnalyticsNamespace, + "namespace": core.DefaultAnalyticsNamespace, "component": "sentinel", }) } return nil } +// RestartSentinel restarts one component or all sentinel workloads. 
func (m *SentinelManager) RestartSentinel(component string, restartAll bool) error { if restartAll { for _, target := range sentinelComponents { args := []string{"rollout", "restart", fmt.Sprintf("%s/%s", target.Kind, target.Resource), "-n", target.Namespace} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to restart sentinel component %q: %v", target.Key, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to restart sentinel component %q: %v", target.Key, err), map[string]any{ "component": target.Key, "namespace": target.Namespace, }) @@ -269,7 +281,7 @@ func (m *SentinelManager) RestartSentinel(component string, restartAll bool) err } args := []string{"rollout", "restart", fmt.Sprintf("%s/%s", target.Kind, target.Resource), "-n", target.Namespace} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - return wrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to restart sentinel component %q: %v", component, err), map[string]any{ + return core.WrapWithSentinelAndContext(nil, err, fmt.Sprintf("failed to restart sentinel component %q: %v", component, err), map[string]any{ "component": component, "namespace": target.Namespace, }) diff --git a/internal/cli/sentinel_test.go b/internal/cli/sentinel/manager_test.go similarity index 52% rename from internal/cli/sentinel_test.go rename to internal/cli/sentinel/manager_test.go index d1e35d9..c71bb14 100644 --- a/internal/cli/sentinel_test.go +++ b/internal/cli/sentinel/manager_test.go @@ -1,15 +1,27 @@ -package cli +package sentinel_test import ( "testing" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/sentinel" ) +func contains(slice []string, val string) bool { + for _, s := range slice { + if s == val { + return true + } + } + return false +} + func TestSentinelManager_ViewSentinelLogs(t *testing.T) { - mock := 
&MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewSentinelManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := sentinel.NewSentinelManager(kubectl, zap.NewNop()) if err := mgr.ViewSentinelLogs("api", true, false, 50, "5m"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -19,7 +31,7 @@ func TestSentinelManager_ViewSentinelLogs(t *testing.T) { if cmd.Name != "kubectl" { t.Fatalf("expected kubectl, got %q", cmd.Name) } - for _, want := range []string{"logs", "-n", defaultAnalyticsNamespace, "-l", "app=mcp-sentinel-api", "--all-containers=true", "--prefix=true", "--tail", "50", "--since", "5m", "-f"} { + for _, want := range []string{"logs", "-n", core.DefaultAnalyticsNamespace, "-l", "app=mcp-sentinel-api", "--all-containers=true", "--prefix=true", "--tail", "50", "--since", "5m", "-f"} { if !contains(cmd.Args, want) { t.Fatalf("expected %q in args, got %v", want, cmd.Args) } @@ -27,16 +39,16 @@ func TestSentinelManager_ViewSentinelLogs(t *testing.T) { } func TestSentinelManager_PortForwardSentinelTarget(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewSentinelManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := sentinel.NewSentinelManager(kubectl, zap.NewNop()) if err := mgr.PortForwardSentinelTarget("grafana", 0, "0.0.0.0"); err != nil { t.Fatalf("unexpected error: %v", err) } cmd := mock.LastCommand() - for _, want := range []string{"port-forward", "-n", defaultAnalyticsNamespace, "service/grafana", "3000:3000", "--address", "0.0.0.0"} { + for _, want := range []string{"port-forward", "-n", core.DefaultAnalyticsNamespace, "service/grafana", "3000:3000", "--address", "0.0.0.0"} { if !contains(cmd.Args, want) { t.Fatalf("expected %q in args, got %v", want, cmd.Args) } @@ -44,16 +56,16 @@ func TestSentinelManager_PortForwardSentinelTarget(t 
*testing.T) { } func TestSentinelManager_RestartSentinel(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - mgr := NewSentinelManager(kubectl, zap.NewNop()) + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + mgr := sentinel.NewSentinelManager(kubectl, zap.NewNop()) if err := mgr.RestartSentinel("processor", false); err != nil { t.Fatalf("unexpected error: %v", err) } cmd := mock.LastCommand() - for _, want := range []string{"rollout", "restart", "deployment/mcp-sentinel-processor", "-n", defaultAnalyticsNamespace} { + for _, want := range []string{"rollout", "restart", "deployment/mcp-sentinel-processor", "-n", core.DefaultAnalyticsNamespace} { if !contains(cmd.Args, want) { t.Fatalf("expected %q in args, got %v", want, cmd.Args) } diff --git a/internal/cli/sentinel/sentinel.go b/internal/cli/sentinel/sentinel.go index 8a59580..5095624 100644 --- a/internal/cli/sentinel/sentinel.go +++ b/internal/cli/sentinel/sentinel.go @@ -4,16 +4,16 @@ package sentinel import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) // New returns the sentinel command. -func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(runtime.SentinelManager()) +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(DefaultSentinelManager(runtime)) } // NewWithManager returns the sentinel command using the provided manager. 
-func NewWithManager(mgr *cli.SentinelManager) *cobra.Command { +func NewWithManager(mgr *SentinelManager) *cobra.Command { cmd := &cobra.Command{ Use: "sentinel", Short: "Operate the bundled mcp-sentinel stack", @@ -36,7 +36,7 @@ func NewWithManager(mgr *cli.SentinelManager) *cobra.Command { Use: "logs [component]", Short: "View logs for a mcp-sentinel component", Args: cobra.ExactArgs(1), - ValidArgs: cli.SentinelComponentKeys(), + ValidArgs: ComponentKeys(), RunE: func(cmd *cobra.Command, args []string) error { return mgr.ViewSentinelLogs(args[0], follow, previous, tail, since) }, diff --git a/internal/cli/server/build.go b/internal/cli/server/build.go index 6ac152e..9d6ee86 100644 --- a/internal/cli/server/build.go +++ b/internal/cli/server/build.go @@ -3,8 +3,6 @@ package server import ( "github.com/spf13/cobra" "go.uber.org/zap" - - "mcp-runtime/internal/cli" ) func newBuildImageCmd(logger *zap.Logger) *cobra.Command { @@ -21,7 +19,7 @@ func newBuildImageCmd(logger *zap.Logger) *cobra.Command { Long: "Build a Docker image from Dockerfile and update metadata file.", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return cli.BuildImage(logger, args[0], dockerfile, metadataFile, metadataDir, registryURL, tag, contextDir) + return BuildImage(logger, args[0], dockerfile, metadataFile, metadataDir, registryURL, tag, contextDir) }, } diff --git a/internal/cli/server/build_image.go b/internal/cli/server/build_image.go new file mode 100644 index 0000000..e93534b --- /dev/null +++ b/internal/cli/server/build_image.go @@ -0,0 +1,192 @@ +package server + +// This file implements the "server build" command for building Docker images. +// It handles Docker image building, metadata file updates, and registry integration. 
+// +// Example usage: +// mcp-runtime server build image my-server --tag v1.0.0 +// mcp-runtime server build image my-server --dockerfile custom.Dockerfile --registry my-registry.com + +import ( + "fmt" + "os" + "path/filepath" + + "go.uber.org/zap" + + "mcp-runtime/internal/cli/registry/resolve" + "mcp-runtime/pkg/metadata" + + "gopkg.in/yaml.v3" + + "mcp-runtime/internal/cli/core" +) + +// yamlMarshal is a test seam for yaml.Marshal. +var yamlMarshal = yaml.Marshal + +func buildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error { + // Get registry URL + if registryURL == "" { + kubectl := core.DefaultKubectlClient() + registryURL = resolve.PlatformURL(logger, func(args []string) (resolve.OutputCommand, error) { + return kubectl.CommandArgs(args) + }, registryResolveConfig()) + } + + // Get tag + if tag == "" { + tag = resolve.GitTag(func(name string, args []string) (resolve.OutputCommand, error) { + return core.ExecCommandWithValidators(name, args) + }) + } + + logger.Info("Building image", zap.String("server", serverName)) + + // Determine image name + imageName := fmt.Sprintf("%s/%s", registryURL, serverName) + fullImage := fmt.Sprintf("%s:%s", imageName, tag) + + // Build Docker image + // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
+ buildCmd, err := core.ExecCommandWithValidators("docker", []string{ + "build", + "-f", dockerfile, + "-t", fullImage, + context, + }) + if err != nil { + return err + } + buildCmd.SetStdout(os.Stdout) + buildCmd.SetStderr(os.Stderr) + + if err := buildCmd.Run(); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrBuildImageFailed, + err, + fmt.Sprintf("failed to build image for %s: %v", serverName, err), + map[string]any{"server": serverName, "image": fullImage, "dockerfile": dockerfile, "component": "build"}, + ) + core.Error("Failed to build image") + core.LogStructuredError(logger, wrappedErr, "Failed to build image") + return wrappedErr + } + + logger.Info("Image built successfully", zap.String("image", fullImage)) + + // Update metadata file (required for a successful build: CI and scripts rely on non-zero exit) + if err := updateMetadataImage(serverName, imageName, tag, metadataFile, metadataDir); err != nil { + core.LogStructuredError(logger, err, "Image built but metadata update failed") + return err + } + + return nil +} + +// BuildImage builds a Docker image and updates MCP metadata for the server. 
+func BuildImage(logger *zap.Logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context string) error { + return buildImage(logger, serverName, dockerfile, metadataFile, metadataDir, registryURL, tag, context) +} + +func registryResolveConfig() resolve.Config { + return resolve.Config{ + RegistryEndpoint: core.DefaultCLIConfig.RegistryEndpoint, + DefaultRegistryEndpoint: core.DefaultRegistryEndpoint, + RegistryPort: core.DefaultCLIConfig.RegistryPort, + } +} + +func updateMetadataImage(serverName, imageName, tag, metadataFile, metadataDir string) error { + // Find the metadata file containing this server + var targetFile string + + if metadataFile != "" { + targetFile = metadataFile + } else { + // Search in metadata directory + files, _ := filepath.Glob(filepath.Join(metadataDir, "*.yaml")) + ymlFiles, _ := filepath.Glob(filepath.Join(metadataDir, "*.yml")) + files = append(files, ymlFiles...) + + for _, file := range files { + registry, err := metadata.LoadFromFile(file) + if err != nil { + continue + } + for _, s := range registry.Servers { + if s.Name == serverName { + targetFile = file + break + } + } + if targetFile != "" { + break + } + } + } + + if targetFile == "" { + err := core.NewWithSentinel(core.ErrMetadataFileNotFound, fmt.Sprintf("metadata file not found for server %s", serverName)) + core.Error("Metadata file not found") + // Note: No logger available in this helper function + return err + } + + // Load and update + registry, err := metadata.LoadFromFile(targetFile) + if err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrLoadMetadataFailed, err, fmt.Sprintf("failed to load metadata: %v", err)) + core.Error("Failed to load metadata") + // Note: No logger available in this helper function + return wrappedErr + } + + // Update server image + updated := false + for i := range registry.Servers { + if registry.Servers[i].Name == serverName { + registry.Servers[i].Image = imageName + registry.Servers[i].ImageTag = tag + 
updated = true + break + } + } + + if !updated { + err := core.NewWithSentinel(core.ErrServerNotFoundInMetadata, fmt.Sprintf("server %s not found in metadata", serverName)) + core.Error("Server not found in metadata") + // Note: No logger available in this helper function + return err + } + + // Write back + data, err := yamlMarshal(registry) + if err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrMarshalMetadataFailed, err, fmt.Sprintf("failed to marshal metadata: %v", err)) + core.Error("Failed to marshal metadata") + // Note: No logger available in this helper function + return wrappedErr + } + + fileMode := os.FileMode(0o600) + if info, statErr := os.Stat(targetFile); statErr == nil { + fileMode = info.Mode().Perm() + if fileMode&0o200 == 0 { + writeErr := fmt.Errorf("file is not writable: %s", targetFile) + wrappedErr := core.WrapWithSentinel(core.ErrWriteMetadataFailed, writeErr, fmt.Sprintf("failed to write metadata: %v", writeErr)) + core.Error("Failed to write metadata") + // Note: No logger available in this helper function + return wrappedErr + } + } + + if err := os.WriteFile(targetFile, data, fileMode); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrWriteMetadataFailed, err, fmt.Sprintf("failed to write metadata: %v", err)) + core.Error("Failed to write metadata") + // Note: No logger available in this helper function + return wrappedErr + } + + return nil +} diff --git a/internal/cli/server/build_image_test.go b/internal/cli/server/build_image_test.go new file mode 100644 index 0000000..e694fbb --- /dev/null +++ b/internal/cli/server/build_image_test.go @@ -0,0 +1,425 @@ +package server + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" + + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" +) + +func TestBuildImage(t *testing.T) { + logger := zap.NewNop() + + t.Run("builds_image_successfully", func(t *testing.T) { + mock := &core.MockExecutor{} + defer core.SwapExecExecutor(mock)() + + tmp := t.TempDir() + 
metadataFile := filepath.Join(tmp, "servers.yaml") + if err := os.WriteFile(metadataFile, []byte(`version: v1 +servers: + - name: test-server +`), 0o600); err != nil { + t.Fatalf("write metadata: %v", err) + } + + err := buildImage(logger, "test-server", "Dockerfile", metadataFile, ".", "test-registry", "test-tag", ".") + if err != nil { + t.Fatalf("failed to build image: %v", err) + } + + if !mock.HasCommand("docker") { + t.Error("expected docker command to be executed") + } + + last := mock.LastCommand() + if last.Name != "docker" { + t.Errorf("expected docker command, got %q", last.Name) + } + + expectedArgs := []string{"build", "-f", "Dockerfile", "-t", "test-registry/test-server:test-tag", "."} + if !equalStringSlices(last.Args, expectedArgs) { + t.Errorf("docker args = %v, want %v", last.Args, expectedArgs) + } + }) + + t.Run("returns_error_after_build_when_metadata_missing", func(t *testing.T) { + mock := &core.MockExecutor{} + defer core.SwapExecExecutor(mock)() + + tmp := t.TempDir() + err := buildImage(logger, "missing-server", "Dockerfile", "", tmp, "test-registry", "test-tag", ".") + if err == nil { + t.Fatal("expected error when metadata file not found for server name") + } + if !errors.Is(err, core.ErrMetadataFileNotFound) { + t.Fatalf("expected ErrMetadataFileNotFound, got %v", err) + } + }) + + t.Run("returns_error_on_build_failure", func(t *testing.T) { + mock := &core.MockExecutor{ + DefaultRunErr: errors.New("docker build failed"), + } + defer core.SwapExecExecutor(mock)() + + err := buildImage(logger, "test-server", "Dockerfile", "", ".", "test-registry", "test-tag", ".") + if err == nil { + t.Error("expected error when docker build fails") + } + }) + + t.Run("uses_git_tag_when_tag_empty", func(t *testing.T) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + if spec.Name == "git" { + return &core.MockCommand{OutputData: []byte("abc1234\n")} + } + return &core.MockCommand{} + }, + } + defer 
core.SwapExecExecutor(mock)() + + tmp := t.TempDir() + metadataFile := filepath.Join(tmp, "servers.yaml") + if err := os.WriteFile(metadataFile, []byte(`version: v1 +servers: + - name: my-server +`), 0o600); err != nil { + t.Fatalf("write metadata: %v", err) + } + + err := buildImage(logger, "my-server", "Dockerfile", metadataFile, ".", "registry.io", "", ".") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + for _, cmd := range mock.Commands { + if cmd.Name == "docker" { + found := false + for _, arg := range cmd.Args { + if arg == "registry.io/my-server:abc1234" { + found = true + break + } + } + if !found { + t.Errorf("expected image tag with git SHA, got args: %v", cmd.Args) + } + } + } + }) + + t.Run("uses_platform_registry_when_registry_empty", func(t *testing.T) { + origConfig := core.DefaultCLIConfig + defer func() { core.DefaultCLIConfig = origConfig }() + core.DefaultCLIConfig = &core.CLIConfig{RegistryEndpoint: "", RegistryIngressHost: "", RegistryPort: 5000} + + kubectlMock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + for _, arg := range spec.Args { + if arg == "jsonpath={.spec.ports[0].port}" { + return &core.MockCommand{OutputData: []byte("5000")} + } + } + return &core.MockCommand{} + }, + } + defer core.SwapDefaultKubectlClient(core.NewTestKubectlClient(kubectlMock))() + mock := &core.MockExecutor{} + defer core.SwapExecExecutor(mock)() + + tmp := t.TempDir() + metadataFile := filepath.Join(tmp, "servers.yaml") + if err := os.WriteFile(metadataFile, []byte(`version: v1 +servers: + - name: my-server +`), 0o600); err != nil { + t.Fatalf("write metadata: %v", err) + } + + err := buildImage(logger, "my-server", "Dockerfile", metadataFile, ".", "", "v1.0", ".") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + for _, cmd := range mock.Commands { + if cmd.Name == "docker" { + found := false + for _, arg := range cmd.Args { + if arg == 
"registry.registry.svc.cluster.local:5000/my-server:v1.0" { + found = true + break + } + } + if !found { + t.Errorf("expected platform registry in image tag, got args: %v", cmd.Args) + } + } + } + }) + + t.Run("returns_error_when_command_validator_fails", func(t *testing.T) { + failingExecutor := &validatorFailingExecutor{err: errors.New("validator failed")} + defer core.SwapExecExecutor(failingExecutor)() + + err := buildImage(logger, "test-server", "Dockerfile", "", ".", "registry", "tag", ".") + if err == nil { + t.Error("expected error when command validator fails") + } + if err.Error() != "validator failed" { + t.Errorf("unexpected error: %v", err) + } + }) +} + +func equalStringSlices(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +type validatorFailingExecutor struct { + err error +} + +func (v *validatorFailingExecutor) Command(name string, args []string, validators ...core.ExecValidator) (core.Command, error) { + return nil, v.err +} + +func TestUpdateMetadataImage(t *testing.T) { + t.Run("updates_with_explicit_metadata_file", func(t *testing.T) { + tmpDir := t.TempDir() + metadataFile := filepath.Join(tmpDir, "servers.yaml") + + initialContent := `version: "1" +servers: + - name: my-server + image: old-registry/my-server + imageTag: old-tag +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write initial metadata: %v", err) + } + + err := updateMetadataImage("my-server", "new-registry/my-server", "new-tag", metadataFile, "") + if err != nil { + t.Fatalf("updateMetadataImage failed: %v", err) + } + + content, err := os.ReadFile(metadataFile) + if err != nil { + t.Fatalf("failed to read updated metadata: %v", err) + } + + if !strings.Contains(string(content), "new-registry/my-server") { + t.Errorf("expected new image in metadata, got: %s", content) + } + if !strings.Contains(string(content), 
"new-tag") { + t.Errorf("expected new tag in metadata, got: %s", content) + } + }) + + t.Run("finds_metadata_in_directory", func(t *testing.T) { + tmpDir := t.TempDir() + metadataDir := filepath.Join(tmpDir, ".mcp") + if err := os.MkdirAll(metadataDir, 0o755); err != nil { + t.Fatalf("failed to create metadata dir: %v", err) + } + + metadataFile := filepath.Join(metadataDir, "servers.yaml") + initialContent := `version: "1" +servers: + - name: discovered-server + image: old-image + imageTag: old +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write metadata: %v", err) + } + + err := updateMetadataImage("discovered-server", "new-image", "v2.0", "", metadataDir) + if err != nil { + t.Fatalf("updateMetadataImage failed: %v", err) + } + + content, err := os.ReadFile(metadataFile) + if err != nil { + t.Fatalf("failed to read metadata: %v", err) + } + + if !strings.Contains(string(content), "new-image") { + t.Errorf("expected new image, got: %s", content) + } + }) + + t.Run("finds_yml_files", func(t *testing.T) { + tmpDir := t.TempDir() + metadataDir := filepath.Join(tmpDir, ".mcp") + if err := os.MkdirAll(metadataDir, 0o755); err != nil { + t.Fatalf("failed to create metadata dir: %v", err) + } + + metadataFile := filepath.Join(metadataDir, "servers.yml") + initialContent := `version: "1" +servers: + - name: yml-server + image: old-image +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write metadata: %v", err) + } + + err := updateMetadataImage("yml-server", "new-image", "v1.0", "", metadataDir) + if err != nil { + t.Fatalf("updateMetadataImage failed: %v", err) + } + }) + + t.Run("returns_error_when_file_not_found", func(t *testing.T) { + tmpDir := t.TempDir() + + err := updateMetadataImage("nonexistent-server", "image", "tag", "", tmpDir) + if err == nil { + t.Error("expected error when metadata file not found") + } + if 
!strings.Contains(err.Error(), "metadata file not found") { + t.Errorf("unexpected error: %v", err) + } + }) + + t.Run("returns_error_when_server_not_in_metadata", func(t *testing.T) { + tmpDir := t.TempDir() + metadataFile := filepath.Join(tmpDir, "servers.yaml") + + initialContent := `version: "1" +servers: + - name: other-server + image: some-image +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write metadata: %v", err) + } + + err := updateMetadataImage("missing-server", "image", "tag", metadataFile, "") + if err == nil { + t.Error("expected error when server not found in metadata") + } + if !strings.Contains(err.Error(), "not found in metadata") { + t.Errorf("unexpected error: %v", err) + } + }) + + t.Run("returns_error_when_metadata_file_invalid", func(t *testing.T) { + tmpDir := t.TempDir() + metadataFile := filepath.Join(tmpDir, "invalid.yaml") + + if err := os.WriteFile(metadataFile, []byte("not: valid: yaml: content:::"), 0o600); err != nil { + t.Fatalf("failed to write invalid metadata: %v", err) + } + + err := updateMetadataImage("server", "image", "tag", metadataFile, "") + if err == nil { + t.Error("expected error when metadata file is invalid") + } + }) + + t.Run("skips_invalid_files_in_directory_search", func(t *testing.T) { + tmpDir := t.TempDir() + metadataDir := filepath.Join(tmpDir, ".mcp") + if err := os.MkdirAll(metadataDir, 0o755); err != nil { + t.Fatalf("failed to create metadata dir: %v", err) + } + + invalidFile := filepath.Join(metadataDir, "invalid.yaml") + if err := os.WriteFile(invalidFile, []byte("not: valid: yaml:::"), 0o600); err != nil { + t.Fatalf("failed to write invalid file: %v", err) + } + + validFile := filepath.Join(metadataDir, "valid.yaml") + validContent := `version: "1" +servers: + - name: target-server + image: old-image +` + if err := os.WriteFile(validFile, []byte(validContent), 0o600); err != nil { + t.Fatalf("failed to write valid file: %v", err) + } + + 
err := updateMetadataImage("target-server", "new-image", "v1.0", "", metadataDir) + if err != nil { + t.Fatalf("updateMetadataImage should skip invalid files: %v", err) + } + }) + + t.Run("returns_error_when_file_write_fails", func(t *testing.T) { + if os.Geteuid() == 0 { + t.Skip("root can bypass read-only file mode semantics in this environment") + } + + tmpDir := t.TempDir() + metadataFile := filepath.Join(tmpDir, "servers.yaml") + + initialContent := `version: "1" +servers: + - name: my-server + image: old-image +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write metadata: %v", err) + } + + if err := os.Chmod(metadataFile, 0o400); err != nil { + t.Fatalf("failed to chmod file: %v", err) + } + defer func() { _ = os.Chmod(metadataFile, 0o600) }() + + err := updateMetadataImage("my-server", "new-image", "v1.0", metadataFile, "") + if err == nil { + t.Error("expected error when file write fails") + } + if !strings.Contains(err.Error(), "failed to write metadata") { + t.Errorf("unexpected error: %v", err) + } + }) + + t.Run("returns_error_when_yaml_marshal_fails", func(t *testing.T) { + tmpDir := t.TempDir() + metadataFile := filepath.Join(tmpDir, "servers.yaml") + + initialContent := `version: "1" +servers: + - name: my-server + image: old-image +` + if err := os.WriteFile(metadataFile, []byte(initialContent), 0o600); err != nil { + t.Fatalf("failed to write metadata: %v", err) + } + + originalMarshal := yamlMarshal + defer func() { yamlMarshal = originalMarshal }() + + yamlMarshal = func(v interface{}) ([]byte, error) { + return nil, errors.New("marshal failed") + } + + err := updateMetadataImage("my-server", "new-image", "v1.0", metadataFile, "") + if err == nil { + t.Error("expected error when yaml marshal fails") + } + if !strings.Contains(err.Error(), "failed to marshal metadata") { + t.Errorf("unexpected error: %v", err) + } + }) +} diff --git a/internal/cli/server.go 
b/internal/cli/server/manager.go similarity index 67% rename from internal/cli/server.go rename to internal/cli/server/manager.go index 0513618..d2e8adf 100644 --- a/internal/cli/server.go +++ b/internal/cli/server/manager.go @@ -1,4 +1,4 @@ -package cli +package server // This file implements the "server" command for managing MCP server resources. // It handles creating, listing, viewing, and deleting MCPServer custom resources. @@ -8,25 +8,30 @@ import ( "encoding/json" "fmt" "os" - "regexp" + "strconv" "strings" "text/tabwriter" "github.com/spf13/cobra" "go.uber.org/zap" "gopkg.in/yaml.v3" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" + "mcp-runtime/internal/cli/kubeerr" + "mcp-runtime/internal/cli/platformapi" ) // ServerManager handles MCP server operations with injected dependencies. type ServerManager struct { - kubectl *KubectlClient + kubectl *core.KubectlClient logger *zap.Logger // useKube forces kubectl; when false, platform API is used for supported read-only commands when logged in. useKube bool } // NewServerManager creates a ServerManager with the given dependencies. -func NewServerManager(kubectl *KubectlClient, logger *zap.Logger) *ServerManager { +func NewServerManager(kubectl *core.KubectlClient, logger *zap.Logger) *ServerManager { return &ServerManager{ kubectl: kubectl, logger: logger, @@ -35,28 +40,7 @@ func NewServerManager(kubectl *KubectlClient, logger *zap.Logger) *ServerManager // DefaultServerManager returns a ServerManager using the default kubectl client. func DefaultServerManager(logger *zap.Logger) *ServerManager { - return NewServerManager(kubectlClient, logger) -} - -// validServerName matches Kubernetes resource name requirements (RFC 1123 subdomain). -var validServerName = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) - -// validateServerInput validates name and namespace for kubectl commands. -// Returns sanitized values or an error if validation fails. 
-func validateServerInput(name, namespace string) (string, string, error) { - if !validServerName.MatchString(name) { - return "", "", newWithSentinel(ErrInvalidServerName, fmt.Sprintf("invalid server name %q: must be lowercase alphanumeric with optional hyphens", name)) - } - - var err error - if name, err = validateManifestValue("name", name); err != nil { - return "", "", err - } - if namespace, err = validateManifestValue("namespace", namespace); err != nil { - return "", "", err - } - - return name, namespace, nil + return NewServerManager(core.DefaultKubectlClient(), logger) } // BindUseKubeFlag wires the shared --use-kube flag onto the command. @@ -64,6 +48,17 @@ func (m *ServerManager) BindUseKubeFlag(cmd *cobra.Command) { cmd.PersistentFlags().BoolVar(&m.useKube, "use-kube", false, "Use kubectl and local kubeconfig instead of the platform API for supported commands") } +func (m *ServerManager) requireKubectlForMutation() error { + _, useK, err := platformapi.ResolvePlatformOrKube(m.useKube) + if err != nil { + return err + } + if !useK { + return core.NewWithSentinel(nil, "this command requires kubectl and a cluster kubeconfig, or set --use-kube when you use kubectl alongside platform auth. Use mcp-runtime auth for API-backed list, status, and policy when kubeconfig is not used.") + } + return nil +} + // Logger exposes the manager logger to foldered command packages. 
func (m *ServerManager) Logger() *zap.Logger { return m.logger @@ -76,12 +71,12 @@ func (m *ServerManager) ListServers(namespace string) error { return err } - plat, useK, err := m.platformOrKube() + plat, useK, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !useK { - items, err := plat.listRuntimeServers(context.Background(), namespace) + items, err := plat.ListRuntimeServers(context.Background(), namespace) if err != nil { return err } @@ -96,14 +91,14 @@ func (m *ServerManager) ListServers(namespace string) error { // #nosec G204 -- namespace validated above; kubectl validates resource names. if err := m.kubectl.RunWithOutput([]string{"get", "mcpserver", "-n", namespace}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrListServersFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrListServersFailed, err, fmt.Sprintf("failed to list servers in namespace %q: %v", namespace, err), map[string]any{"namespace": namespace, "component": "server"}, ) - Error("Failed to list servers") - logStructuredError(m.logger, wrappedErr, "Failed to list servers") + core.Error("Failed to list servers") + core.LogStructuredError(m.logger, wrappedErr, "Failed to list servers") return wrappedErr } return nil @@ -116,12 +111,12 @@ func (m *ServerManager) GetServer(name, namespace string) error { return err } - plat, useK, err := m.platformOrKube() + plat, useK, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !useK { - items, err := plat.listRuntimeServers(context.Background(), namespace) + items, err := plat.ListRuntimeServers(context.Background(), namespace) if err != nil { return err } @@ -133,19 +128,19 @@ func (m *ServerManager) GetServer(name, namespace string) error { return nil } } - return newWithSentinel(ErrGetMCPServerFailed, fmt.Sprintf("server %q not found in namespace %q (platform API)", name, namespace)) + return 
core.NewWithSentinel(core.ErrGetMCPServerFailed, fmt.Sprintf("server %q not found in namespace %q (platform API)", name, namespace)) } // #nosec G204 -- name/namespace validated via validateServerInput. if err := m.kubectl.RunWithOutput([]string{"get", "mcpserver", name, "-n", namespace, "-o", "yaml"}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrGetMCPServerFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrGetMCPServerFailed, err, fmt.Sprintf("failed to get server %q in namespace %q: %v", name, namespace, err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to get server") - logStructuredError(m.logger, wrappedErr, "Failed to get server") + core.Error("Failed to get server") + core.LogStructuredError(m.logger, wrappedErr, "Failed to get server") return wrappedErr } return nil @@ -157,7 +152,7 @@ func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) er return err } if image == "" { - return ErrImageRequired + return core.ErrImageRequired } name, namespace, err := validateServerInput(name, namespace) @@ -184,7 +179,7 @@ func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) er Image: image, ImageTag: imageTag, Replicas: 1, - Port: GetDefaultServerPort(), + Port: core.GetDefaultServerPort(), ServicePort: 80, IngressPath: "/" + name + "/mcp", }, @@ -192,26 +187,26 @@ func (m *ServerManager) CreateServer(name, namespace, image, imageTag string) er manifestBytes, err := yaml.Marshal(manifest) if err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrMarshalManifestFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrMarshalManifestFailed, err, fmt.Sprintf("failed to marshal manifest: %v", err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to marshal manifest") - logStructuredError(m.logger, wrappedErr, "Failed to marshal manifest") + 
core.Error("Failed to marshal manifest") + core.LogStructuredError(m.logger, wrappedErr, "Failed to marshal manifest") return wrappedErr } - if err := applyManifestContent(m.kubectl, string(manifestBytes)); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateServerFailed, + if err := kube.ApplyManifestContent(m.kubectl.CommandArgs, string(manifestBytes)); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateServerFailed, err, fmt.Sprintf("failed to create server %q: %v", name, err), map[string]any{"server": name, "namespace": namespace, "image": image, "component": "server"}, ) - Error("Failed to create server") - logStructuredError(m.logger, wrappedErr, "Failed to create server") + core.Error("Failed to create server") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create server") return wrappedErr } return nil @@ -222,15 +217,15 @@ func (m *ServerManager) ApplyServerFromFile(file string) error { if err := m.requireKubectlForMutation(); err != nil { return err } - if err := applyManifestFromFile(m.kubectl, file, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( + if err := kube.ApplyManifestFromFile(m.kubectl.CommandArgs, file, os.Stdout, os.Stderr); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to apply server manifest from file %q: %v", file, err), map[string]any{"file": file, "component": "server"}, ) - Error("Failed to apply server manifest") - logStructuredError(m.logger, wrappedErr, "Failed to apply server manifest") + core.Error("Failed to apply server manifest") + core.LogStructuredError(m.logger, wrappedErr, "Failed to apply server manifest") return wrappedErr } return nil @@ -241,30 +236,30 @@ func (m *ServerManager) CreateServerFromFile(file string) error { if err := m.requireKubectlForMutation(); err != nil { return err } - absPath, err := resolveRegularFilePath(file) + absPath, err := kube.ResolveRegularFilePath(file) if err 
!= nil { - Error("Cannot access file") - logStructuredError(m.logger, err, "Cannot access file") + core.Error("Cannot access file") + core.LogStructuredError(m.logger, err, "Cannot access file") return err } - manifestBytes, err := readFileAtPath(absPath) + manifestBytes, err := kube.ReadFileAtPath(absPath) if err != nil { - wrappedErr := wrapWithSentinel(ErrFileNotAccessible, err, fmt.Sprintf("cannot read file %q: %v", file, err)) - Error("Cannot access file") - logStructuredError(m.logger, wrappedErr, "Cannot access file") + wrappedErr := core.WrapWithSentinel(core.ErrFileNotAccessible, err, fmt.Sprintf("cannot read file %q: %v", file, err)) + core.Error("Cannot access file") + core.LogStructuredError(m.logger, wrappedErr, "Cannot access file") return wrappedErr } - if err := applyManifestContent(m.kubectl, string(manifestBytes)); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateServerFailed, + if err := kube.ApplyManifestContent(m.kubectl.CommandArgs, string(manifestBytes)); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateServerFailed, err, fmt.Sprintf("failed to create server from file %q: %v", file, err), map[string]any{"file": file, "component": "server"}, ) - Error("Failed to create server from file") - logStructuredError(m.logger, wrappedErr, "Failed to create server from file") + core.Error("Failed to create server from file") + core.LogStructuredError(m.logger, wrappedErr, "Failed to create server from file") return wrappedErr } return nil @@ -286,41 +281,41 @@ func (m *ServerManager) ExportServer(name, namespace, file string) error { } output, execErr := cmd.CombinedOutput() if execErr != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, execErr, - fmt.Sprintf("failed to export server %q in namespace %q: %s", name, namespace, commandErrorDetail(string(output), execErr)), + fmt.Sprintf("failed to export server %q in namespace %q: %s", name, namespace, 
kubeerr.CommandDetail(string(output), execErr)), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to export server") - logStructuredError(m.logger, wrappedErr, "Failed to export server") + core.Error("Failed to export server") + core.LogStructuredError(m.logger, wrappedErr, "Failed to export server") return wrappedErr } if file != "" { - if err := writeOutputFile(file, output); err != nil { - wrappedErr := wrapWithSentinelAndContext( + if err := kube.WriteOutputFile(file, output); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to write server manifest to %q: %v", file, err), map[string]any{"server": name, "namespace": namespace, "file": file, "component": "server"}, ) - Error("Failed to write server manifest") - logStructuredError(m.logger, wrappedErr, "Failed to write server manifest") + core.Error("Failed to write server manifest") + core.LogStructuredError(m.logger, wrappedErr, "Failed to write server manifest") return wrappedErr } return nil } if _, err := os.Stdout.Write(output); err != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to write server manifest to stdout: %v", err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to write server manifest") - logStructuredError(m.logger, wrappedErr, "Failed to write server manifest") + core.Error("Failed to write server manifest") + core.LogStructuredError(m.logger, wrappedErr, "Failed to write server manifest") return wrappedErr } return nil @@ -340,46 +335,46 @@ func (m *ServerManager) PatchServer(name, namespace, patchType, patch, patchFile switch patchType { case "merge", "json", "strategic": default: - return newWithSentinel(nil, fmt.Sprintf("unsupported patch type %q (use merge|json|strategic)", patchType)) + return core.NewWithSentinel(nil, fmt.Sprintf("unsupported patch type %q (use 
merge|json|strategic)", patchType)) } inlinePatch := strings.TrimSpace(patch) patchFile = strings.TrimSpace(patchFile) switch { case inlinePatch == "" && patchFile == "": - return newWithSentinel(nil, "either --patch or --patch-file is required") + return core.NewWithSentinel(nil, "either --patch or --patch-file is required") case inlinePatch != "" && patchFile != "": - return newWithSentinel(nil, "use either --patch or --patch-file, not both") + return core.NewWithSentinel(nil, "use either --patch or --patch-file, not both") } normalizedPatch := inlinePatch if patchFile != "" { - normalizedPatch, err = normalizePatchFile(patchFile) + normalizedPatch, err = kube.NormalizePatchFile(patchFile) } else { - normalizedPatch, err = normalizePatchDocument(inlinePatch) + normalizedPatch, err = kube.NormalizePatchDocument(inlinePatch) } if err != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to prepare patch for server %q: %v", name, err), map[string]any{"server": name, "namespace": namespace, "patch_type": patchType, "component": "server"}, ) - Error("Failed to prepare server patch") - logStructuredError(m.logger, wrappedErr, "Failed to prepare server patch") + core.Error("Failed to prepare server patch") + core.LogStructuredError(m.logger, wrappedErr, "Failed to prepare server patch") return wrappedErr } args := []string{"patch", "mcpserver", name, "-n", namespace, "--type", patchType, "--patch", normalizedPatch} if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to patch server %q in namespace %q: %v", name, namespace, err), map[string]any{"server": name, "namespace": namespace, "patch_type": patchType, "component": "server"}, ) - Error("Failed to patch server") - logStructuredError(m.logger, wrappedErr, "Failed to patch server") + 
core.Error("Failed to patch server") + core.LogStructuredError(m.logger, wrappedErr, "Failed to patch server") return wrappedErr } @@ -393,21 +388,21 @@ func (m *ServerManager) InspectServerPolicy(name, namespace string) error { return err } - plat, useK, err := m.platformOrKube() + plat, useK, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !useK { - b, err := plat.getRuntimePolicy(context.Background(), namespace, name) + b, err := plat.GetRuntimePolicy(context.Background(), namespace, name) if err != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("platform API policy for server %q: %v", name, err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to read server policy") - logStructuredError(m.logger, wrappedErr, "Failed to read server policy") + core.Error("Failed to read server policy") + core.LogStructuredError(m.logger, wrappedErr, "Failed to read server policy") return wrappedErr } var pretty map[string]interface{} @@ -429,27 +424,27 @@ func (m *ServerManager) InspectServerPolicy(name, namespace string) error { } output, execErr := cmd.CombinedOutput() if execErr != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, execErr, - fmt.Sprintf("failed to inspect rendered policy for server %q in namespace %q: %s", name, namespace, commandErrorDetail(string(output), execErr)), + fmt.Sprintf("failed to inspect rendered policy for server %q in namespace %q: %s", name, namespace, kubeerr.CommandDetail(string(output), execErr)), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to inspect server policy") - logStructuredError(m.logger, wrappedErr, "Failed to inspect server policy") + core.Error("Failed to inspect server policy") + core.LogStructuredError(m.logger, wrappedErr, "Failed to inspect server policy") return 
wrappedErr } if len(output) > 0 { if _, err := os.Stdout.Write(output); err != nil { - wrappedErr := wrapWithSentinelAndContext( + wrappedErr := core.WrapWithSentinelAndContext( nil, err, fmt.Sprintf("failed to write rendered policy to stdout: %v", err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to inspect server policy") - logStructuredError(m.logger, wrappedErr, "Failed to inspect server policy") + core.Error("Failed to inspect server policy") + core.LogStructuredError(m.logger, wrappedErr, "Failed to inspect server policy") return wrappedErr } } @@ -474,21 +469,21 @@ func (m *ServerManager) DeleteServer(name, namespace string) error { // #nosec G204 -- name/namespace validated via validateServerInput. if err := m.kubectl.RunWithOutput([]string{"delete", "mcpserver", name, "-n", namespace}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrDeleteServerFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeleteServerFailed, err, fmt.Sprintf("failed to delete server %q in namespace %q: %v", name, namespace, err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to delete server") - logStructuredError(m.logger, wrappedErr, "Failed to delete server") + core.Error("Failed to delete server") + core.LogStructuredError(m.logger, wrappedErr, "Failed to delete server") return wrappedErr } return nil } // ViewServerLogs views logs from an MCP server. 
-func (m *ServerManager) ViewServerLogs(name, namespace string, follow bool) error { +func (m *ServerManager) ViewServerLogs(name, namespace string, follow, previous bool, tail int, since string) error { if err := m.requireKubectlForMutation(); err != nil { return err } @@ -497,21 +492,33 @@ func (m *ServerManager) ViewServerLogs(name, namespace string, follow bool) erro return err } - args := []string{"logs", "-l", LabelApp + "=" + name, "-n", namespace, "--all-containers=true"} + args := []string{ + "logs", + "-l", core.LabelApp + "=" + name, + "-n", namespace, + "--all-containers=true", + "--tail", strconv.Itoa(tail), + } if follow { args = append(args, "-f") } + if previous { + args = append(args, "--previous") + } + if s := strings.TrimSpace(since); s != "" { + args = append(args, "--since", s) + } // #nosec G204 -- name/namespace validated via validateServerInput. if err := m.kubectl.RunWithOutput(args, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrViewServerLogsFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrViewServerLogsFailed, err, fmt.Sprintf("failed to view logs for server %q in namespace %q: %v", name, namespace, err), map[string]any{"server": name, "namespace": namespace, "component": "server"}, ) - Error("Failed to view server logs") - logStructuredError(m.logger, wrappedErr, "Failed to view server logs") + core.Error("Failed to view server logs") + core.LogStructuredError(m.logger, wrappedErr, "Failed to view server logs") return wrappedErr } return nil @@ -519,20 +526,20 @@ func (m *ServerManager) ViewServerLogs(name, namespace string, follow bool) erro // ServerStatus shows the status of MCP servers in a namespace. 
func (m *ServerManager) ServerStatus(namespace string) error { - Header(fmt.Sprintf("MCP Servers in %s", namespace)) - DefaultPrinter.Println() + core.Header(fmt.Sprintf("MCP Servers in %s", namespace)) + core.DefaultPrinter.Println() - plat, useK, err := m.platformOrKube() + plat, useK, err := platformapi.ResolvePlatformOrKube(m.useKube) if err != nil { return err } if !useK { - items, err := plat.listRuntimeServers(context.Background(), namespace) + items, err := plat.ListRuntimeServers(context.Background(), namespace) if err != nil { return err } if len(items) == 0 { - Warn("No MCP servers found in namespace " + namespace) + core.Warn("No MCP servers found in namespace " + namespace) } else { tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) _, _ = fmt.Fprintln(tw, "NAME\tNAMESPACE\tREADY\tSTATUS\tAGE") @@ -541,7 +548,7 @@ func (m *ServerManager) ServerStatus(namespace string) error { } _ = tw.Flush() } - Info("Pod details need kubectl. Run with --use-kube for full status including pods.") + core.Info("Pod details need kubectl. 
Run with --use-kube for full status including pods.") return nil } @@ -557,20 +564,20 @@ func (m *ServerManager) ServerStatus(namespace string) error { if errDetails == "" { errDetails = err.Error() } - DefaultPrinter.Println("ERROR: Failed to list MCP servers: " + errDetails) - wrappedErr := wrapWithSentinelAndContext( - ErrGetMCPServerFailed, + core.DefaultPrinter.Println("ERROR: Failed to list MCP servers: " + errDetails) + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrGetMCPServerFailed, err, fmt.Sprintf("kubectl get mcpserver failed: %v", err), map[string]any{"namespace": namespace, "component": "server"}, ) - logStructuredError(m.logger, wrappedErr, "Failed to get MCP servers") + core.LogStructuredError(m.logger, wrappedErr, "Failed to get MCP servers") return wrappedErr } trimmed := strings.TrimSpace(string(out)) if trimmed == "" { - Warn("No MCP servers found in namespace " + namespace) + core.Warn("No MCP servers found in namespace " + namespace) return nil } rawLines := strings.Split(trimmed, "\n") @@ -582,7 +589,7 @@ func (m *ServerManager) ServerStatus(namespace string) error { lines = append(lines, line) } if len(lines) == 0 { - Warn("No MCP servers found in namespace " + namespace) + core.Warn("No MCP servers found in namespace " + namespace) return nil } @@ -613,21 +620,21 @@ func (m *ServerManager) ServerStatus(namespace string) error { } if len(tableData) > 1 { - TableBoxed(tableData) + core.TableBoxed(tableData) } // Pod status section - DefaultPrinter.Println() - Section("Pod Status") + core.DefaultPrinter.Println() + core.Section("Pod Status") // #nosec G204 -- namespace from CLI flag; fixed label selector. 
- podCmd, err := m.kubectl.CommandArgs([]string{"get", "pods", "-n", namespace, "-l", SelectorManagedBy, "-o", "custom-columns=NAME:.metadata.name,READY:.status.containerStatuses[0].ready,STATUS:.status.phase,RESTARTS:.status.containerStatuses[0].restartCount"}) + podCmd, err := m.kubectl.CommandArgs([]string{"get", "pods", "-n", namespace, "-l", core.SelectorManagedBy, "-o", "custom-columns=NAME:.metadata.name,READY:.status.containerStatuses[0].ready,STATUS:.status.phase,RESTARTS:.status.containerStatuses[0].restartCount"}) if err != nil { return err } podOut, err := podCmd.Output() if err != nil { - Warn("Failed to list pods: " + err.Error()) + core.Warn("Failed to list pods: " + err.Error()) return nil } trimmedPods := strings.TrimSpace(string(podOut)) @@ -647,9 +654,9 @@ func (m *ServerManager) ServerStatus(namespace string) error { for _, pl := range podLines { podData = append(podData, strings.Fields(pl)) } - Table(podData) + core.Table(podData) } else { - Info("No pods found") + core.Info("No pods found") } return nil @@ -675,15 +682,3 @@ type manifestSpec struct { ServicePort int `yaml:"servicePort"` IngressPath string `yaml:"ingressPath"` } - -// validateManifestValue ensures basic values do not contain control characters that would break YAML. 
-func validateManifestValue(field, value string) (string, error) { - if strings.ContainsAny(value, "\r\n\t") { - return "", newWithSentinel(ErrControlCharsNotAllowed, fmt.Sprintf("%s must not contain control characters", field)) - } - value = strings.TrimSpace(value) - if value == "" { - return "", newWithSentinel(ErrFieldRequired, fmt.Sprintf("%s is required", field)) - } - return value, nil -} diff --git a/internal/cli/server_config_test.go b/internal/cli/server/manager_config_test.go similarity index 92% rename from internal/cli/server_config_test.go rename to internal/cli/server/manager_config_test.go index 6ff3a1f..5bbeb44 100644 --- a/internal/cli/server_config_test.go +++ b/internal/cli/server/manager_config_test.go @@ -1,4 +1,4 @@ -package cli +package server import ( "encoding/json" @@ -8,13 +8,15 @@ import ( "testing" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" ) func TestServerManager_ExportServer(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("apiVersion: mcpruntime.org/v1alpha1\nkind: MCPServer\n"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) outputFile := filepath.Join(t.TempDir(), "exported", "server.yaml") @@ -39,8 +41,8 @@ func TestServerManager_ExportServer(t *testing.T) { } func TestServerManager_PatchServerFromFile(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) patchFile := filepath.Join(t.TempDir(), "patch.yaml") @@ -88,10 +90,10 @@ func TestServerManager_PatchServerFromFile(t *testing.T) { } func TestServerManager_InspectServerPolicy(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("{\"policy\":{\"mode\":\"allow-list\"}}"), } - kubectl := &KubectlClient{exec: mock, 
validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) origStdout := os.Stdout diff --git a/internal/cli/server_test.go b/internal/cli/server/manager_test.go similarity index 74% rename from internal/cli/server_test.go rename to internal/cli/server/manager_test.go index 4f2b983..93b04e6 100644 --- a/internal/cli/server_test.go +++ b/internal/cli/server/manager_test.go @@ -1,4 +1,4 @@ -package cli +package server import ( "bytes" @@ -10,14 +10,16 @@ import ( "go.uber.org/zap" "gopkg.in/yaml.v3" + + "mcp-runtime/internal/cli/core" ) func TestServerManager_ListServers(t *testing.T) { t.Run("calls kubectl with correct args", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("server1\nserver2\n"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.ListServers("test-ns") @@ -48,8 +50,8 @@ func TestServerManager_ListServers(t *testing.T) { }) t.Run("trims namespace and passes to kubectl", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.ListServers(" test-ns ") @@ -71,8 +73,8 @@ func TestServerManager_ListServers(t *testing.T) { }) t.Run("rejects empty namespace", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.ListServers(" ") @@ -87,8 +89,8 @@ func TestServerManager_ListServers(t *testing.T) { func TestServerManager_DeleteServer(t *testing.T) { t.Run("validates server name", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := 
&core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) // Invalid name with special chars @@ -104,8 +106,8 @@ func TestServerManager_DeleteServer(t *testing.T) { }) t.Run("calls kubectl delete with correct args", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.DeleteServer("my-server", "test-ns") @@ -137,8 +139,8 @@ func TestServerManager_DeleteServer(t *testing.T) { func TestServerManager_GetServer(t *testing.T) { t.Run("validates inputs", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.GetServer("invalid|name", "ns") @@ -153,13 +155,13 @@ func TestServerManager_GetServer(t *testing.T) { func TestServerManager_CreateServer(t *testing.T) { t.Run("requires image", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.CreateServer("my-server", "test-ns", "", "latest") - if err != ErrImageRequired { - t.Fatalf("expected ErrImageRequired, got %v", err) + if err != core.ErrImageRequired { + t.Fatalf("expected core.ErrImageRequired, got %v", err) } if len(mock.Commands) > 0 { t.Error("should not call kubectl when image is missing") @@ -167,8 +169,8 @@ func TestServerManager_CreateServer(t *testing.T) { }) t.Run("validates inputs", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := 
mgr.CreateServer("bad;name", "test-ns", "img", "latest") @@ -181,8 +183,8 @@ func TestServerManager_CreateServer(t *testing.T) { }) t.Run("rejects tag with control characters", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.CreateServer("my-server", "test-ns", "repo/image", "bad\n") @@ -195,14 +197,14 @@ func TestServerManager_CreateServer(t *testing.T) { }) t.Run("creates manifest and applies via kubectl", func(t *testing.T) { - var applyCmd *MockCommand - mockExec := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - applyCmd = &MockCommand{Args: spec.Args} + var applyCmd *core.MockCommand + mockExec := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + applyCmd = &core.MockCommand{Args: spec.Args} return applyCmd }, } - kubectl, err := NewKubectlClient(mockExec) + kubectl, err := core.NewKubectlClient(mockExec) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } @@ -248,8 +250,8 @@ func TestServerManager_CreateServer(t *testing.T) { func TestServerManager_CreateServerFromFile(t *testing.T) { t.Run("rejects missing file", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.CreateServerFromFile("does-not-exist.yaml") @@ -262,8 +264,8 @@ func TestServerManager_CreateServerFromFile(t *testing.T) { }) t.Run("rejects directory path", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) dir := t.TempDir() @@ -277,14 +279,14 @@ func 
TestServerManager_CreateServerFromFile(t *testing.T) { }) t.Run("applies file via kubectl stdin with default validators", func(t *testing.T) { - var applyCmd *MockCommand - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - applyCmd = &MockCommand{Args: spec.Args} + var applyCmd *core.MockCommand + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + applyCmd = &core.MockCommand{Args: spec.Args} return applyCmd }, } - kubectl, err := NewKubectlClient(mock) + kubectl, err := core.NewKubectlClient(mock) if err != nil { t.Fatalf("failed to create kubectl client: %v", err) } @@ -326,11 +328,11 @@ func TestServerManager_CreateServerFromFile(t *testing.T) { func TestServerManager_ViewServerLogs(t *testing.T) { t.Run("builds logs command without follow", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) - err := mgr.ViewServerLogs("my-server", "test-ns", false) + err := mgr.ViewServerLogs("my-server", "test-ns", false, false, 200, "") if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -345,11 +347,11 @@ func TestServerManager_ViewServerLogs(t *testing.T) { }) t.Run("adds follow flag when requested", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) - err := mgr.ViewServerLogs("my-server", "test-ns", true) + err := mgr.ViewServerLogs("my-server", "test-ns", true, false, 200, "") if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -361,75 +363,11 @@ func TestServerManager_ViewServerLogs(t *testing.T) { }) } -func TestValidateManifestValue(t *testing.T) { - t.Run("trims and returns value", func(t *testing.T) { - got, err := 
validateManifestValue("field", " value ") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if got != "value" { - t.Fatalf("expected trimmed value, got %q", got) - } - }) - - t.Run("rejects empty value", func(t *testing.T) { - _, err := validateManifestValue("field", " ") - if err == nil { - t.Fatal("expected error for empty value") - } - }) - - t.Run("rejects control characters", func(t *testing.T) { - _, err := validateManifestValue("field", "bad\t") - if err == nil { - t.Fatal("expected error for control characters") - } - }) -} - -func TestValidateServerInput(t *testing.T) { - t.Run("returns sanitized values for valid input", func(t *testing.T) { - name, namespace, err := validateServerInput("my-server", "test-ns") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if name != "my-server" || namespace != "test-ns" { - t.Fatalf("unexpected values: name=%q namespace=%q", name, namespace) - } - }) -} - -// contains checks if a string slice contains a value. -func contains(slice []string, val string) bool { - for _, s := range slice { - if s == val { - return true - } - } - return false -} - -func TestValidateServerInputErrors(t *testing.T) { - t.Run("rejects invalid namespace", func(t *testing.T) { - _, _, err := validateServerInput("my-server", "bad\tns") - if err == nil { - t.Fatal("expected error for invalid namespace") - } - }) - - t.Run("rejects empty namespace", func(t *testing.T) { - _, _, err := validateServerInput("my-server", " ") - if err == nil { - t.Fatal("expected error for empty namespace") - } - }) -} - func TestServerManager_GetServerSuccess(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte("apiVersion: v1\nkind: MCPServer"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.GetServer("my-server", "test-ns") @@ -443,8 +381,8 @@ func TestServerManager_GetServerSuccess(t 
*testing.T) { func TestServerManager_CreateServerErrors(t *testing.T) { t.Run("rejects invalid image with control chars", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.CreateServer("my-server", "test-ns", "bad\nimage", "latest") @@ -454,8 +392,8 @@ func TestServerManager_CreateServerErrors(t *testing.T) { }) t.Run("handles kubectl apply error", func(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("apply failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("apply failed")} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) err := mgr.CreateServer("my-server", "test-ns", "repo/image", "latest") @@ -466,11 +404,11 @@ func TestServerManager_CreateServerErrors(t *testing.T) { } func TestServerManager_ViewServerLogsError(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) - err := mgr.ViewServerLogs("bad;name", "test-ns", false) + err := mgr.ViewServerLogs("bad;name", "test-ns", false, false, 200, "") if err == nil { t.Fatal("expected error for invalid name") } @@ -481,10 +419,10 @@ func TestServerManager_ViewServerLogsError(t *testing.T) { func TestServerManager_ServerStatus(t *testing.T) { t.Run("handles empty servers list", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultOutput: []byte(""), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -500,9 +438,9 @@ func TestServerManager_ServerStatus(t *testing.T) { }) t.Run("handles 
server list with provisioned registry", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "mcpserver") { cmd.OutputData = []byte("server1|image:tag|1|/path|true\n") } else if contains(spec.Args, "pods") { @@ -511,7 +449,7 @@ func TestServerManager_ServerStatus(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -527,10 +465,10 @@ func TestServerManager_ServerStatus(t *testing.T) { }) t.Run("handles kubectl get mcpserver error", func(t *testing.T) { - mock := &MockExecutor{ + mock := &core.MockExecutor{ DefaultErr: errors.New("not found"), } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -544,10 +482,10 @@ func TestServerManager_ServerStatus(t *testing.T) { t.Run("handles get pods error gracefully", func(t *testing.T) { callCount := 0 - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { callCount++ - cmd := &MockCommand{Args: spec.Args} + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "mcpserver") { cmd.OutputData = []byte("server1|image:tag|1|/path|false\n") } else if contains(spec.Args, "pods") { @@ -556,7 +494,7 @@ func TestServerManager_ServerStatus(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -569,16 +507,16 @@ func TestServerManager_ServerStatus(t *testing.T) { }) 
t.Run("handles whitespace-only lines in server output", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "mcpserver") { cmd.OutputData = []byte("server1|image:tag|1|/path|false\n \n\n") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -591,9 +529,9 @@ func TestServerManager_ServerStatus(t *testing.T) { }) t.Run("handles pods command with no pods found", func(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "mcpserver") { cmd.OutputData = []byte("server1|image:tag|1|/path|false\n") } else if contains(spec.Args, "pods") { @@ -602,7 +540,7 @@ func TestServerManager_ServerStatus(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) mgr := NewServerManager(kubectl, zap.NewNop()) var buf bytes.Buffer @@ -614,3 +552,21 @@ func TestServerManager_ServerStatus(t *testing.T) { } }) } + +func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { + t.Helper() + orig := core.DefaultPrinter.Writer + core.DefaultPrinter.Writer = w + t.Cleanup(func() { + core.DefaultPrinter.Writer = orig + }) +} + +func contains(slice []string, val string) bool { + for _, s := range slice { + if s == val { + return true + } + } + return false +} diff --git a/internal/cli/server/server.go b/internal/cli/server/server.go index 9835dfa..b95df93 100644 --- a/internal/cli/server/server.go +++ 
b/internal/cli/server/server.go @@ -4,16 +4,16 @@ package server import ( "github.com/spf13/cobra" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" ) // New returns the server command. -func New(runtime *cli.Runtime) *cobra.Command { - return NewWithManager(runtime.ServerManager()) +func New(runtime *core.Runtime) *cobra.Command { + return NewWithManager(NewServerManager(runtime.KubectlClient(), runtime.Logger())) } // NewWithManager returns the server command using the provided manager. -func NewWithManager(mgr *cli.ServerManager) *cobra.Command { +func NewWithManager(mgr *ServerManager) *cobra.Command { cmd := &cobra.Command{ Use: "server", Short: "Manage MCP servers", @@ -38,7 +38,7 @@ For pushing images, use 'registry push'.`, return mgr.ListServers(namespace) }, } - listCmd.Flags().StringVar(&namespace, "namespace", cli.NamespaceMCPServers, "Namespace to list servers from") + listCmd.Flags().StringVar(&namespace, "namespace", core.NamespaceMCPServers, "Namespace to list servers from") var getNamespace string getCmd := &cobra.Command{ @@ -50,7 +50,7 @@ For pushing images, use 'registry push'.`, return mgr.GetServer(args[0], getNamespace) }, } - getCmd.Flags().StringVar(&getNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + getCmd.Flags().StringVar(&getNamespace, "namespace", core.NamespaceMCPServers, "Namespace") var createNamespace string var image string @@ -68,7 +68,7 @@ For pushing images, use 'registry push'.`, return mgr.CreateServer(args[0], createNamespace, image, imageTag) }, } - createCmd.Flags().StringVar(&createNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + createCmd.Flags().StringVar(&createNamespace, "namespace", core.NamespaceMCPServers, "Namespace") createCmd.Flags().StringVar(&image, "image", "", "Container image") createCmd.Flags().StringVar(&imageTag, "tag", "latest", "Image tag") createCmd.Flags().StringVar(&file, "file", "", "YAML file with server spec") @@ -94,7 +94,7 @@ For pushing images, use 
'registry push'.`, return mgr.ExportServer(args[0], exportNamespace, exportFile) }, } - exportCmd.Flags().StringVar(&exportNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + exportCmd.Flags().StringVar(&exportNamespace, "namespace", core.NamespaceMCPServers, "Namespace") exportCmd.Flags().StringVar(&exportFile, "file", "", "Write the manifest to a file instead of stdout") var patchNamespace string @@ -109,7 +109,7 @@ For pushing images, use 'registry push'.`, return mgr.PatchServer(args[0], patchNamespace, patchType, patch, patchFile) }, } - patchCmd.Flags().StringVar(&patchNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + patchCmd.Flags().StringVar(&patchNamespace, "namespace", core.NamespaceMCPServers, "Namespace") patchCmd.Flags().StringVar(&patchType, "type", "merge", "Patch type (merge|json|strategic)") patchCmd.Flags().StringVar(&patch, "patch", "", "Inline JSON/YAML patch document") patchCmd.Flags().StringVar(&patchFile, "patch-file", "", "Path to a JSON/YAML patch document") @@ -124,21 +124,27 @@ For pushing images, use 'registry push'.`, return mgr.DeleteServer(args[0], deleteNamespace) }, } - deleteCmd.Flags().StringVar(&deleteNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + deleteCmd.Flags().StringVar(&deleteNamespace, "namespace", core.NamespaceMCPServers, "Namespace") var logsNamespace string var follow bool + var previous bool + var tail int + var since string logsCmd := &cobra.Command{ Use: "logs [name]", Short: "View server logs", Long: "View logs from an MCP server", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return mgr.ViewServerLogs(args[0], logsNamespace, follow) + return mgr.ViewServerLogs(args[0], logsNamespace, follow, previous, tail, since) }, } - logsCmd.Flags().StringVar(&logsNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + logsCmd.Flags().StringVar(&logsNamespace, "namespace", core.NamespaceMCPServers, "Namespace") 
logsCmd.Flags().BoolVar(&follow, "follow", false, "Follow log output") + logsCmd.Flags().BoolVar(&previous, "previous", false, "Show logs from the previous container instance") + logsCmd.Flags().IntVar(&tail, "tail", 200, "Number of recent log lines to show (-1 for all)") + logsCmd.Flags().StringVar(&since, "since", "", "Only return logs newer than a relative duration like 5m or 1h") var statusNamespace string statusCmd := &cobra.Command{ @@ -149,7 +155,7 @@ For pushing images, use 'registry push'.`, return mgr.ServerStatus(statusNamespace) }, } - statusCmd.Flags().StringVar(&statusNamespace, "namespace", cli.NamespaceMCPServers, "Namespace to inspect") + statusCmd.Flags().StringVar(&statusNamespace, "namespace", core.NamespaceMCPServers, "Namespace to inspect") var policyNamespace string policyCmd := &cobra.Command{ @@ -164,7 +170,7 @@ For pushing images, use 'registry push'.`, return mgr.InspectServerPolicy(args[0], policyNamespace) }, } - inspectCmd.Flags().StringVar(&policyNamespace, "namespace", cli.NamespaceMCPServers, "Namespace") + inspectCmd.Flags().StringVar(&policyNamespace, "namespace", core.NamespaceMCPServers, "Namespace") policyCmd.AddCommand(inspectCmd) buildCmd := &cobra.Command{ diff --git a/internal/cli/server/server_status_print_test.go b/internal/cli/server/server_status_print_test.go new file mode 100644 index 0000000..4e18278 --- /dev/null +++ b/internal/cli/server/server_status_print_test.go @@ -0,0 +1,148 @@ +package server + +import ( + "bytes" + "errors" + "os" + "strings" + "testing" + + "github.com/pterm/pterm" + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" +) + +func TestServerStatus_printingAndKubectl(t *testing.T) { + logger := zap.NewNop() + namespace := "mcp-servers" + + t.Run("returns-error-and-logs-combined-output-on-mcpserver-list-failure", func(t *testing.T) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + if spec.Name == "kubectl" && contains(spec.Args, "mcpserver") { + 
return &core.MockCommand{ + Args: spec.Args, + OutputData: []byte("boom-out\nboom-err\n"), + OutputErr: errors.New("kubectl failed"), + } + } + return &core.MockCommand{} + }, + } + kubectl := core.NewTestKubectlClient(mock) + mgr := NewServerManager(kubectl, logger) + + var buf bytes.Buffer + pterm.SetDefaultOutput(&buf) + pterm.DisableStyling() + setDefaultPrinterWriter(t, &buf) + t.Cleanup(func() { + pterm.SetDefaultOutput(os.Stdout) + pterm.EnableStyling() + }) + + err := mgr.ServerStatus(namespace) + if err == nil { + t.Fatal("expected error from ServerStatus, got nil") + } + out := buf.String() + if !strings.Contains(out, "boom-out") || !strings.Contains(out, "boom-err") { + t.Fatalf("expected combined output to be logged, got output: %s", out) + } + }) + + t.Run("prints warning when_no_servers_found", func(t *testing.T) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + if spec.Name == "kubectl" && contains(spec.Args, "mcpserver") { + return &core.MockCommand{Args: spec.Args, OutputData: []byte(""), OutputErr: nil} + } + return &core.MockCommand{} + }, + } + kubectl := core.NewTestKubectlClient(mock) + mgr := NewServerManager(kubectl, logger) + + var buf bytes.Buffer + pterm.SetDefaultOutput(&buf) + pterm.DisableStyling() + setDefaultPrinterWriter(t, &buf) + t.Cleanup(func() { + pterm.SetDefaultOutput(os.Stdout) + pterm.EnableStyling() + }) + + if err := mgr.ServerStatus(namespace); err != nil { + t.Fatalf("ServerStatus unexpected error = %v", err) + } + out := buf.String() + if !strings.Contains(out, "No MCP servers found in namespace "+namespace) { + t.Fatalf("expected no servers warning, got output: %s", out) + } + }) + + t.Run("uses-managed-by-label-when-listing-pods", func(t *testing.T) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if spec.Name == "kubectl" && contains(spec.Args, "mcpserver") { + cmd.OutputData = 
[]byte("server1|image:tag|1|/server|false\n") + } else if spec.Name == "kubectl" && contains(spec.Args, "pods") { + cmd.OutputData = []byte("NAME READY STATUS RESTARTS\npod-1 true Running 0\n") + } + return cmd + }, + } + kubectl := core.NewTestKubectlClient(mock) + mgr := NewServerManager(kubectl, logger) + + if err := mgr.ServerStatus(namespace); err != nil { + t.Fatalf("ServerStatus unexpected error = %v", err) + } + + found := false + for _, c := range mock.Commands { + if c.Name == "kubectl" && contains(c.Args, "pods") && contains(c.Args, core.SelectorManagedBy) { + found = true + break + } + } + if !found { + t.Fatalf("expected managed-by label selector, got commands: %v", mock.Commands) + } + }) + + t.Run("prints_no_pods_found_when_only_header_returned", func(t *testing.T) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if spec.Name == "kubectl" && contains(spec.Args, "mcpserver") { + cmd.OutputData = []byte("server1|image:tag|1|/server|false\n") + } else if spec.Name == "kubectl" && contains(spec.Args, "pods") { + cmd.OutputData = []byte("NAME READY STATUS RESTARTS\n") + } + return cmd + }, + } + kubectl := core.NewTestKubectlClient(mock) + mgr := NewServerManager(kubectl, logger) + + var buf bytes.Buffer + pterm.SetDefaultOutput(&buf) + pterm.DisableStyling() + setDefaultPrinterWriter(t, &buf) + t.Cleanup(func() { + pterm.SetDefaultOutput(os.Stdout) + pterm.EnableStyling() + }) + + if err := mgr.ServerStatus(namespace); err != nil { + t.Fatalf("ServerStatus unexpected error = %v", err) + } + out := buf.String() + if !strings.Contains(out, "No pods found") { + t.Fatalf("expected no pods message, got output: %s", out) + } + }) +} diff --git a/internal/cli/server/validation.go b/internal/cli/server/validation.go new file mode 100644 index 0000000..d0c01e6 --- /dev/null +++ b/internal/cli/server/validation.go @@ -0,0 +1,11 @@ +package server + +import 
"mcp-runtime/internal/cli/core" + +// validateManifestValue is a local alias so call sites in this package can use +// the short name; the implementation lives in core. +var validateManifestValue = core.ValidateManifestField + +func validateServerInput(name, namespace string) (string, string, error) { + return core.ValidateK8sNameAndNamespace("server name", core.ErrInvalidServerName, name, namespace) +} diff --git a/internal/cli/server/validation_test.go b/internal/cli/server/validation_test.go new file mode 100644 index 0000000..e17912f --- /dev/null +++ b/internal/cli/server/validation_test.go @@ -0,0 +1,57 @@ +package server + +import "testing" + +func TestValidateManifestValue(t *testing.T) { + t.Run("trims and returns value", func(t *testing.T) { + got, err := validateManifestValue("field", " value ") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != "value" { + t.Fatalf("expected trimmed value, got %q", got) + } + }) + + t.Run("rejects empty value", func(t *testing.T) { + _, err := validateManifestValue("field", " ") + if err == nil { + t.Fatal("expected error for empty value") + } + }) + + t.Run("rejects control characters", func(t *testing.T) { + _, err := validateManifestValue("field", "bad\t") + if err == nil { + t.Fatal("expected error for control characters") + } + }) +} + +func TestValidateServerInput(t *testing.T) { + t.Run("returns sanitized values for valid input", func(t *testing.T) { + name, namespace, err := validateServerInput("my-server", "test-ns") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if name != "my-server" || namespace != "test-ns" { + t.Fatalf("unexpected values: name=%q namespace=%q", name, namespace) + } + }) +} + +func TestValidateServerInputErrors(t *testing.T) { + t.Run("rejects invalid namespace", func(t *testing.T) { + _, _, err := validateServerInput("my-server", "bad\tns") + if err == nil { + t.Fatal("expected error for invalid namespace") + } + }) + + t.Run("rejects empty 
namespace", func(t *testing.T) { + _, _, err := validateServerInput("my-server", " ") + if err == nil { + t.Fatal("expected error for empty namespace") + } + }) +} diff --git a/internal/cli/asset_paths.go b/internal/cli/setup/assetpath/paths.go similarity index 63% rename from internal/cli/asset_paths.go rename to internal/cli/setup/assetpath/paths.go index d929801..0fd2bf2 100644 --- a/internal/cli/asset_paths.go +++ b/internal/cli/setup/assetpath/paths.go @@ -1,4 +1,6 @@ -package cli +// Package assetpath resolves repository-relative asset paths from the current +// working directory by walking upward until go.mod, services/, and k8s/ match. +package assetpath import ( "fmt" @@ -7,16 +9,16 @@ import ( "strings" ) -// resolveRepoAssetPath finds a repo-relative path from the current working directory -// by walking upward until the asset exists. The repo now assumes the flattened root +// ResolveRepoAssetPath finds a repo-relative path from the current working directory +// by walking upward until the asset exists. The repo assumes a flattened root // layout (for example services/ and k8s/ at the top level). -func resolveRepoAssetPath(path string) (string, error) { +func ResolveRepoAssetPath(path string) (string, error) { cleaned := filepath.Clean(strings.TrimSpace(path)) if cleaned == "" { return "", fmt.Errorf("empty repo asset path") } if cleaned == "." 
{ - return resolveRepoRoot() + return ResolveRepoRoot() } if filepath.IsAbs(cleaned) { if _, err := os.Stat(cleaned); err != nil { @@ -25,7 +27,7 @@ func resolveRepoAssetPath(path string) (string, error) { return cleaned, nil } - root, err := resolveRepoRoot() + root, err := ResolveRepoRoot() if err != nil { return "", err } @@ -37,14 +39,15 @@ func resolveRepoAssetPath(path string) (string, error) { return "", fmt.Errorf("repo asset path %q not found from repo root %s", cleaned, root) } -func resolveRepoRoot() (string, error) { +// ResolveRepoRoot walks upward from the working directory until IsRepoRoot reports true. +func ResolveRepoRoot() (string, error) { cwd, err := os.Getwd() if err != nil { return "", err } for { - if isRepoRoot(cwd) { + if IsRepoRoot(cwd) { return cwd, nil } @@ -58,7 +61,8 @@ func resolveRepoRoot() (string, error) { return "", fmt.Errorf("repo root not found from current directory") } -func isRepoRoot(dir string) bool { +// IsRepoRoot reports whether dir looks like the mcp-runtime repository root. 
+func IsRepoRoot(dir string) bool { if _, err := os.Stat(filepath.Join(dir, "go.mod")); err != nil { return false } diff --git a/internal/cli/asset_paths_test.go b/internal/cli/setup/assetpath/paths_test.go similarity index 78% rename from internal/cli/asset_paths_test.go rename to internal/cli/setup/assetpath/paths_test.go index 077c56e..8bfe210 100644 --- a/internal/cli/asset_paths_test.go +++ b/internal/cli/setup/assetpath/paths_test.go @@ -1,4 +1,4 @@ -package cli +package assetpath import ( "os" @@ -39,9 +39,9 @@ func TestResolveRepoAssetPath(t *testing.T) { }) t.Run("walks upward to repo root", func(t *testing.T) { - got, err := resolveRepoAssetPath(filepath.Join("k8s", "08-api.yaml")) + got, err := ResolveRepoAssetPath(filepath.Join("k8s", "08-api.yaml")) if err != nil { - t.Fatalf("resolveRepoAssetPath() error = %v", err) + t.Fatalf("ResolveRepoAssetPath() error = %v", err) } want := filepath.Join(repoRoot, "k8s", "08-api.yaml") @@ -54,25 +54,25 @@ func TestResolveRepoAssetPath(t *testing.T) { t.Fatalf("EvalSymlinks(want) error = %v", err) } if gotEval != wantEval { - t.Fatalf("resolveRepoAssetPath() = %q, want %q", gotEval, wantEval) + t.Fatalf("ResolveRepoAssetPath() = %q, want %q", gotEval, wantEval) } }) t.Run("accepts absolute paths", func(t *testing.T) { want := filepath.Join(repoRoot, "k8s", "08-api.yaml") - got, err := resolveRepoAssetPath(want) + got, err := ResolveRepoAssetPath(want) if err != nil { - t.Fatalf("resolveRepoAssetPath() error = %v", err) + t.Fatalf("ResolveRepoAssetPath() error = %v", err) } if got != want { - t.Fatalf("resolveRepoAssetPath() = %q, want %q", got, want) + t.Fatalf("ResolveRepoAssetPath() = %q, want %q", got, want) } }) t.Run("resolves repo root for dot context", func(t *testing.T) { - got, err := resolveRepoAssetPath(".") + got, err := ResolveRepoAssetPath(".") if err != nil { - t.Fatalf("resolveRepoAssetPath(.) error = %v", err) + t.Fatalf("ResolveRepoAssetPath(.) 
error = %v", err) } gotEval, err := filepath.EvalSymlinks(got) @@ -84,12 +84,12 @@ func TestResolveRepoAssetPath(t *testing.T) { t.Fatalf("EvalSymlinks(repoRoot) error = %v", err) } if gotEval != wantEval { - t.Fatalf("resolveRepoAssetPath(.) = %q, want %q", gotEval, wantEval) + t.Fatalf("ResolveRepoAssetPath(.) = %q, want %q", gotEval, wantEval) } }) t.Run("errors for missing assets", func(t *testing.T) { - if _, err := resolveRepoAssetPath(filepath.Join("k8s", "missing.yaml")); err == nil { + if _, err := ResolveRepoAssetPath(filepath.Join("k8s", "missing.yaml")); err == nil { t.Fatal("expected error for missing asset") } }) diff --git a/internal/cli/setup/config_plan_test.go b/internal/cli/setup/config_plan_test.go new file mode 100644 index 0000000..701ffe0 --- /dev/null +++ b/internal/cli/setup/config_plan_test.go @@ -0,0 +1,30 @@ +package setup + +import ( + "testing" + + "mcp-runtime/internal/cli/certmanager" + "mcp-runtime/internal/cli/core" + setupplan "mcp-runtime/internal/cli/setup/plan" +) + +func TestApplySetupPlanToCLIConfig_TLSClusterIssuer(t *testing.T) { + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{RegistryClusterIssuerName: "unset"} + + applySetupPlanToCLIConfig(setupplan.Plan{TLSEnabled: true, TLSClusterIssuer: "internal-ca", ACMEmail: ""}) + if core.GetRegistryClusterIssuerName() != "internal-ca" { + t.Fatalf("expected custom issuer, got %q", core.GetRegistryClusterIssuerName()) + } + + applySetupPlanToCLIConfig(setupplan.Plan{TLSEnabled: true, TLSClusterIssuer: "ignored", ACMEmail: "ops@mcpruntime.com"}) + if want := certmanager.ClusterIssuerNameForACME(false); core.GetRegistryClusterIssuerName() != want { + t.Fatalf("expected ACME issuer to take precedence, got %q", core.GetRegistryClusterIssuerName()) + } + + applySetupPlanToCLIConfig(setupplan.Plan{TLSEnabled: false}) + if core.GetRegistryClusterIssuerName() != "" { + t.Fatalf("expected cleared when TLS off, 
got %q", core.GetRegistryClusterIssuerName()) + } +} diff --git a/internal/cli/setup/flow.go b/internal/cli/setup/flow.go new file mode 100644 index 0000000..f2c98d4 --- /dev/null +++ b/internal/cli/setup/flow.go @@ -0,0 +1,151 @@ +package setup + +import ( + "fmt" + "net" + "net/url" + "strings" + + "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/registry/config" + setupplan "mcp-runtime/internal/cli/setup/plan" +) + +// printPlatformEntrypoints prints the public URLs derived from +// MCP_PLATFORM_DOMAIN / MCP_*_INGRESS_HOST so the operator knows which +// hostnames must resolve in DNS and what the dashboard URL is. +func printPlatformEntrypoints(tlsEnabled bool) { + scheme := "http://" + if tlsEnabled { + scheme = "https://" + } + registry := strings.TrimSpace(core.GetRegistryIngressHost()) + mcp := strings.TrimSpace(core.GetMcpIngressHost()) + platform := strings.TrimSpace(core.GetPlatformIngressHost()) + if registry == "" && mcp == "" && platform == "" { + return + } + fmt.Println() + fmt.Println("Public entrypoints:") + if platform != "" { + fmt.Printf(" Dashboard: %s%s/\n", scheme, platform) + } + if registry != "" { + fmt.Printf(" Registry: %s%s/v2/\n", scheme, registry) + } + if mcp != "" { + fmt.Printf(" MCP: %s%s//mcp\n", scheme, mcp) + } + if platform != "" { + fmt.Println(" (Make sure DNS A/AAAA records point platform./registry./mcp. 
at the cluster ingress.)") + } +} + +func resolveRegistrySetup(logger *zap.Logger, deps SetupDeps) (*config.ExternalRegistryConfig, bool, string) { + extRegistry, err := deps.ResolveExternalRegistryConfig(nil) + if err != nil { + core.Warn(fmt.Sprintf("Could not load external registry config: %v", err)) + } + usingExternalRegistry := extRegistry != nil + return extRegistry, usingExternalRegistry, defaultRegistrySecretName +} + +func validateNonTestSetup(plan setupplan.Plan, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry bool) error { + if plan.TestMode { + return nil + } + if !plan.StrictProd { + return nil + } + if !plan.TLSEnabled { + return core.NewWithSentinel( + core.ErrSetupStepFailed, + "strict production setup requires --with-tls; use normal setup for local HTTP/internal registry flows", + ) + } + if usingExternalRegistry && extRegistry != nil && strings.TrimSpace(extRegistry.URL) != "" { + if isDevRegistryURL(extRegistry.URL) { + return core.NewWithSentinel( + core.ErrSetupStepFailed, + fmt.Sprintf("strict production setup requires a stable production registry, got dev-only registry URL %q", extRegistry.URL), + ) + } + return nil + } + if isDevRegistryURL(core.GetRegistryEndpoint()) { + return core.NewWithSentinel( + core.ErrSetupStepFailed, + fmt.Sprintf("strict production setup requires a stable internal registry endpoint; set MCP_REGISTRY_ENDPOINT (current %q)", core.GetRegistryEndpoint()), + ) + } + return nil +} + +func setupWarnings(plan setupplan.Plan, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry bool) []string { + if plan.TestMode { + return nil + } + + var warnings []string + if !plan.TLSEnabled { + warnings = append(warnings, "Non-test setup is running without TLS. 
This is fine for local/internal registries but not recommended for production.") + } + + if usingExternalRegistry && extRegistry != nil && strings.TrimSpace(extRegistry.URL) != "" { + registryURL := strings.TrimSpace(extRegistry.URL) + if strings.HasPrefix(strings.ToLower(registryURL), "http://") { + warnings = append(warnings, fmt.Sprintf("External registry %q is using HTTP. This is acceptable for local environments but not recommended for production.", registryURL)) + } + if isDevRegistryURL(registryURL) { + warnings = append(warnings, fmt.Sprintf("External registry %q looks local/internal. Normal setup allows this, but use --strict-prod to enforce production-style validation.", registryURL)) + } + return warnings + } + + registryEndpoint := strings.TrimSpace(core.GetRegistryEndpoint()) + if registryEndpoint == "" { + warnings = append(warnings, "Internal registry host is empty; setup will fall back to service DNS. This is fine for local clusters but not recommended for production.") + return warnings + } + if isDevRegistryURL(registryEndpoint) { + warnings = append(warnings, fmt.Sprintf("Internal registry endpoint %q looks local/internal. 
Normal setup allows this for local clusters, but use --strict-prod to enforce production-style validation.", registryEndpoint)) + } + return warnings +} + +func isDevRegistryURL(raw string) bool { + trimmed := strings.TrimSpace(strings.TrimSuffix(raw, "/")) + if trimmed == "" { + return true + } + if strings.HasPrefix(strings.ToLower(trimmed), "http://") { + return true + } + + host := trimmed + if strings.Contains(trimmed, "://") { + if parsed, err := url.Parse(trimmed); err == nil && parsed.Host != "" { + host = parsed.Host + } + } + if slash := strings.Index(host, "/"); slash >= 0 { + host = host[:slash] + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } else if idx := strings.LastIndex(host, ":"); idx >= 0 && strings.Count(host, ":") == 1 { + host = host[:idx] + } + + host = strings.ToLower(strings.Trim(host, "[]")) + switch host { + case "", "localhost", "registry.local": + return true + } + if strings.HasSuffix(host, ".local") || strings.HasSuffix(host, ".svc.cluster.local") { + return true + } + return net.ParseIP(host) != nil +} diff --git a/internal/cli/setup_helpers_test.go b/internal/cli/setup/helpers_test.go similarity index 80% rename from internal/cli/setup_helpers_test.go rename to internal/cli/setup/helpers_test.go index f226e4a..ea10de0 100644 --- a/internal/cli/setup_helpers_test.go +++ b/internal/cli/setup/helpers_test.go @@ -1,4 +1,4 @@ -package cli +package setup import ( "encoding/base64" @@ -15,12 +15,17 @@ import ( "go.uber.org/zap" "gopkg.in/yaml.v3" + + "mcp-runtime/internal/cli/cluster" + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/registry/config" + setupplan "mcp-runtime/internal/cli/setup/plan" ) type helperFakeClusterManager struct{} -func (f *helperFakeClusterManager) InitCluster(_, _ string) error { return nil } -func (f *helperFakeClusterManager) ConfigureCluster(_ ingressOptions) error { return nil } +func (f *helperFakeClusterManager) InitCluster(_, _ string) error { return nil } +func (f 
*helperFakeClusterManager) ConfigureCluster(_ cluster.IngressOptions) error { return nil } type helperFakeRegistryManager struct{} @@ -54,12 +59,10 @@ func csvHasValue(csv, value string) bool { } func TestGetOperatorImage(t *testing.T) { - origOverride := DefaultCLIConfig.OperatorImage - origKubectl := kubectlClient + origOverride := core.DefaultCLIConfig.OperatorImage origTagResolver := setupImageTagResolver t.Cleanup(func() { - DefaultCLIConfig.OperatorImage = origOverride - kubectlClient = origKubectl + core.DefaultCLIConfig.OperatorImage = origOverride setupImageTagResolver = origTagResolver }) @@ -67,7 +70,7 @@ func TestGetOperatorImage(t *testing.T) { setupImageTagResolver = func() string { return "deadbeef" } t.Run("uses override when set", func(t *testing.T) { - DefaultCLIConfig.OperatorImage = "override/operator:v1" + core.DefaultCLIConfig.OperatorImage = "override/operator:v1" got := getOperatorImage(nil) if got != "override/operator:v1" { t.Fatalf("expected override image, got %q", got) @@ -75,8 +78,8 @@ func TestGetOperatorImage(t *testing.T) { }) t.Run("uses external registry URL", func(t *testing.T) { - DefaultCLIConfig.OperatorImage = "" - ext := &ExternalRegistryConfig{URL: "registry.example.com/"} + core.DefaultCLIConfig.OperatorImage = "" + ext := &config.ExternalRegistryConfig{URL: "registry.example.com/"} got := getOperatorImage(ext) if got != "registry.example.com/mcp-runtime-operator:latest" { t.Fatalf("unexpected external registry image: %q", got) @@ -84,16 +87,16 @@ func TestGetOperatorImage(t *testing.T) { }) t.Run("uses platform registry URL when external not set", func(t *testing.T) { - DefaultCLIConfig.OperatorImage = "" - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + core.DefaultCLIConfig.OperatorImage = "" + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "jsonpath={.spec.ports[0].port}") { - return &MockCommand{OutputData: []byte("5000")} + 
return &core.MockCommand{OutputData: []byte("5000")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlClientForTest(t, core.NewTestKubectlClient(mock)) got := getOperatorImage(nil) if got != "registry.registry.svc.cluster.local:5000/mcp-runtime-operator:latest" { t.Fatalf("unexpected platform registry image: %q", got) @@ -101,9 +104,9 @@ func TestGetOperatorImage(t *testing.T) { }) t.Run("uses versioned tag outside test mode", func(t *testing.T) { - DefaultCLIConfig.OperatorImage = "" + core.DefaultCLIConfig.OperatorImage = "" t.Setenv("MCP_RUNTIME_TEST_MODE", "") - ext := &ExternalRegistryConfig{URL: "registry.example.com/"} + ext := &config.ExternalRegistryConfig{URL: "registry.example.com/"} got := getOperatorImage(ext) if got != "registry.example.com/mcp-runtime-operator:deadbeef" { t.Fatalf("unexpected versioned image: %q", got) @@ -112,12 +115,10 @@ func TestGetOperatorImage(t *testing.T) { } func TestGetGatewayProxyImage(t *testing.T) { - origOverride := DefaultCLIConfig.GatewayProxyImage - origKubectl := kubectlClient + origOverride := core.DefaultCLIConfig.GatewayProxyImage origTagResolver := setupImageTagResolver t.Cleanup(func() { - DefaultCLIConfig.GatewayProxyImage = origOverride - kubectlClient = origKubectl + core.DefaultCLIConfig.GatewayProxyImage = origOverride setupImageTagResolver = origTagResolver }) @@ -125,7 +126,7 @@ func TestGetGatewayProxyImage(t *testing.T) { setupImageTagResolver = func() string { return "deadbeef" } t.Run("uses override when set", func(t *testing.T) { - DefaultCLIConfig.GatewayProxyImage = "override/mcp-proxy:v1" + core.DefaultCLIConfig.GatewayProxyImage = "override/mcp-proxy:v1" got := getGatewayProxyImage(nil) if got != "override/mcp-proxy:v1" { t.Fatalf("expected override image, got %q", got) @@ -133,8 +134,8 @@ func TestGetGatewayProxyImage(t *testing.T) { }) t.Run("uses external registry URL", func(t *testing.T) { - 
DefaultCLIConfig.GatewayProxyImage = "" - ext := &ExternalRegistryConfig{URL: "registry.example.com/"} + core.DefaultCLIConfig.GatewayProxyImage = "" + ext := &config.ExternalRegistryConfig{URL: "registry.example.com/"} got := getGatewayProxyImage(ext) if got != "registry.example.com/mcp-sentinel-mcp-proxy:latest" { t.Fatalf("unexpected external registry image: %q", got) @@ -142,16 +143,16 @@ func TestGetGatewayProxyImage(t *testing.T) { }) t.Run("uses platform registry URL when external not set", func(t *testing.T) { - DefaultCLIConfig.GatewayProxyImage = "" - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + core.DefaultCLIConfig.GatewayProxyImage = "" + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "jsonpath={.spec.ports[0].port}") { - return &MockCommand{OutputData: []byte("5000")} + return &core.MockCommand{OutputData: []byte("5000")} } - return &MockCommand{} + return &core.MockCommand{} }, } - kubectlClient = &KubectlClient{exec: mock, validators: nil} + swapDefaultKubectlClientForTest(t, core.NewTestKubectlClient(mock)) got := getGatewayProxyImage(nil) if got != "registry.registry.svc.cluster.local:5000/mcp-sentinel-mcp-proxy:latest" { t.Fatalf("unexpected platform registry image: %q", got) @@ -159,9 +160,9 @@ func TestGetGatewayProxyImage(t *testing.T) { }) t.Run("uses versioned tag outside test mode", func(t *testing.T) { - DefaultCLIConfig.GatewayProxyImage = "" + core.DefaultCLIConfig.GatewayProxyImage = "" t.Setenv("MCP_RUNTIME_TEST_MODE", "") - ext := &ExternalRegistryConfig{URL: "registry.example.com/"} + ext := &config.ExternalRegistryConfig{URL: "registry.example.com/"} got := getGatewayProxyImage(ext) if got != "registry.example.com/mcp-sentinel-mcp-proxy:deadbeef" { t.Fatalf("unexpected versioned image: %q", got) @@ -195,13 +196,13 @@ func TestBuildOperatorArgs(t *testing.T) { } func TestOperatorEnvOverrides(t *testing.T) { - orig := DefaultCLIConfig + orig 
:= core.DefaultCLIConfig t.Cleanup(func() { - DefaultCLIConfig = orig + core.DefaultCLIConfig = orig }) t.Run("returns empty when no gateway override is set", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{} + core.DefaultCLIConfig = &core.CLIConfig{} got := operatorEnvOverrides("") if len(got) != 1 { t.Fatalf("expected default analytics ingest env only, got %v", got) @@ -212,7 +213,7 @@ func TestOperatorEnvOverrides(t *testing.T) { }) t.Run("returns gateway proxy image override", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{GatewayProxyImage: "example.com/mcp-proxy:latest"} + core.DefaultCLIConfig = &core.CLIConfig{GatewayProxyImage: "example.com/mcp-proxy:latest"} got := operatorEnvOverrides("") if len(got) != 2 { t.Fatalf("expected gateway and analytics env overrides, got %d (%v)", len(got), got) @@ -226,7 +227,7 @@ func TestOperatorEnvOverrides(t *testing.T) { }) t.Run("prefers explicit setup image over config override", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{ + core.DefaultCLIConfig = &core.CLIConfig{ GatewayProxyImage: "example.com/mcp-proxy:config", AnalyticsIngestURL: "http://custom-analytics-ingest", } @@ -243,7 +244,7 @@ func TestOperatorEnvOverrides(t *testing.T) { }) t.Run("uses analytics ingest override when configured", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{AnalyticsIngestURL: "http://custom-analytics-ingest"} + core.DefaultCLIConfig = &core.CLIConfig{AnalyticsIngestURL: "http://custom-analytics-ingest"} got := operatorEnvOverrides("") if len(got) != 1 { t.Fatalf("expected analytics ingest env only, got %d (%v)", len(got), got) @@ -254,7 +255,7 @@ func TestOperatorEnvOverrides(t *testing.T) { }) t.Run("includes ingress readiness mode when configured", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{IngressReadinessMode: "permissive"} + core.DefaultCLIConfig = &core.CLIConfig{IngressReadinessMode: "permissive"} got := operatorEnvOverrides("") if len(got) != 2 { t.Fatalf("expected analytics plus ingress 
readiness env overrides, got %v", got) @@ -265,7 +266,7 @@ func TestOperatorEnvOverrides(t *testing.T) { }) t.Run("includes registry endpoint and ingress host when configured", func(t *testing.T) { - DefaultCLIConfig = &CLIConfig{ + core.DefaultCLIConfig = &core.CLIConfig{ RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.local", } @@ -284,8 +285,8 @@ func TestOperatorEnvOverrides(t *testing.T) { func TestConfigureProvisionedRegistryEnv(t *testing.T) { t.Run("returns nil when registry not set", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := configureProvisionedRegistryEnvWithKubectl(kubectl, nil, ""); err != nil { t.Fatalf("unexpected error: %v", err) @@ -296,9 +297,9 @@ func TestConfigureProvisionedRegistryEnv(t *testing.T) { }) t.Run("sets URL only when no credentials", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - ext := &ExternalRegistryConfig{URL: "registry.example.com"} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + ext := &config.ExternalRegistryConfig{URL: "registry.example.com"} if err := configureProvisionedRegistryEnvWithKubectl(kubectl, ext, ""); err != nil { t.Fatalf("unexpected error: %v", err) @@ -321,9 +322,9 @@ func TestConfigureProvisionedRegistryEnv(t *testing.T) { t.Run("creates secrets and sets secret env when credentials provided", func(t *testing.T) { var envData string var applyInputs []string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "create") && contains(spec.Args, "secret") { cmd.RunFunc = func() error { if cmd.StdinR != nil { @@ -348,8 +349,8 @@ func 
TestConfigureProvisionedRegistryEnv(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - ext := &ExternalRegistryConfig{ + kubectl := core.NewTestKubectlClient(mock) + ext := &config.ExternalRegistryConfig{ URL: "registry.example.com", Username: "user", Password: "pass", @@ -387,8 +388,8 @@ func TestConfigureProvisionedRegistryEnv(t *testing.T) { func TestEnsureProvisionedRegistrySecret(t *testing.T) { t.Run("returns nil when no credentials", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := ensureProvisionedRegistrySecretWithKubectl(kubectl, "name", "", ""); err != nil { t.Fatalf("unexpected error: %v", err) @@ -400,9 +401,9 @@ func TestEnsureProvisionedRegistrySecret(t *testing.T) { t.Run("creates and applies secret with env data", func(t *testing.T) { var envData string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "create") && contains(spec.Args, "secret") { cmd.RunFunc = func() error { if cmd.StdinR != nil { @@ -418,7 +419,7 @@ func TestEnsureProvisionedRegistrySecret(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := ensureProvisionedRegistrySecretWithKubectl(kubectl, "custom-secret", "user", "pass"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -434,8 +435,8 @@ func TestEnsureProvisionedRegistrySecret(t *testing.T) { func TestEnsureImagePullSecret(t *testing.T) { t.Run("returns nil when no credentials", func(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := 
core.NewTestKubectlClient(mock) if err := ensureImagePullSecretWithKubectl(kubectl, "ns", "name", "registry.example.com", "", ""); err != nil { t.Fatalf("unexpected error: %v", err) @@ -447,9 +448,9 @@ func TestEnsureImagePullSecret(t *testing.T) { t.Run("applies dockerconfigjson secret manifest", func(t *testing.T) { var manifest string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "apply") && contains(spec.Args, "-f") && contains(spec.Args, "-") { cmd.RunFunc = func() error { if cmd.StdinR != nil { @@ -462,7 +463,7 @@ func TestEnsureImagePullSecret(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := ensureImagePullSecretWithKubectl(kubectl, "ns", "name", "registry.example.com", "user", "pass"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -493,21 +494,21 @@ func TestEnsureImagePullSecret(t *testing.T) { } func TestEnsureAnalyticsImagePullSecret(t *testing.T) { - orig := DefaultCLIConfig + orig := core.DefaultCLIConfig t.Cleanup(func() { - DefaultCLIConfig = orig + core.DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{ + core.DefaultCLIConfig = &core.CLIConfig{ ProvisionedRegistryURL: "registry.example.com", ProvisionedRegistryUsername: "user", ProvisionedRegistryPassword: "pass", } var manifest string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "apply") && contains(spec.Args, "-f") && contains(spec.Args, "-") { cmd.RunFunc = func() error { if cmd.StdinR != nil { @@ -520,7 +521,7 @@ func 
TestEnsureAnalyticsImagePullSecret(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) secretName, err := ensureAnalyticsImagePullSecret(kubectl) if err != nil { @@ -529,7 +530,7 @@ func TestEnsureAnalyticsImagePullSecret(t *testing.T) { if secretName != defaultRegistrySecretName { t.Fatalf("expected secret name %q, got %q", defaultRegistrySecretName, secretName) } - if !strings.Contains(manifest, "namespace: "+defaultAnalyticsNamespace) { + if !strings.Contains(manifest, "namespace: "+core.DefaultAnalyticsNamespace) { t.Fatalf("expected analytics namespace in secret manifest, got %q", manifest) } if !strings.Contains(manifest, "kubernetes.io/dockerconfigjson") { @@ -539,15 +540,15 @@ func TestEnsureAnalyticsImagePullSecret(t *testing.T) { func TestRenderAnalyticsSecretManifestReusesExistingPassword(t *testing.T) { encoded := base64.StdEncoding.EncodeToString([]byte("keep-me")) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "get") && contains(spec.Args, "secret") { - return &MockCommand{Args: spec.Args, OutputData: []byte(encoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(encoded)} } - return &MockCommand{Args: spec.Args} + return &core.MockCommand{Args: spec.Args} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manifest, err := renderAnalyticsSecretManifest(kubectl) if err != nil { @@ -563,21 +564,21 @@ func TestRenderAnalyticsSecretManifestReusesExistingAPIKeys(t *testing.T) { apiKeyEncoded := base64.StdEncoding.EncodeToString([]byte("api-key")) uiKeyEncoded := base64.StdEncoding.EncodeToString([]byte("ui-key")) passwordEncoded := base64.StdEncoding.EncodeToString([]byte("grafana-password")) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { 
+ mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "jsonpath={.data.API_KEYS}"): - return &MockCommand{Args: spec.Args, OutputData: []byte(apiKeyEncoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(apiKeyEncoded)} case contains(spec.Args, "jsonpath={.data.UI_API_KEY}"): - return &MockCommand{Args: spec.Args, OutputData: []byte(uiKeyEncoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(uiKeyEncoded)} case contains(spec.Args, "jsonpath={.data.GRAFANA_ADMIN_PASSWORD}"): - return &MockCommand{Args: spec.Args, OutputData: []byte(passwordEncoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(passwordEncoded)} default: - return &MockCommand{Args: spec.Args} + return &core.MockCommand{Args: spec.Args} } }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manifest, err := renderAnalyticsSecretManifest(kubectl) if err != nil { @@ -607,19 +608,19 @@ func TestRenderAnalyticsSecretManifestReusesExistingAPIKeys(t *testing.T) { func TestRenderAnalyticsSecretManifestEscapesPostgresCredentialsInDSN(t *testing.T) { postgresUserEncoded := base64.StdEncoding.EncodeToString([]byte("user@runtime")) postgresPasswordEncoded := base64.StdEncoding.EncodeToString([]byte(`pa:ss?/#[%]`)) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { switch { case contains(spec.Args, "jsonpath={.data.POSTGRES_USER}"): - return &MockCommand{Args: spec.Args, OutputData: []byte(postgresUserEncoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(postgresUserEncoded)} case contains(spec.Args, "jsonpath={.data.POSTGRES_PASSWORD}"): - return &MockCommand{Args: spec.Args, OutputData: []byte(postgresPasswordEncoded)} + return &core.MockCommand{Args: spec.Args, OutputData: []byte(postgresPasswordEncoded)} 
default: - return &MockCommand{Args: spec.Args} + return &core.MockCommand{Args: spec.Args} } }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manifest, err := renderAnalyticsSecretManifest(kubectl) if err != nil { @@ -635,19 +636,19 @@ func TestRenderAnalyticsSecretManifestEscapesPostgresCredentialsInDSN(t *testing } func TestRenderAnalyticsSecretManifestGeneratesKeysWhenMissing(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { if contains(spec.Args, "get") && contains(spec.Args, "secret") { - return &MockCommand{ + return &core.MockCommand{ Args: spec.Args, OutputData: []byte("Error from server (NotFound): secrets \"mcp-sentinel-secrets\" not found"), OutputErr: errors.New("not found"), } } - return &MockCommand{Args: spec.Args} + return &core.MockCommand{Args: spec.Args} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) manifest, err := renderAnalyticsSecretManifest(kubectl) if err != nil { @@ -718,7 +719,7 @@ func TestPrepareAnalyticsImagesUsesTestModeImageSet(t *testing.T) { }, } - got, err := prepareAnalyticsImages(zap.NewNop(), &ExternalRegistryConfig{URL: "registry.example.com"}, true, true, deps) + got, err := prepareAnalyticsImages(zap.NewNop(), &config.ExternalRegistryConfig{URL: "registry.example.com"}, true, true, deps) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -777,11 +778,11 @@ spec: } func TestDeployAnalyticsManifestsReturnsRolloutFailures(t *testing.T) { - orig := DefaultCLIConfig + orig := core.DefaultCLIConfig t.Cleanup(func() { - DefaultCLIConfig = orig + core.DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{} + core.DefaultCLIConfig = &core.CLIConfig{} cwd, err := os.Getwd() if err != nil { @@ -835,9 +836,9 @@ func TestDeployAnalyticsManifestsReturnsRolloutFailures(t 
*testing.T) { }) var applied []string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} switch { case contains(spec.Args, "apply") && contains(spec.Args, "-f"): for i := 0; i+1 < len(spec.Args); i++ { @@ -854,7 +855,7 @@ func TestDeployAnalyticsManifestsReturnsRolloutFailures(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) err = deployAnalyticsManifestsWithKubectl(kubectl, zap.NewNop(), AnalyticsImageSet{ Ingest: "example.com/mcp-sentinel-ingest:latest", @@ -871,9 +872,9 @@ func TestDeployAnalyticsManifestsReturnsRolloutFailures(t *testing.T) { } func TestDeployAnalyticsManifestsWithKubectl_HostpathUsesHostpathManifests(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{} cwd, err := os.Getwd() if err != nil { @@ -908,9 +909,9 @@ func TestDeployAnalyticsManifestsWithKubectl_HostpathUsesHostpathManifests(t *te } t.Cleanup(func() { _ = os.Chdir(cwd) }) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "secret") { cmd.OutputData = []byte("Error from server (NotFound): secrets \"mcp-sentinel-secrets\" not found") cmd.OutputErr = errors.New("not found") @@ -921,14 +922,14 @@ func TestDeployAnalyticsManifestsWithKubectl_HostpathUsesHostpathManifests(t *te return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := 
core.NewTestKubectlClient(mock) err = deployAnalyticsManifestsWithKubectl(kubectl, zap.NewNop(), AnalyticsImageSet{ Ingest: "example.com/mcp-sentinel-ingest:latest", API: "example.com/mcp-sentinel-api:latest", Processor: "example.com/mcp-sentinel-processor:latest", UI: "example.com/mcp-sentinel-ui:latest", - }, StorageModeHostpath) + }, setupplan.StorageModeHostpath) if err == nil { t.Fatal("expected failure from rollout timeout") } @@ -938,9 +939,9 @@ func TestDeployAnalyticsManifestsWithKubectl_HostpathUsesHostpathManifests(t *te } func TestDeployAnalyticsManifestsWithKubectl_WaitsForPostgresStatefulSet(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{} cwd, err := os.Getwd() if err != nil { @@ -989,9 +990,9 @@ func TestDeployAnalyticsManifestsWithKubectl_WaitsForPostgresStatefulSet(t *test t.Cleanup(func() { _ = os.Chdir(cwd) }) var sawPostgresStatefulSet bool - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if contains(spec.Args, "get") && contains(spec.Args, "secret") { cmd.OutputData = []byte("Error from server (NotFound): secrets \"mcp-sentinel-secrets\" not found") cmd.OutputErr = errors.New("not found") @@ -1003,7 +1004,7 @@ func TestDeployAnalyticsManifestsWithKubectl_WaitsForPostgresStatefulSet(t *test return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) err = deployAnalyticsManifestsWithKubectl(kubectl, zap.NewNop(), AnalyticsImageSet{ Ingest: "example.com/mcp-sentinel-ingest:latest", @@ -1023,12 +1024,14 @@ func TestDeployAnalyticsManifestsWithKubectl_WaitsForPostgresStatefulSet(t *test } func 
TestSetupDepsWithDefaultsSetsNil(t *testing.T) { - deps := SetupDeps{}.withDefaults(zap.NewNop()) + // Avoid importing internal/cli/cluster from this package's tests (import cycle: + // cli_test -> cluster -> cli). Supply a fake cluster manager; other defaults still apply. + deps := SetupDeps{ClusterManager: &helperFakeClusterManager{}}.withDefaults(zap.NewNop()) if deps.ResolveExternalRegistryConfig == nil { t.Fatal("expected ResolveExternalRegistryConfig default") } - if deps.ClusterManager == nil { - t.Fatal("expected ClusterManager default") + if _, ok := deps.ClusterManager.(*helperFakeClusterManager); !ok { + t.Fatal("expected ClusterManager to remain the injected fake") } if deps.RegistryManager == nil { t.Fatal("expected RegistryManager default") @@ -1063,8 +1066,8 @@ func TestSetupDepsWithDefaultsSetsNil(t *testing.T) { if deps.EnsureNamespace == nil { t.Fatal("expected EnsureNamespace default") } - if deps.GetPlatformRegistryURL == nil { - t.Fatal("expected GetPlatformRegistryURL default") + if deps.ResolvePlatformRegistryURL == nil { + t.Fatal("expected ResolvePlatformRegistryURL default") } if deps.PushOperatorImageToInternal == nil { t.Fatal("expected PushOperatorImageToInternal default") @@ -1085,10 +1088,10 @@ func TestSetupDepsWithDefaultsSetsNil(t *testing.T) { t.Fatal("expected CheckCRDInstalled default") } if deps.GetDeploymentTimeout == nil { - t.Fatal("expected GetDeploymentTimeout default") + t.Fatal("expected core.GetDeploymentTimeout default") } if deps.GetRegistryPort == nil { - t.Fatal("expected GetRegistryPort default") + t.Fatal("expected core.GetRegistryPort default") } if deps.OperatorImageFor == nil { t.Fatal("expected OperatorImageFor default") @@ -1099,29 +1102,29 @@ func TestSetupDepsWithDefaultsSetsNil(t *testing.T) { } func TestSetupDepsWithDefaultsPreservesNonNil(t *testing.T) { - cluster := &helperFakeClusterManager{} - registry := &helperFakeRegistryManager{} + clusterMgr := &helperFakeClusterManager{} + registryMgr := 
&helperFakeRegistryManager{} deps := SetupDeps{ - ClusterManager: cluster, - RegistryManager: registry, + ClusterManager: clusterMgr, + RegistryManager: registryMgr, GetRegistryPort: func() int { return 123 }, - OperatorImageFor: func(_ *ExternalRegistryConfig) string { + OperatorImageFor: func(_ *config.ExternalRegistryConfig) string { return "custom-image" }, - GatewayProxyImageFor: func(_ *ExternalRegistryConfig) string { + GatewayProxyImageFor: func(_ *config.ExternalRegistryConfig) string { return "custom-gateway-image" }, } got := deps.withDefaults(zap.NewNop()) - if got.ClusterManager != cluster { + if got.ClusterManager != clusterMgr { t.Fatal("expected ClusterManager to be preserved") } - if got.RegistryManager != registry { + if got.RegistryManager != registryMgr { t.Fatal("expected RegistryManager to be preserved") } if got.GetRegistryPort() != 123 { - t.Fatal("expected GetRegistryPort to be preserved") + t.Fatal("expected core.GetRegistryPort to be preserved") } if got.OperatorImageFor(nil) != "custom-image" { t.Fatal("expected OperatorImageFor to be preserved") @@ -1132,8 +1135,8 @@ func TestSetupDepsWithDefaultsPreservesNonNil(t *testing.T) { } func TestCheckCRDInstalledWithKubectl(t *testing.T) { - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) if err := checkCRDInstalledWithKubectl(kubectl, "example.crd.io"); err != nil { t.Fatalf("unexpected error: %v", err) @@ -1147,11 +1150,9 @@ func TestCheckCRDInstalledWithKubectl(t *testing.T) { } func TestCheckCRDInstalledUsesDefaultKubectl(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{} - kubectlClient = &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{} + swapDefaultKubectlClientForTest(t, core.NewTestKubectlClient(mock)) if err := checkCRDInstalled("example.crd.io"); err != nil { 
t.Fatalf("unexpected error: %v", err) @@ -1165,8 +1166,8 @@ func TestCheckCRDInstalledUsesDefaultKubectl(t *testing.T) { } func TestCheckCRDInstalledWithKubectlError(t *testing.T) { - mock := &MockExecutor{DefaultRunErr: errors.New("kubectl failed")} - kubectl := &KubectlClient{exec: mock, validators: nil} + mock := &core.MockExecutor{DefaultRunErr: errors.New("kubectl failed")} + kubectl := core.NewTestKubectlClient(mock) if err := checkCRDInstalledWithKubectl(kubectl, "example.crd.io"); err == nil { t.Fatal("expected error") @@ -1174,12 +1175,12 @@ func TestCheckCRDInstalledWithKubectlError(t *testing.T) { } func TestWaitForDeploymentAvailableWithKubectl(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{Args: spec.Args, OutputData: []byte("1")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{Args: spec.Args, OutputData: []byte("1")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := waitForDeploymentAvailableWithKubectl(kubectl, zap.NewNop(), "registry", "registry", "app=registry", time.Second); err != nil { t.Fatalf("unexpected error: %v", err) @@ -1193,12 +1194,12 @@ func TestWaitForDeploymentAvailableWithKubectl(t *testing.T) { } func TestWaitForDeploymentAvailableWithKubectlTimeout(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - return &MockCommand{Args: spec.Args, OutputData: []byte("0")} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + return &core.MockCommand{Args: spec.Args, OutputData: []byte("0")} }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := waitForDeploymentAvailableWithKubectl(kubectl, zap.NewNop(), "registry", "registry", "app=registry", -time.Second); err == nil { t.Fatal("expected timeout error") 
@@ -1206,8 +1207,6 @@ func TestWaitForDeploymentAvailableWithKubectlTimeout(t *testing.T) { } func TestDeployOperatorManifestsWithKubectl(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) root := repoRootForTest(t) origDir, err := os.Getwd() @@ -1222,9 +1221,9 @@ func TestDeployOperatorManifestsWithKubectl(t *testing.T) { }) var managerManifest string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if idx := argIndex(spec.Args, "-f"); idx != -1 && idx+1 < len(spec.Args) { path := spec.Args[idx+1] if strings.Contains(path, "manager-") && strings.HasSuffix(path, ".yaml") { @@ -1241,8 +1240,8 @@ func TestDeployOperatorManifestsWithKubectl(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, kubectl) operatorImage := "registry.example.com/mcp-runtime-operator:dev" gatewayProxyImage := "registry.example.com/mcp-sentinel-mcp-proxy:dev" @@ -1292,7 +1291,7 @@ func TestDeployOperatorManifestsWithKubectl(t *testing.T) { if commandHasArgs(cmd, "apply", "-k", "config/rbac/") { hasRBAC = true } - if commandHasArgs(cmd, "delete", "deployment/"+OperatorDeploymentName, "-n", NamespaceMCPRuntime, "--ignore-not-found") { + if commandHasArgs(cmd, "delete", "deployment/"+core.OperatorDeploymentName, "-n", core.NamespaceMCPRuntime, "--ignore-not-found") { hasDelete = true } if idx := argIndex(cmd.Args, "-f"); idx != -1 && idx+1 < len(cmd.Args) { @@ -1311,8 +1310,6 @@ func TestDeployOperatorManifestsWithKubectl(t *testing.T) { } func TestDeployOperatorManifestsWithKubectlUsesIfNotPresentForTestModeImage(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) 
root := repoRootForTest(t) origDir, err := os.Getwd() @@ -1327,9 +1324,9 @@ func TestDeployOperatorManifestsWithKubectlUsesIfNotPresentForTestModeImage(t *t }) var managerManifest string - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if idx := argIndex(spec.Args, "-f"); idx != -1 && idx+1 < len(spec.Args) { path := spec.Args[idx+1] if strings.Contains(path, "manager-") && strings.HasSuffix(path, ".yaml") { @@ -1346,8 +1343,8 @@ func TestDeployOperatorManifestsWithKubectlUsesIfNotPresentForTestModeImage(t *t return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, kubectl) if err := deployOperatorManifestsWithKubectl(kubectl, zap.NewNop(), testModeOperatorImage, "", nil); err != nil { t.Fatalf("unexpected error: %v", err) @@ -1362,16 +1359,16 @@ func TestDeployOperatorManifestsWithKubectlUsesIfNotPresentForTestModeImage(t *t func TestDeployOperatorManifestsWithKubectlCRDError(t *testing.T) { mockErr := errors.New("apply crd failed") - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "apply", "--validate=false", "-f", "config/crd/bases") { cmd.RunErr = mockErr } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := deployOperatorManifestsWithKubectl(kubectl, zap.NewNop(), "example", "", nil); err == nil { t.Fatal("expected error") @@ -1379,21 +1376,19 @@ func TestDeployOperatorManifestsWithKubectlCRDError(t *testing.T) { } func 
TestDeployOperatorManifestsWithKubectlRBACError(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) mockErr := errors.New("apply rbac failed") - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "apply", "-k", "config/rbac/") { cmd.RunErr = mockErr } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, kubectl) if err := deployOperatorManifestsWithKubectl(kubectl, zap.NewNop(), "example", "", nil); err == nil { t.Fatal("expected error") @@ -1401,8 +1396,6 @@ func TestDeployOperatorManifestsWithKubectlRBACError(t *testing.T) { } func TestDeployOperatorManifestsWithKubectlManagerApplyError(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) root := repoRootForTest(t) origDir, err := os.Getwd() @@ -1417,9 +1410,9 @@ func TestDeployOperatorManifestsWithKubectlManagerApplyError(t *testing.T) { }) mockErr := errors.New("apply manager failed") - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if idx := argIndex(spec.Args, "-f"); idx != -1 && idx+1 < len(spec.Args) { path := spec.Args[idx+1] if strings.Contains(path, "manager-") && strings.HasSuffix(path, ".yaml") { @@ -1429,8 +1422,8 @@ func TestDeployOperatorManifestsWithKubectlManagerApplyError(t *testing.T) { return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, 
kubectl) if err := deployOperatorManifestsWithKubectl(kubectl, zap.NewNop(), "example", "", nil); err == nil { t.Fatal("expected error") @@ -1438,18 +1431,17 @@ func TestDeployOperatorManifestsWithKubectlManagerApplyError(t *testing.T) { } func TestSetupTLSWithKubectl(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) + chdirRepoRootForTest(t) - mock := &MockExecutor{} - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + mock := &core.MockExecutor{} + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, kubectl) if err := setupTLSPrivateCA(kubectl, zap.NewNop()); err != nil { t.Fatalf("unexpected error: %v", err) } - timeoutArg := fmt.Sprintf("--timeout=%s", GetCertTimeout()) + timeoutArg := fmt.Sprintf("--timeout=%s", core.GetCertTimeout()) var ( hasCRD bool hasSecret bool @@ -1459,7 +1451,7 @@ func TestSetupTLSWithKubectl(t *testing.T) { hasWait bool ) for _, cmd := range mock.Commands { - if commandHasArgs(cmd, "get", "crd", CertManagerCRDName) { + if commandHasArgs(cmd, "get", "crd", core.CertManagerCRDName) { hasCRD = true } if commandHasArgs(cmd, "get", "secret", "mcp-runtime-ca", "-n", "cert-manager") { @@ -1473,10 +1465,10 @@ func TestSetupTLSWithKubectl(t *testing.T) { } // Registry Certificate is applied via `kubectl apply -f - -n registry` with the // manifest piped over stdin, not via `apply -f `. 
- if commandHasArgs(cmd, "apply", "-f", "-", "-n", NamespaceRegistry) { + if commandHasArgs(cmd, "apply", "-f", "-", "-n", core.NamespaceRegistry) { hasCert = true } - if commandHasArgs(cmd, "wait", "--for=condition=Ready", "certificate/registry-cert", "-n", NamespaceRegistry, timeoutArg) { + if commandHasArgs(cmd, "wait", "--for=condition=Ready", "certificate/registry-cert", "-n", core.NamespaceRegistry, timeoutArg) { hasWait = true } } @@ -1486,16 +1478,16 @@ func TestSetupTLSWithKubectl(t *testing.T) { } func TestSetupTLSWithKubectlMissingCRD(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "get", "crd", CertManagerCRDName) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "get", "crd", core.CertManagerCRDName) { cmd.RunErr = errors.New("missing crd") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := setupTLSPrivateCA(kubectl, zap.NewNop()); err == nil { t.Fatal("expected error") @@ -1503,16 +1495,16 @@ func TestSetupTLSWithKubectlMissingCRD(t *testing.T) { } func TestSetupTLSWithKubectlMissingSecret(t *testing.T) { - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} if commandHasArgs(spec, "get", "secret", "mcp-runtime-ca", "-n", "cert-manager") { cmd.RunErr = errors.New("missing secret") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} + kubectl := core.NewTestKubectlClient(mock) if err := setupTLSPrivateCA(kubectl, zap.NewNop()); err == nil { t.Fatal("expected error") @@ -1520,27 +1512,25 @@ func TestSetupTLSWithKubectlMissingSecret(t *testing.T) { 
} func TestSetupTLSWithKubectlWaitError(t *testing.T) { - origKubectl := kubectlClient - t.Cleanup(func() { kubectlClient = origKubectl }) - mock := &MockExecutor{ - CommandFunc: func(spec ExecSpec) *MockCommand { - cmd := &MockCommand{Args: spec.Args} - if commandHasArgs(spec, "wait", "--for=condition=Ready", "certificate/registry-cert", "-n", NamespaceRegistry) { + mock := &core.MockExecutor{ + CommandFunc: func(spec core.ExecSpec) *core.MockCommand { + cmd := &core.MockCommand{Args: spec.Args} + if commandHasArgs(spec, "wait", "--for=condition=Ready", "certificate/registry-cert", "-n", core.NamespaceRegistry) { cmd.RunErr = errors.New("wait failed") } return cmd }, } - kubectl := &KubectlClient{exec: mock, validators: nil} - kubectlClient = kubectl + kubectl := core.NewTestKubectlClient(mock) + swapDefaultKubectlClientForTest(t, kubectl) if err := setupTLSPrivateCA(kubectl, zap.NewNop()); err == nil { t.Fatal("expected error") } } -func commandHasArgs(cmd ExecSpec, args ...string) bool { +func commandHasArgs(cmd core.ExecSpec, args ...string) bool { for _, arg := range args { if !contains(cmd.Args, arg) { return false @@ -1549,6 +1539,20 @@ func commandHasArgs(cmd ExecSpec, args ...string) bool { return true } +func contains(slice []string, val string) bool { + for _, item := range slice { + if item == val { + return true + } + } + return false +} + +func swapDefaultKubectlClientForTest(t *testing.T, kubectl *core.KubectlClient) { + t.Helper() + t.Cleanup(core.SwapDefaultKubectlClient(kubectl)) +} + func argIndex(args []string, target string) int { for i, arg := range args { if arg == target { @@ -1577,3 +1581,18 @@ func repoRootForTest(t *testing.T) string { dir = parent } } + +func chdirRepoRootForTest(t *testing.T) { + t.Helper() + root := repoRootForTest(t) + origDir, err := os.Getwd() + if err != nil { + t.Fatalf("get working dir: %v", err) + } + if err := os.Chdir(root); err != nil { + t.Fatalf("chdir to repo root: %v", err) + } + t.Cleanup(func() { + _ = 
os.Chdir(origDir) + }) +} diff --git a/internal/cli/platform_ingress.go b/internal/cli/setup/ingressmanifest/render.go similarity index 69% rename from internal/cli/platform_ingress.go rename to internal/cli/setup/ingressmanifest/render.go index ec107da..88140c2 100644 --- a/internal/cli/platform_ingress.go +++ b/internal/cli/setup/ingressmanifest/render.go @@ -1,51 +1,39 @@ -package cli +// Package ingressmanifest builds YAML for the host-based Sentinel platform UI Ingress. +package ingressmanifest import ( - "fmt" "strconv" "strings" ) -const platformIngressName = "mcp-sentinel-platform-ui" -const platformTLSSecretName = "mcp-sentinel-platform-tls" - -// applyPlatformIngressIfConfigured applies a host-based ingress for the -// dashboard UI on platform. when MCP_PLATFORM_INGRESS_HOST -// (or MCP_PLATFORM_DOMAIN) is set. When unset, the dev path-based gateway -// ingress in k8s/10-gateway.yaml continues to handle all dashboard traffic. -func applyPlatformIngressIfConfigured(kubectl KubectlRunner) error { - host := strings.TrimSpace(GetPlatformIngressHost()) - if host == "" { - return nil - } - manifest := renderPlatformIngressManifest(host, GetRegistryClusterIssuerName()) - Info(fmt.Sprintf("Applying platform UI ingress for %s", host)) - if err := applyManifestContent(kubectl, manifest); err != nil { - return fmt.Errorf("apply platform UI ingress: %w", err) - } - return nil -} +const ( + // PlatformIngressName is the Kubernetes Ingress resource name for the dashboard. + PlatformIngressName = "mcp-sentinel-platform-ui" + // PlatformTLSSecretName is the TLS secret name used when TLS is enabled. + PlatformTLSSecretName = "mcp-sentinel-platform-tls" +) -// renderPlatformIngressManifest emits an Ingress that maps platform. -// to the dashboard UI, /api on the same UI service (which reverse-proxies to +// RenderPlatformUIIngress emits an Ingress that maps platform. 
to the +// dashboard UI, /api on the same UI service (which reverse-proxies to // mcp-sentinel-api via API_UPSTREAM), and the in-cluster Grafana / Prometheus // paths. When issuerName is set, a TLS section and cert-manager annotation are // added so cert-manager's ingress-shim provisions a Certificate for // platform. into the mcp-sentinel-platform-tls Secret in the same // namespace as the Ingress. -func renderPlatformIngressManifest(host, issuerName string) string { +func RenderPlatformUIIngress(host, issuerName, analyticsNamespace string) string { host = strings.TrimSpace(host) issuerName = strings.TrimSpace(issuerName) + analyticsNamespace = strings.TrimSpace(analyticsNamespace) var b strings.Builder b.WriteString("apiVersion: networking.k8s.io/v1\n") b.WriteString("kind: Ingress\n") b.WriteString("metadata:\n") b.WriteString(" name: ") - b.WriteString(platformIngressName) + b.WriteString(PlatformIngressName) b.WriteString("\n") b.WriteString(" namespace: ") - b.WriteString(defaultAnalyticsNamespace) + b.WriteString(analyticsNamespace) b.WriteString("\n") b.WriteString(" annotations:\n") if issuerName != "" { @@ -65,7 +53,7 @@ func renderPlatformIngressManifest(host, issuerName string) string { b.WriteString(strconv.Quote(host)) b.WriteString("\n") b.WriteString(" secretName: ") - b.WriteString(platformTLSSecretName) + b.WriteString(PlatformTLSSecretName) b.WriteString("\n") } b.WriteString(" rules:\n") diff --git a/internal/cli/setup/ingressmanifest/render_test.go b/internal/cli/setup/ingressmanifest/render_test.go new file mode 100644 index 0000000..b86adf6 --- /dev/null +++ b/internal/cli/setup/ingressmanifest/render_test.go @@ -0,0 +1,73 @@ +package ingressmanifest + +import ( + "strings" + "testing" +) + +const testAnalyticsNS = "mcp-sentinel" + +func TestRenderPlatformUIIngressNoTLS(t *testing.T) { + got := RenderPlatformUIIngress("platform.example.com", "", testAnalyticsNS) + mustContain := []string{ + "name: " + PlatformIngressName, + "namespace: " + 
testAnalyticsNS, + "traefik.ingress.kubernetes.io/router.entrypoints: web", + `- host: "platform.example.com"`, + "- path: /api\n", + "- path: /grafana\n", + "- path: /prometheus\n", + "- path: /\n", + "name: mcp-sentinel-ui", + "number: 8082", + "name: grafana", + "name: prometheus", + } + for _, want := range mustContain { + if !strings.Contains(got, want) { + t.Fatalf("missing %q in manifest:\n%s", want, got) + } + } + if strings.Contains(got, "tls:") { + t.Fatalf("did not expect a TLS block when issuer is empty:\n%s", got) + } + if strings.Contains(got, "cert-manager.io/cluster-issuer") { + t.Fatalf("did not expect cert-manager annotation when issuer is empty:\n%s", got) + } +} + +func TestRenderPlatformUIIngressApiBeforeGrafana(t *testing.T) { + got := RenderPlatformUIIngress("platform.example.com", "", testAnalyticsNS) + apiIdx := strings.Index(got, "- path: /api") + grafanaIdx := strings.Index(got, "- path: /grafana") + rootIdx := strings.Index(got, "- path: /\n") + if apiIdx < 0 || grafanaIdx < 0 || rootIdx < 0 { + t.Fatalf("missing one of /api, /grafana, / paths:\n%s", got) + } + if apiIdx > grafanaIdx { + t.Fatalf("/api must be listed before /grafana in the rule for readability:\n%s", got) + } + if grafanaIdx > rootIdx { + t.Fatalf("/grafana must be listed before / catch-all:\n%s", got) + } +} + +func TestRenderPlatformUIIngressWithTLS(t *testing.T) { + got := RenderPlatformUIIngress("platform.mcpruntime.org", "letsencrypt-prod", testAnalyticsNS) + mustContain := []string{ + "traefik.ingress.kubernetes.io/router.entrypoints: websecure", + "cert-manager.io/cluster-issuer: letsencrypt-prod", + "tls:", + `- "platform.mcpruntime.org"`, + "secretName: " + PlatformTLSSecretName, + `- host: "platform.mcpruntime.org"`, + } + for _, want := range mustContain { + if !strings.Contains(got, want) { + t.Fatalf("missing %q in manifest:\n%s", want, got) + } + } + if strings.Contains(got, "\n traefik.ingress.kubernetes.io/router.entrypoints: web\n") { + t.Fatalf("did not 
expect plain web entrypoint when TLS issuer is set:\n%s", got) + } +} diff --git a/internal/cli/setup_plan.go b/internal/cli/setup/plan/plan.go similarity index 78% rename from internal/cli/setup_plan.go rename to internal/cli/setup/plan/plan.go index 7770867..96c4375 100644 --- a/internal/cli/setup_plan.go +++ b/internal/cli/setup/plan/plan.go @@ -1,16 +1,15 @@ -package cli +// Package plan contains pure setup planning types and default resolution. +package plan -// This file defines the setup planning types and logic. -// SetupPlanInput captures raw CLI inputs, and BuildSetupPlan resolves them into a concrete SetupPlan -// that determines which manifests and configurations to use during setup. +import "mcp-runtime/internal/cli/cluster" const ( StorageModeDynamic = "dynamic" StorageModeHostpath = "hostpath" ) -// SetupPlanInput captures the raw CLI inputs for setup. -type SetupPlanInput struct { +// Input captures the raw CLI inputs for setup. +type Input struct { Kubeconfig string Context string RegistryType string @@ -33,14 +32,14 @@ type SetupPlanInput struct { InstallCertManager bool } -// SetupPlan captures the resolved setup decisions. -type SetupPlan struct { +// Plan captures the resolved setup decisions. +type Plan struct { Kubeconfig string Context string RegistryType string RegistryStorageSize string StorageMode string - Ingress ingressOptions + Ingress cluster.IngressOptions RegistryManifest string TLSEnabled bool TestMode bool @@ -53,8 +52,8 @@ type SetupPlan struct { InstallCertManager bool } -// BuildSetupPlan resolves CLI inputs into a concrete setup plan. -func BuildSetupPlan(input SetupPlanInput) SetupPlan { +// Build resolves CLI inputs into a concrete setup plan. 
+func Build(input Input) Plan { if input.StorageMode == "" { input.StorageMode = StorageModeDynamic } @@ -79,16 +78,16 @@ func BuildSetupPlan(input SetupPlanInput) SetupPlan { registryManifest = "config/registry/overlays/tls" } - return SetupPlan{ + return Plan{ Kubeconfig: input.Kubeconfig, Context: input.Context, RegistryType: input.RegistryType, RegistryStorageSize: input.RegistryStorageSize, StorageMode: input.StorageMode, - Ingress: ingressOptions{ - mode: input.IngressMode, - manifest: manifestPath, - force: input.ForceIngressInstall, + Ingress: cluster.IngressOptions{ + Mode: input.IngressMode, + Manifest: manifestPath, + Force: input.ForceIngressInstall, }, RegistryManifest: registryManifest, TLSEnabled: input.TLSEnabled, diff --git a/internal/cli/setup_plan_test.go b/internal/cli/setup/plan_flow_test.go similarity index 76% rename from internal/cli/setup_plan_test.go rename to internal/cli/setup/plan_flow_test.go index 1349154..d44530a 100644 --- a/internal/cli/setup_plan_test.go +++ b/internal/cli/setup/plan_flow_test.go @@ -1,4 +1,4 @@ -package cli +package setup import ( "fmt" @@ -7,10 +7,15 @@ import ( "time" "go.uber.org/zap" + + "mcp-runtime/internal/cli/cluster" + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/registry/config" + setupplan "mcp-runtime/internal/cli/setup/plan" ) func TestBuildSetupPlan_DefaultHTTP(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ Kubeconfig: "/tmp/kubeconfig", Context: "my-context", RegistryType: "docker", @@ -23,8 +28,8 @@ func TestBuildSetupPlan_DefaultHTTP(t *testing.T) { TLSEnabled: false, }) - if plan.Ingress.manifest != "config/ingress/overlays/http" { - t.Fatalf("expected http ingress manifest, got %q", plan.Ingress.manifest) + if plan.Ingress.Manifest != "config/ingress/overlays/http" { + t.Fatalf("expected http ingress manifest, got %q", plan.Ingress.Manifest) } if plan.Kubeconfig != "/tmp/kubeconfig" { t.Fatalf("expected kubeconfig to be 
preserved, got %q", plan.Kubeconfig) @@ -38,7 +43,7 @@ func TestBuildSetupPlan_DefaultHTTP(t *testing.T) { } func TestBuildSetupPlan_DefaultTLS(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ RegistryType: "docker", RegistryStorageSize: "20Gi", StorageMode: "dynamic", @@ -49,8 +54,8 @@ func TestBuildSetupPlan_DefaultTLS(t *testing.T) { TLSEnabled: true, }) - if plan.Ingress.manifest != "config/ingress/overlays/prod" { - t.Fatalf("expected tls ingress manifest, got %q", plan.Ingress.manifest) + if plan.Ingress.Manifest != "config/ingress/overlays/prod" { + t.Fatalf("expected tls ingress manifest, got %q", plan.Ingress.Manifest) } if plan.RegistryManifest != "config/registry/overlays/tls" { t.Fatalf("expected tls registry manifest, got %q", plan.RegistryManifest) @@ -58,7 +63,7 @@ func TestBuildSetupPlan_DefaultTLS(t *testing.T) { } func TestBuildSetupPlan_TLSClusterIssuer(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ TLSEnabled: true, TLSClusterIssuer: "company-ca", }) @@ -68,7 +73,7 @@ func TestBuildSetupPlan_TLSClusterIssuer(t *testing.T) { } func TestBuildSetupPlan_CustomIngressManifest(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ RegistryType: "docker", RegistryStorageSize: "20Gi", StorageMode: "dynamic", @@ -79,8 +84,8 @@ func TestBuildSetupPlan_CustomIngressManifest(t *testing.T) { TLSEnabled: true, }) - if plan.Ingress.manifest != "custom/manifest" { - t.Fatalf("expected custom ingress manifest, got %q", plan.Ingress.manifest) + if plan.Ingress.Manifest != "custom/manifest" { + t.Fatalf("expected custom ingress manifest, got %q", plan.Ingress.Manifest) } if plan.RegistryManifest != "config/registry/overlays/tls" { t.Fatalf("expected tls registry manifest, got %q", plan.RegistryManifest) @@ -89,7 +94,7 @@ func TestBuildSetupPlan_CustomIngressManifest(t *testing.T) { func 
TestBuildSetupPlan_PreservesTestModeAndOperatorArgs(t *testing.T) { operatorArgs := []string{"--metrics-bind-address=:9090", "--leader-elect=false"} - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ RegistryType: "docker", RegistryStorageSize: "20Gi", StorageMode: "dynamic", @@ -120,10 +125,10 @@ func TestBuildSetupPlan_PreservesTestModeAndOperatorArgs(t *testing.T) { } func TestBuildSetupPlan_HostpathRegistryManifest(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ RegistryType: "docker", RegistryStorageSize: "20Gi", - StorageMode: StorageModeHostpath, + StorageMode: setupplan.StorageModeHostpath, IngressMode: "traefik", IngressManifest: "config/ingress/overlays/http", IngressManifestChanged: false, @@ -137,10 +142,10 @@ func TestBuildSetupPlan_HostpathRegistryManifest(t *testing.T) { } func TestBuildSetupPlan_HostpathRegistryManifest_TLS(t *testing.T) { - plan := BuildSetupPlan(SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ RegistryType: "docker", RegistryStorageSize: "20Gi", - StorageMode: StorageModeHostpath, + StorageMode: setupplan.StorageModeHostpath, IngressMode: "traefik", IngressManifest: "config/ingress/overlays/http", IngressManifestChanged: false, @@ -155,8 +160,8 @@ func TestBuildSetupPlan_HostpathRegistryManifest_TLS(t *testing.T) { func TestValidateNonTestSetupAllowsLenientDefaultMode(t *testing.T) { err := validateNonTestSetup( - SetupPlan{TLSEnabled: false, TestMode: false}, - &ExternalRegistryConfig{URL: "registry.example.com"}, + setupplan.Plan{TLSEnabled: false, TestMode: false}, + &config.ExternalRegistryConfig{URL: "registry.example.com"}, true, ) if err != nil { @@ -165,12 +170,12 @@ func TestValidateNonTestSetupAllowsLenientDefaultMode(t *testing.T) { } func TestValidateNonTestSetupAllowsStableInternalRegistry(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = 
&CLIConfig{RegistryEndpoint: "registry.prod.example.com", RegistryIngressHost: "registry.prod.example.com"} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{RegistryEndpoint: "registry.prod.example.com", RegistryIngressHost: "registry.prod.example.com"} err := validateNonTestSetup( - SetupPlan{TLSEnabled: true, TestMode: false}, + setupplan.Plan{TLSEnabled: true, TestMode: false}, nil, false, ) @@ -181,8 +186,8 @@ func TestValidateNonTestSetupAllowsStableInternalRegistry(t *testing.T) { func TestValidateNonTestSetupAllowsDevRegistryURLByDefault(t *testing.T) { err := validateNonTestSetup( - SetupPlan{TLSEnabled: true, TestMode: false}, - &ExternalRegistryConfig{URL: "registry.local"}, + setupplan.Plan{TLSEnabled: true, TestMode: false}, + &config.ExternalRegistryConfig{URL: "registry.local"}, true, ) if err != nil { @@ -191,12 +196,12 @@ func TestValidateNonTestSetupAllowsDevRegistryURLByDefault(t *testing.T) { } func TestValidateNonTestSetupAllowsDevInternalRegistryEndpointByDefault(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.local"} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.local"} err := validateNonTestSetup( - SetupPlan{TLSEnabled: true, TestMode: false}, + setupplan.Plan{TLSEnabled: true, TestMode: false}, nil, false, ) @@ -207,8 +212,8 @@ func TestValidateNonTestSetupAllowsDevInternalRegistryEndpointByDefault(t *testi func TestValidateNonTestSetupRejectsMissingTLSInStrictProd(t *testing.T) { err := validateNonTestSetup( - SetupPlan{TLSEnabled: false, TestMode: false, StrictProd: true}, - &ExternalRegistryConfig{URL: "registry.example.com"}, + setupplan.Plan{TLSEnabled: false, 
TestMode: false, StrictProd: true}, + &config.ExternalRegistryConfig{URL: "registry.example.com"}, true, ) if err == nil || !strings.Contains(err.Error(), "--with-tls") { @@ -218,8 +223,8 @@ func TestValidateNonTestSetupRejectsMissingTLSInStrictProd(t *testing.T) { func TestValidateNonTestSetupRejectsDevRegistryURLInStrictProd(t *testing.T) { err := validateNonTestSetup( - SetupPlan{TLSEnabled: true, TestMode: false, StrictProd: true}, - &ExternalRegistryConfig{URL: "registry.local"}, + setupplan.Plan{TLSEnabled: true, TestMode: false, StrictProd: true}, + &config.ExternalRegistryConfig{URL: "registry.local"}, true, ) if err == nil || !strings.Contains(err.Error(), "dev-only registry URL") { @@ -228,12 +233,12 @@ func TestValidateNonTestSetupRejectsDevRegistryURLInStrictProd(t *testing.T) { } func TestValidateNonTestSetupRejectsDevInternalRegistryEndpointInStrictProd(t *testing.T) { - orig := DefaultCLIConfig - t.Cleanup(func() { DefaultCLIConfig = orig }) - DefaultCLIConfig = &CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.local"} + orig := core.DefaultCLIConfig + t.Cleanup(func() { core.DefaultCLIConfig = orig }) + core.DefaultCLIConfig = &core.CLIConfig{RegistryEndpoint: "10.43.39.164:5000", RegistryIngressHost: "registry.local"} err := validateNonTestSetup( - SetupPlan{TLSEnabled: true, TestMode: false, StrictProd: true}, + setupplan.Plan{TLSEnabled: true, TestMode: false, StrictProd: true}, nil, false, ) @@ -282,7 +287,7 @@ func (f *fakeClusterManager) InitCluster(_, _ string) error { return nil } -func (f *fakeClusterManager) ConfigureCluster(ingressOptions) error { +func (f *fakeClusterManager) ConfigureCluster(cluster.IngressOptions) error { f.rec.add("cluster-config") return nil } @@ -304,8 +309,8 @@ func (f *fakeRegistryManager) PushInCluster(_, _, _ string) error { func TestSetupPlatformWithDeps_ExternalRegistry(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: 
func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { - return &ExternalRegistryConfig{ + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + return &config.ExternalRegistryConfig{ URL: "registry.example.com", Username: "user", Password: "pass", @@ -317,13 +322,13 @@ func TestSetupPlatformWithDeps_ExternalRegistry(t *testing.T) { DeployRegistry: func(*zap.Logger, string, int, string, string, string) error { rec.add("deploy-registry"); return nil }, WaitForDeploymentAvailable: func(_ *zap.Logger, name, _, _ string, _ time.Duration) error { rec.addWait(name); return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { rec.add("tls"); return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { rec.add("tls"); return nil }, BuildOperatorImage: func(string) error { rec.add("build"); return nil }, PushOperatorImage: func(string) error { rec.add("push"); return nil }, BuildGatewayProxyImage: func(string) error { rec.add("build-gateway"); return nil }, PushGatewayProxyImage: func(string) error { rec.add("push-gateway"); return nil }, EnsureNamespace: func(string) error { rec.add("ensure-ns"); return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { rec.add("push-internal"); return nil }, PushGatewayProxyImageToInternal: func(*zap.Logger, string, string, string) error { rec.add("push-gateway-internal") @@ -333,7 +338,7 @@ func TestSetupPlatformWithDeps_ExternalRegistry(t *testing.T) { rec.add("deploy-operator") return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { 
rec.add("configure-env") return nil }, @@ -341,23 +346,23 @@ func TestSetupPlatformWithDeps_ExternalRegistry(t *testing.T) { CheckCRDInstalled: func(string) error { rec.add("check-crd"); return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { + OperatorImageFor: func(*config.ExternalRegistryConfig) string { rec.add("operator-image") return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { rec.add("gateway-image") return "registry.example.com/mcp-sentinel-mcp-proxy:latest" }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", + Force: false, }, RegistryManifest: "config/registry", TLSEnabled: true, @@ -387,7 +392,7 @@ func TestSetupPlatformWithDeps_ExternalRegistry(t *testing.T) { func TestSetupPlatformWithDeps_InternalRegistryTLS(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { return nil, nil }, ClusterManager: &fakeClusterManager{rec: rec}, @@ -399,13 +404,13 @@ func TestSetupPlatformWithDeps_InternalRegistryTLS(t *testing.T) { DeployRegistry: func(*zap.Logger, string, int, string, string, string) error { rec.add("deploy-registry"); return nil }, WaitForDeploymentAvailable: func(_ *zap.Logger, name, _, _ string, _ time.Duration) error { rec.addWait(name); return nil }, PrintDeploymentDiagnostics: func(string, string, string) { 
rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { rec.add("tls"); return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { rec.add("tls"); return nil }, BuildOperatorImage: func(string) error { rec.add("build"); return nil }, PushOperatorImage: func(string) error { rec.add("push"); return nil }, BuildGatewayProxyImage: func(string) error { rec.add("build-gateway"); return nil }, PushGatewayProxyImage: func(string) error { rec.add("push-gateway"); return nil }, EnsureNamespace: func(string) error { rec.add("ensure-ns"); return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { rec.add("push-internal") return nil @@ -415,7 +420,7 @@ func TestSetupPlatformWithDeps_InternalRegistryTLS(t *testing.T) { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { rec.add("deploy-operator"); return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { rec.add("configure-env") return nil }, @@ -423,23 +428,23 @@ func TestSetupPlatformWithDeps_InternalRegistryTLS(t *testing.T) { CheckCRDInstalled: func(string) error { rec.add("check-crd"); return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { + OperatorImageFor: func(*config.ExternalRegistryConfig) string { rec.add("operator-image") return "registry.local/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { rec.add("gateway-image") return "registry.local/mcp-sentinel-mcp-proxy:latest" }, } - plan := 
SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/prod", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/prod", + Force: false, }, RegistryManifest: "config/registry/overlays/tls", TLSEnabled: true, @@ -473,8 +478,8 @@ func TestSetupPlatformWithDeps_InternalRegistryTLS(t *testing.T) { func TestSetupPlatformWithDeps_ExternalRegistryTLS(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { - return &ExternalRegistryConfig{ + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + return &config.ExternalRegistryConfig{ URL: "registry.example.com", Username: "user", Password: "pass", @@ -489,13 +494,13 @@ func TestSetupPlatformWithDeps_ExternalRegistryTLS(t *testing.T) { DeployRegistry: func(*zap.Logger, string, int, string, string, string) error { rec.add("deploy-registry"); return nil }, WaitForDeploymentAvailable: func(_ *zap.Logger, name, _, _ string, _ time.Duration) error { rec.addWait(name); return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { rec.add("tls"); return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { rec.add("tls"); return nil }, BuildOperatorImage: func(string) error { rec.add("build"); return nil }, PushOperatorImage: func(string) error { rec.add("push"); return nil }, BuildGatewayProxyImage: func(string) error { rec.add("build-gateway"); return nil }, PushGatewayProxyImage: func(string) error { rec.add("push-gateway"); return nil }, EnsureNamespace: func(string) error { rec.add("ensure-ns"); return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + 
ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { rec.add("push-internal") return nil @@ -505,7 +510,7 @@ func TestSetupPlatformWithDeps_ExternalRegistryTLS(t *testing.T) { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { rec.add("deploy-operator"); return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { rec.add("configure-env") return nil }, @@ -513,23 +518,23 @@ func TestSetupPlatformWithDeps_ExternalRegistryTLS(t *testing.T) { CheckCRDInstalled: func(string) error { rec.add("check-crd"); return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { + OperatorImageFor: func(*config.ExternalRegistryConfig) string { rec.add("operator-image") return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { rec.add("gateway-image") return "registry.example.com/mcp-sentinel-mcp-proxy:latest" }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/prod", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/prod", + Force: false, }, RegistryManifest: "config/registry/overlays/tls", TLSEnabled: true, @@ -559,7 +564,7 @@ func TestSetupPlatformWithDeps_ExternalRegistryTLS(t *testing.T) { func TestSetupPlatformWithDeps_DiagnosticsOnRegistryWaitFailure(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) 
(*ExternalRegistryConfig, error) { + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { return nil, nil }, ClusterManager: &fakeClusterManager{rec: rec}, @@ -580,34 +585,38 @@ func TestSetupPlatformWithDeps_DiagnosticsOnRegistryWaitFailure(t *testing.T) { return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { return nil }, BuildOperatorImage: func(string) error { return nil }, PushOperatorImage: func(string) error { return nil }, BuildGatewayProxyImage: func(string) error { return nil }, PushGatewayProxyImage: func(string) error { return nil }, EnsureNamespace: func(string) error { return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, PushGatewayProxyImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { return nil }, + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { return nil }, RestartDeployment: func(string, string) error { return nil }, CheckCRDInstalled: func(string) error { return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { return "registry.local/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { return "registry.local/mcp-sentinel-mcp-proxy:latest" }, + OperatorImageFor: func(*config.ExternalRegistryConfig) string { + 
return "registry.local/mcp-runtime-operator:latest" + }, + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.local/mcp-sentinel-mcp-proxy:latest" + }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", + Force: false, }, RegistryManifest: "config/registry", TLSEnabled: true, @@ -625,8 +634,8 @@ func TestSetupPlatformWithDeps_DiagnosticsOnRegistryWaitFailure(t *testing.T) { func TestSetupPlatformWithDeps_DiagnosticsOnOperatorWaitFailure(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { - return &ExternalRegistryConfig{URL: "registry.example.com"}, nil + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + return &config.ExternalRegistryConfig{URL: "registry.example.com"}, nil }, ClusterManager: &fakeClusterManager{rec: rec}, RegistryManager: &fakeRegistryManager{rec: rec}, @@ -640,34 +649,38 @@ func TestSetupPlatformWithDeps_DiagnosticsOnOperatorWaitFailure(t *testing.T) { return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { return nil }, BuildOperatorImage: func(string) error { return nil }, PushOperatorImage: func(string) error { return nil }, BuildGatewayProxyImage: func(string) error { return nil }, PushGatewayProxyImage: func(string) error { return nil }, EnsureNamespace: func(string) error { return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + ResolvePlatformRegistryURL: func(*zap.Logger) string { return 
"registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, PushGatewayProxyImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { return nil }, + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { return nil }, RestartDeployment: func(string, string) error { return nil }, CheckCRDInstalled: func(string) error { return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { return "registry.example.com/mcp-sentinel-mcp-proxy:latest" }, + OperatorImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-runtime-operator:latest" + }, + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-sentinel-mcp-proxy:latest" + }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", + Force: false, }, RegistryManifest: "config/registry/overlays/tls", TLSEnabled: true, @@ -687,8 +700,8 @@ func TestSetupPlatformWithDeps_DiagnosticsOnOperatorWaitFailure(t *testing.T) { func TestSetupPlatformWithDeps_CRDCheckFailure(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { - return &ExternalRegistryConfig{URL: "registry.example.com"}, 
nil + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { + return &config.ExternalRegistryConfig{URL: "registry.example.com"}, nil }, ClusterManager: &fakeClusterManager{rec: rec}, RegistryManager: &fakeRegistryManager{rec: rec}, @@ -699,36 +712,40 @@ func TestSetupPlatformWithDeps_CRDCheckFailure(t *testing.T) { return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { return nil }, BuildOperatorImage: func(string) error { return nil }, PushOperatorImage: func(string) error { return nil }, BuildGatewayProxyImage: func(string) error { return nil }, PushGatewayProxyImage: func(string) error { return nil }, EnsureNamespace: func(string) error { return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, PushGatewayProxyImageToInternal: func(*zap.Logger, string, string, string) error { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { return nil }, + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { return nil }, RestartDeployment: func(string, string) error { return nil }, CheckCRDInstalled: func(string) error { return fmt.Errorf("crd missing") }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { return 
"registry.example.com/mcp-sentinel-mcp-proxy:latest" }, + OperatorImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-runtime-operator:latest" + }, + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-sentinel-mcp-proxy:latest" + }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", + Force: false, }, RegistryManifest: "config/registry/overlays/tls", TLSEnabled: true, @@ -748,7 +765,7 @@ func TestSetupPlatformWithDeps_CRDCheckFailure(t *testing.T) { func TestSetupPlatformWithDeps_InternalRegistryPushFailure(t *testing.T) { rec := &callRecorder{} deps := SetupDeps{ - ResolveExternalRegistryConfig: func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) { + ResolveExternalRegistryConfig: func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) { return nil, nil }, ClusterManager: &fakeClusterManager{rec: rec}, @@ -760,13 +777,13 @@ func TestSetupPlatformWithDeps_InternalRegistryPushFailure(t *testing.T) { return nil }, PrintDeploymentDiagnostics: func(string, string, string) { rec.add("diagnostics") }, - SetupTLS: func(*zap.Logger, SetupPlan) error { return nil }, + SetupTLS: func(*zap.Logger, setupplan.Plan) error { return nil }, BuildOperatorImage: func(string) error { rec.add("build"); return nil }, PushOperatorImage: func(string) error { rec.add("push"); return nil }, BuildGatewayProxyImage: func(string) error { rec.add("build-gateway"); return nil }, PushGatewayProxyImage: func(string) error { rec.add("push-gateway"); return nil }, EnsureNamespace: func(string) error { rec.add("ensure-ns"); return nil }, - GetPlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, + 
ResolvePlatformRegistryURL: func(*zap.Logger) string { return "registry.local" }, PushOperatorImageToInternal: func(*zap.Logger, string, string, string) error { rec.add("push-internal") return fmt.Errorf("push failed") @@ -776,22 +793,26 @@ func TestSetupPlatformWithDeps_InternalRegistryPushFailure(t *testing.T) { return nil }, DeployOperatorManifests: func(*zap.Logger, string, string, []string) error { rec.add("deploy-operator"); return nil }, - ConfigureProvisionedRegistryEnv: func(*ExternalRegistryConfig, string) error { return nil }, + ConfigureProvisionedRegistryEnv: func(*config.ExternalRegistryConfig, string) error { return nil }, RestartDeployment: func(string, string) error { return nil }, CheckCRDInstalled: func(string) error { return nil }, GetDeploymentTimeout: func() time.Duration { return time.Second }, GetRegistryPort: func() int { return 5000 }, - OperatorImageFor: func(*ExternalRegistryConfig) string { return "registry.local/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(*ExternalRegistryConfig) string { return "registry.local/mcp-sentinel-mcp-proxy:latest" }, + OperatorImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.local/mcp-runtime-operator:latest" + }, + GatewayProxyImageFor: func(*config.ExternalRegistryConfig) string { + return "registry.local/mcp-sentinel-mcp-proxy:latest" + }, } - plan := SetupPlan{ + plan := setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "20Gi", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", - force: false, + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", + Force: false, }, RegistryManifest: "config/registry", TLSEnabled: false, diff --git a/internal/cli/setup.go b/internal/cli/setup/platform.go similarity index 56% rename from internal/cli/setup.go rename to internal/cli/setup/platform.go index 9f848f2..eb70951 100644 --- a/internal/cli/setup.go +++ 
b/internal/cli/setup/platform.go @@ -1,4 +1,4 @@ -package cli +package setup // This file implements the "setup" command for installing and configuring the MCP platform. // It handles cluster initialization, registry deployment, operator installation, and TLS setup. @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "net" "net/url" "os" "strconv" @@ -23,18 +22,27 @@ import ( "go.uber.org/zap" "gopkg.in/yaml.v3" + "mcp-runtime/internal/cli/certmanager" + "mcp-runtime/internal/cli/cluster" + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/kube" + "mcp-runtime/internal/cli/registry" + "mcp-runtime/internal/cli/registry/config" + "mcp-runtime/internal/cli/registry/ref" + "mcp-runtime/internal/cli/setup/assetpath" + "mcp-runtime/internal/cli/setup/ingressmanifest" + setupplan "mcp-runtime/internal/cli/setup/plan" "mcp-runtime/pkg/manifest" ) const defaultRegistrySecretName = "mcp-runtime-registry-creds" // #nosec G101 -- default secret name, not a credential. const testModeOperatorImage = "docker.io/library/mcp-runtime-operator:latest" const defaultGatewayProxyRepository = "mcp-sentinel-mcp-proxy" -const defaultAnalyticsNamespace = "mcp-sentinel" const defaultAnalyticsIngestURL = "http://mcp-sentinel-ingest.mcp-sentinel.svc.cluster.local:8081/events" const gatewayProxyDockerfilePath = "services/mcp-proxy/Dockerfile" const gatewayProxyBuildContext = "." 
-var setupImageTagResolver = getGitTag +var setupImageTagResolver = registry.DefaultGitTag type analyticsComponent struct { Name string @@ -89,7 +97,7 @@ var analyticsComponents = []analyticsComponent{ type ClusterManagerAPI interface { InitCluster(kubeconfig, context string) error - ConfigureCluster(opts ingressOptions) error + ConfigureCluster(opts cluster.IngressOptions) error } type RegistryManagerAPI interface { @@ -98,14 +106,14 @@ type RegistryManagerAPI interface { } type SetupDeps struct { - ResolveExternalRegistryConfig func(*ExternalRegistryConfig) (*ExternalRegistryConfig, error) + ResolveExternalRegistryConfig func(*config.ExternalRegistryConfig) (*config.ExternalRegistryConfig, error) ClusterManager ClusterManagerAPI RegistryManager RegistryManagerAPI LoginRegistry func(logger *zap.Logger, registryURL, username, password string) error DeployRegistry func(logger *zap.Logger, namespace string, port int, registryType, registryStorageSize, manifestPath string) error WaitForDeploymentAvailable func(logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error PrintDeploymentDiagnostics func(deploy, namespace, selector string) - SetupTLS func(logger *zap.Logger, plan SetupPlan) error + SetupTLS func(logger *zap.Logger, plan setupplan.Plan) error BuildOperatorImage func(image string) error PushOperatorImage func(image string) error BuildGatewayProxyImage func(image string) error @@ -113,36 +121,38 @@ type SetupDeps struct { BuildAnalyticsImage func(image, dockerfilePath, buildContext string) error PushAnalyticsImage func(image string) error EnsureNamespace func(namespace string) error - GetPlatformRegistryURL func(logger *zap.Logger) string + ResolvePlatformRegistryURL func(logger *zap.Logger) string PushOperatorImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error PushGatewayProxyImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error 
PushAnalyticsImageToInternal func(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error DeployOperatorManifests func(logger *zap.Logger, operatorImage, gatewayProxyImage string, operatorArgs []string) error DeployAnalyticsManifests func(logger *zap.Logger, images AnalyticsImageSet, storageMode string) error - ConfigureProvisionedRegistryEnv func(ext *ExternalRegistryConfig, secretName string) error + ConfigureProvisionedRegistryEnv func(ext *config.ExternalRegistryConfig, secretName string) error RestartDeployment func(name, namespace string) error CheckCRDInstalled func(name string) error GetDeploymentTimeout func() time.Duration GetRegistryPort func() int - OperatorImageFor func(ext *ExternalRegistryConfig) string - GatewayProxyImageFor func(ext *ExternalRegistryConfig) string + OperatorImageFor func(ext *config.ExternalRegistryConfig) string + GatewayProxyImageFor func(ext *config.ExternalRegistryConfig) string } func (d SetupDeps) withDefaults(logger *zap.Logger) SetupDeps { if d.ResolveExternalRegistryConfig == nil { - d.ResolveExternalRegistryConfig = resolveExternalRegistryConfig + d.ResolveExternalRegistryConfig = registry.ResolveExternalRegistryConfig } if d.ClusterManager == nil { - d.ClusterManager = DefaultClusterManager(logger) + panic("cli: SetupDeps.ClusterManager must be set; pass it via SetupPlatform") } if d.RegistryManager == nil { - d.RegistryManager = DefaultRegistryManager(logger) + d.RegistryManager = registry.DefaultRegistryManager(logger) } if d.LoginRegistry == nil { - d.LoginRegistry = loginRegistry + d.LoginRegistry = func(l *zap.Logger, registryURL, username, password string) error { + return registry.DefaultRegistryManager(l).LoginRegistry(registryURL, username, password) + } } if d.DeployRegistry == nil { - d.DeployRegistry = deployRegistry + d.DeployRegistry = registry.DeployRegistry } if d.WaitForDeploymentAvailable == nil { d.WaitForDeploymentAvailable = waitForDeploymentAvailable @@ -151,7 +161,9 @@ func (d 
SetupDeps) withDefaults(logger *zap.Logger) SetupDeps { d.PrintDeploymentDiagnostics = printDeploymentDiagnostics } if d.SetupTLS == nil { - d.SetupTLS = func(l *zap.Logger, p SetupPlan) error { return setupTLSWithKubectlAndPlan(kubectlClient, l, p) } + d.SetupTLS = func(l *zap.Logger, p setupplan.Plan) error { + return setupTLSWithKubectlAndPlan(core.DefaultKubectlClient(), l, p) + } } if d.BuildOperatorImage == nil { d.BuildOperatorImage = buildOperatorImage @@ -172,10 +184,12 @@ func (d SetupDeps) withDefaults(logger *zap.Logger) SetupDeps { d.PushAnalyticsImage = pushAnalyticsImage } if d.EnsureNamespace == nil { - d.EnsureNamespace = ensureNamespace + d.EnsureNamespace = func(namespace string) error { + return kube.EnsureNamespace(core.DefaultKubectlClient().CommandArgs, namespace) + } } - if d.GetPlatformRegistryURL == nil { - d.GetPlatformRegistryURL = getPlatformRegistryURL + if d.ResolvePlatformRegistryURL == nil { + d.ResolvePlatformRegistryURL = registry.ResolvePlatformRegistryURL } if d.PushOperatorImageToInternal == nil { d.PushOperatorImageToInternal = pushOperatorImageToInternalRegistry @@ -202,10 +216,10 @@ func (d SetupDeps) withDefaults(logger *zap.Logger) SetupDeps { d.CheckCRDInstalled = checkCRDInstalled } if d.GetDeploymentTimeout == nil { - d.GetDeploymentTimeout = GetDeploymentTimeout + d.GetDeploymentTimeout = core.GetDeploymentTimeout } if d.GetRegistryPort == nil { - d.GetRegistryPort = GetRegistryPort + d.GetRegistryPort = core.GetRegistryPort } if d.OperatorImageFor == nil { d.OperatorImageFor = getOperatorImage @@ -224,10 +238,10 @@ func ValidateTLSSetupCLIFlags( acmeStagingResolved, skipCertManagerInstall bool, ) error { if acmeEmailResolved != "" && tlsCIResolved != "" { - return newWithSentinel(ErrFieldRequired, "use either --acme-email (or MCP_ACME_EMAIL) for public Let's Encrypt, or --tls-cluster-issuer (or MCP_TLS_CLUSTER_ISSUER) for an existing internal ClusterIssuer, not both") + return 
core.NewWithSentinel(core.ErrFieldRequired, "use either --acme-email (or MCP_ACME_EMAIL) for public Let's Encrypt, or --tls-cluster-issuer (or MCP_TLS_CLUSTER_ISSUER) for an existing internal ClusterIssuer, not both") } if !tlsEnabled && (tlsCIResolved != "" || acmeEmailResolved != "" || acmeStagingResolved || skipCertManagerInstall) { - return newWithSentinel(ErrFieldRequired, "--with-tls is required when using --acme-email, --tls-cluster-issuer, --acme-staging, --skip-cert-manager-install, or related environment variables (MCP_ACME_EMAIL, MCP_ACME_STAGING, MCP_TLS_CLUSTER_ISSUER)") + return core.NewWithSentinel(core.ErrFieldRequired, "--with-tls is required when using --acme-email, --tls-cluster-issuer, --acme-staging, --skip-cert-manager-install, or related environment variables (MCP_ACME_EMAIL, MCP_ACME_STAGING, MCP_TLS_CLUSTER_ISSUER)") } return nil } @@ -252,32 +266,24 @@ func BuildOperatorArgs(metricsAddr, probeAddr string, leaderElect, leaderElectCh func ValidateStorageMode(mode string) error { switch mode { - case StorageModeDynamic, StorageModeHostpath: + case setupplan.StorageModeDynamic, setupplan.StorageModeHostpath: return nil default: - return wrapWithSentinel(ErrFieldRequired, fmt.Errorf("invalid storage mode %q", mode), "invalid --storage-mode; expected dynamic or hostpath") + return core.WrapWithSentinel(core.ErrFieldRequired, fmt.Errorf("invalid storage mode %q", mode), "invalid --storage-mode; expected dynamic or hostpath") } } -func SetupPlatform(logger *zap.Logger, plan SetupPlan) error { - return setupPlatformWithDeps(logger, plan, SetupDeps{}.withDefaults(logger)) -} - -func validateTLSSetupCLIFlags( - tlsEnabled bool, - acmeEmailResolved, tlsCIResolved string, - acmeStagingResolved, skipCertManagerInstall bool, -) error { - return ValidateTLSSetupCLIFlags(tlsEnabled, acmeEmailResolved, tlsCIResolved, acmeStagingResolved, skipCertManagerInstall) +func SetupPlatform(logger *zap.Logger, plan setupplan.Plan, clusterMgr ClusterManagerAPI) error 
{ + return setupPlatformWithDeps(logger, plan, SetupDeps{ClusterManager: clusterMgr}.withDefaults(logger)) } func buildOperatorArgs(metricsAddr, probeAddr string, leaderElect, leaderElectChanged bool) []string { return BuildOperatorArgs(metricsAddr, probeAddr, leaderElect, leaderElectChanged) } -func setupPlatformWithDeps(logger *zap.Logger, plan SetupPlan, deps SetupDeps) error { +func setupPlatformWithDeps(logger *zap.Logger, plan setupplan.Plan, deps SetupDeps) error { deps = deps.withDefaults(logger) - Section("MCP Runtime Setup") + core.Section("MCP Runtime Setup") // Propagate test mode to build helpers so they can choose faster/safer build paths. if plan.TestMode { @@ -288,12 +294,12 @@ func setupPlatformWithDeps(logger *zap.Logger, plan SetupPlan, deps SetupDeps) e extRegistry, usingExternalRegistry, registrySecretName := resolveRegistrySetup(logger, deps) if err := validateNonTestSetup(plan, extRegistry, usingExternalRegistry); err != nil { - logStructuredError(logger, err, "Invalid non-test setup configuration") + core.LogStructuredError(logger, err, "Invalid non-test setup configuration") return err } applySetupPlanToCLIConfig(plan) for _, warning := range setupWarnings(plan, extRegistry, usingExternalRegistry) { - Warn(warning) + core.Warn(warning) } ctx := &SetupContext{ Plan: plan, @@ -305,149 +311,12 @@ func setupPlatformWithDeps(logger *zap.Logger, plan SetupPlan, deps SetupDeps) e return err } - Success("Platform setup complete") - fmt.Println(Green("\nPlatform is ready. Use 'mcp-runtime status' to check everything.")) + core.Success("Platform setup complete") + fmt.Println(core.Green("\nPlatform is ready. Use 'mcp-runtime status' to check everything.")) printPlatformEntrypoints(plan.TLSEnabled) return nil } -// printPlatformEntrypoints prints the public URLs derived from -// MCP_PLATFORM_DOMAIN / MCP_*_INGRESS_HOST so the operator knows which -// hostnames must resolve in DNS and what the dashboard URL is. 
-func printPlatformEntrypoints(tlsEnabled bool) { - scheme := "http://" - if tlsEnabled { - scheme = "https://" - } - registry := strings.TrimSpace(GetRegistryIngressHost()) - mcp := strings.TrimSpace(GetMcpIngressHost()) - platform := strings.TrimSpace(GetPlatformIngressHost()) - if registry == "" && mcp == "" && platform == "" { - return - } - fmt.Println() - fmt.Println("Public entrypoints:") - if platform != "" { - fmt.Printf(" Dashboard: %s%s/\n", scheme, platform) - } - if registry != "" { - fmt.Printf(" Registry: %s%s/v2/\n", scheme, registry) - } - if mcp != "" { - fmt.Printf(" MCP: %s%s//mcp\n", scheme, mcp) - } - if platform != "" { - fmt.Println(" (Make sure DNS A/AAAA records point platform./registry./mcp. at the cluster ingress.)") - } -} - -func resolveRegistrySetup(logger *zap.Logger, deps SetupDeps) (*ExternalRegistryConfig, bool, string) { - extRegistry, err := deps.ResolveExternalRegistryConfig(nil) - if err != nil { - Warn(fmt.Sprintf("Could not load external registry config: %v", err)) - } - usingExternalRegistry := extRegistry != nil - return extRegistry, usingExternalRegistry, defaultRegistrySecretName -} - -func validateNonTestSetup(plan SetupPlan, extRegistry *ExternalRegistryConfig, usingExternalRegistry bool) error { - if plan.TestMode { - return nil - } - if !plan.StrictProd { - return nil - } - if !plan.TLSEnabled { - return newWithSentinel( - ErrSetupStepFailed, - "strict production setup requires --with-tls; use normal setup for local HTTP/internal registry flows", - ) - } - if usingExternalRegistry && extRegistry != nil && strings.TrimSpace(extRegistry.URL) != "" { - if isDevRegistryURL(extRegistry.URL) { - return newWithSentinel( - ErrSetupStepFailed, - fmt.Sprintf("strict production setup requires a stable production registry, got dev-only registry URL %q", extRegistry.URL), - ) - } - return nil - } - if isDevRegistryURL(GetRegistryEndpoint()) { - return newWithSentinel( - ErrSetupStepFailed, - fmt.Sprintf("strict production setup 
requires a stable internal registry endpoint; set MCP_REGISTRY_ENDPOINT (current %q)", GetRegistryEndpoint()), - ) - } - return nil -} - -func setupWarnings(plan SetupPlan, extRegistry *ExternalRegistryConfig, usingExternalRegistry bool) []string { - if plan.TestMode { - return nil - } - - var warnings []string - if !plan.TLSEnabled { - warnings = append(warnings, "Non-test setup is running without TLS. This is fine for local/internal registries but not recommended for production.") - } - - if usingExternalRegistry && extRegistry != nil && strings.TrimSpace(extRegistry.URL) != "" { - registryURL := strings.TrimSpace(extRegistry.URL) - if strings.HasPrefix(strings.ToLower(registryURL), "http://") { - warnings = append(warnings, fmt.Sprintf("External registry %q is using HTTP. This is acceptable for local environments but not recommended for production.", registryURL)) - } - if isDevRegistryURL(registryURL) { - warnings = append(warnings, fmt.Sprintf("External registry %q looks local/internal. Normal setup allows this, but use --strict-prod to enforce production-style validation.", registryURL)) - } - return warnings - } - - registryEndpoint := strings.TrimSpace(GetRegistryEndpoint()) - if registryEndpoint == "" { - warnings = append(warnings, "Internal registry host is empty; setup will fall back to service DNS. This is fine for local clusters but not recommended for production.") - return warnings - } - if isDevRegistryURL(registryEndpoint) { - warnings = append(warnings, fmt.Sprintf("Internal registry endpoint %q looks local/internal. 
Normal setup allows this for local clusters, but use --strict-prod to enforce production-style validation.", registryEndpoint)) - } - return warnings -} - -func isDevRegistryURL(raw string) bool { - trimmed := strings.TrimSpace(strings.TrimSuffix(raw, "/")) - if trimmed == "" { - return true - } - if strings.HasPrefix(strings.ToLower(trimmed), "http://") { - return true - } - - host := trimmed - if strings.Contains(trimmed, "://") { - if parsed, err := url.Parse(trimmed); err == nil && parsed.Host != "" { - host = parsed.Host - } - } - if slash := strings.Index(host, "/"); slash >= 0 { - host = host[:slash] - } - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } else if idx := strings.LastIndex(host, ":"); idx >= 0 && strings.Count(host, ":") == 1 { - host = host[:idx] - } - - host = strings.ToLower(strings.Trim(host, "[]")) - switch host { - case "", "localhost", "registry.local": - return true - } - if strings.HasSuffix(host, ".local") || strings.HasSuffix(host, ".svc.cluster.local") { - return true - } - return net.ParseIP(host) != nil -} - func setupImageTag() string { if os.Getenv("MCP_RUNTIME_TEST_MODE") == "1" { return "latest" @@ -455,74 +324,74 @@ func setupImageTag() string { return setupImageTagResolver() } -func setupClusterSteps(logger *zap.Logger, kubeconfig, context string, ingressOpts ingressOptions, deps SetupDeps) error { +func setupClusterSteps(logger *zap.Logger, kubeconfig, context string, ingressOpts cluster.IngressOptions, deps SetupDeps) error { // Step 1: Initialize cluster - Step("Step 1: Initialize cluster") - Info("Installing CRD") + core.Step("Step 1: Initialize cluster") + core.Info("Installing CRD") if err := deps.ClusterManager.InitCluster(kubeconfig, context); err != nil { - wrappedErr := wrapWithSentinel(ErrClusterInitFailed, err, fmt.Sprintf("failed to initialize cluster: %v", err)) - Error("Cluster initialization failed") - logStructuredError(logger, wrappedErr, "Cluster initialization failed") + wrappedErr := 
core.WrapWithSentinel(core.ErrClusterInitFailed, err, fmt.Sprintf("failed to initialize cluster: %v", err)) + core.Error("Cluster initialization failed") + core.LogStructuredError(logger, wrappedErr, "Cluster initialization failed") return wrappedErr } - Info("Cluster initialized") + core.Info("Cluster initialized") // Step 2: Configure cluster - Step("Step 2: Configure cluster") - Info("Checking ingress controller") + core.Step("Step 2: Configure cluster") + core.Info("Checking ingress controller") if err := deps.ClusterManager.ConfigureCluster(ingressOpts); err != nil { - wrappedErr := wrapWithSentinel(ErrClusterConfigFailed, err, fmt.Sprintf("cluster configuration failed: %v", err)) - Error("Cluster configuration failed") - logStructuredError(logger, wrappedErr, "Cluster configuration failed") + wrappedErr := core.WrapWithSentinel(core.ErrClusterConfigFailed, err, fmt.Sprintf("cluster configuration failed: %v", err)) + core.Error("Cluster configuration failed") + core.LogStructuredError(logger, wrappedErr, "Cluster configuration failed") return wrappedErr } - Info("Cluster configuration complete") + core.Info("Cluster configuration complete") return nil } -func setupTLSStep(logger *zap.Logger, plan SetupPlan, deps SetupDeps) error { +func setupTLSStep(logger *zap.Logger, plan setupplan.Plan, deps SetupDeps) error { // Step 3: Configure TLS (if enabled) - Step("Step 3: Configure TLS") + core.Step("Step 3: Configure TLS") if !plan.TLSEnabled { - Info("Skipped (TLS disabled, use --with-tls to enable)") + core.Info("Skipped (TLS disabled, use --with-tls to enable)") return nil } if err := deps.SetupTLS(logger, plan); err != nil { - wrappedErr := wrapWithSentinel(ErrTLSSetupFailed, err, fmt.Sprintf("TLS setup failed: %v", err)) - Error("TLS setup failed") - logStructuredError(logger, wrappedErr, "TLS setup failed") + wrappedErr := core.WrapWithSentinel(core.ErrTLSSetupFailed, err, fmt.Sprintf("TLS setup failed: %v", err)) + core.Error("TLS setup failed") + 
core.LogStructuredError(logger, wrappedErr, "TLS setup failed") return wrappedErr } - Success("TLS configured successfully") + core.Success("TLS configured successfully") return nil } -func setupRegistryStep(logger *zap.Logger, extRegistry *ExternalRegistryConfig, usingExternalRegistry bool, registryType, registryStorageSize, registryManifest string, tlsEnabled bool, deps SetupDeps) error { +func setupRegistryStep(logger *zap.Logger, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry bool, registryType, registryStorageSize, registryManifest string, tlsEnabled bool, deps SetupDeps) error { // Step 4: Deploy internal container registry - Step("Step 4: Configure registry") + core.Step("Step 4: Configure registry") if usingExternalRegistry { - Info(fmt.Sprintf("Using external registry: %s", extRegistry.URL)) + core.Info(fmt.Sprintf("Using external registry: %s", extRegistry.URL)) if extRegistry.Username != "" || extRegistry.Password != "" { - Info("Logging into external registry") + core.Info("Logging into external registry") if err := deps.LoginRegistry(logger, extRegistry.URL, extRegistry.Username, extRegistry.Password); err != nil { - wrappedErr := wrapWithSentinel(ErrRegistryLoginFailed, err, fmt.Sprintf("failed to login to registry %q: %v", extRegistry.URL, err)) - Error("Registry login failed") - logStructuredError(logger, wrappedErr, "Registry login failed") + wrappedErr := core.WrapWithSentinel(core.ErrRegistryLoginFailed, err, fmt.Sprintf("failed to login to registry %q: %v", extRegistry.URL, err)) + core.Error("Registry login failed") + core.LogStructuredError(logger, wrappedErr, "Registry login failed") return wrappedErr } } return nil } - Info(fmt.Sprintf("Type: %s", registryType)) + core.Info(fmt.Sprintf("Type: %s", registryType)) if tlsEnabled { - Info("TLS: enabled (registry overlay)") + core.Info("TLS: enabled (registry overlay)") } else { - Info("TLS: disabled (dev HTTP mode)") + core.Info("TLS: disabled (dev HTTP mode)") } if err := 
deps.DeployRegistry(logger, "registry", deps.GetRegistryPort(), registryType, registryStorageSize, registryManifest); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrDeployRegistryFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrDeployRegistryFailed, err, fmt.Sprintf("failed to deploy registry (type: %s, manifest: %s): %v", registryType, registryManifest, err), map[string]any{ @@ -533,12 +402,12 @@ func setupRegistryStep(logger *zap.Logger, extRegistry *ExternalRegistryConfig, "registry_port": deps.GetRegistryPort(), }, ) - Error("Registry deployment failed") - logStructuredError(logger, wrappedErr, "Registry deployment failed") + core.Error("Registry deployment failed") + core.LogStructuredError(logger, wrappedErr, "Registry deployment failed") return wrappedErr } - Info("Waiting for registry to be ready...") + core.Info("Waiting for registry to be ready...") if err := deps.WaitForDeploymentAvailable(logger, "registry", "registry", "app=registry", deps.GetDeploymentTimeout()); err != nil { deps.PrintDeploymentDiagnostics("registry", "registry", "app=registry") regCtx := map[string]any{ @@ -547,26 +416,26 @@ func setupRegistryStep(logger *zap.Logger, extRegistry *ExternalRegistryConfig, "selector": "app=registry", "component": "registry", } - mergeDeploymentDebugDiagnosticsIfNeeded(kubectlClient, regCtx, "registry", "registry", "app=registry") - wrappedErr := wrapWithSentinelAndContext( - ErrRegistryNotReady, + mergeDeploymentDebugDiagnosticsIfNeeded(core.DefaultKubectlClient(), regCtx, "registry", "registry", "app=registry") + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrRegistryNotReady, err, fmt.Sprintf("registry deployment not ready in namespace %q: %v", "registry", err), regCtx, ) - Error("Registry failed to become ready") - logStructuredError(logger, wrappedErr, "Registry failed to become ready") + core.Error("Registry failed to become ready") + core.LogStructuredError(logger, wrappedErr, "Registry failed to become 
ready") return wrappedErr } if err := deps.RegistryManager.ShowRegistryInfo(); err != nil { - Warn(fmt.Sprintf("Failed to show registry info: %v", err)) + core.Warn(fmt.Sprintf("Failed to show registry info: %v", err)) } return nil } -func prepareDeploymentImages(logger *zap.Logger, extRegistry *ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, string, error) { - Step("Step 5: Publish runtime images") +func prepareDeploymentImages(logger *zap.Logger, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, string, error) { + core.Step("Step 5: Publish runtime images") operatorImage, err := prepareOperatorImage(logger, extRegistry, usingExternalRegistry, testMode, deps) if err != nil { @@ -579,14 +448,14 @@ func prepareDeploymentImages(logger *zap.Logger, extRegistry *ExternalRegistryCo return operatorImage, gatewayProxyImage, nil } -func prepareOperatorImage(logger *zap.Logger, extRegistry *ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, error) { +func prepareOperatorImage(logger *zap.Logger, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, error) { operatorImage := deps.OperatorImageFor(extRegistry) - Info(fmt.Sprintf("Operator image: %s", operatorImage)) + core.Info(fmt.Sprintf("Operator image: %s", operatorImage)) - Info("Building operator image") + core.Info("Building operator image") if err := deps.BuildOperatorImage(operatorImage); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrOperatorImageBuildFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrOperatorImageBuildFailed, err, fmt.Sprintf("operator image build failed for image %q: %v", operatorImage, err), map[string]any{ @@ -594,46 +463,46 @@ func prepareOperatorImage(logger *zap.Logger, extRegistry *ExternalRegistryConfi "component": "operator", }, ) - Error("Operator image build failed") - 
logStructuredError(logger, wrappedErr, "Operator image build failed") + core.Error("Operator image build failed") + core.LogStructuredError(logger, wrappedErr, "Operator image build failed") return "", wrappedErr } if usingExternalRegistry { if testMode { - Info("Test mode: pushing operator image to external registry") + core.Info("Test mode: pushing operator image to external registry") } else { - Info("Pushing operator image to external registry") + core.Info("Pushing operator image to external registry") } if err := deps.PushOperatorImage(operatorImage); err != nil { - Warn(fmt.Sprintf("Could not push image to external registry: %v", err)) + core.Warn(fmt.Sprintf("Could not push image to external registry: %v", err)) } return operatorImage, nil } - Info("Pushing operator image to internal registry") - internalRegistryURL := deps.GetPlatformRegistryURL(logger) - _, operatorTag := splitImage(operatorImage) + core.Info("Pushing operator image to internal registry") + internalRegistryURL := deps.ResolvePlatformRegistryURL(logger) + _, operatorTag := ref.SplitImage(operatorImage) if operatorTag == "" { operatorTag = setupImageTag() } internalOperatorImage := fmt.Sprintf("%s/mcp-runtime-operator:%s", internalRegistryURL, operatorTag) if err := deps.EnsureNamespace("registry"); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureRegistryNamespaceFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureRegistryNamespaceFailed, err, fmt.Sprintf("failed to ensure registry namespace: %v", err), map[string]any{"namespace": "registry", "component": "setup"}, ) - Error("Failed to ensure registry namespace") - logStructuredError(logger, wrappedErr, "Failed to ensure registry namespace") + core.Error("Failed to ensure registry namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to ensure registry namespace") return "", wrappedErr } if err := deps.PushOperatorImageToInternal(logger, operatorImage, internalOperatorImage, "registry"); 
err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushOperatorImageInternalFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushOperatorImageInternalFailed, err, fmt.Sprintf("failed to push operator image %q to internal registry %q: %v", operatorImage, internalOperatorImage, err), map[string]any{ @@ -643,22 +512,22 @@ func prepareOperatorImage(logger *zap.Logger, extRegistry *ExternalRegistryConfi "component": "operator", }, ) - Error("Failed to push operator image to internal registry") - logStructuredError(logger, wrappedErr, "Failed to push operator image to internal registry") + core.Error("Failed to push operator image to internal registry") + core.LogStructuredError(logger, wrappedErr, "Failed to push operator image to internal registry") return "", wrappedErr } - Info(fmt.Sprintf("Using internal registry image: %s", internalOperatorImage)) + core.Info(fmt.Sprintf("Using internal registry image: %s", internalOperatorImage)) return internalOperatorImage, nil } -func prepareGatewayProxyImage(logger *zap.Logger, extRegistry *ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, error) { +func prepareGatewayProxyImage(logger *zap.Logger, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (string, error) { gatewayProxyImage := deps.GatewayProxyImageFor(extRegistry) - Info(fmt.Sprintf("Gateway proxy image: %s", gatewayProxyImage)) + core.Info(fmt.Sprintf("Gateway proxy image: %s", gatewayProxyImage)) - Info("Building gateway proxy image") + core.Info("Building gateway proxy image") if err := deps.BuildGatewayProxyImage(gatewayProxyImage); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrGatewayProxyImageBuildFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrGatewayProxyImageBuildFailed, err, fmt.Sprintf("gateway proxy image build failed for image %q: %v", gatewayProxyImage, err), map[string]any{ @@ -666,46 +535,46 @@ func 
prepareGatewayProxyImage(logger *zap.Logger, extRegistry *ExternalRegistryC "component": "gateway-proxy", }, ) - Error("Gateway proxy image build failed") - logStructuredError(logger, wrappedErr, "Gateway proxy image build failed") + core.Error("Gateway proxy image build failed") + core.LogStructuredError(logger, wrappedErr, "Gateway proxy image build failed") return "", wrappedErr } if usingExternalRegistry { if testMode { - Info("Test mode: pushing gateway proxy image to external registry") + core.Info("Test mode: pushing gateway proxy image to external registry") } else { - Info("Pushing gateway proxy image to external registry") + core.Info("Pushing gateway proxy image to external registry") } if err := deps.PushGatewayProxyImage(gatewayProxyImage); err != nil { - Warn(fmt.Sprintf("Could not push gateway proxy image to external registry: %v", err)) + core.Warn(fmt.Sprintf("Could not push gateway proxy image to external registry: %v", err)) } return gatewayProxyImage, nil } - Info("Pushing gateway proxy image to internal registry") - internalRegistryURL := deps.GetPlatformRegistryURL(logger) - _, gatewayTag := splitImage(gatewayProxyImage) + core.Info("Pushing gateway proxy image to internal registry") + internalRegistryURL := deps.ResolvePlatformRegistryURL(logger) + _, gatewayTag := ref.SplitImage(gatewayProxyImage) if gatewayTag == "" { gatewayTag = setupImageTag() } internalGatewayProxyImage := fmt.Sprintf("%s/%s:%s", internalRegistryURL, defaultGatewayProxyRepository, gatewayTag) if err := deps.EnsureNamespace("registry"); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureRegistryNamespaceFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureRegistryNamespaceFailed, err, fmt.Sprintf("failed to ensure registry namespace: %v", err), map[string]any{"namespace": "registry", "component": "setup"}, ) - Error("Failed to ensure registry namespace") - logStructuredError(logger, wrappedErr, "Failed to ensure registry namespace") + 
core.Error("Failed to ensure registry namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to ensure registry namespace") return "", wrappedErr } if err := deps.PushGatewayProxyImageToInternal(logger, gatewayProxyImage, internalGatewayProxyImage, "registry"); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushGatewayProxyImageInternalFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushGatewayProxyImageInternalFailed, err, fmt.Sprintf("failed to push gateway proxy image %q to internal registry %q: %v", gatewayProxyImage, internalGatewayProxyImage, err), map[string]any{ @@ -715,17 +584,17 @@ func prepareGatewayProxyImage(logger *zap.Logger, extRegistry *ExternalRegistryC "component": "gateway-proxy", }, ) - Error("Failed to push gateway proxy image to internal registry") - logStructuredError(logger, wrappedErr, "Failed to push gateway proxy image to internal registry") + core.Error("Failed to push gateway proxy image to internal registry") + core.LogStructuredError(logger, wrappedErr, "Failed to push gateway proxy image to internal registry") return "", wrappedErr } - Info(fmt.Sprintf("Using internal registry gateway proxy image: %s", internalGatewayProxyImage)) + core.Info(fmt.Sprintf("Using internal registry gateway proxy image: %s", internalGatewayProxyImage)) return internalGatewayProxyImage, nil } -func prepareAnalyticsImages(logger *zap.Logger, extRegistry *ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (AnalyticsImageSet, error) { - Step("Step 5a: Publish analytics images") +func prepareAnalyticsImages(logger *zap.Logger, extRegistry *config.ExternalRegistryConfig, usingExternalRegistry, testMode bool, deps SetupDeps) (AnalyticsImageSet, error) { + core.Step("Step 5a: Publish analytics images") images := AnalyticsImageSet{ Ingest: analyticsImageFor(extRegistry, analyticsComponents[0].Repository), @@ -737,13 +606,13 @@ func prepareAnalyticsImages(logger *zap.Logger, extRegistry 
*ExternalRegistryCon for _, component := range analyticsComponents { image := analyticsImageFor(extRegistry, component.Repository) if testMode { - Info(fmt.Sprintf("Test mode: building analytics %s image: %s", component.Name, image)) + core.Info(fmt.Sprintf("Test mode: building analytics %s image: %s", component.Name, image)) } else { - Info(fmt.Sprintf("Building analytics %s image: %s", component.Name, image)) + core.Info(fmt.Sprintf("Building analytics %s image: %s", component.Name, image)) } if err := deps.BuildAnalyticsImage(image, component.Dockerfile, component.BuildContext); err != nil { - return AnalyticsImageSet{}, wrapWithSentinelAndContext( - ErrBuildImageFailed, + return AnalyticsImageSet{}, core.WrapWithSentinelAndContext( + core.ErrBuildImageFailed, err, fmt.Sprintf("failed to build analytics %s image %q: %v", component.Name, image, err), map[string]any{"image": image, "component": component.Name}, @@ -751,38 +620,38 @@ func prepareAnalyticsImages(logger *zap.Logger, extRegistry *ExternalRegistryCon } if usingExternalRegistry { if testMode { - Info(fmt.Sprintf("Test mode: pushing analytics %s image to external registry", component.Name)) + core.Info(fmt.Sprintf("Test mode: pushing analytics %s image to external registry", component.Name)) } else { - Info(fmt.Sprintf("Pushing analytics %s image to external registry", component.Name)) + core.Info(fmt.Sprintf("Pushing analytics %s image to external registry", component.Name)) } if err := deps.PushAnalyticsImage(image); err != nil { - Warn(fmt.Sprintf("Could not push analytics %s image to external registry: %v", component.Name, err)) + core.Warn(fmt.Sprintf("Could not push analytics %s image to external registry: %v", component.Name, err)) } continue } if testMode { - Info(fmt.Sprintf("Test mode: pushing analytics %s image to internal registry", component.Name)) + core.Info(fmt.Sprintf("Test mode: pushing analytics %s image to internal registry", component.Name)) } else { - Info(fmt.Sprintf("Pushing 
analytics %s image to internal registry", component.Name)) + core.Info(fmt.Sprintf("Pushing analytics %s image to internal registry", component.Name)) } - internalRegistryURL := deps.GetPlatformRegistryURL(logger) - _, imageTag := splitImage(image) + internalRegistryURL := deps.ResolvePlatformRegistryURL(logger) + _, imageTag := ref.SplitImage(image) if imageTag == "" { imageTag = setupImageTag() } internalImage := fmt.Sprintf("%s/%s:%s", internalRegistryURL, component.Repository, imageTag) if err := deps.EnsureNamespace("registry"); err != nil { - return AnalyticsImageSet{}, wrapWithSentinelAndContext( - ErrEnsureRegistryNamespaceFailed, + return AnalyticsImageSet{}, core.WrapWithSentinelAndContext( + core.ErrEnsureRegistryNamespaceFailed, err, fmt.Sprintf("failed to ensure registry namespace: %v", err), map[string]any{"namespace": "registry", "component": component.Name}, ) } if err := deps.PushAnalyticsImageToInternal(logger, image, internalImage, "registry"); err != nil { - return AnalyticsImageSet{}, wrapWithSentinelAndContext( - ErrPushImageInClusterFailed, + return AnalyticsImageSet{}, core.WrapWithSentinelAndContext( + core.ErrPushImageInClusterFailed, err, fmt.Sprintf("failed to push analytics %s image %q to internal registry %q: %v", component.Name, image, internalImage, err), map[string]any{"source_image": image, "target_image": internalImage, "component": component.Name}, @@ -804,71 +673,71 @@ func prepareAnalyticsImages(logger *zap.Logger, extRegistry *ExternalRegistryCon } func deployAnalyticsStepCmd(logger *zap.Logger, images AnalyticsImageSet, storageMode string, deps SetupDeps) error { - Info("Deploying mcp-sentinel manifests") + core.Info("Deploying mcp-sentinel manifests") if err := deps.DeployAnalyticsManifests(logger, images, storageMode); err != nil { - Error("Analytics deployment failed") - logStructuredError(logger, err, "Analytics deployment failed") + core.Error("Analytics deployment failed") + core.LogStructuredError(logger, err, 
"Analytics deployment failed") return err } return nil } -func deployOperatorStep(logger *zap.Logger, operatorImage, gatewayProxyImage string, extRegistry *ExternalRegistryConfig, registrySecretName string, usingExternalRegistry bool, operatorArgs []string, deps SetupDeps) error { - Info("Deploying operator manifests") +func deployOperatorStep(logger *zap.Logger, operatorImage, gatewayProxyImage string, extRegistry *config.ExternalRegistryConfig, registrySecretName string, usingExternalRegistry bool, operatorArgs []string, deps SetupDeps) error { + core.Info("Deploying operator manifests") if err := deps.DeployOperatorManifests(logger, operatorImage, gatewayProxyImage, operatorArgs); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrOperatorDeploymentFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrOperatorDeploymentFailed, err, fmt.Sprintf("operator deployment failed for image %q: %v", operatorImage, err), map[string]any{ "image": operatorImage, - "namespace": NamespaceMCPRuntime, + "namespace": core.NamespaceMCPRuntime, "component": "operator", }, ) - Error("Operator deployment failed") - logStructuredError(logger, wrappedErr, "Operator deployment failed") + core.Error("Operator deployment failed") + core.LogStructuredError(logger, wrappedErr, "Operator deployment failed") return wrappedErr } if usingExternalRegistry { if err := deps.ConfigureProvisionedRegistryEnv(extRegistry, registrySecretName); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrConfigureExternalRegistryEnvFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrConfigureExternalRegistryEnvFailed, err, fmt.Sprintf("failed to configure external registry env on operator (registry: %q, secret: %q): %v", extRegistry.URL, registrySecretName, err), map[string]any{ "registry_url": extRegistry.URL, "secret_name": registrySecretName, - "namespace": NamespaceMCPRuntime, + "namespace": core.NamespaceMCPRuntime, "component": "operator", }, ) - 
Error("Failed to configure external registry environment") - logStructuredError(logger, wrappedErr, "Failed to configure external registry environment") + core.Error("Failed to configure external registry environment") + core.LogStructuredError(logger, wrappedErr, "Failed to configure external registry environment") return wrappedErr } } if err := deps.RestartDeployment("mcp-runtime-operator-controller-manager", "mcp-runtime"); err != nil { if usingExternalRegistry { - wrappedErr := wrapWithSentinel(ErrRestartOperatorDeploymentFailed, err, fmt.Sprintf("failed to restart operator deployment after registry env update: %v", err)) - Error("Failed to restart operator deployment") - logStructuredError(logger, wrappedErr, "Failed to restart operator deployment") + wrappedErr := core.WrapWithSentinel(core.ErrRestartOperatorDeploymentFailed, err, fmt.Sprintf("failed to restart operator deployment after registry env update: %v", err)) + core.Error("Failed to restart operator deployment") + core.LogStructuredError(logger, wrappedErr, "Failed to restart operator deployment") return wrappedErr } - Warn(fmt.Sprintf("Could not restart operator deployment: %v", err)) + core.Warn(fmt.Sprintf("Could not restart operator deployment: %v", err)) } return nil } func verifySetup(logger *zap.Logger, usingExternalRegistry bool, deps SetupDeps) error { - Step("Step 6: Verify platform components") + core.Step("Step 6: Verify platform components") if usingExternalRegistry { - Info("Skipping internal registry availability check (using external registry)") + core.Info("Skipping internal registry availability check (using external registry)") } else { - Info("Waiting for registry deployment to be available") + core.Info("Waiting for registry deployment to be available") if err := deps.WaitForDeploymentAvailable(logger, "registry", "registry", "app=registry", deps.GetDeploymentTimeout()); err != nil { deps.PrintDeploymentDiagnostics("registry", "registry", "app=registry") regCtx := 
map[string]any{ @@ -877,20 +746,20 @@ func verifySetup(logger *zap.Logger, usingExternalRegistry bool, deps SetupDeps) "selector": "app=registry", "component": "registry", } - mergeDeploymentDebugDiagnosticsIfNeeded(kubectlClient, regCtx, "registry", "registry", "app=registry") - wrappedErr := wrapWithSentinelAndContext( - ErrRegistryNotReady, + mergeDeploymentDebugDiagnosticsIfNeeded(core.DefaultKubectlClient(), regCtx, "registry", "registry", "app=registry") + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrRegistryNotReady, err, fmt.Sprintf("registry not ready: %v", err), regCtx, ) - Error("Registry not ready") - logStructuredError(logger, wrappedErr, "Registry not ready") + core.Error("Registry not ready") + core.LogStructuredError(logger, wrappedErr, "Registry not ready") return wrappedErr } } - Info("Waiting for operator deployment to be available") + core.Info("Waiting for operator deployment to be available") if err := deps.WaitForDeploymentAvailable(logger, "mcp-runtime-operator-controller-manager", "mcp-runtime", "control-plane=controller-manager", deps.GetDeploymentTimeout()); err != nil { deps.PrintDeploymentDiagnostics("mcp-runtime-operator-controller-manager", "mcp-runtime", "control-plane=controller-manager") opCtx := map[string]any{ @@ -899,74 +768,74 @@ func verifySetup(logger *zap.Logger, usingExternalRegistry bool, deps SetupDeps) "selector": "control-plane=controller-manager", "component": "operator", } - mergeDeploymentDebugDiagnosticsIfNeeded(kubectlClient, opCtx, "mcp-runtime-operator-controller-manager", "mcp-runtime", "control-plane=controller-manager") - wrappedErr := wrapWithSentinelAndContext( - ErrOperatorNotReady, + mergeDeploymentDebugDiagnosticsIfNeeded(core.DefaultKubectlClient(), opCtx, "mcp-runtime-operator-controller-manager", "mcp-runtime", "control-plane=controller-manager") + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrOperatorNotReady, err, fmt.Sprintf("operator not ready: %v", err), opCtx, ) - 
Error("Operator not ready") - logStructuredError(logger, wrappedErr, "Operator not ready") + core.Error("Operator not ready") + core.LogStructuredError(logger, wrappedErr, "Operator not ready") return wrappedErr } - Info("Checking MCPServer CRD presence") + core.Info("Checking MCPServer CRD presence") if err := deps.CheckCRDInstalled("mcpservers.mcpruntime.org"); err != nil { crdName := "mcpservers.mcpruntime.org" crdCtx := map[string]any{"crd": crdName, "component": "crd-check"} - mergeCRDCheckDebugDiagnosticsIfNeeded(kubectlClient, crdCtx, crdName) - wrappedErr := wrapWithSentinelAndContext(ErrCRDCheckFailed, err, fmt.Sprintf("CRD check failed: %v", err), crdCtx) - Error("CRD check failed") - logStructuredError(logger, wrappedErr, "CRD check failed") + mergeCRDCheckDebugDiagnosticsIfNeeded(core.DefaultKubectlClient(), crdCtx, crdName) + wrappedErr := core.WrapWithSentinelAndContext(core.ErrCRDCheckFailed, err, fmt.Sprintf("CRD check failed: %v", err), crdCtx) + core.Error("CRD check failed") + core.LogStructuredError(logger, wrappedErr, "CRD check failed") return wrappedErr } - Success("Verification complete") + core.Success("Verification complete") return nil } -func getOperatorImage(ext *ExternalRegistryConfig) string { +func getOperatorImage(ext *config.ExternalRegistryConfig) string { tag := setupImageTag() // Check for explicit override first - if override := GetOperatorImageOverride(); override != "" { + if override := core.GetOperatorImageOverride(); override != "" { return override } if ext != nil && ext.URL != "" { return strings.TrimSuffix(ext.URL, "/") + "/mcp-runtime-operator:" + tag } - return fmt.Sprintf("%s/mcp-runtime-operator:%s", getPlatformRegistryURL(nil), tag) + return fmt.Sprintf("%s/mcp-runtime-operator:%s", registry.ResolvePlatformRegistryURL(nil), tag) } -func getGatewayProxyImage(ext *ExternalRegistryConfig) string { +func getGatewayProxyImage(ext *config.ExternalRegistryConfig) string { tag := setupImageTag() - if override := 
GetGatewayProxyImageOverride(); override != "" { + if override := core.GetGatewayProxyImageOverride(); override != "" { return override } if ext != nil && ext.URL != "" { return strings.TrimSuffix(ext.URL, "/") + "/" + defaultGatewayProxyRepository + ":" + tag } - return fmt.Sprintf("%s/%s:%s", getPlatformRegistryURL(nil), defaultGatewayProxyRepository, tag) + return fmt.Sprintf("%s/%s:%s", registry.ResolvePlatformRegistryURL(nil), defaultGatewayProxyRepository, tag) } -func analyticsImageFor(ext *ExternalRegistryConfig, repository string) string { +func analyticsImageFor(ext *config.ExternalRegistryConfig, repository string) string { tag := setupImageTag() if ext != nil && ext.URL != "" { return strings.TrimSuffix(ext.URL, "/") + "/" + repository + ":" + tag } - return fmt.Sprintf("%s/%s:%s", getPlatformRegistryURL(nil), repository, tag) + return fmt.Sprintf("%s/%s:%s", registry.ResolvePlatformRegistryURL(nil), repository, tag) } -func configureProvisionedRegistryEnv(ext *ExternalRegistryConfig, secretName string) error { - return configureProvisionedRegistryEnvWithKubectl(kubectlClient, ext, secretName) +func configureProvisionedRegistryEnv(ext *config.ExternalRegistryConfig, secretName string) error { + return configureProvisionedRegistryEnvWithKubectl(core.DefaultKubectlClient(), ext, secretName) } -func configureProvisionedRegistryEnvWithKubectl(kubectl KubectlRunner, ext *ExternalRegistryConfig, secretName string) error { +func configureProvisionedRegistryEnvWithKubectl(kubectl core.KubectlRunner, ext *config.ExternalRegistryConfig, secretName string) error { if ext == nil || ext.URL == "" { return nil } @@ -984,7 +853,7 @@ func configureProvisionedRegistryEnvWithKubectl(kubectl KubectlRunner, ext *Exte return err } // Create imagePullSecret in mcp-servers namespace for pod image pulls. 
- if err := ensureImagePullSecretWithKubectl(kubectl, NamespaceMCPServers, secretName, ext.URL, ext.Username, ext.Password); err != nil { + if err := ensureImagePullSecretWithKubectl(kubectl, core.NamespaceMCPServers, secretName, ext.URL, ext.Username, ext.Password); err != nil { return err } args = append(args, "PROVISIONED_REGISTRY_SECRET_NAME="+secretName) @@ -995,7 +864,7 @@ func configureProvisionedRegistryEnvWithKubectl(kubectl KubectlRunner, ext *Exte return kubectl.RunWithOutput(args, os.Stdout, os.Stderr) } -func ensureProvisionedRegistrySecretWithKubectl(kubectl KubectlRunner, name, username, password string) error { +func ensureProvisionedRegistrySecretWithKubectl(kubectl core.KubectlRunner, name, username, password string) error { var envData strings.Builder if username != "" { envData.WriteString("PROVISIONED_REGISTRY_USERNAME=") @@ -1015,7 +884,7 @@ func ensureProvisionedRegistrySecretWithKubectl(kubectl KubectlRunner, name, use createCmd, err := kubectl.CommandArgs([]string{ "create", "secret", "generic", name, "--from-env-file=-", - "-n", NamespaceMCPRuntime, + "-n", core.NamespaceMCPRuntime, "--dry-run=client", "-o", "yaml", }) @@ -1027,13 +896,13 @@ func ensureProvisionedRegistrySecretWithKubectl(kubectl KubectlRunner, name, use createCmd.SetStdout(&rendered) createCmd.SetStderr(os.Stderr) if err := createCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrRenderSecretManifestFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("render secret manifest: %v", err), - map[string]any{"secret_name": name, "namespace": NamespaceMCPRuntime, "component": "setup"}, + map[string]any{"secret_name": name, "namespace": core.NamespaceMCPRuntime, "component": "setup"}, ) - Error("Failed to render secret manifest") + core.Error("Failed to render secret manifest") // Note: logger not available in this helper, but error will be logged by caller return wrappedErr } @@ -1047,13 +916,13 @@ 
func ensureProvisionedRegistrySecretWithKubectl(kubectl KubectlRunner, name, use applyCmd.SetStdout(os.Stdout) applyCmd.SetStderr(os.Stderr) if err := applyCmd.Run(); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplySecretManifestFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplySecretManifestFailed, err, fmt.Sprintf("apply secret manifest: %v", err), - map[string]any{"secret_name": name, "namespace": NamespaceMCPRuntime, "component": "setup"}, + map[string]any{"secret_name": name, "namespace": core.NamespaceMCPRuntime, "component": "setup"}, ) - Error("Failed to apply secret manifest") + core.Error("Failed to apply secret manifest") // Note: logger not available in this helper, but error will be logged by caller return wrappedErr } @@ -1061,7 +930,7 @@ func ensureProvisionedRegistrySecretWithKubectl(kubectl KubectlRunner, name, use return nil } -func ensureImagePullSecretWithKubectl(kubectl KubectlRunner, namespace, name, registry, username, password string) error { +func ensureImagePullSecretWithKubectl(kubectl core.KubectlRunner, namespace, name, registry, username, password string) error { if username == "" && password == "" { return nil } @@ -1077,13 +946,13 @@ func ensureImagePullSecretWithKubectl(kubectl KubectlRunner, namespace, name, re } dockerCfgJSON, err := json.Marshal(dockerCfg) if err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrMarshalDockerConfigFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrMarshalDockerConfigFailed, err, fmt.Sprintf("marshal docker config: %v", err), map[string]any{"registry": registry, "namespace": namespace, "component": "setup"}, ) - Error("Failed to marshal docker config") + core.Error("Failed to marshal docker config") // Note: logger not available in this helper, but error will be logged by caller return wrappedErr } @@ -1108,13 +977,13 @@ data: applyCmd.SetStdout(os.Stdout) applyCmd.SetStderr(os.Stderr) if err := applyCmd.Run(); err != nil { - 
wrappedErr := wrapWithSentinelAndContext( - ErrApplyImagePullSecretFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyImagePullSecretFailed, err, fmt.Sprintf("apply imagePullSecret: %v", err), map[string]any{"secret_name": name, "namespace": namespace, "registry": registry, "component": "setup"}, ) - Error("Failed to apply image pull secret") + core.Error("Failed to apply image pull secret") // Note: logger not available in this helper, but error will be logged by caller return wrappedErr } @@ -1125,7 +994,7 @@ data: func buildOperatorImage(image string) error { target := "docker-build-operator-no-test" // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - cmd, err := execCommandWithValidators("make", []string{"-f", "Makefile.operator", target, "IMG=" + image}) + cmd, err := core.ExecCommandWithValidators("make", []string{"-f", "Makefile.operator", target, "IMG=" + image}) if err != nil { return err } @@ -1138,17 +1007,17 @@ func buildOperatorImage(image string) error { } func buildGatewayProxyImage(image string) error { - dockerfilePath, err := resolveRepoAssetPath(gatewayProxyDockerfilePath) + dockerfilePath, err := assetpath.ResolveRepoAssetPath(gatewayProxyDockerfilePath) if err != nil { return err } - buildContext, err := resolveRepoAssetPath(gatewayProxyBuildContext) + buildContext, err := assetpath.ResolveRepoAssetPath(gatewayProxyBuildContext) if err != nil { return err } // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
- cmd, err := execCommandWithValidators("docker", []string{ + cmd, err := core.ExecCommandWithValidators("docker", []string{ "build", "-f", dockerfilePath, "-t", image, @@ -1163,17 +1032,17 @@ func buildGatewayProxyImage(image string) error { } func buildAnalyticsImage(image, dockerfilePath, buildContext string) error { - resolvedDockerfilePath, err := resolveRepoAssetPath(dockerfilePath) + resolvedDockerfilePath, err := assetpath.ResolveRepoAssetPath(dockerfilePath) if err != nil { return err } - resolvedBuildContext, err := resolveRepoAssetPath(buildContext) + resolvedBuildContext, err := assetpath.ResolveRepoAssetPath(buildContext) if err != nil { return err } // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - cmd, err := execCommandWithValidators("docker", []string{ + cmd, err := core.ExecCommandWithValidators("docker", []string{ "build", "-f", resolvedDockerfilePath, "-t", image, @@ -1189,17 +1058,17 @@ func buildAnalyticsImage(image, dockerfilePath, buildContext string) error { func restartDeployment(name, namespace string) error { // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - return restartDeploymentWithKubectl(kubectlClient, name, namespace) + return restartDeploymentWithKubectl(core.DefaultKubectlClient(), name, namespace) } -func restartDeploymentWithKubectl(kubectl KubectlRunner, name, namespace string) error { +func restartDeploymentWithKubectl(kubectl core.KubectlRunner, name, namespace string) error { // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. return kubectl.RunWithOutput([]string{"rollout", "restart", "deployment/" + name, "-n", namespace}, os.Stdout, os.Stderr) } func pushOperatorImage(image string) error { // #nosec G204 -- image from internal build process or validated config. 
- cmd, err := execCommandWithValidators("docker", []string{"push", image}) + cmd, err := core.ExecCommandWithValidators("docker", []string{"push", image}) if err != nil { return err } @@ -1210,7 +1079,7 @@ func pushOperatorImage(image string) error { func pushGatewayProxyImage(image string) error { // #nosec G204 -- image from internal build process or validated config. - cmd, err := execCommandWithValidators("docker", []string{"push", image}) + cmd, err := core.ExecCommandWithValidators("docker", []string{"push", image}) if err != nil { return err } @@ -1221,7 +1090,7 @@ func pushGatewayProxyImage(image string) error { func pushAnalyticsImage(image string) error { // #nosec G204 -- image from internal build process or validated config. - cmd, err := execCommandWithValidators("docker", []string{"push", image}) + cmd, err := core.ExecCommandWithValidators("docker", []string{"push", image}) if err != nil { return err } @@ -1231,48 +1100,48 @@ func pushAnalyticsImage(image string) error { } func pushOperatorImageToInternalRegistry(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error { - mgr := DefaultRegistryManager(logger) + mgr := registry.DefaultRegistryManager(logger) if err := mgr.PushInCluster(sourceImage, targetImage, helperNamespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushImageInClusterFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushImageInClusterFailed, err, fmt.Sprintf("failed to push image in-cluster: %v", err), map[string]any{"source_image": sourceImage, "target_image": targetImage, "namespace": helperNamespace, "component": "setup"}, ) - Error("Failed to push image in-cluster") - logStructuredError(logger, wrappedErr, "Failed to push image in-cluster") + core.Error("Failed to push image in-cluster") + core.LogStructuredError(logger, wrappedErr, "Failed to push image in-cluster") return wrappedErr } return nil } func pushGatewayProxyImageToInternalRegistry(logger *zap.Logger, 
sourceImage, targetImage, helperNamespace string) error { - mgr := DefaultRegistryManager(logger) + mgr := registry.DefaultRegistryManager(logger) if err := mgr.PushInCluster(sourceImage, targetImage, helperNamespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushImageInClusterFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushImageInClusterFailed, err, fmt.Sprintf("failed to push image in-cluster: %v", err), map[string]any{"source_image": sourceImage, "target_image": targetImage, "namespace": helperNamespace, "component": "gateway-proxy"}, ) - Error("Failed to push image in-cluster") - logStructuredError(logger, wrappedErr, "Failed to push image in-cluster") + core.Error("Failed to push image in-cluster") + core.LogStructuredError(logger, wrappedErr, "Failed to push image in-cluster") return wrappedErr } return nil } func pushAnalyticsImageToInternalRegistry(logger *zap.Logger, sourceImage, targetImage, helperNamespace string) error { - mgr := DefaultRegistryManager(logger) + mgr := registry.DefaultRegistryManager(logger) if err := mgr.PushInCluster(sourceImage, targetImage, helperNamespace); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrPushImageInClusterFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrPushImageInClusterFailed, err, fmt.Sprintf("failed to push image in-cluster: %v", err), map[string]any{"source_image": sourceImage, "target_image": targetImage, "namespace": helperNamespace, "component": "analytics"}, ) - Error("Failed to push image in-cluster") - logStructuredError(logger, wrappedErr, "Failed to push image in-cluster") + core.Error("Failed to push image in-cluster") + core.LogStructuredError(logger, wrappedErr, "Failed to push image in-cluster") return wrappedErr } return nil @@ -1280,21 +1149,21 @@ func pushAnalyticsImageToInternalRegistry(logger *zap.Logger, sourceImage, targe func checkCRDInstalled(name string) error { // #nosec G204 -- name is hardcoded CRD identifier 
from internal code. - return checkCRDInstalledWithKubectl(kubectlClient, name) + return checkCRDInstalledWithKubectl(core.DefaultKubectlClient(), name) } -func checkCRDInstalledWithKubectl(kubectl KubectlRunner, name string) error { +func checkCRDInstalledWithKubectl(kubectl core.KubectlRunner, name string) error { // #nosec G204 -- name is hardcoded CRD identifier from internal code. return kubectl.RunWithOutput([]string{"get", "crd", name}, os.Stdout, os.Stderr) } // waitForDeploymentAvailable polls a deployment until it has at least one available replica or times out. func waitForDeploymentAvailable(logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error { - return waitForDeploymentAvailableWithKubectl(kubectlClient, logger, name, namespace, selector, timeout) + return waitForDeploymentAvailableWithKubectl(core.DefaultKubectlClient(), logger, name, namespace, selector, timeout) } // waitForDeploymentAvailableWithKubectl polls a deployment until it has at least one available replica or times out. 
-func waitForDeploymentAvailableWithKubectl(kubectl KubectlRunner, logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error { +func waitForDeploymentAvailableWithKubectl(kubectl core.KubectlRunner, logger *zap.Logger, name, namespace, selector string, timeout time.Duration) error { deadline := time.Now().Add(timeout) lastLog := time.Time{} for { @@ -1313,7 +1182,7 @@ func waitForDeploymentAvailableWithKubectl(kubectl KubectlRunner, logger *zap.Lo } } if time.Since(lastLog) > 10*time.Second { - Info(fmt.Sprintf("Still waiting for deployment/%s in %s (selector %s, timeout %s)", name, namespace, selector, timeout.Round(time.Second))) + core.Info(fmt.Sprintf("Still waiting for deployment/%s in %s (selector %s, timeout %s)", name, namespace, selector, timeout.Round(time.Second))) lastLog = time.Now() } if time.Now().After(deadline) { @@ -1326,10 +1195,10 @@ func waitForDeploymentAvailableWithKubectl(kubectl KubectlRunner, logger *zap.Lo "component": "deployment-wait", } mergeDeploymentDebugDiagnosticsIfNeeded(kubectl, ctx, name, namespace, selector) - wrappedErr := wrapWithSentinelAndContext(ErrDeploymentTimeout, cause, msg, ctx) - Error("Deployment timeout") + wrappedErr := core.WrapWithSentinelAndContext(core.ErrDeploymentTimeout, cause, msg, ctx) + core.Error("Deployment timeout") if logger != nil { - logStructuredError(logger, wrappedErr, "Deployment timeout") + core.LogStructuredError(logger, wrappedErr, "Deployment timeout") } return wrappedErr } @@ -1339,20 +1208,20 @@ func waitForDeploymentAvailableWithKubectl(kubectl KubectlRunner, logger *zap.Lo // printDeploymentDiagnostics prints a quick status of pods for a deployment selector to help users triage readiness issues. 
func printDeploymentDiagnostics(deploy, namespace, selector string) { - printDeploymentDiagnosticsWithKubectl(kubectlClient, deploy, namespace, selector) + printDeploymentDiagnosticsWithKubectl(core.DefaultKubectlClient(), deploy, namespace, selector) } // printDeploymentDiagnosticsWithKubectl prints a quick status of pods for a deployment selector. -func printDeploymentDiagnosticsWithKubectl(kubectl KubectlRunner, deploy, namespace, selector string) { - Warn(fmt.Sprintf("Deployment %s in %s is not ready. Showing pod statuses:", deploy, namespace)) +func printDeploymentDiagnosticsWithKubectl(kubectl core.KubectlRunner, deploy, namespace, selector string) { + core.Warn(fmt.Sprintf("Deployment %s in %s is not ready. Showing pod statuses:", deploy, namespace)) // #nosec G204 -- namespace/selector from internal diagnostics, not user input. _ = kubectl.RunWithOutput([]string{"get", "pods", "-n", namespace, "-l", selector, "-o", "wide"}, os.Stdout, os.Stderr) } // mergeDeploymentDebugDiagnosticsIfNeeded fetches describe/events/pods from the API when --debug is set // and attaches a bounded blob to the errx context (cluster-backed failures, not local validation). -func mergeDeploymentDebugDiagnosticsIfNeeded(kubectl KubectlRunner, m map[string]any, deployName, namespace, selector string) { - if !IsDebugMode() { +func mergeDeploymentDebugDiagnosticsIfNeeded(kubectl core.KubectlRunner, m map[string]any, deployName, namespace, selector string) { + if !core.IsDebugMode() { return } if d := buildDeploymentWaitDebugDetail(kubectl, deployName, namespace, selector); d != "" { @@ -1361,7 +1230,7 @@ func mergeDeploymentDebugDiagnosticsIfNeeded(kubectl KubectlRunner, m map[string } // buildDeploymentWaitDebugDetail returns kubectl text for a stuck or timed-out deployment wait. 
-func buildDeploymentWaitDebugDetail(kubectl KubectlRunner, deployName, namespace, selector string) string { +func buildDeploymentWaitDebugDetail(kubectl core.KubectlRunner, deployName, namespace, selector string) string { var b strings.Builder b.WriteString(fmt.Sprintf("---- describe deployment %s\n", deployName)) // #nosec G204 -- deploy/namespace/selector are internal setup identifiers, not user shell input. @@ -1392,7 +1261,7 @@ func buildDeploymentWaitDebugDetail(kubectl KubectlRunner, deployName, namespace } // buildNamespacedResourceDebugDetail returns describe, pods, and events for a namespaced object (e.g. StatefulSet, Job). -func buildNamespacedResourceDebugDetail(kubectl KubectlRunner, kind, name, namespace string) string { +func buildNamespacedResourceDebugDetail(kubectl core.KubectlRunner, kind, name, namespace string) string { var b strings.Builder b.WriteString(fmt.Sprintf("---- describe %s %s\n", kind, name)) // #nosec G204 -- kind/name/namespace are internal resource identifiers, not user shell input. @@ -1423,7 +1292,7 @@ func buildNamespacedResourceDebugDetail(kubectl KubectlRunner, kind, name, names } // buildCRDCheckDebugDetail returns CRD and api-resources text when a CRD presence check fails. -func buildCRDCheckDebugDetail(kubectl KubectlRunner, crdName string) string { +func buildCRDCheckDebugDetail(kubectl core.KubectlRunner, crdName string) string { var b strings.Builder b.WriteString("---- get crd\n") // #nosec G204 -- crdName is a hardcoded internal API identity. 
@@ -1445,8 +1314,8 @@ func buildCRDCheckDebugDetail(kubectl KubectlRunner, crdName string) string { return b.String() } -func mergeCRDCheckDebugDiagnosticsIfNeeded(kubectl KubectlRunner, m map[string]any, crdName string) { - if !IsDebugMode() { +func mergeCRDCheckDebugDiagnosticsIfNeeded(kubectl core.KubectlRunner, m map[string]any, crdName string) { + if !core.IsDebugMode() { return } if d := buildCRDCheckDebugDetail(kubectl, crdName); d != "" { @@ -1457,60 +1326,60 @@ func mergeCRDCheckDebugDiagnosticsIfNeeded(kubectl KubectlRunner, m map[string]a // deployOperatorManifests deploys operator manifests without requiring kustomize or controller-gen. // It applies CRD, RBAC, and manager manifests directly, replacing the image name in the process. func deployOperatorManifests(logger *zap.Logger, operatorImage, gatewayProxyImage string, operatorArgs []string) error { - return deployOperatorManifestsWithKubectl(kubectlClient, logger, operatorImage, gatewayProxyImage, operatorArgs) + return deployOperatorManifestsWithKubectl(core.DefaultKubectlClient(), logger, operatorImage, gatewayProxyImage, operatorArgs) } // deployOperatorManifestsWithKubectl deploys operator manifests without requiring kustomize or controller-gen. // It applies CRD, RBAC, and manager manifests directly, replacing the image name and injecting operator args/env. -func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logger, operatorImage, gatewayProxyImage string, operatorArgs []string) error { +func deployOperatorManifestsWithKubectl(kubectl core.KubectlRunner, logger *zap.Logger, operatorImage, gatewayProxyImage string, operatorArgs []string) error { // Step 1: Apply CRD - Info("Applying CRD manifests") + core.Info("Applying CRD manifests") // #nosec G204 -- fixed directory path from repository. 
if err := kubectl.RunWithOutput([]string{"apply", "--validate=false", "-f", "config/crd/bases"}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinel(ErrApplyCRDFailed, err, fmt.Sprintf("failed to apply CRD: %v", err)) - Error("Failed to apply CRD") + wrappedErr := core.WrapWithSentinel(core.ErrApplyCRDFailed, err, fmt.Sprintf("failed to apply CRD: %v", err)) + core.Error("Failed to apply CRD") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply CRD") + core.LogStructuredError(logger, wrappedErr, "Failed to apply CRD") } return wrappedErr } // Step 2: Apply RBAC (ServiceAccount, Role, RoleBinding) - Info("Applying RBAC manifests") - if err := ensureNamespace(NamespaceMCPRuntime); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrEnsureOperatorNamespaceFailed, + core.Info("Applying RBAC manifests") + if err := kube.EnsureNamespace(core.DefaultKubectlClient().CommandArgs, core.NamespaceMCPRuntime); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrEnsureOperatorNamespaceFailed, err, fmt.Sprintf("failed to ensure operator namespace: %v", err), - map[string]any{"namespace": NamespaceMCPRuntime, "component": "setup"}, + map[string]any{"namespace": core.NamespaceMCPRuntime, "component": "setup"}, ) - Error("Failed to ensure operator namespace") + core.Error("Failed to ensure operator namespace") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to ensure operator namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to ensure operator namespace") } return wrappedErr } // #nosec G204 -- fixed kustomize path from repository. 
if err := kubectl.RunWithOutput([]string{"apply", "-k", "config/rbac/"}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinel(ErrApplyRBACFailed, err, fmt.Sprintf("failed to apply RBAC: %v", err)) - Error("Failed to apply RBAC") + wrappedErr := core.WrapWithSentinel(core.ErrApplyRBACFailed, err, fmt.Sprintf("failed to apply RBAC: %v", err)) + core.Error("Failed to apply RBAC") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply RBAC") + core.LogStructuredError(logger, wrappedErr, "Failed to apply RBAC") } return wrappedErr } // Step 3: Apply manager deployment with structured image replacement - Info("Applying operator deployment") + core.Info("Applying operator deployment") // Read manager.yaml and apply structured mutations managerYAML, err := os.ReadFile("config/manager/manager.yaml") if err != nil { - wrappedErr := wrapWithSentinel(ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to read manager.yaml: %v", err)) - Error("Failed to read manager.yaml") + wrappedErr := core.WrapWithSentinel(core.ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to read manager.yaml: %v", err)) + core.Error("Failed to read manager.yaml") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to read manager.yaml") + core.LogStructuredError(logger, wrappedErr, "Failed to read manager.yaml") } return wrappedErr } @@ -1518,20 +1387,20 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge // Use structured manifest mutation instead of regex mutator, err := manifest.NewMutator(managerYAML) if err != nil { - wrappedErr := wrapWithSentinel(ErrParseManagerYAMLFailed, err, fmt.Sprintf("failed to parse manager.yaml: %v", err)) - Error("Failed to parse manager.yaml") + wrappedErr := core.WrapWithSentinel(core.ErrParseManagerYAMLFailed, err, fmt.Sprintf("failed to parse manager.yaml: %v", err)) + core.Error("Failed to parse manager.yaml") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to 
parse manager.yaml") + core.LogStructuredError(logger, wrappedErr, "Failed to parse manager.yaml") } return wrappedErr } // Set the operator image - if err := mutator.SetDeploymentImage(OperatorDeploymentName, OperatorManagerContainerName, operatorImage); err != nil { - wrappedErr := wrapWithSentinel(ErrSetOperatorImageFailed, err, fmt.Sprintf("failed to set operator image: %v", err)) - Error("Failed to set operator image") + if err := mutator.SetDeploymentImage(core.OperatorDeploymentName, core.OperatorManagerContainerName, operatorImage); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrSetOperatorImageFailed, err, fmt.Sprintf("failed to set operator image: %v", err)) + core.Error("Failed to set operator image") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to set operator image") + core.LogStructuredError(logger, wrappedErr, "Failed to set operator image") } return wrappedErr } @@ -1539,11 +1408,11 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge // Set image pull policy based on image pullPolicy := operatorImagePullPolicy(operatorImage) if pullPolicy != "" { - if err := mutator.SetDeploymentImagePullPolicy(OperatorDeploymentName, OperatorManagerContainerName, pullPolicy); err != nil { - wrappedErr := wrapWithSentinel(ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to set operator image pull policy: %v", err)) - Error("Failed to set operator image pull policy") + if err := mutator.SetDeploymentImagePullPolicy(core.OperatorDeploymentName, core.OperatorManagerContainerName, pullPolicy); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to set operator image pull policy: %v", err)) + core.Error("Failed to set operator image pull policy") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to set operator image pull policy") + core.LogStructuredError(logger, wrappedErr, "Failed to set operator image pull policy") } return 
wrappedErr } @@ -1551,11 +1420,11 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge // Inject operator args if provided if len(operatorArgs) > 0 { - if err := mutator.MergeDeploymentArgs(OperatorDeploymentName, OperatorManagerContainerName, operatorArgs); err != nil { - wrappedErr := wrapWithSentinel(ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to merge operator args: %v", err)) - Error("Failed to merge operator args") + if err := mutator.MergeDeploymentArgs(core.OperatorDeploymentName, core.OperatorManagerContainerName, operatorArgs); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to merge operator args: %v", err)) + core.Error("Failed to merge operator args") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to merge operator args") + core.LogStructuredError(logger, wrappedErr, "Failed to merge operator args") } return wrappedErr } @@ -1567,11 +1436,11 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge for _, ev := range envVars { envMap[ev.Name] = ev.Value } - if err := mutator.MergeDeploymentEnv(OperatorDeploymentName, OperatorManagerContainerName, envMap); err != nil { - wrappedErr := wrapWithSentinel(ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to merge operator env vars: %v", err)) - Error("Failed to merge operator env vars") + if err := mutator.MergeDeploymentEnv(core.OperatorDeploymentName, core.OperatorManagerContainerName, envMap); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrMutateManagerYAMLFailed, err, fmt.Sprintf("failed to merge operator env vars: %v", err)) + core.Error("Failed to merge operator env vars") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to merge operator env vars") + core.LogStructuredError(logger, wrappedErr, "Failed to merge operator env vars") } return wrappedErr } @@ -1580,10 +1449,10 @@ func deployOperatorManifestsWithKubectl(kubectl 
KubectlRunner, logger *zap.Logge // Render the mutated manifest mutatedYAML, err := mutator.ToYAML() if err != nil { - wrappedErr := wrapWithSentinel(ErrRenderManagerYAMLFailed, err, fmt.Sprintf("failed to render mutated manifest: %v", err)) - Error("Failed to render mutated manifest") + wrappedErr := core.WrapWithSentinel(core.ErrRenderManagerYAMLFailed, err, fmt.Sprintf("failed to render mutated manifest: %v", err)) + core.Error("Failed to render mutated manifest") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to render mutated manifest") + core.LogStructuredError(logger, wrappedErr, "Failed to render mutated manifest") } return wrappedErr } @@ -1591,10 +1460,10 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge // Write to temp file under the working directory so kubectl path validation passes. tmpFile, err := os.CreateTemp(".", "manager-*.yaml") if err != nil { - wrappedErr := wrapWithSentinel(ErrCreateTempFileFailed, err, fmt.Sprintf("failed to create temp file: %v", err)) - Error("Failed to create temp file") + wrappedErr := core.WrapWithSentinel(core.ErrCreateTempFileFailed, err, fmt.Sprintf("failed to create temp file: %v", err)) + core.Error("Failed to create temp file") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to create temp file") + core.LogStructuredError(logger, wrappedErr, "Failed to create temp file") } return wrappedErr } @@ -1602,93 +1471,93 @@ func deployOperatorManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logge if _, err := tmpFile.Write(mutatedYAML); err != nil { if closeErr := tmpFile.Close(); closeErr != nil { - wrappedErr := wrapWithSentinel(ErrCloseTempFileFailed, errors.Join(err, closeErr), fmt.Sprintf("failed to close temp file after write error: %v", closeErr)) - Error("Failed to close temp file") + wrappedErr := core.WrapWithSentinel(core.ErrCloseTempFileFailed, errors.Join(err, closeErr), fmt.Sprintf("failed to close temp file after write error: 
%v", closeErr)) + core.Error("Failed to close temp file") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to close temp file") + core.LogStructuredError(logger, wrappedErr, "Failed to close temp file") } return wrappedErr } - wrappedErr := wrapWithSentinel(ErrWriteTempFileFailed, err, fmt.Sprintf("failed to write temp file: %v", err)) - Error("Failed to write temp file") + wrappedErr := core.WrapWithSentinel(core.ErrWriteTempFileFailed, err, fmt.Sprintf("failed to write temp file: %v", err)) + core.Error("Failed to write temp file") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to write temp file") + core.LogStructuredError(logger, wrappedErr, "Failed to write temp file") } return wrappedErr } if err := tmpFile.Close(); err != nil { - wrappedErr := wrapWithSentinel(ErrCloseTempFileFailed, err, fmt.Sprintf("failed to close temp file: %v", err)) - Error("Failed to close temp file") + wrappedErr := core.WrapWithSentinel(core.ErrCloseTempFileFailed, err, fmt.Sprintf("failed to close temp file: %v", err)) + core.Error("Failed to close temp file") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to close temp file") + core.LogStructuredError(logger, wrappedErr, "Failed to close temp file") } return wrappedErr } // Delete existing deployment to avoid immutable selector conflicts on reapply. // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. - _ = kubectl.Run([]string{"delete", "deployment/" + OperatorDeploymentName, "-n", NamespaceMCPRuntime, "--ignore-not-found"}) + _ = kubectl.Run([]string{"delete", "deployment/" + core.OperatorDeploymentName, "-n", core.NamespaceMCPRuntime, "--ignore-not-found"}) // #nosec G204 -- command arguments are built from trusted inputs and fixed verbs. 
if err := kubectl.RunWithOutput([]string{"apply", "-f", tmpFile.Name()}, os.Stdout, os.Stderr); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplyManagerDeploymentFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyManagerDeploymentFailed, err, fmt.Sprintf("failed to apply manager deployment: %v", err), - map[string]any{"operator_image": operatorImage, "namespace": NamespaceMCPRuntime, "component": "setup"}, + map[string]any{"operator_image": operatorImage, "namespace": core.NamespaceMCPRuntime, "component": "setup"}, ) - Error("Failed to apply manager deployment") + core.Error("Failed to apply manager deployment") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply manager deployment") + core.LogStructuredError(logger, wrappedErr, "Failed to apply manager deployment") } return wrappedErr } - Success("Operator manifests deployed successfully") + core.Success("Operator manifests deployed successfully") return nil } // mcpSentinelDependencyRolloutFailed wraps early mcp-sentinel storage/messaging rollouts; diagnostics are attached only in --debug. 
-func mcpSentinelDependencyRolloutFailed(kubectl KubectlRunner, err error, kind, name, namespace, phase string) error { +func mcpSentinelDependencyRolloutFailed(kubectl core.KubectlRunner, err error, kind, name, namespace, phase string) error { ctx := map[string]any{ "component": "mcp-sentinel", "phase": phase, "resource": fmt.Sprintf("%s/%s", kind, name), "namespace": namespace, } - if IsDebugMode() { + if core.IsDebugMode() { if diag := buildNamespacedResourceDebugDetail(kubectl, kind, name, namespace); diag != "" { ctx["diagnostics"] = trimDiagnosticsString(diag) } } - return wrapWithSentinelAndContext(ErrOperatorDeploymentFailed, err, + return core.WrapWithSentinelAndContext(core.ErrOperatorDeploymentFailed, err, fmt.Sprintf("mcp-sentinel %s: %s/%s: %v", phase, kind, name, err), ctx) } // mcpSentinelDependencyJobFailed wraps the clickhouse init job; diagnostics are attached only in --debug. -func mcpSentinelDependencyJobFailed(kubectl KubectlRunner, err error, name, namespace, phase string) error { +func mcpSentinelDependencyJobFailed(kubectl core.KubectlRunner, err error, name, namespace, phase string) error { ctx := map[string]any{ "component": "mcp-sentinel", "phase": phase, "resource": "job/" + name, "namespace": namespace, } - if IsDebugMode() { + if core.IsDebugMode() { if diag := buildNamespacedResourceDebugDetail(kubectl, "job", name, namespace); diag != "" { ctx["diagnostics"] = trimDiagnosticsString(diag) } } - return wrapWithSentinelAndContext(ErrOperatorDeploymentFailed, err, + return core.WrapWithSentinelAndContext(core.ErrOperatorDeploymentFailed, err, fmt.Sprintf("mcp-sentinel %s: job/%s: %v", phase, name, err), ctx) } func deployAnalyticsManifests(logger *zap.Logger, images AnalyticsImageSet, storageMode string) error { - return deployAnalyticsManifestsWithKubectl(kubectlClient, logger, images, storageMode) + return deployAnalyticsManifestsWithKubectl(core.DefaultKubectlClient(), logger, images, storageMode) } -func 
deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logger, images AnalyticsImageSet, storageMode string) error { +func deployAnalyticsManifestsWithKubectl(kubectl core.KubectlRunner, logger *zap.Logger, images AnalyticsImageSet, storageMode string) error { rolloutTimeout := analyticsRolloutTimeoutString() - Info("Applying mcp-sentinel namespace and config") + core.Info("Applying mcp-sentinel namespace and config") manifests := []string{ "k8s/00-namespace.yaml", "k8s/01-config.yaml", @@ -1699,12 +1568,12 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg } } - Info("Applying mcp-sentinel managed secrets") + core.Info("Applying mcp-sentinel managed secrets") secretManifest, err := renderAnalyticsSecretManifest(kubectl) if err != nil { return err } - if err := applyManifestContent(kubectl, secretManifest); err != nil { + if err := kube.ApplyManifestContent(kubectl.CommandArgs, secretManifest); err != nil { return err } @@ -1716,13 +1585,13 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg clickhouseManifest := "k8s/03-clickhouse.yaml" kafkaManifest := "k8s/05-kafka.yaml" postgresManifest := "k8s/20-postgres.yaml" - if storageMode == StorageModeHostpath { + if storageMode == setupplan.StorageModeHostpath { clickhouseManifest = "k8s/03-clickhouse-hostpath.yaml" kafkaManifest = "k8s/05-kafka-hostpath.yaml" postgresManifest = "k8s/20-postgres-hostpath.yaml" } - Info("Applying analytics storage and messaging components") + core.Info("Applying analytics storage and messaging components") for _, manifest := range []string{ clickhouseManifest, kafkaManifest, @@ -1732,25 +1601,25 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg } } - if err := waitForRolloutStatusWithKubectl(kubectl, "statefulset", "clickhouse", defaultAnalyticsNamespace, rolloutTimeout); err != nil { - return mcpSentinelDependencyRolloutFailed(kubectl, err, "statefulset", "clickhouse", 
defaultAnalyticsNamespace, "storage (clickhouse)") + if err := waitForRolloutStatusWithKubectl(kubectl, "statefulset", "clickhouse", core.DefaultAnalyticsNamespace, rolloutTimeout); err != nil { + return mcpSentinelDependencyRolloutFailed(kubectl, err, "statefulset", "clickhouse", core.DefaultAnalyticsNamespace, "storage (clickhouse)") } - if err := waitForRolloutStatusWithKubectl(kubectl, "deployment", "zookeeper", defaultAnalyticsNamespace, rolloutTimeout); err != nil { - return mcpSentinelDependencyRolloutFailed(kubectl, err, "deployment", "zookeeper", defaultAnalyticsNamespace, "messaging (zookeeper)") + if err := waitForRolloutStatusWithKubectl(kubectl, "deployment", "zookeeper", core.DefaultAnalyticsNamespace, rolloutTimeout); err != nil { + return mcpSentinelDependencyRolloutFailed(kubectl, err, "deployment", "zookeeper", core.DefaultAnalyticsNamespace, "messaging (zookeeper)") } - if err := waitForRolloutStatusWithKubectl(kubectl, "statefulset", "kafka", defaultAnalyticsNamespace, rolloutTimeout); err != nil { - return mcpSentinelDependencyRolloutFailed(kubectl, err, "statefulset", "kafka", defaultAnalyticsNamespace, "messaging (kafka)") + if err := waitForRolloutStatusWithKubectl(kubectl, "statefulset", "kafka", core.DefaultAnalyticsNamespace, rolloutTimeout); err != nil { + return mcpSentinelDependencyRolloutFailed(kubectl, err, "statefulset", "kafka", core.DefaultAnalyticsNamespace, "messaging (kafka)") } - Info("Initializing ClickHouse schema") + core.Info("Initializing ClickHouse schema") if err := applyRenderedManifest(kubectl, "k8s/04-clickhouse-init.yaml", images, imagePullSecretName); err != nil { return err } - if err := waitForJobCompletionWithKubectl(kubectl, "clickhouse-init", defaultAnalyticsNamespace, rolloutTimeout); err != nil { - return mcpSentinelDependencyJobFailed(kubectl, err, "clickhouse-init", defaultAnalyticsNamespace, "clickhouse init schema") + if err := waitForJobCompletionWithKubectl(kubectl, "clickhouse-init", 
core.DefaultAnalyticsNamespace, rolloutTimeout); err != nil { + return mcpSentinelDependencyJobFailed(kubectl, err, "clickhouse-init", core.DefaultAnalyticsNamespace, "clickhouse init schema") } - Info("Applying analytics services") + core.Info("Applying analytics services") for _, manifest := range []string{ postgresManifest, "k8s/06-ingest.yaml", @@ -1776,7 +1645,7 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg return err } - Info(fmt.Sprintf("Waiting for mcp-sentinel workload rollouts (per-resource timeout %s; override with MCP_DEPLOYMENT_TIMEOUT)", rolloutTimeout)) + core.Info(fmt.Sprintf("Waiting for mcp-sentinel workload rollouts (per-resource timeout %s; override with MCP_DEPLOYMENT_TIMEOUT)", rolloutTimeout)) targets := []struct{ kind, name string }{ {kind: "statefulset", name: "mcp-sentinel-postgres"}, {kind: "deployment", name: "mcp-sentinel-ingest"}, @@ -1793,7 +1662,7 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg var rolloutFailures []string var failedForDebug []analyticsFailedRollout for _, target := range targets { - rolloutLog, err := runRolloutWithOptionalDebugCapture(kubectl, target.kind, target.name, defaultAnalyticsNamespace, rolloutTimeout) + rolloutLog, err := runRolloutWithOptionalDebugCapture(kubectl, target.kind, target.name, core.DefaultAnalyticsNamespace, rolloutTimeout) if err != nil { rolloutFailures = append(rolloutFailures, fmt.Sprintf("%s/%s: %v", target.kind, target.name, err)) failedForDebug = append(failedForDebug, analyticsFailedRollout{ @@ -1802,7 +1671,7 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg } } if len(rolloutFailures) == 0 { - Success("mcp-sentinel manifests deployed successfully") + core.Success("mcp-sentinel manifests deployed successfully") return nil } @@ -1811,12 +1680,12 @@ func deployAnalyticsManifestsWithKubectl(kubectl KubectlRunner, logger *zap.Logg cause := errors.New(summary) msg := 
fmt.Sprintf("analytics components failed to roll out: %s", summary) ctx := map[string]any{"component": "mcp-sentinel", "rollout_failures": summary} - if IsDebugMode() { + if core.IsDebugMode() { if diag := buildAnalyticsRolloutDebugDetail(kubectl, failedForDebug); diag != "" { ctx["diagnostics"] = trimDiagnosticsString(diag) } } - return wrapWithSentinelAndContext(ErrOperatorDeploymentFailed, cause, msg, ctx) + return core.WrapWithSentinelAndContext(core.ErrOperatorDeploymentFailed, cause, msg, ctx) } func trimDiagnosticsString(s string) string { @@ -1829,13 +1698,13 @@ func trimDiagnosticsString(s string) string { // runRolloutWithOptionalDebugCapture runs kubectl rollout status, teeing output to a buffer // in --debug mode so it can be attached to the structured error. -func runRolloutWithOptionalDebugCapture(kubectl KubectlRunner, kind, name, namespace, timeout string) (capture string, err error) { +func runRolloutWithOptionalDebugCapture(kubectl core.KubectlRunner, kind, name, namespace, timeout string) (capture string, err error) { args := []string{ "rollout", "status", fmt.Sprintf("%s/%s", kind, name), "-n", namespace, "--timeout=" + timeout, } - if !IsDebugMode() { + if !core.IsDebugMode() { return "", kubectl.RunWithOutput(args, os.Stdout, os.Stderr) } var buf bytes.Buffer @@ -1844,7 +1713,7 @@ func runRolloutWithOptionalDebugCapture(kubectl KubectlRunner, kind, name, names return buf.String(), err } -func kubectlText(kubectl KubectlRunner, args []string) (string, error) { +func kubectlText(kubectl core.KubectlRunner, args []string) (string, error) { cmd, err := kubectl.CommandArgs(args) if err != nil { return "", err @@ -1859,7 +1728,7 @@ type analyticsFailedRollout struct { } // buildAnalyticsRolloutDebugDetail collects kubectl output for mcp-sentinel (describe + get) when --debug is set. 
-func buildAnalyticsRolloutDebugDetail(kubectl KubectlRunner, failed []analyticsFailedRollout) string { +func buildAnalyticsRolloutDebugDetail(kubectl core.KubectlRunner, failed []analyticsFailedRollout) string { var b strings.Builder for _, w := range failed { if strings.TrimSpace(w.rolloutLog) != "" { @@ -1868,7 +1737,7 @@ func buildAnalyticsRolloutDebugDetail(kubectl KubectlRunner, failed []analyticsF } b.WriteString(fmt.Sprintf("---- describe %s %s\n", w.kind, w.name)) out, err := kubectlText(kubectl, []string{ - "describe", w.kind, w.name, "-n", defaultAnalyticsNamespace, "--request-timeout=30s", + "describe", w.kind, w.name, "-n", core.DefaultAnalyticsNamespace, "--request-timeout=30s", }) if err != nil { b.WriteString(fmt.Sprintf("error: %v\n", err)) @@ -1877,14 +1746,14 @@ func buildAnalyticsRolloutDebugDetail(kubectl KubectlRunner, failed []analyticsF b.WriteString(out) } b.WriteString("---- get pods (wide)\n") - if out, err := kubectlText(kubectl, []string{"get", "pods", "-n", defaultAnalyticsNamespace, "-o", "wide", "--request-timeout=30s"}); err != nil { + if out, err := kubectlText(kubectl, []string{"get", "pods", "-n", core.DefaultAnalyticsNamespace, "-o", "wide", "--request-timeout=30s"}); err != nil { b.WriteString(fmt.Sprintf("error: %v\n", err)) } else { b.WriteString(out) } b.WriteString("---- get events (sorted)\n") if out, err := kubectlText(kubectl, []string{ - "get", "events", "-n", defaultAnalyticsNamespace, "--sort-by", ".lastTimestamp", "--request-timeout=30s", + "get", "events", "-n", core.DefaultAnalyticsNamespace, "--sort-by", ".lastTimestamp", "--request-timeout=30s", }); err != nil { b.WriteString(fmt.Sprintf("error: %v\n", err)) } else { @@ -1893,44 +1762,34 @@ func buildAnalyticsRolloutDebugDetail(kubectl KubectlRunner, failed []analyticsF return b.String() } -func applyRenderedManifest(kubectl KubectlRunner, manifestPath string, images AnalyticsImageSet, imagePullSecretName string) error { - resolvedManifestPath, err := 
resolveRepoAssetPath(manifestPath) +func applyRenderedManifest(kubectl core.KubectlRunner, manifestPath string, images AnalyticsImageSet, imagePullSecretName string) error { + resolvedManifestPath, err := assetpath.ResolveRepoAssetPath(manifestPath) if err != nil { - return wrapWithSentinel(ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to resolve manifest %s: %v", manifestPath, err)) + return core.WrapWithSentinel(core.ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to resolve manifest %s: %v", manifestPath, err)) } - content, err := readFileAtPath(resolvedManifestPath) + content, err := kube.ReadFileAtPath(resolvedManifestPath) if err != nil { - return wrapWithSentinel(ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to read manifest %s: %v", resolvedManifestPath, err)) + return core.WrapWithSentinel(core.ErrReadManagerYAMLFailed, err, fmt.Sprintf("failed to read manifest %s: %v", resolvedManifestPath, err)) } rendered, err := renderAnalyticsManifest(string(content), images, imagePullSecretName) if err != nil { return fmt.Errorf("render manifest %s: %w", manifestPath, err) } - return applyManifestContent(kubectl, rendered) + return kube.ApplyManifestContent(kubectl.CommandArgs, rendered) } -func applyManifestContent(kubectl KubectlRunner, manifest string) error { - return applyManifestContentWithNamespace(kubectl, manifest, "") -} - -func applyManifestContentWithNamespace(kubectl KubectlRunner, manifest, namespace string) error { - args := []string{"apply", "-f", "-"} - if strings.TrimSpace(namespace) != "" { - args = append(args, "-n", namespace) +func applyPlatformIngressIfConfigured(kubectl core.KubectlRunner) error { + host := strings.TrimSpace(core.GetPlatformIngressHost()) + if host == "" { + return nil } - applyCmd, err := kubectl.CommandArgs(args) - if err != nil { - return err + manifest := ingressmanifest.RenderPlatformUIIngress(host, core.GetRegistryClusterIssuerName(), core.DefaultAnalyticsNamespace) + core.Info(fmt.Sprintf("Applying platform 
UI ingress for %s", host)) + if err := kube.ApplyManifestContent(kubectl.CommandArgs, manifest); err != nil { + return fmt.Errorf("apply platform UI ingress: %w", err) } - applyCmd.SetStdin(strings.NewReader(manifest)) - applyCmd.SetStdout(os.Stdout) - applyCmd.SetStderr(os.Stderr) - return applyCmd.Run() -} - -func ApplyManifestContentWithNamespace(kubectl KubectlRunner, manifest, namespace string) error { - return applyManifestContentWithNamespace(kubectl, manifest, namespace) + return nil } func renderAnalyticsManifest(content string, images AnalyticsImageSet, imagePullSecretName string) (string, error) { @@ -1992,59 +1851,59 @@ func renderAnalyticsManifest(content string, images AnalyticsImageSet, imagePull return rendered, nil } -func renderAnalyticsSecretManifest(kubectl KubectlRunner) (string, error) { - apiKeys, err := existingSecretDataValueOrRandom(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "API_KEYS", 16) +func renderAnalyticsSecretManifest(kubectl core.KubectlRunner) (string, error) { + apiKeys, err := existingSecretDataValueOrRandom(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "API_KEYS", 16) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - uiAPIKey, err := existingSecretDataValueOrRandom(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "UI_API_KEY", 16) + uiAPIKey, err := existingSecretDataValueOrRandom(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "UI_API_KEY", 16) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } apiKeys = 
ensureCSVIncludes(apiKeys, uiAPIKey) - grafanaPassword, err := existingSecretDataValueOrRandom(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "GRAFANA_ADMIN_PASSWORD", 16) + grafanaPassword, err := existingSecretDataValueOrRandom(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "GRAFANA_ADMIN_PASSWORD", 16) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - postgresUser, err := existingSecretDataValueOrDefault(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_USER", "mcp_runtime") + postgresUser, err := existingSecretDataValueOrDefault(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_USER", "mcp_runtime") if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - postgresPassword, err := existingSecretDataValueOrRandom(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_PASSWORD", 16) + postgresPassword, err := existingSecretDataValueOrRandom(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_PASSWORD", 16) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - postgresDB, err := existingSecretDataValueOrDefault(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_DB", "mcp_runtime") + postgresDB, err := existingSecretDataValueOrDefault(kubectl, core.DefaultAnalyticsNamespace, 
"mcp-sentinel-secrets", "POSTGRES_DB", "mcp_runtime") if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - postgresDSN, err := existingSecretDataValue(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_DSN") + postgresDSN, err := existingSecretDataValue(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "POSTGRES_DSN") if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } if postgresDSN == "" { postgresDSN = fmt.Sprintf( "postgres://%s@mcp-sentinel-postgres.%s.svc.cluster.local:5432/%s?sslmode=disable", url.UserPassword(postgresUser, postgresPassword).String(), - defaultAnalyticsNamespace, + core.DefaultAnalyticsNamespace, postgresDB, ) } - platformJWTSecret, err := existingSecretDataValueOrRandom(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_JWT_SECRET", 32) + platformJWTSecret, err := existingSecretDataValueOrRandom(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_JWT_SECRET", 32) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - platformAdminEmail, err := existingSecretDataValue(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_ADMIN_EMAIL") + platformAdminEmail, err := existingSecretDataValue(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_ADMIN_EMAIL") if err != nil { - 
return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - platformAdminPassword, err := existingSecretDataValue(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_ADMIN_PASSWORD") + platformAdminPassword, err := existingSecretDataValue(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "PLATFORM_ADMIN_PASSWORD") if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } - adminUsers, err := existingSecretDataValue(kubectl, defaultAnalyticsNamespace, "mcp-sentinel-secrets", "ADMIN_USERS") + adminUsers, err := existingSecretDataValue(kubectl, core.DefaultAnalyticsNamespace, "mcp-sentinel-secrets", "ADMIN_USERS") if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to read analytics secrets: %v", err)) } if adminUsers == "" && platformAdminEmail != "" { adminUsers = platformAdminEmail @@ -2054,7 +1913,7 @@ func renderAnalyticsSecretManifest(kubectl KubectlRunner) (string, error) { "kind": "Secret", "metadata": map[string]string{ "name": "mcp-sentinel-secrets", - "namespace": defaultAnalyticsNamespace, + "namespace": core.DefaultAnalyticsNamespace, }, "type": "Opaque", "stringData": map[string]string{ @@ -2074,7 +1933,7 @@ func renderAnalyticsSecretManifest(kubectl KubectlRunner) (string, error) { } rendered, err := yaml.Marshal(secretManifest) if err != nil { - return "", wrapWithSentinel(ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed 
to render analytics secrets: %v", err)) + return "", core.WrapWithSentinel(core.ErrRenderSecretManifestFailed, err, fmt.Sprintf("failed to render analytics secrets: %v", err)) } return string(rendered), nil } @@ -2102,21 +1961,21 @@ func ensureCSVIncludes(csv, value string) string { return strings.Join(parts, ",") } -func ensureAnalyticsImagePullSecret(kubectl KubectlRunner) (string, error) { - extRegistry, err := resolveExternalRegistryConfig(nil) +func ensureAnalyticsImagePullSecret(kubectl core.KubectlRunner) (string, error) { + extRegistry, err := registry.ResolveExternalRegistryConfig(nil) if err != nil { return "", err } if extRegistry == nil || extRegistry.URL == "" || (extRegistry.Username == "" && extRegistry.Password == "") { return "", nil } - if err := ensureImagePullSecretWithKubectl(kubectl, defaultAnalyticsNamespace, defaultRegistrySecretName, extRegistry.URL, extRegistry.Username, extRegistry.Password); err != nil { + if err := ensureImagePullSecretWithKubectl(kubectl, core.DefaultAnalyticsNamespace, defaultRegistrySecretName, extRegistry.URL, extRegistry.Username, extRegistry.Password); err != nil { return "", err } return defaultRegistrySecretName, nil } -func existingSecretDataValue(kubectl KubectlRunner, namespace, name, key string) (string, error) { +func existingSecretDataValue(kubectl core.KubectlRunner, namespace, name, key string) (string, error) { cmd, err := kubectl.CommandArgs([]string{"get", "secret", name, "-n", namespace, "-o", "jsonpath={.data." 
+ key + "}"}) if err != nil { return "", err @@ -2142,7 +2001,7 @@ func existingSecretDataValue(kubectl KubectlRunner, namespace, name, key string) return string(decoded), nil } -func existingSecretDataValueOrRandom(kubectl KubectlRunner, namespace, name, key string, size int) (string, error) { +func existingSecretDataValueOrRandom(kubectl core.KubectlRunner, namespace, name, key string, size int) (string, error) { value, err := existingSecretDataValue(kubectl, namespace, name, key) if err != nil { return "", err @@ -2153,7 +2012,7 @@ func existingSecretDataValueOrRandom(kubectl KubectlRunner, namespace, name, key return randomHex(size) } -func existingSecretDataValueOrDefault(kubectl KubectlRunner, namespace, name, key, fallback string) (string, error) { +func existingSecretDataValueOrDefault(kubectl core.KubectlRunner, namespace, name, key, fallback string) (string, error) { value, err := existingSecretDataValue(kubectl, namespace, name, key) if err != nil { return "", err @@ -2253,30 +2112,30 @@ func randomHex(size int) (string, error) { return hex.EncodeToString(buffer), nil } -func waitForRolloutStatusWithKubectl(kubectl KubectlRunner, kind, name, namespace, timeout string) error { +func waitForRolloutStatusWithKubectl(kubectl core.KubectlRunner, kind, name, namespace, timeout string) error { return kubectl.RunWithOutput([]string{"rollout", "status", fmt.Sprintf("%s/%s", kind, name), "-n", namespace, "--timeout=" + timeout}, os.Stdout, os.Stderr) } // analyticsRolloutTimeoutString returns the kubectl --timeout value for mcp-sentinel rollouts. -// Uses MCP_DEPLOYMENT_TIMEOUT (see GetDeploymentTimeout); if unset or non-positive, uses the default 5m. +// Uses MCP_DEPLOYMENT_TIMEOUT (see core.GetDeploymentTimeout); if unset or non-positive, uses the default 5m. 
func analyticsRolloutTimeoutString() string { - d := GetDeploymentTimeout() + d := core.GetDeploymentTimeout() if d <= 0 { - d = defaultDeploymentTimeout + d = 5 * time.Minute } return d.String() } // printAnalyticsRolloutDiagnostics prints pods and events to help triage stuck mcp-sentinel rollouts. -func printAnalyticsRolloutDiagnostics(kubectl KubectlRunner) { - Warn("mcp-sentinel rollouts failed. Namespace snapshot (pods):") +func printAnalyticsRolloutDiagnostics(kubectl core.KubectlRunner) { + core.Warn("mcp-sentinel rollouts failed. Namespace snapshot (pods):") // #nosec G204 -- fixed namespace for diagnostics. - _ = kubectl.RunWithOutput([]string{"get", "pods", "-n", defaultAnalyticsNamespace, "-o", "wide"}, os.Stdout, os.Stderr) - Warn("Recent events in mcp-sentinel (newest last):") - _ = kubectl.RunWithOutput([]string{"get", "events", "-n", defaultAnalyticsNamespace, "--sort-by", ".lastTimestamp"}, os.Stdout, os.Stderr) + _ = kubectl.RunWithOutput([]string{"get", "pods", "-n", core.DefaultAnalyticsNamespace, "-o", "wide"}, os.Stdout, os.Stderr) + core.Warn("Recent events in mcp-sentinel (newest last):") + _ = kubectl.RunWithOutput([]string{"get", "events", "-n", core.DefaultAnalyticsNamespace, "--sort-by", ".lastTimestamp"}, os.Stdout, os.Stderr) } -func waitForJobCompletionWithKubectl(kubectl KubectlRunner, name, namespace, timeout string) error { +func waitForJobCompletionWithKubectl(kubectl core.KubectlRunner, name, namespace, timeout string) error { return kubectl.RunWithOutput([]string{"wait", "--for=condition=complete", "job/" + name, "-n", namespace, "--timeout=" + timeout}, os.Stdout, os.Stderr) } @@ -2298,61 +2157,61 @@ func operatorEnvOverrides(gatewayProxyImage string) []operatorEnvVar { var envVars []operatorEnvVar image := strings.TrimSpace(gatewayProxyImage) if image == "" { - image = strings.TrimSpace(GetGatewayProxyImageOverride()) + image = strings.TrimSpace(core.GetGatewayProxyImageOverride()) } if image != "" { envVars = append(envVars, 
operatorEnvVar{Name: "MCP_GATEWAY_PROXY_IMAGE", Value: image}) } - ingestURL := strings.TrimSpace(GetAnalyticsIngestURLOverride()) + ingestURL := strings.TrimSpace(core.GetAnalyticsIngestURLOverride()) if ingestURL == "" { ingestURL = defaultAnalyticsIngestURL } if ingestURL != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_SENTINEL_INGEST_URL", Value: ingestURL}) } - if mode := strings.TrimSpace(DefaultCLIConfig.IngressReadinessMode); mode != "" { + if mode := strings.TrimSpace(core.DefaultCLIConfig.IngressReadinessMode); mode != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_INGRESS_READINESS_MODE", Value: mode}) } - registryEndpoint := strings.TrimSpace(GetRegistryEndpoint()) + registryEndpoint := strings.TrimSpace(core.GetRegistryEndpoint()) if registryEndpoint != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_REGISTRY_ENDPOINT", Value: registryEndpoint}) } - registryIngressHost := strings.TrimSpace(GetRegistryIngressHost()) + registryIngressHost := strings.TrimSpace(core.GetRegistryIngressHost()) if registryIngressHost != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_REGISTRY_INGRESS_HOST", Value: registryIngressHost}) } - if mcpHost := strings.TrimSpace(GetMcpIngressHost()); mcpHost != "" { + if mcpHost := strings.TrimSpace(core.GetMcpIngressHost()); mcpHost != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_DEFAULT_INGRESS_HOST", Value: mcpHost}) } - clusterName := strings.TrimSpace(GetClusterName()) + clusterName := strings.TrimSpace(core.GetClusterName()) if clusterName != "" { envVars = append(envVars, operatorEnvVar{Name: "MCP_CLUSTER_NAME", Value: clusterName}) } return envVars } -func applySetupPlanToCLIConfig(plan SetupPlan) { - if DefaultCLIConfig == nil { +func applySetupPlanToCLIConfig(plan setupplan.Plan) { + if core.DefaultCLIConfig == nil { return } if !plan.TLSEnabled { - DefaultCLIConfig.RegistryClusterIssuerName = "" + core.DefaultCLIConfig.RegistryClusterIssuerName = "" return } if 
strings.TrimSpace(plan.ACMEmail) != "" { - DefaultCLIConfig.RegistryClusterIssuerName = ClusterIssuerNameForACME(plan.ACMEStaging) + core.DefaultCLIConfig.RegistryClusterIssuerName = certmanager.ClusterIssuerNameForACME(plan.ACMEStaging) return } if strings.TrimSpace(plan.TLSClusterIssuer) != "" { - DefaultCLIConfig.RegistryClusterIssuerName = strings.TrimSpace(plan.TLSClusterIssuer) + core.DefaultCLIConfig.RegistryClusterIssuerName = strings.TrimSpace(plan.TLSClusterIssuer) return } - DefaultCLIConfig.RegistryClusterIssuerName = certClusterIssuerName + core.DefaultCLIConfig.RegistryClusterIssuerName = certmanager.CertClusterIssuerName } // setupTLSWithKubectlAndPlan provisions TLS: Let's Encrypt when plan.ACMEmail is set, an existing // ClusterIssuer when plan.TLSClusterIssuer is set, otherwise the bundled private CA (mcp-runtime-ca). -func setupTLSWithKubectlAndPlan(kubectl KubectlRunner, logger *zap.Logger, plan SetupPlan) error { +func setupTLSWithKubectlAndPlan(kubectl core.KubectlRunner, logger *zap.Logger, plan setupplan.Plan) error { if strings.TrimSpace(plan.ACMEmail) != "" { return setupTLSLetsEncrypt(kubectl, logger, plan) } @@ -2362,264 +2221,264 @@ func setupTLSWithKubectlAndPlan(kubectl KubectlRunner, logger *zap.Logger, plan return setupTLSPrivateCA(kubectl, logger) } -func setupTLSLetsEncrypt(kubectl KubectlRunner, logger *zap.Logger, plan SetupPlan) error { - Info("Configuring TLS with Let's Encrypt (cert-manager HTTP-01)") - if err := validateACMEHostnameForPublicCA(); err != nil { - wrappedErr := wrapWithSentinel(ErrTLSSetupFailed, err, err.Error()) - Error("Invalid configuration for Let's Encrypt") +func setupTLSLetsEncrypt(kubectl core.KubectlRunner, logger *zap.Logger, plan setupplan.Plan) error { + core.Info("Configuring TLS with Let's Encrypt (cert-manager HTTP-01)") + if err := certmanager.ValidateACMEHostnameForPublicCA(); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrTLSSetupFailed, err, err.Error()) + core.Error("Invalid 
configuration for Let's Encrypt") if logger != nil { - logStructuredError(logger, wrappedErr, "Invalid configuration for Let's Encrypt") + core.LogStructuredError(logger, wrappedErr, "Invalid configuration for Let's Encrypt") } return wrappedErr } - if err := validateIngressManifestForACME(plan.Ingress.manifest); err != nil { - wrappedErr := wrapWithSentinel(ErrTLSSetupFailed, err, err.Error()) - Error("Ingress configuration blocks Let's Encrypt") + if err := certmanager.ValidateIngressManifestForACME(plan.Ingress.Manifest); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrTLSSetupFailed, err, err.Error()) + core.Error("Ingress configuration blocks Let's Encrypt") if logger != nil { - logStructuredError(logger, wrappedErr, "Ingress configuration blocks Let's Encrypt") + core.LogStructuredError(logger, wrappedErr, "Ingress configuration blocks Let's Encrypt") } return wrappedErr } if plan.InstallCertManager { - if err := ensureCertManagerInstalled(kubectl, logger); err != nil { + if err := certmanager.EnsureCertManagerInstalled(kubectl, logger); err != nil { return err } } else { - Info("Checking cert-manager installation (--skip-cert-manager-install)") - if err := checkCertManagerInstalledWithKubectl(kubectl); err != nil { - err := wrapWithSentinel(ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it, or omit --skip-cert-manager-install to let setup apply it from upstream") - Error("Cert-manager not installed") + core.Info("Checking cert-manager installation (--skip-cert-manager-install)") + if err := certmanager.CheckCertManagerInstalledWithKubectl(kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCertManagerNotInstalled, err, "cert-manager not installed. 
Install it, or omit --skip-cert-manager-install to let setup apply it from upstream") + core.Error("Cert-manager not installed") if logger != nil { - logStructuredError(logger, err, "Cert-manager not installed") + core.LogStructuredError(logger, err, "Cert-manager not installed") } return err } - Info("cert-manager CRDs found") + core.Info("cert-manager CRDs found") } - if err := waitForTraefikDeploymentForACME(kubectl); err != nil { - wrappedErr := wrapWithSentinel(ErrTLSSetupFailed, err, err.Error()) - Error("Traefik is not ready for HTTP-01") + if err := certmanager.WaitForTraefikDeploymentForACME(kubectl); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrTLSSetupFailed, err, err.Error()) + core.Error("Traefik is not ready for HTTP-01") if logger != nil { - logStructuredError(logger, wrappedErr, "Traefik is not ready for HTTP-01") + core.LogStructuredError(logger, wrappedErr, "Traefik is not ready for HTTP-01") } return wrappedErr } - Info("Checking TCP connectivity to your ACME hostnames on port 80 (best effort from this machine)") - preflightACMEHostnamesPort80(acmeTLSDNSNames()) + core.Info("Checking TCP connectivity to your ACME hostnames on port 80 (best effort from this machine)") + certmanager.PreflightACMEHostnamesPort80(certmanager.ACMETLSDNSNames()) - Info("Applying Let's Encrypt ClusterIssuer") - if err := applyLetsEncryptClusterIssuer(kubectl, plan.ACMEmail, plan.ACMEStaging, logger); err != nil { + core.Info("Applying Let's Encrypt ClusterIssuer") + if err := certmanager.ApplyLetsEncryptClusterIssuer(kubectl, plan.ACMEmail, plan.ACMEStaging, logger); err != nil { return err } - if err := ensureNamespace(NamespaceRegistry); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateRegistryNamespaceFailed, + if err := kube.EnsureNamespace(kubectl.CommandArgs, core.NamespaceRegistry); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateRegistryNamespaceFailed, err, fmt.Sprintf("failed to create registry 
namespace: %v", err), - map[string]any{"namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to create registry namespace") + core.Error("Failed to create registry namespace") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to create registry namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to create registry namespace") } return wrappedErr } - issuerName := ClusterIssuerNameForACME(plan.ACMEStaging) - dnsNames := acmeTLSDNSNames() - Info("Applying Certificate for registry (Let's Encrypt SANs)") - if err := applyRegistryCertificateForACME(kubectl, dnsNames, issuerName); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplyCertificateFailed, + issuerName := certmanager.ClusterIssuerNameForACME(plan.ACMEStaging) + dnsNames := certmanager.ACMETLSDNSNames() + core.Info("Applying Certificate for registry (Let's Encrypt SANs)") + if err := certmanager.ApplyRegistryCertificateForACME(kubectl, dnsNames, issuerName); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyCertificateFailed, err, fmt.Sprintf("failed to apply Certificate: %v", err), - map[string]any{"certificate": registryCertificateName, "namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"certificate": certmanager.RegistryCertificateName, "namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to apply Certificate") + core.Error("Failed to apply Certificate") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply Certificate") + core.LogStructuredError(logger, wrappedErr, "Failed to apply Certificate") } return wrappedErr } - certTimeout := GetCertTimeout() + certTimeout := core.GetCertTimeout() if certTimeout < 5*time.Minute { certTimeout = 5 * time.Minute } - Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) - if err := 
waitForCertificateReadyWithKubectl(kubectl, registryCertificateName, NamespaceRegistry, certTimeout); err != nil { - err := newWithSentinel(ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) - Error("Certificate not ready") + core.Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) + if err := certmanager.WaitForCertificateReadyWithKubectl(kubectl, certmanager.RegistryCertificateName, core.NamespaceRegistry, certTimeout); err != nil { + err := core.NewWithSentinel(core.ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) + core.Error("Certificate not ready") if logger != nil { - logStructuredError(logger, err, "Certificate not ready") + core.LogStructuredError(logger, err, "Certificate not ready") } return err } - Success("Certificate issued successfully") + core.Success("Certificate issued successfully") return nil } // setupTLSWithExistingClusterIssuer issues the registry (and optional mcp SAN) Certificate using a // ClusterIssuer that already exists in the cluster (internal / enterprise CA). 
-func setupTLSWithExistingClusterIssuer(kubectl KubectlRunner, logger *zap.Logger, plan SetupPlan) error { +func setupTLSWithExistingClusterIssuer(kubectl core.KubectlRunner, logger *zap.Logger, plan setupplan.Plan) error { issuerName := strings.TrimSpace(plan.TLSClusterIssuer) - Info("Configuring TLS with existing ClusterIssuer: " + issuerName) + core.Info("Configuring TLS with existing ClusterIssuer: " + issuerName) if plan.InstallCertManager { - if err := ensureCertManagerInstalled(kubectl, logger); err != nil { + if err := certmanager.EnsureCertManagerInstalled(kubectl, logger); err != nil { return err } } else { - Info("Checking cert-manager installation (--skip-cert-manager-install)") - if err := checkCertManagerInstalledWithKubectl(kubectl); err != nil { - err := wrapWithSentinel(ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it, or omit --skip-cert-manager-install to let setup apply it from upstream") - Error("Cert-manager not installed") + core.Info("Checking cert-manager installation (--skip-cert-manager-install)") + if err := certmanager.CheckCertManagerInstalledWithKubectl(kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCertManagerNotInstalled, err, "cert-manager not installed. 
Install it, or omit --skip-cert-manager-install to let setup apply it from upstream") + core.Error("Cert-manager not installed") if logger != nil { - logStructuredError(logger, err, "Cert-manager not installed") + core.LogStructuredError(logger, err, "Cert-manager not installed") } return err } - Info("cert-manager CRDs found") + core.Info("cert-manager CRDs found") } - if err := checkNamedClusterIssuerWithKubectl(kubectl, issuerName); err != nil { - Error("Cluster issuer not found") + if err := certmanager.CheckNamedClusterIssuerWithKubectl(kubectl, issuerName); err != nil { + core.Error("Cluster issuer not found") if logger != nil { - logStructuredError(logger, err, "Cluster issuer not found") + core.LogStructuredError(logger, err, "Cluster issuer not found") } return err } - if err := ensureNamespace(NamespaceRegistry); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateRegistryNamespaceFailed, + if err := kube.EnsureNamespace(kubectl.CommandArgs, core.NamespaceRegistry); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateRegistryNamespaceFailed, err, fmt.Sprintf("failed to create registry namespace: %v", err), - map[string]any{"namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to create registry namespace") + core.Error("Failed to create registry namespace") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to create registry namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to create registry namespace") } return wrappedErr } - dnsNames := acmeTLSDNSNames() + dnsNames := certmanager.ACMETLSDNSNames() if len(dnsNames) == 0 { err := fmt.Errorf("no DNS names resolved for the Certificate; set MCP_PLATFORM_DOMAIN, MCP_REGISTRY_HOST, or MCP_REGISTRY_INGRESS_HOST (and optional MCP_MCP_INGRESS_HOST)") - wrappedErr := wrapWithSentinel(ErrTLSSetupFailed, err, err.Error()) - Error("Invalid TLS 
host configuration") + wrappedErr := core.WrapWithSentinel(core.ErrTLSSetupFailed, err, err.Error()) + core.Error("Invalid TLS host configuration") if logger != nil { - logStructuredError(logger, wrappedErr, "Invalid TLS host configuration") + core.LogStructuredError(logger, wrappedErr, "Invalid TLS host configuration") } return wrappedErr } - Info("Applying Certificate for registry (custom ClusterIssuer)") - if err := applyRegistryCertificateForACME(kubectl, dnsNames, issuerName); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplyCertificateFailed, + core.Info("Applying Certificate for registry (custom ClusterIssuer)") + if err := certmanager.ApplyRegistryCertificateForACME(kubectl, dnsNames, issuerName); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyCertificateFailed, err, fmt.Sprintf("failed to apply Certificate: %v", err), - map[string]any{"certificate": registryCertificateName, "namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"certificate": certmanager.RegistryCertificateName, "namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to apply Certificate") + core.Error("Failed to apply Certificate") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply Certificate") + core.LogStructuredError(logger, wrappedErr, "Failed to apply Certificate") } return wrappedErr } - certTimeout := GetCertTimeout() + certTimeout := core.GetCertTimeout() if certTimeout < 5*time.Minute { certTimeout = 5 * time.Minute } - Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) - if err := waitForCertificateReadyWithKubectl(kubectl, registryCertificateName, NamespaceRegistry, certTimeout); err != nil { - err := newWithSentinel(ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. 
Check cert-manager and your ClusterIssuer configuration: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) - Error("Certificate not ready") + core.Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) + if err := certmanager.WaitForCertificateReadyWithKubectl(kubectl, certmanager.RegistryCertificateName, core.NamespaceRegistry, certTimeout); err != nil { + err := core.NewWithSentinel(core.ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager and your ClusterIssuer configuration: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) + core.Error("Certificate not ready") if logger != nil { - logStructuredError(logger, err, "Certificate not ready") + core.LogStructuredError(logger, err, "Certificate not ready") } return err } - Success("Certificate issued successfully") + core.Success("Certificate issued successfully") return nil } // setupTLSPrivateCA uses a pre-created TLS secret mcp-runtime-ca in cert-manager (see config/cert-manager/cluster-issuer.yaml). -func setupTLSPrivateCA(kubectl KubectlRunner, logger *zap.Logger) error { - Info("Checking cert-manager installation") - if err := checkCertManagerInstalledWithKubectl(kubectl); err != nil { - err := wrapWithSentinel(ErrCertManagerNotInstalled, err, "cert-manager not installed. Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true\n or run setup with --with-tls --acme-email to install cert-manager automatically") - Error("Cert-manager not installed") +func setupTLSPrivateCA(kubectl core.KubectlRunner, logger *zap.Logger) error { + core.Info("Checking cert-manager installation") + if err := certmanager.CheckCertManagerInstalledWithKubectl(kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCertManagerNotInstalled, err, "cert-manager not installed. 
Install it first:\n helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true\n or run setup with --with-tls --acme-email to install cert-manager automatically") + core.Error("Cert-manager not installed") if logger != nil { - logStructuredError(logger, err, "Cert-manager not installed") + core.LogStructuredError(logger, err, "Cert-manager not installed") } return err } - Info("cert-manager CRDs found") + core.Info("cert-manager CRDs found") - Info("Checking CA secret") - if err := checkCASecretWithKubectl(kubectl); err != nil { - err := wrapWithSentinel(ErrCASecretNotFound, err, "CA secret 'mcp-runtime-ca' not found in cert-manager namespace. For Let's Encrypt use --acme-email, or create a private CA:\n kubectl create secret tls mcp-runtime-ca --cert=ca.crt --key=ca.key -n cert-manager") - Error("CA secret not found") + core.Info("Checking CA secret") + if err := certmanager.CheckCASecretWithKubectl(kubectl); err != nil { + err := core.WrapWithSentinel(core.ErrCASecretNotFound, err, "CA secret 'mcp-runtime-ca' not found in cert-manager namespace. 
For Let's Encrypt use --acme-email, or create a private CA:\n kubectl create secret tls mcp-runtime-ca --cert=ca.crt --key=ca.key -n cert-manager") + core.Error("CA secret not found") if logger != nil { - logStructuredError(logger, err, "CA secret not found") + core.LogStructuredError(logger, err, "CA secret not found") } return err } - Info("CA secret found") + core.Info("CA secret found") - Info("Applying ClusterIssuer") - if err := applyClusterIssuerWithKubectl(kubectl); err != nil { - wrappedErr := wrapWithSentinel(ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply ClusterIssuer: %v", err)) - Error("Failed to apply ClusterIssuer") + core.Info("Applying ClusterIssuer") + if err := certmanager.ApplyClusterIssuerWithKubectl(kubectl); err != nil { + wrappedErr := core.WrapWithSentinel(core.ErrClusterIssuerApplyFailed, err, fmt.Sprintf("failed to apply ClusterIssuer: %v", err)) + core.Error("Failed to apply ClusterIssuer") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply ClusterIssuer") + core.LogStructuredError(logger, wrappedErr, "Failed to apply ClusterIssuer") } return wrappedErr } - if err := ensureNamespace(NamespaceRegistry); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrCreateRegistryNamespaceFailed, + if err := kube.EnsureNamespace(kubectl.CommandArgs, core.NamespaceRegistry); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrCreateRegistryNamespaceFailed, err, fmt.Sprintf("failed to create registry namespace: %v", err), - map[string]any{"namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to create registry namespace") + core.Error("Failed to create registry namespace") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to create registry namespace") + core.LogStructuredError(logger, wrappedErr, "Failed to create registry namespace") } return wrappedErr } - 
Info("Applying Certificate for registry") - if err := applyRegistryCertificateWithKubectl(kubectl); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrApplyCertificateFailed, + core.Info("Applying Certificate for registry") + if err := certmanager.ApplyRegistryCertificateWithKubectl(kubectl); err != nil { + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrApplyCertificateFailed, err, fmt.Sprintf("failed to apply Certificate: %v", err), - map[string]any{"certificate": registryCertificateName, "namespace": NamespaceRegistry, "component": "setup"}, + map[string]any{"certificate": certmanager.RegistryCertificateName, "namespace": core.NamespaceRegistry, "component": "setup"}, ) - Error("Failed to apply Certificate") + core.Error("Failed to apply Certificate") if logger != nil { - logStructuredError(logger, wrappedErr, "Failed to apply Certificate") + core.LogStructuredError(logger, wrappedErr, "Failed to apply Certificate") } return wrappedErr } - certTimeout := GetCertTimeout() - Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) - if err := waitForCertificateReadyWithKubectl(kubectl, registryCertificateName, NamespaceRegistry, certTimeout); err != nil { - err := newWithSentinel(ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) - Error("Certificate not ready") + certTimeout := core.GetCertTimeout() + core.Info(fmt.Sprintf("Waiting for certificate to be issued (timeout: %s)", certTimeout)) + if err := certmanager.WaitForCertificateReadyWithKubectl(kubectl, certmanager.RegistryCertificateName, core.NamespaceRegistry, certTimeout); err != nil { + err := core.NewWithSentinel(core.ErrCertificateNotReady, fmt.Sprintf("certificate not ready after %s. 
Check cert-manager logs: kubectl logs -n cert-manager deployment/cert-manager", certTimeout)) + core.Error("Certificate not ready") if logger != nil { - logStructuredError(logger, err, "Certificate not ready") + core.LogStructuredError(logger, err, "Certificate not ready") } return err } - Success("Certificate issued successfully") + core.Success("Certificate issued successfully") return nil } diff --git a/internal/cli/setup/setup.go b/internal/cli/setup/setup.go index a22415a..e73691b 100644 --- a/internal/cli/setup/setup.go +++ b/internal/cli/setup/setup.go @@ -8,19 +8,23 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" + setupplan "mcp-runtime/internal/cli/setup/plan" ) type manager struct { - logger *zap.Logger + logger *zap.Logger + clusterMgr ClusterManagerAPI } -func newManager(runtime *cli.Runtime) *manager { - return &manager{logger: runtime.Logger()} +func newManager(runtime *core.Runtime, clusterMgr ClusterManagerAPI) *manager { + return &manager{logger: runtime.Logger(), clusterMgr: clusterMgr} } -// New returns the setup command. -func New(runtime *cli.Runtime) *cobra.Command { +// New returns the setup command. clusterMgr is the cluster operator that setup +// uses for cluster init and ingress configuration; it is supplied by the +// composition root so setup does not import the cluster command package. 
+func New(runtime *core.Runtime, clusterMgr ClusterManagerAPI) *cobra.Command { var registryType string var registryStorageSize string var storageMode string @@ -40,7 +44,7 @@ func New(runtime *cli.Runtime) *cobra.Command { var acmeStaging bool var tlsClusterIssuer string var skipCertManagerInstall bool - mgr := newManager(runtime) + mgr := newManager(runtime, clusterMgr) cmd := &cobra.Command{ Use: "setup", @@ -54,11 +58,11 @@ func New(runtime *cli.Runtime) *cobra.Command { The platform deploys an internal Docker registry by default, which teams will use to push and pull container images.`, RunE: func(cmd *cobra.Command, args []string) error { - if err := cli.ValidateStorageMode(storageMode); err != nil { + if err := ValidateStorageMode(storageMode); err != nil { return err } - operatorArgs := cli.BuildOperatorArgs( + operatorArgs := BuildOperatorArgs( operatorMetricsAddr, operatorProbeAddr, operatorLeaderElect, @@ -77,11 +81,11 @@ will use to push and pull container images.`, if tlsCIResolved == "" { tlsCIResolved = strings.TrimSpace(os.Getenv("MCP_TLS_CLUSTER_ISSUER")) } - if err := cli.ValidateTLSSetupCLIFlags(tlsEnabled, acmeEmailResolved, tlsCIResolved, acmeStagingResolved, skipCertManagerInstall); err != nil { + if err := ValidateTLSSetupCLIFlags(tlsEnabled, acmeEmailResolved, tlsCIResolved, acmeStagingResolved, skipCertManagerInstall); err != nil { return err } - plan := cli.BuildSetupPlan(cli.SetupPlanInput{ + plan := setupplan.Build(setupplan.Input{ Kubeconfig: kubeconfig, Context: kubeContext, RegistryType: registryType, @@ -102,7 +106,7 @@ will use to push and pull container images.`, InstallCertManager: !skipCertManagerInstall, }) - return cli.SetupPlatform(mgr.logger, plan) + return SetupPlatform(mgr.logger, plan, mgr.clusterMgr) }, } diff --git a/internal/cli/setup_steps.go b/internal/cli/setup/steps.go similarity index 90% rename from internal/cli/setup_steps.go rename to internal/cli/setup/steps.go index ba9f19d..ff86390 100644 --- 
a/internal/cli/setup_steps.go +++ b/internal/cli/setup/steps.go @@ -1,4 +1,4 @@ -package cli +package setup // This file defines the setup step execution framework. // It provides a pipeline-based approach for running setup steps with dependency injection and testability. @@ -7,12 +7,16 @@ import ( "fmt" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/registry/config" + setupplan "mcp-runtime/internal/cli/setup/plan" ) // SetupContext carries state shared across setup steps. type SetupContext struct { - Plan SetupPlan - ExternalRegistry *ExternalRegistryConfig + Plan setupplan.Plan + ExternalRegistry *config.ExternalRegistryConfig UsingExternalRegistry bool RegistrySecretName string OperatorImage string @@ -146,7 +150,7 @@ type verifyStep struct{} func (s verifyStep) Name() string { return "verify" } func (s verifyStep) Run(logger *zap.Logger, deps SetupDeps, ctx *SetupContext) error { if err := verifySetup(logger, ctx.UsingExternalRegistry, deps); err != nil { - Error("Post-setup verification failed") + core.Error("Post-setup verification failed") return err } return nil @@ -168,14 +172,14 @@ func buildSetupSteps(ctx *SetupContext) []SetupStep { func runSetupSteps(logger *zap.Logger, deps SetupDeps, ctx *SetupContext, steps []SetupStep) error { for _, step := range steps { if err := step.Run(logger, deps, ctx); err != nil { - wrappedErr := wrapWithSentinelAndContext( - ErrSetupStepFailed, + wrappedErr := core.WrapWithSentinelAndContext( + core.ErrSetupStepFailed, err, fmt.Sprintf("setup step %q failed: %v", step.Name(), err), map[string]any{"step": step.Name(), "component": "setup"}, ) - Error("Setup step failed") - logStructuredError(logger, wrappedErr, "Setup step failed") + core.Error("Setup step failed") + core.LogStructuredError(logger, wrappedErr, "Setup step failed") return wrappedErr } } diff --git a/internal/cli/setup_steps_test.go b/internal/cli/setup/steps_test.go similarity index 88% rename from 
internal/cli/setup_steps_test.go rename to internal/cli/setup/steps_test.go index 177a964..e333e59 100644 --- a/internal/cli/setup_steps_test.go +++ b/internal/cli/setup/steps_test.go @@ -1,4 +1,4 @@ -package cli +package setup import ( "sync/atomic" @@ -6,6 +6,10 @@ import ( "time" "go.uber.org/zap" + + "mcp-runtime/internal/cli/cluster" + "mcp-runtime/internal/cli/registry/config" + setupplan "mcp-runtime/internal/cli/setup/plan" ) type fakeRegistryManagerForSteps struct { @@ -32,11 +36,11 @@ func (f *fakeClusterManagerForKubeconfig) InitCluster(kubeconfig, context string return nil } -func (f *fakeClusterManagerForKubeconfig) ConfigureCluster(ingressOptions) error { return nil } +func (f *fakeClusterManagerForKubeconfig) ConfigureCluster(cluster.IngressOptions) error { return nil } func TestBuildSetupStepsOrderWithTLS(t *testing.T) { ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ TLSEnabled: true, }, } @@ -63,7 +67,7 @@ func TestBuildSetupStepsOrderWithTLS(t *testing.T) { func TestBuildSetupStepsOrderWithoutTLS(t *testing.T) { ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ TLSEnabled: false, }, } @@ -89,7 +93,7 @@ func TestBuildSetupStepsOrderWithoutTLS(t *testing.T) { func TestBuildSetupStepsOrderWithAnalytics(t *testing.T) { ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ DeployAnalytics: true, }, } @@ -117,17 +121,17 @@ func TestBuildSetupStepsOrderWithAnalytics(t *testing.T) { func TestOperatorImageStepSetsContext(t *testing.T) { ctx := &SetupContext{ - Plan: SetupPlan{}, - ExternalRegistry: &ExternalRegistryConfig{ + Plan: setupplan.Plan{}, + ExternalRegistry: &config.ExternalRegistryConfig{ URL: "registry.example.com", }, UsingExternalRegistry: true, } deps := SetupDeps{ - OperatorImageFor: func(_ *ExternalRegistryConfig) string { + OperatorImageFor: func(_ *config.ExternalRegistryConfig) string { return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(_ 
*ExternalRegistryConfig) string { + GatewayProxyImageFor: func(_ *config.ExternalRegistryConfig) string { return "registry.example.com/mcp-sentinel-mcp-proxy:latest" }, BuildOperatorImage: func(string) error { return nil }, @@ -154,17 +158,21 @@ func TestOperatorImageStepTestModeBuildsAndPushesToRegistry(t *testing.T) { var pushCalls int32 var gatewayPushCalls int32 ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ TestMode: true, }, - ExternalRegistry: &ExternalRegistryConfig{URL: "registry.example.com"}, + ExternalRegistry: &config.ExternalRegistryConfig{URL: "registry.example.com"}, UsingExternalRegistry: true, } deps := SetupDeps{ - OperatorImageFor: func(_ *ExternalRegistryConfig) string { return "registry.example.com/mcp-runtime-operator:latest" }, - GatewayProxyImageFor: func(_ *ExternalRegistryConfig) string { return "registry.example.com/mcp-sentinel-mcp-proxy:latest" }, - BuildOperatorImage: func(string) error { atomic.AddInt32(&buildCalls, 1); return nil }, - PushOperatorImage: func(string) error { atomic.AddInt32(&pushCalls, 1); return nil }, + OperatorImageFor: func(_ *config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-runtime-operator:latest" + }, + GatewayProxyImageFor: func(_ *config.ExternalRegistryConfig) string { + return "registry.example.com/mcp-sentinel-mcp-proxy:latest" + }, + BuildOperatorImage: func(string) error { atomic.AddInt32(&buildCalls, 1); return nil }, + PushOperatorImage: func(string) error { atomic.AddInt32(&pushCalls, 1); return nil }, BuildGatewayProxyImage: func(string) error { atomic.AddInt32(&gatewayBuildCalls, 1) return nil @@ -198,7 +206,7 @@ func TestOperatorImageStepTestModeBuildsAndPushesToRegistry(t *testing.T) { func TestDeployOperatorStepCmdPassesOperatorArgs(t *testing.T) { ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ OperatorArgs: []string{"--metrics-bind-address=:9090", "--leader-elect=false"}, }, OperatorImage: 
"registry.example.com/mcp-runtime-operator:latest", @@ -251,12 +259,12 @@ func TestClusterStepPassesKubeconfigAndContext(t *testing.T) { } ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ Kubeconfig: "/etc/rancher/k3s/k3s.yaml", Context: "k3s", - Ingress: ingressOptions{ - mode: "traefik", - manifest: "config/ingress/overlays/http", + Ingress: cluster.IngressOptions{ + Mode: "traefik", + Manifest: "config/ingress/overlays/http", }, }, } @@ -278,7 +286,7 @@ func TestRegistryStepDeploysInternalRegistry(t *testing.T) { var waitCalls int32 fakeRegistry := &fakeRegistryManagerForSteps{} ctx := &SetupContext{ - Plan: SetupPlan{ + Plan: setupplan.Plan{ RegistryType: "docker", RegistryStorageSize: "1Gi", RegistryManifest: "config/registry", diff --git a/internal/cli/setup/tls_flags_test.go b/internal/cli/setup/tls_flags_test.go new file mode 100644 index 0000000..2b0e91c --- /dev/null +++ b/internal/cli/setup/tls_flags_test.go @@ -0,0 +1,45 @@ +package setup + +import ( + "errors" + "testing" + + "mcp-runtime/internal/cli/core" +) + +func TestValidateTLSSetupCLIFlags(t *testing.T) { + t.Parallel() + cases := []struct { + name string + tls bool + acme, tlsCI string + staging bool + skipCM bool + wantErr bool + wantIsField bool + }{ + {"ok disabled", false, "", "", false, false, false, false}, + {"ok with-tls acme", true, "a@b.com", "", false, false, false, false}, + {"mutual exclusivity", false, "a@b.com", "issuer", false, false, true, true}, + {"acme without with-tls", false, "a@b.com", "", false, false, true, true}, + {"tls-cluster-issuer without with-tls", false, "", "issuer", false, false, true, true}, + {"staging without with-tls", false, "", "", true, false, true, true}, + {"skip-cm without with-tls", false, "", "", false, true, true, true}, + {"with-tls staging no email", true, "", "", true, true, false, false}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateTLSSetupCLIFlags(tc.tls, tc.acme, tc.tlsCI, 
tc.staging, tc.skipCM) + if tc.wantErr { + if err == nil { + t.Fatal("expected error") + } + if tc.wantIsField && !errors.Is(err, core.ErrFieldRequired) { + t.Fatalf("expected ErrFieldRequired, got %v", err) + } + } else if err != nil { + t.Fatalf("unexpected: %v", err) + } + }) + } +} diff --git a/internal/cli/status.go b/internal/cli/status.go deleted file mode 100644 index 2ca98aa..0000000 --- a/internal/cli/status.go +++ /dev/null @@ -1,278 +0,0 @@ -package cli - -// This file implements the "status" command for checking platform health. -// It summarizes the core runtime plus the bundled analytics stack. - -import ( - "fmt" - "strconv" - "strings" - - "go.uber.org/zap" -) - -type platformWorkload struct { - Component string - Namespace string - Kind string - Name string -} - -var analyticsStatusWorkloads = []platformWorkload{ - {Component: "ClickHouse", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Name: "clickhouse"}, - {Component: "Zookeeper", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "zookeeper"}, - {Component: "Kafka", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Name: "kafka"}, - {Component: "Ingest", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ingest"}, - {Component: "Processor", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-processor"}, - {Component: "API", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-api"}, - {Component: "UI", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-ui"}, - {Component: "Gateway", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "mcp-sentinel-gateway"}, - {Component: "Prometheus", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "prometheus"}, - {Component: "Grafana", Namespace: defaultAnalyticsNamespace, Kind: "deployment", Name: "grafana"}, - {Component: "OTel Collector", Namespace: defaultAnalyticsNamespace, Kind: 
"deployment", Name: "otel-collector"}, - {Component: "Tempo", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Name: "tempo"}, - {Component: "Loki", Namespace: defaultAnalyticsNamespace, Kind: "statefulset", Name: "loki"}, - {Component: "Promtail", Namespace: defaultAnalyticsNamespace, Kind: "daemonset", Name: "promtail"}, -} - -func ShowPlatformStatus(logger *zap.Logger) error { - Header("MCP Platform Status") - DefaultPrinter.Println() - - tableData := [][]string{ - {"Component", "Namespace", "Resource", "Status", "Details"}, - } - - clusterReachable := true - clusterStatus := Green("OK") - clusterDetails := "Connected" - if err := checkClusterStatusQuiet(); err != nil { - clusterReachable = false - clusterStatus = Red("ERROR") - clusterDetails = err.Error() - } - tableData = append(tableData, []string{"Cluster", "-", "kube-api", clusterStatus, clusterDetails}) - - extRegistry, err := resolveExternalRegistryConfig(nil) - switch { - case err != nil: - // resolveExternalRegistryConfig already returns (nil, nil) when no config exists, - // so any error here is a real load/parse/validation problem rather than a missing file. 
- Warn("Failed to load external registry config: " + err.Error()) - tableData = append(tableData, []string{"Registry", "-", "config", Red("ERROR"), err.Error()}) - case extRegistry != nil && extRegistry.URL != "": - tableData = append(tableData, []string{"Registry", "-", "external", Cyan("EXTERNAL"), "Configured: " + extRegistry.URL}) - default: - tableData = append(tableData, workloadStatusRow( - platformWorkload{Component: "Registry", Namespace: NamespaceRegistry, Kind: "deployment", Name: RegistryDeploymentName}, - clusterReachable, - )) - } - - tableData = append(tableData, workloadStatusRow( - platformWorkload{Component: "Operator", Namespace: NamespaceMCPRuntime, Kind: "deployment", Name: OperatorDeploymentName}, - clusterReachable, - )) - - switch installed, analyticsErr := analyticsNamespaceInstalled(clusterReachable); { - case !clusterReachable: - tableData = append(tableData, analyticsStackRow(Red("ERROR"), "Cluster unavailable")) - case analyticsErr != nil: - tableData = append(tableData, analyticsStackRow(Red("ERROR"), analyticsErr.Error())) - case !installed: - tableData = append(tableData, analyticsStackRow(Yellow("SKIPPED"), "Namespace not found")) - default: - for _, workload := range analyticsStatusWorkloads { - tableData = append(tableData, workloadStatusRow(workload, true)) - } - } - - TableBoxed(tableData) - - // MCP Servers section - DefaultPrinter.Println() - Section("MCP Servers") - - if !clusterReachable { - Warn("Skipping MCP server status: cluster unavailable") - DefaultPrinter.Println() - Info("Use 'mcp-runtime server list' for detailed server info") - return nil - } - - // #nosec G204 -- fixed kubectl command. 
- cmd, err := kubectlClient.CommandArgs([]string{"get", "mcpserver", "--all-namespaces", "-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,IMAGE:.spec.image,REPLICAS:.spec.replicas,PATH:.spec.ingressPath"}) - if err != nil { - Warn("Failed to list MCP servers: " + err.Error()) - } else { - output, execErr := cmd.CombinedOutput() - if execErr != nil { - errDetails := strings.TrimSpace(string(output)) - if errDetails == "" { - errDetails = execErr.Error() - } - Warn("Failed to list MCP servers: " + errDetails) - } else if len(strings.TrimSpace(string(output))) == 0 { - Warn("No MCP servers deployed") - } else { - lines := strings.Split(strings.TrimSpace(string(output)), "\n") - if len(lines) <= 1 { - Warn("No MCP servers deployed") - } else { - serverData := [][]string{} - for _, line := range lines { - fields := strings.Fields(line) - serverData = append(serverData, fields) - } - Table(serverData) - } - } - } - - // Quick tips - DefaultPrinter.Println() - Info("Use 'mcp-runtime server list' for detailed server info") - - return nil -} - -func checkClusterStatusQuiet() error { - output, err := runKubectlCombinedOutput([]string{"cluster-info"}) - if err == nil { - return nil - } - detail := commandErrorDetail(output, err) - if hint, handled := clusterSetupHint(detail); handled { - return wrapWithSentinel(ErrClusterNotAccessible, err, hint) - } - return wrapWithSentinel(ErrClusterNotAccessible, err, fmt.Sprintf("cluster not accessible: %s", detail)) -} - -// clusterSetupHint returns a friendlier message when the cluster has not been -// provisioned yet (missing kubectl, kubeconfig, or API not reachable). -func clusterSetupHint(detail string) (string, bool) { - lower := strings.ToLower(detail) - - switch { - case strings.Contains(lower, "executable file not found"), - strings.Contains(lower, "kubectl: not found"): - return "kubectl is missing. 
Install kubectl and re-run the command.", true - case strings.Contains(lower, "kubeconfig"), - strings.Contains(lower, "no configuration has been provided"): - return "kubeconfig is missing or not readable. Either copy your cluster kubeconfig to ~/.kube/config, or re-run with `./bin/mcp-runtime setup --kubeconfig /etc/rancher/k3s/k3s.yaml` (for k3s) and optionally `--context `.", true - case strings.Contains(lower, "connection refused"), - strings.Contains(lower, "unable to connect to the server"), - strings.Contains(lower, "context deadline exceeded"), - strings.Contains(lower, "the connection to the server"): - return "no Kubernetes API reachable. Verify your kubeconfig/context (or pass `--kubeconfig`/`--context` to setup) and ensure the cluster control plane is reachable.", true - default: - return "", false - } -} - -func analyticsNamespaceInstalled(clusterReachable bool) (bool, error) { - if !clusterReachable { - return false, nil - } - - output, err := runKubectlCombinedOutput([]string{"get", "namespace", defaultAnalyticsNamespace, "-o", "jsonpath={.metadata.name}"}) - if err == nil { - return strings.TrimSpace(output) == defaultAnalyticsNamespace, nil - } - if strings.TrimSpace(output) == "" { - return false, fmt.Errorf("empty output from namespace probe") - } - - lower := strings.ToLower(output) - if strings.Contains(lower, "not found") || strings.Contains(lower, "notfound") { - return false, nil - } - - return false, fmt.Errorf("%s", commandErrorDetail(output, err)) -} - -func analyticsStackRow(status, details string) []string { - return []string{"Analytics Stack", defaultAnalyticsNamespace, "namespace/" + defaultAnalyticsNamespace, status, details} -} - -func workloadStatusRow(workload platformWorkload, clusterReachable bool) []string { - resource := fmt.Sprintf("%s/%s", workload.Kind, workload.Name) - if !clusterReachable { - return []string{workload.Component, workload.Namespace, resource, Red("ERROR"), "Cluster unavailable"} - } - - status, details := 
workloadReadinessStatus(workload) - return []string{workload.Component, workload.Namespace, resource, status, details} -} - -func workloadReadinessStatus(workload platformWorkload) (string, string) { - jsonPath, err := workloadReadyJSONPath(workload.Kind) - if err != nil { - return Red("ERROR"), err.Error() - } - - output, cmdErr := runKubectlCombinedOutput([]string{ - "get", workload.Kind, workload.Name, - "-n", workload.Namespace, - "-o", "jsonpath=" + jsonPath, - }) - if cmdErr != nil { - return Red("ERROR"), commandErrorDetail(output, cmdErr) - } - - if workloadReady(output) { - return Green("OK"), "Ready: " + output - } - return Yellow("PENDING"), "Ready: " + output -} - -func workloadReadyJSONPath(kind string) (string, error) { - switch strings.ToLower(kind) { - case "deployment", "statefulset": - return "{.status.readyReplicas}/{.spec.replicas}", nil - case "daemonset": - return "{.status.numberReady}/{.status.desiredNumberScheduled}", nil - default: - return "", fmt.Errorf("unsupported workload kind %q", kind) - } -} - -func workloadReady(value string) bool { - parts := strings.Split(strings.TrimSpace(value), "/") - if len(parts) != 2 { - return false - } - - ready, err := strconv.Atoi(strings.TrimSpace(parts[0])) - if err != nil { - return false - } - desired, err := strconv.Atoi(strings.TrimSpace(parts[1])) - if err != nil { - return false - } - return desired > 0 && ready >= desired -} - -func runKubectlCombinedOutput(args []string) (string, error) { - cmd, err := kubectlClient.CommandArgs(args) - if err != nil { - return "", err - } - output, execErr := cmd.CombinedOutput() - return strings.TrimSpace(string(output)), execErr -} - -func commandErrorDetail(output string, fallback error) string { - lines := strings.Split(strings.TrimSpace(output), "\n") - for i := len(lines) - 1; i >= 0; i-- { - line := strings.TrimSpace(lines[i]) - if line != "" { - return line - } - } - if fallback != nil { - return fallback.Error() - } - return "Unknown error" -} diff 
--git a/internal/cli/status_test.go b/internal/cli/status/platform_status_test.go similarity index 65% rename from internal/cli/status_test.go rename to internal/cli/status/platform_status_test.go index 09b9070..506d5f6 100644 --- a/internal/cli/status_test.go +++ b/internal/cli/status/platform_status_test.go @@ -1,4 +1,4 @@ -package cli +package status_test import ( "bytes" @@ -10,6 +10,10 @@ import ( "github.com/pterm/pterm" "go.uber.org/zap" + + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/platformstatus" + "mcp-runtime/internal/cli/status" ) type commandResponse struct { @@ -92,6 +96,53 @@ func TestHelperProcess(t *testing.T) { os.Exit(0) } +func resetStatusTestConfig(t *testing.T) { + t.Helper() + orig := core.DefaultCLIConfig + core.DefaultCLIConfig = &core.CLIConfig{} + t.Cleanup(func() { + core.DefaultCLIConfig = orig + }) + t.Setenv("HOME", t.TempDir()) +} + +func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { + t.Helper() + orig := core.DefaultPrinter.Writer + core.DefaultPrinter.Writer = w + t.Cleanup(func() { + core.DefaultPrinter.Writer = orig + }) +} + +func runShowPlatformStatus(t *testing.T, responses map[string]commandResponse) string { + t.Helper() + return runShowPlatformStatusWithCalls(t, responses, nil) +} + +func runShowPlatformStatusWithCalls(t *testing.T, responses map[string]commandResponse, calls *[]string) string { + t.Helper() + + logger := zap.NewNop() + restoreExec := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, calls)) + t.Cleanup(restoreExec) + + var buf bytes.Buffer + pterm.SetDefaultOutput(&buf) + pterm.DisableStyling() + setDefaultPrinterWriter(t, &buf) + t.Cleanup(func() { + pterm.SetDefaultOutput(os.Stdout) + pterm.EnableStyling() + }) + + if err := status.ShowPlatformStatus(logger); err != nil { + t.Fatalf("ShowPlatformStatus() unexpected error = %v", err) + } + + return buf.String() +} + func TestShowPlatformStatus(t *testing.T) { 
t.Run("marks-operator-pending-when-replicas-start-with-zero", func(t *testing.T) { resetStatusTestConfig(t) @@ -145,7 +196,7 @@ func TestShowPlatformStatus(t *testing.T) { t.Run("surfaces external registry config errors instead of falling back to in-cluster registry", func(t *testing.T) { resetStatusTestConfig(t) - DefaultCLIConfig = &CLIConfig{ProvisionedRegistryUsername: "user-only"} + core.DefaultCLIConfig = &core.CLIConfig{ProvisionedRegistryUsername: "user-only"} var calls []string responses := map[string]commandResponse{ @@ -260,13 +311,12 @@ func TestAnalyticsNamespaceInstalledRequiresExactMatch(t *testing.T) { }, } - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, nil) - t.Cleanup(func() { execCommand = origExec }) + restore := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, nil)) + t.Cleanup(restore) - installed, err := analyticsNamespaceInstalled(true) + installed, err := platformstatus.AnalyticsNamespaceInstalled(true) if err != nil { - t.Fatalf("analyticsNamespaceInstalled() unexpected error = %v", err) + t.Fatalf("AnalyticsNamespaceInstalled() unexpected error = %v", err) } if installed { t.Fatal("expected namespace check to fail on mismatched namespace name") @@ -282,11 +332,10 @@ func TestAnalyticsNamespaceInstalledReturnsErrorOnEmptyFailure(t *testing.T) { }, } - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, nil) - t.Cleanup(func() { execCommand = origExec }) + restore := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, nil)) + t.Cleanup(restore) - installed, err := analyticsNamespaceInstalled(true) + installed, err := platformstatus.AnalyticsNamespaceInstalled(true) if err == nil { t.Fatal("expected empty namespace probe failure to surface an error") } @@ -297,191 +346,3 @@ func TestAnalyticsNamespaceInstalledReturnsErrorOnEmptyFailure(t *testing.T) { t.Fatalf("expected empty-output error, got %v", err) } } - -func TestServerStatus(t *testing.T) { - 
t.Run("returns-error-and-logs-combined-output-on-mcpserver-list-failure", func(t *testing.T) { - logger := zap.NewNop() - namespace := "mcp-servers" - responses := map[string]commandResponse{ - commandKey("kubectl", "get", "mcpserver", "-n", namespace, "-o", "jsonpath={range .items[*]}{.metadata.name}|{.spec.image}:{.spec.imageTag}|{.spec.replicas}|{.spec.ingressPath}|{.spec.useProvisionedRegistry}{\"\\n\"}{end}"): { - Stdout: "boom-out\n", - Stderr: "boom-err\n", - ExitCode: 1, - }, - } - - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, nil) - t.Cleanup(func() { execCommand = origExec }) - - var buf bytes.Buffer - pterm.SetDefaultOutput(&buf) - pterm.DisableStyling() - setDefaultPrinterWriter(t, &buf) - t.Cleanup(func() { - pterm.SetDefaultOutput(os.Stdout) - pterm.EnableStyling() - }) - - mgr := DefaultServerManager(logger) - err := mgr.ServerStatus(namespace) - if err == nil { - t.Fatal("expected error from serverStatus, got nil") - } - - output := buf.String() - if !strings.Contains(output, "boom-out") || !strings.Contains(output, "boom-err") { - t.Fatalf("expected combined output to be logged, got output: %s", output) - } - }) - - t.Run("prints warning when no servers found", func(t *testing.T) { - logger := zap.NewNop() - namespace := "mcp-servers" - responses := map[string]commandResponse{ - commandKey("kubectl", "get", "mcpserver", "-n", namespace, "-o", "jsonpath={range .items[*]}{.metadata.name}|{.spec.image}:{.spec.imageTag}|{.spec.replicas}|{.spec.ingressPath}|{.spec.useProvisionedRegistry}{\"\\n\"}{end}"): {}, - } - - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, nil) - t.Cleanup(func() { execCommand = origExec }) - - var buf bytes.Buffer - pterm.SetDefaultOutput(&buf) - pterm.DisableStyling() - setDefaultPrinterWriter(t, &buf) - t.Cleanup(func() { - pterm.SetDefaultOutput(os.Stdout) - pterm.EnableStyling() - }) - - mgr := DefaultServerManager(logger) - if err := 
mgr.ServerStatus(namespace); err != nil { - t.Fatalf("serverStatus() unexpected error = %v", err) - } - - output := buf.String() - if !strings.Contains(output, "No MCP servers found in namespace "+namespace) { - t.Fatalf("expected no servers warning, got output: %s", output) - } - }) - - t.Run("uses-managed-by-label-when-listing-pods", func(t *testing.T) { - logger := zap.NewNop() - namespace := "mcp-servers" - var calls []string - - responses := map[string]commandResponse{ - commandKey("kubectl", "get", "mcpserver", "-n", namespace, "-o", "jsonpath={range .items[*]}{.metadata.name}|{.spec.image}:{.spec.imageTag}|{.spec.replicas}|{.spec.ingressPath}|{.spec.useProvisionedRegistry}{\"\\n\"}{end}"): { - Stdout: "server1|image:tag|1|/server|false\n", - }, - commandKey("kubectl", "get", "pods", "-n", namespace, "-l", "app.kubernetes.io/managed-by=mcp-runtime", "-o", "custom-columns=NAME:.metadata.name,READY:.status.containerStatuses[0].ready,STATUS:.status.phase,RESTARTS:.status.containerStatuses[0].restartCount"): { - Stdout: "NAME READY STATUS RESTARTS\npod-1 true Running 0\n", - }, - } - - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, &calls) - t.Cleanup(func() { execCommand = origExec }) - - mgr2 := DefaultServerManager(logger) - if err := mgr2.ServerStatus(namespace); err != nil { - t.Fatalf("serverStatus() unexpected error = %v", err) - } - - found := false - for _, call := range calls { - if strings.Contains(call, "get pods") && strings.Contains(call, "app.kubernetes.io/managed-by=mcp-runtime") { - found = true - break - } - } - if !found { - t.Fatalf("expected managed-by label selector, got calls: %v", calls) - } - }) - - t.Run("prints no pods found when only header returned", func(t *testing.T) { - logger := zap.NewNop() - namespace := "mcp-servers" - responses := map[string]commandResponse{ - commandKey("kubectl", "get", "mcpserver", "-n", namespace, "-o", "jsonpath={range 
.items[*]}{.metadata.name}|{.spec.image}:{.spec.imageTag}|{.spec.replicas}|{.spec.ingressPath}|{.spec.useProvisionedRegistry}{\"\\n\"}{end}"): { - Stdout: "server1|image:tag|1|/server|false\n", - }, - commandKey("kubectl", "get", "pods", "-n", namespace, "-l", "app.kubernetes.io/managed-by=mcp-runtime", "-o", "custom-columns=NAME:.metadata.name,READY:.status.containerStatuses[0].ready,STATUS:.status.phase,RESTARTS:.status.containerStatuses[0].restartCount"): { - Stdout: "NAME READY STATUS RESTARTS\n", - }, - } - - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, nil) - t.Cleanup(func() { execCommand = origExec }) - - var buf bytes.Buffer - pterm.SetDefaultOutput(&buf) - pterm.DisableStyling() - setDefaultPrinterWriter(t, &buf) - t.Cleanup(func() { - pterm.SetDefaultOutput(os.Stdout) - pterm.EnableStyling() - }) - - mgr := DefaultServerManager(logger) - if err := mgr.ServerStatus(namespace); err != nil { - t.Fatalf("serverStatus() unexpected error = %v", err) - } - - output := buf.String() - if !strings.Contains(output, "No pods found") { - t.Fatalf("expected no pods message, got output: %s", output) - } - }) -} - -func setDefaultPrinterWriter(t *testing.T, w *bytes.Buffer) { - t.Helper() - orig := DefaultPrinter.Writer - DefaultPrinter.Writer = w - t.Cleanup(func() { - DefaultPrinter.Writer = orig - }) -} - -func resetStatusTestConfig(t *testing.T) { - t.Helper() - orig := DefaultCLIConfig - DefaultCLIConfig = &CLIConfig{} - t.Cleanup(func() { - DefaultCLIConfig = orig - }) - t.Setenv("HOME", t.TempDir()) -} - -func runShowPlatformStatus(t *testing.T, responses map[string]commandResponse) string { - t.Helper() - return runShowPlatformStatusWithCalls(t, responses, nil) -} - -func runShowPlatformStatusWithCalls(t *testing.T, responses map[string]commandResponse, calls *[]string) string { - t.Helper() - - logger := zap.NewNop() - origExec := execCommand - execCommand = fakeExecCommand(t, origExec, responses, calls) - t.Cleanup(func() { 
execCommand = origExec }) - - var buf bytes.Buffer - pterm.SetDefaultOutput(&buf) - pterm.DisableStyling() - setDefaultPrinterWriter(t, &buf) - t.Cleanup(func() { - pterm.SetDefaultOutput(os.Stdout) - pterm.EnableStyling() - }) - - if err := ShowPlatformStatus(logger); err != nil { - t.Fatalf("ShowPlatformStatus() unexpected error = %v", err) - } - - return buf.String() -} diff --git a/internal/cli/status/status.go b/internal/cli/status/status.go index c80ca07..679e72b 100644 --- a/internal/cli/status/status.go +++ b/internal/cli/status/status.go @@ -1,30 +1,135 @@ -// Package status owns routing for the status top-level command. +// Package status owns the status top-level command and platform status output. package status import ( + "strings" + "github.com/spf13/cobra" "go.uber.org/zap" - "mcp-runtime/internal/cli" + "mcp-runtime/internal/cli/core" + "mcp-runtime/internal/cli/platformstatus" + "mcp-runtime/internal/cli/registry/config" ) type manager struct { logger *zap.Logger } -func newManager(runtime *cli.Runtime) *manager { +func newManager(runtime *core.Runtime) *manager { return &manager{logger: runtime.Logger()} } // New returns the status command. -func New(runtime *cli.Runtime) *cobra.Command { +func New(runtime *core.Runtime) *cobra.Command { mgr := newManager(runtime) return &cobra.Command{ Use: "status", Short: "Show platform status", Long: "Show the overall status of the MCP platform", RunE: func(cmd *cobra.Command, args []string) error { - return cli.ShowPlatformStatus(mgr.logger) + return ShowPlatformStatus(mgr.logger) }, } } + +// ShowPlatformStatus prints the MCP platform status table and MCP server list. 
+func ShowPlatformStatus(logger *zap.Logger) error { + core.Header("MCP Platform Status") + core.DefaultPrinter.Println() + + tableData := [][]string{ + {"Component", "Namespace", "Resource", "Status", "Details"}, + } + + clusterReachable := true + clusterStatus := core.Green("OK") + clusterDetails := "Connected" + if err := platformstatus.CheckClusterStatusQuiet(); err != nil { + clusterReachable = false + clusterStatus = core.Red("ERROR") + clusterDetails = err.Error() + } + tableData = append(tableData, []string{"Cluster", "-", "kube-api", clusterStatus, clusterDetails}) + + extRegistry, err := config.Resolve(nil, config.Env{ + URL: core.DefaultCLIConfig.ProvisionedRegistryURL, + Username: core.DefaultCLIConfig.ProvisionedRegistryUsername, + Password: core.DefaultCLIConfig.ProvisionedRegistryPassword, + }) + switch { + case err != nil: + core.Warn("Failed to load external registry config: " + err.Error()) + tableData = append(tableData, []string{"Registry", "-", "config", core.Red("ERROR"), err.Error()}) + case extRegistry != nil && extRegistry.URL != "": + tableData = append(tableData, []string{"Registry", "-", "external", core.Cyan("EXTERNAL"), "Configured: " + extRegistry.URL}) + default: + tableData = append(tableData, platformstatus.WorkloadStatusRow( + platformstatus.PlatformWorkload{Component: "Registry", Namespace: core.NamespaceRegistry, Kind: "deployment", Name: core.RegistryDeploymentName}, + clusterReachable, + )) + } + + tableData = append(tableData, platformstatus.WorkloadStatusRow( + platformstatus.PlatformWorkload{Component: "Operator", Namespace: core.NamespaceMCPRuntime, Kind: "deployment", Name: core.OperatorDeploymentName}, + clusterReachable, + )) + + switch installed, analyticsErr := platformstatus.AnalyticsNamespaceInstalled(clusterReachable); { + case !clusterReachable: + tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Red("ERROR"), "Cluster unavailable")) + case analyticsErr != nil: + tableData = append(tableData, 
platformstatus.AnalyticsStackRow(core.Red("ERROR"), analyticsErr.Error())) + case !installed: + tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Yellow("SKIPPED"), "Namespace not found")) + default: + for _, workload := range platformstatus.DefaultPlatformStatusWorkloads { + tableData = append(tableData, platformstatus.WorkloadStatusRow(workload, true)) + } + } + + core.TableBoxed(tableData) + + core.DefaultPrinter.Println() + core.Section("MCP Servers") + + if !clusterReachable { + core.Warn("Skipping MCP server status: cluster unavailable") + core.DefaultPrinter.Println() + core.Info("Use 'mcp-runtime server list' for detailed server info") + return nil + } + + cmd, err := core.DefaultKubectlClient().CommandArgs([]string{"get", "mcpserver", "--all-namespaces", "-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,IMAGE:.spec.image,REPLICAS:.spec.replicas,PATH:.spec.ingressPath"}) + if err != nil { + core.Warn("Failed to list MCP servers: " + err.Error()) + } else { + output, execErr := cmd.CombinedOutput() + if execErr != nil { + errDetails := strings.TrimSpace(string(output)) + if errDetails == "" { + errDetails = execErr.Error() + } + core.Warn("Failed to list MCP servers: " + errDetails) + } else if len(strings.TrimSpace(string(output))) == 0 { + core.Warn("No MCP servers deployed") + } else { + lines := strings.Split(strings.TrimSpace(string(output)), "\n") + if len(lines) <= 1 { + core.Warn("No MCP servers deployed") + } else { + serverData := [][]string{} + for _, line := range lines { + fields := strings.Fields(line) + serverData = append(serverData, fields) + } + core.Table(serverData) + } + } + } + + core.DefaultPrinter.Println() + core.Info("Use 'mcp-runtime server list' for detailed server info") + + return nil +} From 94ade1077f1d033d7a5bd02db60d2af7553f2cb6 Mon Sep 17 00:00:00 2001 From: Prince Roshan Date: Sat, 2 May 2026 01:56:27 +0530 Subject: [PATCH 2/3] fix(ci): resolve PR check failures --- 
.github/workflows/ci.yaml | 3 ++- internal/cli/cluster/doctor_impl.go | 2 +- .../cli/testdata/mcp-runtime_cluster_cert_apply_help.golden | 3 ++- .../cli/testdata/mcp-runtime_cluster_provision_help.golden | 1 + .../cli/testdata/mcp-runtime_registry_provision_help.golden | 1 + test/golden/cli/testdata/mcp-runtime_server_logs_help.golden | 3 +++ 6 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 235dab6..3ff27ca 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -89,8 +89,9 @@ jobs: - name: Run unit tests with coverage run: | + module_path="$(go list -m)" go test -v -race -coverprofile=unit.out \ - $(go list ./... | grep -v '^github.com/Agent-Hellboy/mcp-runtime/test') + $(go list ./... | grep -v -x "${module_path}/test/integration") - name: Test Sentinel service modules (api, ui) run: | diff --git a/internal/cli/cluster/doctor_impl.go b/internal/cli/cluster/doctor_impl.go index 6686f18..ef33de5 100644 --- a/internal/cli/cluster/doctor_impl.go +++ b/internal/cli/cluster/doctor_impl.go @@ -1841,7 +1841,7 @@ func resolveDoctorSmokeTarget(kubectl core.KubectlRunner, preferredNamespace str } port := int32(8088) if len(parts) == 4 { - if parsed, parseErr := strconv.Atoi(strings.TrimSpace(parts[3])); parseErr == nil && parsed > 0 { + if parsed, parseErr := strconv.ParseInt(strings.TrimSpace(parts[3]), 10, 32); parseErr == nil && parsed > 0 && parsed <= 65535 { port = int32(parsed) } } diff --git a/test/golden/cli/testdata/mcp-runtime_cluster_cert_apply_help.golden b/test/golden/cli/testdata/mcp-runtime_cluster_cert_apply_help.golden index 45b3ca6..70ad3c9 100644 --- a/test/golden/cli/testdata/mcp-runtime_cluster_cert_apply_help.golden +++ b/test/golden/cli/testdata/mcp-runtime_cluster_cert_apply_help.golden @@ -4,7 +4,8 @@ Usage: mcp-runtime cluster cert apply [flags] Flags: - -h, --help help for apply + --dry-run Run preflight checks and print the resources that would be applied 
without modifying the cluster + -h, --help help for apply Global Flags: --debug Enable debug mode with structured error logging diff --git a/test/golden/cli/testdata/mcp-runtime_cluster_provision_help.golden b/test/golden/cli/testdata/mcp-runtime_cluster_provision_help.golden index 3c4ad2a..22ce852 100644 --- a/test/golden/cli/testdata/mcp-runtime_cluster_provision_help.golden +++ b/test/golden/cli/testdata/mcp-runtime_cluster_provision_help.golden @@ -4,6 +4,7 @@ Usage: mcp-runtime cluster provision [flags] Flags: + --dry-run Print the cluster config and command that would run without creating any cluster -h, --help help for provision --name string Cluster name (used by supported providers) (default "mcp-runtime") --nodes int Number of nodes (default 3) diff --git a/test/golden/cli/testdata/mcp-runtime_registry_provision_help.golden b/test/golden/cli/testdata/mcp-runtime_registry_provision_help.golden index 9fc3f5d..2ab8069 100644 --- a/test/golden/cli/testdata/mcp-runtime_registry_provision_help.golden +++ b/test/golden/cli/testdata/mcp-runtime_registry_provision_help.golden @@ -4,6 +4,7 @@ Usage: mcp-runtime registry provision [flags] Flags: + --dry-run Print what would be done without saving config, logging in, or pushing images -h, --help help for provision --operator-image string Optional: build and push operator image to this external registry (e.g., /mcp-runtime-operator:latest) --password string Registry password (optional) diff --git a/test/golden/cli/testdata/mcp-runtime_server_logs_help.golden b/test/golden/cli/testdata/mcp-runtime_server_logs_help.golden index 7306977..eec033a 100644 --- a/test/golden/cli/testdata/mcp-runtime_server_logs_help.golden +++ b/test/golden/cli/testdata/mcp-runtime_server_logs_help.golden @@ -7,6 +7,9 @@ Flags: --follow Follow log output -h, --help help for logs --namespace string Namespace (default "mcp-servers") + --previous Show logs from the previous container instance + --since string Only return logs newer than a 
relative duration like 5m or 1h + --tail int Number of recent log lines to show (-1 for all) (default 200) Global Flags: --debug Enable debug mode with structured error logging From b9cd29ca2c9327f4f1ed2745429ceac7c2e23dc7 Mon Sep 17 00:00:00 2001 From: Prince Roshan Date: Sat, 2 May 2026 02:21:38 +0530 Subject: [PATCH 3/3] fix(cli): address PR review feedback --- cmd/mcp-runtime/main.go | 30 +++- cmd/mcp-runtime/main_test.go | 54 ++++++ docs/internals/go-package-reference.md | 27 +-- internal/cli/cluster/manager.go | 2 +- internal/cli/core/exec.go | 12 +- internal/cli/core/exec_test.go | 7 +- internal/cli/kube/file.go | 5 +- internal/cli/platformstatus/kubectl.go | 8 +- internal/cli/platformstatus/workloads.go | 12 +- internal/cli/sentinel/manager.go | 6 +- internal/cli/status/platform_status_test.go | 47 +++++- internal/cli/status/status.go | 22 ++- test/e2e/kind.sh | 173 +++++++++++++++++--- 13 files changed, 317 insertions(+), 88 deletions(-) diff --git a/cmd/mcp-runtime/main.go b/cmd/mcp-runtime/main.go index c9c41da..f2daf78 100644 --- a/cmd/mcp-runtime/main.go +++ b/cmd/mcp-runtime/main.go @@ -44,10 +44,11 @@ var rootCmd = &cobra.Command{ - MCP server deployments - Platform configuration`, Version: fmt.Sprintf("%s (commit: %s, built: %s)", version, commit, date), - // Runtime errors should not trigger Cobra's usage/help dump; flag/arg - // validation errors still do (those happen before RunE). main() prints - // the error itself, so silence Cobra's own error print to avoid duplicates. - SilenceUsage: true, + // Keep usage enabled during Cobra validation; command wrappers disable it + // after validation passes so runtime errors do not dump command help. + SilenceUsage: false, + // main() prints the error itself, so silence Cobra's own error print to + // avoid duplicates. 
SilenceErrors: true, PersistentPreRun: func(cmd *cobra.Command, args []string) { // Set debug mode globally so logStructuredError can check it @@ -61,6 +62,27 @@ func init() { func initCommands(logger *zap.Logger) { cliroot.AddCommands(rootCmd, logger) + silenceUsageAfterValidation(rootCmd) +} + +func silenceUsageAfterValidation(cmd *cobra.Command) { + if cmd.RunE != nil { + runE := cmd.RunE + cmd.RunE = func(cmd *cobra.Command, args []string) error { + cmd.SilenceUsage = true + return runE(cmd, args) + } + } + if cmd.Run != nil { + run := cmd.Run + cmd.Run = func(cmd *cobra.Command, args []string) { + cmd.SilenceUsage = true + run(cmd, args) + } + } + for _, child := range cmd.Commands() { + silenceUsageAfterValidation(child) + } } // newConsoleLogger returns a human-friendly console logger with timestamps and caller info. diff --git a/cmd/mcp-runtime/main_test.go b/cmd/mcp-runtime/main_test.go index 3018ca8..5e285b0 100644 --- a/cmd/mcp-runtime/main_test.go +++ b/cmd/mcp-runtime/main_test.go @@ -2,8 +2,11 @@ package main import ( "bytes" + "errors" "strings" "testing" + + "github.com/spf13/cobra" ) func TestRootCommandHelp(t *testing.T) { @@ -28,3 +31,54 @@ func TestRootCommandHelp(t *testing.T) { t.Fatalf("help output missing expected text") } } + +func TestSilenceUsageAfterValidation(t *testing.T) { + t.Run("keeps usage for validation errors", func(t *testing.T) { + root := &cobra.Command{Use: "root", SilenceErrors: true} + cmd := &cobra.Command{ + Use: "needs-arg [name]", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return nil + }, + } + root.AddCommand(cmd) + silenceUsageAfterValidation(root) + + var out bytes.Buffer + root.SetOut(&out) + root.SetErr(&out) + root.SetArgs([]string{"needs-arg"}) + + if err := root.Execute(); err == nil { + t.Fatal("expected validation error") + } + if !strings.Contains(out.String(), "Usage:") { + t.Fatalf("expected usage for validation error, got: %q", out.String()) + } + }) + + 
t.Run("hides usage for runtime errors", func(t *testing.T) { + root := &cobra.Command{Use: "root", SilenceErrors: true} + cmd := &cobra.Command{ + Use: "runtime", + RunE: func(cmd *cobra.Command, args []string) error { + return errors.New("runtime failed") + }, + } + root.AddCommand(cmd) + silenceUsageAfterValidation(root) + + var out bytes.Buffer + root.SetOut(&out) + root.SetErr(&out) + root.SetArgs([]string{"runtime"}) + + if err := root.Execute(); err == nil { + t.Fatal("expected runtime error") + } + if strings.Contains(out.String(), "Usage:") { + t.Fatalf("did not expect usage for runtime error, got: %q", out.String()) + } + }) +} diff --git a/docs/internals/go-package-reference.md b/docs/internals/go-package-reference.md index 16bbf10..0a8b86b 100644 --- a/docs/internals/go-package-reference.md +++ b/docs/internals/go-package-reference.md @@ -2118,7 +2118,6 @@ kubectl clients, terminal output, and test doubles. - [`func Step(title string)`](#cli-core-func-step-title-string) - [`func Success(msg string)`](#cli-core-func-success-msg-string) - [`func SwapDefaultKubectlClient(c *KubectlClient) (restore func())`](#cli-core-func-swapdefaultkubectlclient-c-kubectlclient-restore-func) -- [`func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func())`](#cli-core-func-swapexeccommand-f-func-string-string-exec-cmd-restore-func) - [`func SwapExecExecutor(e Executor) (restore func())`](#cli-core-func-swapexecexecutor-e-executor-restore-func) - [`func Table(data [][]string)`](#cli-core-func-table-data-string) - [`func TableBoxed(data [][]string)`](#cli-core-func-tableboxed-data-string) @@ -2654,14 +2653,6 @@ func SwapDefaultKubectlClient(c *KubectlClient) (restore func()) ``` - -```text -func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func()) - SwapExecCommand replaces the exec.Command seam used by the default executor - (tests only). 
- -``` - ```text func SwapExecExecutor(e Executor) (restore func()) @@ -4080,10 +4071,10 @@ _No package overview is documented._ ### Index - [`Variables`](#cli-platform-status-variables) -- [`func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error)`](#cli-platform-status-func-analyticsnamespaceinstalled-clusterreachable-bool-bool-error) +- [`func AnalyticsNamespaceInstalled(kubectl core.KubectlRunner, clusterReachable bool) (bool, error)`](#cli-platform-status-func-analyticsnamespaceinstalled-kubectl-core-kubectlrunner-clusterreachable-bool-bool-error) - [`func AnalyticsStackRow(status, details string) []string`](#cli-platform-status-func-analyticsstackrow-status-details-string-string) -- [`func CheckClusterStatusQuiet() error`](#cli-platform-status-func-checkclusterstatusquiet-error) -- [`func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string`](#cli-platform-status-func-workloadstatusrow-workload-platformworkload-clusterreachable-bool-string) +- [`func CheckClusterStatusQuiet(kubectl core.KubectlRunner) error`](#cli-platform-status-func-checkclusterstatusquiet-kubectl-core-kubectlrunner-error) +- [`func WorkloadStatusRow(kubectl core.KubectlRunner, workload PlatformWorkload, clusterReachable bool) []string`](#cli-platform-status-func-workloadstatusrow-kubectl-core-kubectlrunner-workload-platformworkload-clusterreachable-bool-string) - [`type PlatformWorkload struct`](#cli-platform-status-type-platformworkload-struct) @@ -4113,9 +4104,9 @@ var DefaultPlatformStatusWorkloads = []PlatformWorkload{ ### Functions - + ```text -func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error) +func AnalyticsNamespaceInstalled(kubectl core.KubectlRunner, clusterReachable bool) (bool, error) AnalyticsNamespaceInstalled reports whether the analytics namespace exists. 
``` @@ -4128,16 +4119,16 @@ func AnalyticsStackRow(status, details string) []string ``` - + ```text -func CheckClusterStatusQuiet() error +func CheckClusterStatusQuiet(kubectl core.KubectlRunner) error CheckClusterStatusQuiet probes cluster connectivity without printing status. ``` - + ```text -func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string +func WorkloadStatusRow(kubectl core.KubectlRunner, workload PlatformWorkload, clusterReachable bool) []string WorkloadStatusRow renders one workload row for platform status tables. ``` diff --git a/internal/cli/cluster/manager.go b/internal/cli/cluster/manager.go index 2563914..11061ab 100644 --- a/internal/cli/cluster/manager.go +++ b/internal/cli/cluster/manager.go @@ -370,7 +370,7 @@ nodes: if dryRun { core.Info(fmt.Sprintf("[dry-run] would write kind config and run: kind create cluster --name %s --config ", clusterName)) core.Info("[dry-run] kind config that would be written:") - fmt.Print(config) + core.DefaultPrinter.Println(config) core.Success("Dry-run complete; no cluster created") return nil } diff --git a/internal/cli/core/exec.go b/internal/cli/core/exec.go index a0f331f..c4c65d9 100644 --- a/internal/cli/core/exec.go +++ b/internal/cli/core/exec.go @@ -11,16 +11,6 @@ import ( "strings" ) -// execCommand is a test seam for stubbing command creation in tests. -var execCommand = exec.Command - -// SwapExecCommand replaces the exec.Command seam used by the default executor (tests only). -func SwapExecCommand(f func(string, ...string) *exec.Cmd) (restore func()) { - prev := execCommand - execCommand = f - return func() { execCommand = prev } -} - // Command represents a command that can be executed. 
type Command interface { Output() ([]byte, error) @@ -58,7 +48,7 @@ func (osExecutor) Command(name string, args []string, validators ...ExecValidato return nil, err } } - return &execCmd{cmd: execCommand(name, args...)}, nil + return &execCmd{cmd: exec.Command(name, args...)}, nil // #nosec G204 -- callers pass ExecValidator checks before command construction. } var execExecutor Executor = osExecutor{} diff --git a/internal/cli/core/exec_test.go b/internal/cli/core/exec_test.go index 8797add..af744da 100644 --- a/internal/cli/core/exec_test.go +++ b/internal/cli/core/exec_test.go @@ -5,8 +5,11 @@ import ( "testing" ) -func TestExecCommand(t *testing.T) { - cmd := execCommand("echo", "hello") +func TestOSExecutorCommand(t *testing.T) { + cmd, err := osExecutor{}.Command("echo", []string{"hello"}) + if err != nil { + t.Fatalf("failed to create command: %v", err) + } out, err := cmd.Output() if err != nil { t.Fatalf("failed to execute command: %v", err) diff --git a/internal/cli/kube/file.go b/internal/cli/kube/file.go index c8f2716..34296e7 100644 --- a/internal/cli/kube/file.go +++ b/internal/cli/kube/file.go @@ -38,10 +38,11 @@ func WriteOutputFile(file string, data []byte) error { _ = f.Close() return fmt.Errorf("write output file: %w", io.ErrShortWrite) } - if err := f.Close(); err != nil { + if err := f.Chmod(0o600); err != nil { + _ = f.Close() return fmt.Errorf("write output file: %w", err) } - if err := os.Chmod(absPath, 0o600); err != nil { + if err := f.Close(); err != nil { return fmt.Errorf("write output file: %w", err) } return nil diff --git a/internal/cli/platformstatus/kubectl.go b/internal/cli/platformstatus/kubectl.go index c436969..a55e61f 100644 --- a/internal/cli/platformstatus/kubectl.go +++ b/internal/cli/platformstatus/kubectl.go @@ -8,8 +8,8 @@ import ( "mcp-runtime/internal/cli/kubeerr" ) -func runKubectlCombinedOutput(args []string) (string, error) { - cmd, err := core.DefaultKubectlClient().CommandArgs(args) +func 
runKubectlCombinedOutput(kubectl core.KubectlRunner, args []string) (string, error) { + cmd, err := kubectl.CommandArgs(args) if err != nil { return "", err } @@ -18,8 +18,8 @@ func runKubectlCombinedOutput(args []string) (string, error) { } // CheckClusterStatusQuiet probes cluster connectivity without printing status. -func CheckClusterStatusQuiet() error { - output, err := runKubectlCombinedOutput([]string{"cluster-info"}) +func CheckClusterStatusQuiet(kubectl core.KubectlRunner) error { + output, err := runKubectlCombinedOutput(kubectl, []string{"cluster-info"}) if err == nil { return nil } diff --git a/internal/cli/platformstatus/workloads.go b/internal/cli/platformstatus/workloads.go index 7fec6e7..5919ef1 100644 --- a/internal/cli/platformstatus/workloads.go +++ b/internal/cli/platformstatus/workloads.go @@ -36,12 +36,12 @@ var DefaultPlatformStatusWorkloads = []PlatformWorkload{ } // AnalyticsNamespaceInstalled reports whether the analytics namespace exists. -func AnalyticsNamespaceInstalled(clusterReachable bool) (bool, error) { +func AnalyticsNamespaceInstalled(kubectl core.KubectlRunner, clusterReachable bool) (bool, error) { if !clusterReachable { return false, nil } - output, err := runKubectlCombinedOutput([]string{"get", "namespace", core.DefaultAnalyticsNamespace, "-o", "jsonpath={.metadata.name}"}) + output, err := runKubectlCombinedOutput(kubectl, []string{"get", "namespace", core.DefaultAnalyticsNamespace, "-o", "jsonpath={.metadata.name}"}) if err == nil { return strings.TrimSpace(output) == core.DefaultAnalyticsNamespace, nil } @@ -64,23 +64,23 @@ func AnalyticsStackRow(status, details string) []string { } // WorkloadStatusRow renders one workload row for platform status tables. 
-func WorkloadStatusRow(workload PlatformWorkload, clusterReachable bool) []string { +func WorkloadStatusRow(kubectl core.KubectlRunner, workload PlatformWorkload, clusterReachable bool) []string { resource := fmt.Sprintf("%s/%s", workload.Kind, workload.Name) if !clusterReachable { return []string{workload.Component, workload.Namespace, resource, core.Red("ERROR"), "Cluster unavailable"} } - st, details := workloadReadinessStatus(workload) + st, details := workloadReadinessStatus(kubectl, workload) return []string{workload.Component, workload.Namespace, resource, st, details} } -func workloadReadinessStatus(workload PlatformWorkload) (string, string) { +func workloadReadinessStatus(kubectl core.KubectlRunner, workload PlatformWorkload) (string, string) { jsonPath, err := workloadReadyJSONPath(workload.Kind) if err != nil { return core.Red("ERROR"), err.Error() } - output, cmdErr := runKubectlCombinedOutput([]string{ + output, cmdErr := runKubectlCombinedOutput(kubectl, []string{ "get", workload.Kind, workload.Name, "-n", workload.Namespace, "-o", "jsonpath=" + jsonPath, diff --git a/internal/cli/sentinel/manager.go b/internal/cli/sentinel/manager.go index 72e0fc2..c021461 100644 --- a/internal/cli/sentinel/manager.go +++ b/internal/cli/sentinel/manager.go @@ -163,14 +163,14 @@ func (m *SentinelManager) ShowSentinelStatus() error { tableData := [][]string{{"Component", "Namespace", "Resource", "Status", "Details"}} clusterReachable := true - if err := platformstatus.CheckClusterStatusQuiet(); err != nil { + if err := platformstatus.CheckClusterStatusQuiet(m.kubectl); err != nil { clusterReachable = false tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Red("ERROR"), err.Error())) core.TableBoxed(tableData) return nil } - installed, err := platformstatus.AnalyticsNamespaceInstalled(clusterReachable) + installed, err := platformstatus.AnalyticsNamespaceInstalled(m.kubectl, clusterReachable) switch { case err != nil: tableData = append(tableData, 
platformstatus.AnalyticsStackRow(core.Red("ERROR"), err.Error())) @@ -178,7 +178,7 @@ func (m *SentinelManager) ShowSentinelStatus() error { tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Yellow("SKIPPED"), "Namespace not found")) default: for _, workload := range platformstatus.DefaultPlatformStatusWorkloads { - tableData = append(tableData, platformstatus.WorkloadStatusRow(workload, true)) + tableData = append(tableData, platformstatus.WorkloadStatusRow(m.kubectl, workload, true)) } } diff --git a/internal/cli/status/platform_status_test.go b/internal/cli/status/platform_status_test.go index 506d5f6..3f376dd 100644 --- a/internal/cli/status/platform_status_test.go +++ b/internal/cli/status/platform_status_test.go @@ -3,6 +3,7 @@ package status_test import ( "bytes" "encoding/json" + "io" "os" "os/exec" "strings" @@ -22,6 +23,31 @@ type commandResponse struct { ExitCode int `json:"exitCode"` } +type helperProcessCommand struct { + cmd *exec.Cmd +} + +func (c helperProcessCommand) Output() ([]byte, error) { return c.cmd.Output() } +func (c helperProcessCommand) CombinedOutput() ([]byte, error) { return c.cmd.CombinedOutput() } +func (c helperProcessCommand) Run() error { return c.cmd.Run() } +func (c helperProcessCommand) SetStdout(w io.Writer) { c.cmd.Stdout = w } +func (c helperProcessCommand) SetStderr(w io.Writer) { c.cmd.Stderr = w } +func (c helperProcessCommand) SetStdin(r io.Reader) { c.cmd.Stdin = r } + +type helperProcessExecutor struct { + command func(string, ...string) *exec.Cmd +} + +func (e helperProcessExecutor) Command(name string, args []string, validators ...core.ExecValidator) (core.Command, error) { + spec := core.ExecSpec{Name: name, Args: args} + for _, validate := range validators { + if err := validate(spec); err != nil { + return nil, err + } + } + return helperProcessCommand{cmd: e.command(name, args...)}, nil +} + func commandKey(name string, args ...string) string { return strings.Join(append([]string{name}, 
args...), " ") } @@ -124,8 +150,9 @@ func runShowPlatformStatusWithCalls(t *testing.T, responses map[string]commandRe t.Helper() logger := zap.NewNop() - restoreExec := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, calls)) - t.Cleanup(restoreExec) + kubectl := core.NewTestKubectlClient(helperProcessExecutor{ + command: fakeExecCommand(t, exec.Command, responses, calls), + }) var buf bytes.Buffer pterm.SetDefaultOutput(&buf) @@ -136,7 +163,7 @@ func runShowPlatformStatusWithCalls(t *testing.T, responses map[string]commandRe pterm.EnableStyling() }) - if err := status.ShowPlatformStatus(logger); err != nil { + if err := status.ShowPlatformStatus(logger, kubectl); err != nil { t.Fatalf("ShowPlatformStatus() unexpected error = %v", err) } @@ -311,10 +338,11 @@ func TestAnalyticsNamespaceInstalledRequiresExactMatch(t *testing.T) { }, } - restore := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, nil)) - t.Cleanup(restore) + kubectl := core.NewTestKubectlClient(helperProcessExecutor{ + command: fakeExecCommand(t, exec.Command, responses, nil), + }) - installed, err := platformstatus.AnalyticsNamespaceInstalled(true) + installed, err := platformstatus.AnalyticsNamespaceInstalled(kubectl, true) if err != nil { t.Fatalf("AnalyticsNamespaceInstalled() unexpected error = %v", err) } @@ -332,10 +360,11 @@ func TestAnalyticsNamespaceInstalledReturnsErrorOnEmptyFailure(t *testing.T) { }, } - restore := core.SwapExecCommand(fakeExecCommand(t, exec.Command, responses, nil)) - t.Cleanup(restore) + kubectl := core.NewTestKubectlClient(helperProcessExecutor{ + command: fakeExecCommand(t, exec.Command, responses, nil), + }) - installed, err := platformstatus.AnalyticsNamespaceInstalled(true) + installed, err := platformstatus.AnalyticsNamespaceInstalled(kubectl, true) if err == nil { t.Fatal("expected empty namespace probe failure to surface an error") } diff --git a/internal/cli/status/status.go b/internal/cli/status/status.go index 
679e72b..e0a6faf 100644 --- a/internal/cli/status/status.go +++ b/internal/cli/status/status.go @@ -13,11 +13,15 @@ import ( ) type manager struct { - logger *zap.Logger + logger *zap.Logger + kubectl core.KubectlRunner } func newManager(runtime *core.Runtime) *manager { - return &manager{logger: runtime.Logger()} + return &manager{ + logger: runtime.Logger(), + kubectl: runtime.KubectlRunner(), + } } // New returns the status command. @@ -28,13 +32,13 @@ func New(runtime *core.Runtime) *cobra.Command { Short: "Show platform status", Long: "Show the overall status of the MCP platform", RunE: func(cmd *cobra.Command, args []string) error { - return ShowPlatformStatus(mgr.logger) + return ShowPlatformStatus(mgr.logger, mgr.kubectl) }, } } // ShowPlatformStatus prints the MCP platform status table and MCP server list. -func ShowPlatformStatus(logger *zap.Logger) error { +func ShowPlatformStatus(logger *zap.Logger, kubectl core.KubectlRunner) error { core.Header("MCP Platform Status") core.DefaultPrinter.Println() @@ -45,7 +49,7 @@ func ShowPlatformStatus(logger *zap.Logger) error { clusterReachable := true clusterStatus := core.Green("OK") clusterDetails := "Connected" - if err := platformstatus.CheckClusterStatusQuiet(); err != nil { + if err := platformstatus.CheckClusterStatusQuiet(kubectl); err != nil { clusterReachable = false clusterStatus = core.Red("ERROR") clusterDetails = err.Error() @@ -65,17 +69,19 @@ func ShowPlatformStatus(logger *zap.Logger) error { tableData = append(tableData, []string{"Registry", "-", "external", core.Cyan("EXTERNAL"), "Configured: " + extRegistry.URL}) default: tableData = append(tableData, platformstatus.WorkloadStatusRow( + kubectl, platformstatus.PlatformWorkload{Component: "Registry", Namespace: core.NamespaceRegistry, Kind: "deployment", Name: core.RegistryDeploymentName}, clusterReachable, )) } tableData = append(tableData, platformstatus.WorkloadStatusRow( + kubectl, platformstatus.PlatformWorkload{Component: "Operator", 
Namespace: core.NamespaceMCPRuntime, Kind: "deployment", Name: core.OperatorDeploymentName}, clusterReachable, )) - switch installed, analyticsErr := platformstatus.AnalyticsNamespaceInstalled(clusterReachable); { + switch installed, analyticsErr := platformstatus.AnalyticsNamespaceInstalled(kubectl, clusterReachable); { case !clusterReachable: tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Red("ERROR"), "Cluster unavailable")) case analyticsErr != nil: @@ -84,7 +90,7 @@ func ShowPlatformStatus(logger *zap.Logger) error { tableData = append(tableData, platformstatus.AnalyticsStackRow(core.Yellow("SKIPPED"), "Namespace not found")) default: for _, workload := range platformstatus.DefaultPlatformStatusWorkloads { - tableData = append(tableData, platformstatus.WorkloadStatusRow(workload, true)) + tableData = append(tableData, platformstatus.WorkloadStatusRow(kubectl, workload, true)) } } @@ -100,7 +106,7 @@ func ShowPlatformStatus(logger *zap.Logger) error { return nil } - cmd, err := core.DefaultKubectlClient().CommandArgs([]string{"get", "mcpserver", "--all-namespaces", "-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,IMAGE:.spec.image,REPLICAS:.spec.replicas,PATH:.spec.ingressPath"}) + cmd, err := kubectl.CommandArgs([]string{"get", "mcpserver", "--all-namespaces", "-o", "custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,IMAGE:.spec.image,REPLICAS:.spec.replicas,PATH:.spec.ingressPath"}) if err != nil { core.Warn("Failed to list MCP servers: " + err.Error()) } else { diff --git a/test/e2e/kind.sh b/test/e2e/kind.sh index d59e1d4..b9c449a 100644 --- a/test/e2e/kind.sh +++ b/test/e2e/kind.sh @@ -63,6 +63,7 @@ OAUTH_UPSTREAM_PORT="${OAUTH_UPSTREAM_PORT:-18097}" PYTHON_EXAMPLE_PROXY_PORT="${PYTHON_EXAMPLE_PROXY_PORT:-18098}" RUST_EXAMPLE_PROXY_PORT="${RUST_EXAMPLE_PROXY_PORT:-18099}" GO_EXAMPLE_PROXY_PORT="${GO_EXAMPLE_PROXY_PORT:-18102}" +CLI_SENTINEL_API_PORT="${CLI_SENTINEL_API_PORT:-18103}" 
API_METRICS_PORT="${API_METRICS_PORT:-19090}" INGEST_METRICS_PORT="${INGEST_METRICS_PORT:-19091}" PROCESSOR_METRICS_PORT="${PROCESSOR_METRICS_PORT:-19092}" @@ -286,6 +287,26 @@ assert_file_contains() { grep -F -q -- "${needle}" "${file}" } +run_cli_allowing_cert_prereq_failure() { + local name="$1" + shift + local log_file="${WORKDIR}/${name}.log" + + if "$@" >"${log_file}" 2>&1; then + echo "[cli][pass] ${name}" + return 0 + fi + + if grep -E -q "cert-manager|Certificate not found|Certificate not ready|certificate not ready|ClusterIssuer .* not found|CA secret .* not found" "${log_file}"; then + echo "[cli][pass] ${name} reached expected missing TLS prerequisite path" + return 0 + fi + + echo "[cli][fail] ${name}" >&2 + cat "${log_file}" >&2 + exit 1 +} + decode_base64() { if base64 --help 2>/dev/null | grep -q -- "--decode"; then base64 --decode @@ -1626,6 +1647,11 @@ cp "${KUBECONFIG_FILE}" "${HOME}/.kube/config" echo "[build] rebuilding CLI" GOCACHE="${PROJECT_ROOT}/.gocache" go build -o bin/mcp-runtime ./cmd/mcp-runtime +echo "[cli] checking static command output" +./bin/mcp-runtime --version >/dev/null +./bin/mcp-runtime help >/dev/null +./bin/mcp-runtime completion bash >/dev/null + MCP_SMOKE_SOURCE_DIR="$(resolve_mcp_smoke_dir)" MCP_SMOKE_BIN="${WORKDIR}/mcp-smoke-agent" MCP_SMOKE_GOPATH="${WORKDIR}/mcp-smoke-gopath" @@ -1683,6 +1709,45 @@ echo "[cli] checking platform status commands" ./bin/mcp-runtime registry status ./bin/mcp-runtime registry info +echo "[cli] checking auth, bootstrap, cluster, registry, and sentinel commands" +MCP_RUNTIME_CONFIG_DIR="${WORKDIR}/auth-config" ./bin/mcp-runtime auth status +MCP_RUNTIME_CONFIG_DIR="${WORKDIR}/auth-config" ./bin/mcp-runtime auth login \ + --api-url "http://127.0.0.1:${SENTINEL_PORT}" \ + --token e2e-token \ + --skip-verify \ + --registry-host "${LOCAL_REGISTRY_PUSH_HOST}" +MCP_RUNTIME_CONFIG_DIR="${WORKDIR}/auth-config" ./bin/mcp-runtime auth status +MCP_RUNTIME_CONFIG_DIR="${WORKDIR}/auth-config" 
./bin/mcp-runtime auth logout + +./bin/mcp-runtime bootstrap --provider generic +./bin/mcp-runtime cluster init +./bin/mcp-runtime cluster config --ingress none +./bin/mcp-runtime cluster provision \ + --provider kind \ + --name "${CLUSTER_NAME}-dry-run" \ + --nodes 1 \ + --dry-run >"${WORKDIR}/cluster-provision-dry-run.txt" +./bin/mcp-runtime cluster doctor +run_cli_allowing_cert_prereq_failure cluster-cert-status ./bin/mcp-runtime cluster cert status +run_cli_allowing_cert_prereq_failure cluster-cert-apply-dry-run ./bin/mcp-runtime cluster cert apply --dry-run +run_cli_allowing_cert_prereq_failure cluster-cert-wait ./bin/mcp-runtime cluster cert wait --timeout 1s +./bin/mcp-runtime registry provision \ + --url "${LOCAL_REGISTRY_PUSH_HOST}" \ + --username e2e \ + --password e2e \ + --dry-run >"${WORKDIR}/registry-provision-dry-run.txt" +./bin/mcp-runtime sentinel status +./bin/mcp-runtime sentinel events >"${WORKDIR}/sentinel-events.txt" +./bin/mcp-runtime sentinel logs api --tail 20 >"${WORKDIR}/sentinel-api-logs.txt" +./bin/mcp-runtime sentinel port-forward api \ + --port "${CLI_SENTINEL_API_PORT}" \ + --address 127.0.0.1 >"${WORKDIR}/sentinel-cli-port-forward.log" 2>&1 & +_cli_pf_pid="$!" +PIDS+=("${_cli_pf_pid}") +wait_port "${CLI_SENTINEL_API_PORT}" 30 +kill "${_cli_pf_pid}" >/dev/null 2>&1 || true +wait "${_cli_pf_pid}" 2>/dev/null || true + API_KEY="$(kubectl get secret mcp-sentinel-secrets -n mcp-sentinel -o jsonpath='{.data.API_KEYS}' | decode_base64 | cut -d',' -f1)" if [[ -z "${API_KEY}" ]]; then echo "[error] failed to resolve mcp-sentinel API key from secret" >&2 @@ -1775,6 +1840,30 @@ if ! 
kubectl rollout status "deploy/${SERVER_NAME}" -n mcp-servers --timeout=180 fi wait_for_server_ready +echo "[cli] checking server mutation helpers" +SERVER_EXPORT_FILE="${WORKDIR}/${SERVER_NAME}-export.yaml" +./bin/mcp-runtime server --use-kube apply --file "${MANIFEST_DIR}/${SERVER_NAME}.yaml" +./bin/mcp-runtime server --use-kube export "${SERVER_NAME}" \ + --namespace mcp-servers \ + --file "${SERVER_EXPORT_FILE}" +assert_file_contains "name: ${SERVER_NAME}" "${SERVER_EXPORT_FILE}" +./bin/mcp-runtime server --use-kube patch "${SERVER_NAME}" \ + --namespace mcp-servers \ + --type merge \ + --patch '{"metadata":{"annotations":{"e2e.mcpruntime.org/cli-patch":"true"}}}' +./bin/mcp-runtime server --use-kube status --namespace mcp-servers >"${WORKDIR}/server-status.txt" +assert_file_contains "${SERVER_NAME}" "${WORKDIR}/server-status.txt" +./bin/mcp-runtime server --use-kube logs "${SERVER_NAME}" \ + --namespace mcp-servers \ + --tail 20 >"${WORKDIR}/server-logs.txt" +TEMP_CLI_SERVER="${SERVER_NAME}-cli-create" +./bin/mcp-runtime server --use-kube create "${TEMP_CLI_SERVER}" \ + --namespace mcp-servers \ + --image docker.io/library/nginx \ + --tag 1.27-alpine +./bin/mcp-runtime server --use-kube delete "${TEMP_CLI_SERVER}" --namespace mcp-servers +kubectl wait --for=delete "mcpserver/${TEMP_CLI_SERVER}" -n mcp-servers --timeout=120s || true + echo "[deploy] deploying official SDK example MCP servers" deploy_example_server_via_pipeline \ "${PYTHON_EXAMPLE_SERVER_NAME}" \ @@ -1798,7 +1887,7 @@ deploy_example_server_via_pipeline \ echo "[cli] checking server commands" # --- server list: assert the primary server appears --- -_cli_list_out="$(./bin/mcp-runtime server list --namespace mcp-servers 2>&1)" +_cli_list_out="$(./bin/mcp-runtime server --use-kube list --namespace mcp-servers 2>&1)" if ! 
printf '%s\n' "${_cli_list_out}" | grep -qF "${SERVER_NAME}"; then echo "[cli][fail] 'server list' output does not contain ${SERVER_NAME}" >&2 printf '%s\n' "${_cli_list_out}" >&2 @@ -1807,7 +1896,7 @@ fi echo "[cli][pass] server list contains ${SERVER_NAME}" # --- server get: capture YAML and assert readiness fields --- -_cli_get_out="$(./bin/mcp-runtime server get "${SERVER_NAME}" --namespace mcp-servers 2>&1)" +_cli_get_out="$(./bin/mcp-runtime server --use-kube get "${SERVER_NAME}" --namespace mcp-servers 2>&1)" _cli_get_file="${WORKDIR}/${SERVER_NAME}-get.yaml" printf '%s\n' "${_cli_get_out}" >"${_cli_get_file}" @@ -1900,7 +1989,7 @@ spec: - name: upper decision: allow EOF -(cd "${WORKDIR}" && "${PROJECT_ROOT}/bin/mcp-runtime" access grant apply --file access-grant.yaml) +(cd "${WORKDIR}" && "${PROJECT_ROOT}/bin/mcp-runtime" access --use-kube grant apply --file access-grant.yaml) echo "[policy] applying low-trust session via CLI" cat >"${WORKDIR}/access-session.yaml" <"${WORKDIR}/server-policy.json" +assert_file_contains "${SESSION_ID}" "${WORKDIR}/server-policy.json" if scenario_selected "governance"; then echo "[cli] checking access management commands" - ./bin/mcp-runtime access grant list --namespace mcp-servers >"${WORKDIR}/access-grant-list.txt" + ./bin/mcp-runtime access --use-kube grant list --namespace mcp-servers >"${WORKDIR}/access-grant-list.txt" assert_file_contains "${SERVER_NAME}-grant" "${WORKDIR}/access-grant-list.txt" - ./bin/mcp-runtime access grant get "${SERVER_NAME}-grant" --namespace mcp-servers >"${WORKDIR}/access-grant-get.yaml" + ./bin/mcp-runtime access --use-kube grant get "${SERVER_NAME}-grant" --namespace mcp-servers >"${WORKDIR}/access-grant-get.yaml" assert_file_contains "maxTrust: high" "${WORKDIR}/access-grant-get.yaml" - ./bin/mcp-runtime access session list --namespace mcp-servers >"${WORKDIR}/access-session-list.txt" + ./bin/mcp-runtime access --use-kube session list --namespace mcp-servers 
>"${WORKDIR}/access-session-list.txt" assert_file_contains "${SESSION_ID}" "${WORKDIR}/access-session-list.txt" - ./bin/mcp-runtime access session get "${SESSION_ID}" --namespace mcp-servers >"${WORKDIR}/access-session-get.yaml" + ./bin/mcp-runtime access --use-kube session get "${SESSION_ID}" --namespace mcp-servers >"${WORKDIR}/access-session-get.yaml" assert_file_contains "consentedTrust: low" "${WORKDIR}/access-session-get.yaml" + + cat >"${WORKDIR}/access-temp.yaml" <