diff --git a/README.md b/README.md index 744a4d73..104e7e79 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,8 @@ [![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/containers/kubernetes-mcp-server?sort=semver)](https://github.com/containers/kubernetes-mcp-server/releases/latest) [![Build](https://github.com/containers/kubernetes-mcp-server/actions/workflows/build.yaml/badge.svg)](https://github.com/containers/kubernetes-mcp-server/actions/workflows/build.yaml) +[![Trust Score](https://archestra.ai/mcp-catalog/api/badge/quality/manusa/kubernetes-mcp-server)](https://archestra.ai/mcp-catalog/manusa__kubernetes-mcp-server) + [✨ Features](#features) | [🚀 Getting Started](#getting-started) | [🎥 Demos](#demos) | [⚙️ Configuration](#configuration) | [🛠️ Tools](#tools) | [🧑‍💻 Development](#development) https://github.com/user-attachments/assets/be2b67b3-fc1c-4d11-ae46-93deba8ed98e diff --git a/docs 2/images/kubernetes-mcp-server-github-copilot.jpg b/docs 2/images/kubernetes-mcp-server-github-copilot.jpg new file mode 100755 index 00000000..f27094a2 Binary files /dev/null and b/docs 2/images/kubernetes-mcp-server-github-copilot.jpg differ diff --git a/docs 2/images/vibe-coding.jpg b/docs 2/images/vibe-coding.jpg new file mode 100644 index 00000000..5240edfb Binary files /dev/null and b/docs 2/images/vibe-coding.jpg differ diff --git a/npm 2/kubernetes-mcp-server-windows-amd64/package.json b/npm 2/kubernetes-mcp-server-windows-amd64/package.json new file mode 100644 index 00000000..04b5d8ef --- /dev/null +++ b/npm 2/kubernetes-mcp-server-windows-amd64/package.json @@ -0,0 +1,11 @@ +{ + "name": "kubernetes-mcp-server-windows-amd64", + "version": "0.0.0", + "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "os": [ + "win32" + ], + "cpu": [ + "x64" + ] +} diff --git a/npm 2/kubernetes-mcp-server-windows-arm64/package.json b/npm 2/kubernetes-mcp-server-windows-arm64/package.json new file mode 100644 index 
00000000..38aa06f7 --- /dev/null +++ b/npm 2/kubernetes-mcp-server-windows-arm64/package.json @@ -0,0 +1,11 @@ +{ + "name": "kubernetes-mcp-server-windows-arm64", + "version": "0.0.0", + "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "os": [ + "win32" + ], + "cpu": [ + "arm64" + ] +} diff --git a/pkg 2/config/config.go b/pkg 2/config/config.go new file mode 100644 index 00000000..970d8753 --- /dev/null +++ b/pkg 2/config/config.go @@ -0,0 +1,51 @@ +package config + +import ( + "os" + + "github.com/BurntSushi/toml" +) + +// StaticConfig is the configuration for the server. +// It allows to configure server specific settings and tools to be enabled or disabled. +type StaticConfig struct { + DeniedResources []GroupVersionKind `toml:"denied_resources"` + + LogLevel int `toml:"log_level,omitempty"` + Port string `toml:"port,omitempty"` + SSEBaseURL string `toml:"sse_base_url,omitempty"` + KubeConfig string `toml:"kubeconfig,omitempty"` + ListOutput string `toml:"list_output,omitempty"` + // When true, expose only tools annotated with readOnlyHint=true + ReadOnly bool `toml:"read_only,omitempty"` + // When true, disable tools annotated with destructiveHint=true + DisableDestructive bool `toml:"disable_destructive,omitempty"` + EnabledTools []string `toml:"enabled_tools,omitempty"` + DisabledTools []string `toml:"disabled_tools,omitempty"` + RequireOAuth bool `toml:"require_oauth,omitempty"` + AuthorizationURL string `toml:"authorization_url,omitempty"` + JwksURL string `toml:"jwks_url,omitempty"` + CertificateAuthority string `toml:"certificate_authority,omitempty"` + ServerURL string `toml:"server_url,omitempty"` +} + +type GroupVersionKind struct { + Group string `toml:"group"` + Version string `toml:"version"` + Kind string `toml:"kind,omitempty"` +} + +// ReadConfig reads the toml file and returns the StaticConfig. 
+func ReadConfig(configPath string) (*StaticConfig, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + var config *StaticConfig + err = toml.Unmarshal(configData, &config) + if err != nil { + return nil, err + } + return config, nil +} diff --git a/pkg 2/config/config_test.go b/pkg 2/config/config_test.go new file mode 100644 index 00000000..1f52361d --- /dev/null +++ b/pkg 2/config/config_test.go @@ -0,0 +1,156 @@ +package config + +import ( + "os" + "path/filepath" + "strings" + "testing" +) + +func TestReadConfigMissingFile(t *testing.T) { + config, err := ReadConfig("non-existent-config.toml") + t.Run("returns error for missing file", func(t *testing.T) { + if err == nil { + t.Fatal("Expected error for missing file, got nil") + } + if config != nil { + t.Fatalf("Expected nil config for missing file, got %v", config) + } + }) +} + +func TestReadConfigInvalid(t *testing.T) { + invalidConfigPath := writeConfig(t, ` +[[denied_resources]] +group = "apps" +version = "v1" +kind = "Deployment" +[[denied_resources]] +group = "rbac.authorization.k8s.io" +version = "v1" +kind = "Role +`) + + config, err := ReadConfig(invalidConfigPath) + t.Run("returns error for invalid file", func(t *testing.T) { + if err == nil { + t.Fatal("Expected error for invalid file, got nil") + } + if config != nil { + t.Fatalf("Expected nil config for invalid file, got %v", config) + } + }) + t.Run("error message contains toml error with line number", func(t *testing.T) { + expectedError := "toml: line 9" + if err != nil && !strings.HasPrefix(err.Error(), expectedError) { + t.Fatalf("Expected error message '%s' to contain line number, got %v", expectedError, err) + } + }) +} + +func TestReadConfigValid(t *testing.T) { + validConfigPath := writeConfig(t, ` +log_level = 1 +port = "9999" +sse_base_url = "https://example.com" +kubeconfig = "./path/to/config" +list_output = "yaml" +read_only = true +disable_destructive = true + +denied_resources = [ + 
{group = "apps", version = "v1", kind = "Deployment"}, + {group = "rbac.authorization.k8s.io", version = "v1", kind = "Role"} +] + +enabled_tools = ["configuration_view", "events_list", "namespaces_list", "pods_list", "resources_list", "resources_get", "resources_create_or_update", "resources_delete"] +disabled_tools = ["pods_delete", "pods_top", "pods_log", "pods_run", "pods_exec"] +`) + + config, err := ReadConfig(validConfigPath) + t.Run("reads and unmarshalls file", func(t *testing.T) { + if err != nil { + t.Fatalf("ReadConfig returned an error for a valid file: %v", err) + } + if config == nil { + t.Fatal("ReadConfig returned a nil config for a valid file") + } + }) + t.Run("denied resources are parsed correctly", func(t *testing.T) { + if len(config.DeniedResources) != 2 { + t.Fatalf("Expected 2 denied resources, got %d", len(config.DeniedResources)) + } + if config.DeniedResources[0].Group != "apps" || + config.DeniedResources[0].Version != "v1" || + config.DeniedResources[0].Kind != "Deployment" { + t.Errorf("Unexpected denied resources: %v", config.DeniedResources[0]) + } + }) + t.Run("log_level parsed correctly", func(t *testing.T) { + if config.LogLevel != 1 { + t.Fatalf("Unexpected log level: %v", config.LogLevel) + } + }) + t.Run("port parsed correctly", func(t *testing.T) { + if config.Port != "9999" { + t.Fatalf("Unexpected port value: %v", config.Port) + } + }) + t.Run("sse_base_url parsed correctly", func(t *testing.T) { + if config.SSEBaseURL != "https://example.com" { + t.Fatalf("Unexpected sse_base_url value: %v", config.SSEBaseURL) + } + }) + t.Run("kubeconfig parsed correctly", func(t *testing.T) { + if config.KubeConfig != "./path/to/config" { + t.Fatalf("Unexpected kubeconfig value: %v", config.KubeConfig) + } + }) + t.Run("list_output parsed correctly", func(t *testing.T) { + if config.ListOutput != "yaml" { + t.Fatalf("Unexpected list_output value: %v", config.ListOutput) + } + }) + t.Run("read_only parsed correctly", func(t *testing.T) { 
+ if !config.ReadOnly { + t.Fatalf("Unexpected read-only mode: %v", config.ReadOnly) + } + }) + t.Run("disable_destructive parsed correctly", func(t *testing.T) { + if !config.DisableDestructive { + t.Fatalf("Unexpected disable destructive: %v", config.DisableDestructive) + } + }) + t.Run("enabled_tools parsed correctly", func(t *testing.T) { + if len(config.EnabledTools) != 8 { + t.Fatalf("Unexpected enabled tools: %v", config.EnabledTools) + + } + for i, tool := range []string{"configuration_view", "events_list", "namespaces_list", "pods_list", "resources_list", "resources_get", "resources_create_or_update", "resources_delete"} { + if config.EnabledTools[i] != tool { + t.Errorf("Expected enabled tool %d to be %s, got %s", i, tool, config.EnabledTools[i]) + } + } + }) + t.Run("disabled_tools parsed correctly", func(t *testing.T) { + if len(config.DisabledTools) != 5 { + t.Fatalf("Unexpected disabled tools: %v", config.DisabledTools) + } + for i, tool := range []string{"pods_delete", "pods_top", "pods_log", "pods_run", "pods_exec"} { + if config.DisabledTools[i] != tool { + t.Errorf("Expected disabled tool %d to be %s, got %s", i, tool, config.DisabledTools[i]) + } + } + }) +} + +func writeConfig(t *testing.T, content string) string { + t.Helper() + tempDir := t.TempDir() + path := filepath.Join(tempDir, "config.toml") + err := os.WriteFile(path, []byte(content), 0644) + if err != nil { + t.Fatalf("Failed to write config file %s: %v", path, err) + } + return path +} diff --git a/pkg 2/helm/helm.go b/pkg 2/helm/helm.go new file mode 100644 index 00000000..186b50df --- /dev/null +++ b/pkg 2/helm/helm.go @@ -0,0 +1,142 @@ +package helm + +import ( + "context" + "fmt" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/registry" + "helm.sh/helm/v3/pkg/release" + "k8s.io/cli-runtime/pkg/genericclioptions" + "log" + "sigs.k8s.io/yaml" + "time" +) + +type Kubernetes interface { + 
genericclioptions.RESTClientGetter + NamespaceOrDefault(namespace string) string +} + +type Helm struct { + kubernetes Kubernetes +} + +// NewHelm creates a new Helm instance +func NewHelm(kubernetes Kubernetes) *Helm { + return &Helm{kubernetes: kubernetes} +} + +func (h *Helm) Install(ctx context.Context, chart string, values map[string]interface{}, name string, namespace string) (string, error) { + cfg, err := h.newAction(h.kubernetes.NamespaceOrDefault(namespace), false) + if err != nil { + return "", err + } + install := action.NewInstall(cfg) + if name == "" { + install.GenerateName = true + install.ReleaseName, _, _ = install.NameAndChart([]string{chart}) + } else { + install.ReleaseName = name + } + install.Namespace = h.kubernetes.NamespaceOrDefault(namespace) + install.Wait = true + install.Timeout = 5 * time.Minute + install.DryRun = false + + chartRequested, err := install.LocateChart(chart, cli.New()) + if err != nil { + return "", err + } + chartLoaded, err := loader.Load(chartRequested) + if err != nil { + return "", err + } + + installedRelease, err := install.RunWithContext(ctx, chartLoaded, values) + if err != nil { + return "", err + } + ret, err := yaml.Marshal(simplify(installedRelease)) + if err != nil { + return "", err + } + return string(ret), nil +} + +// List lists all the releases for the specified namespace (or the current namespace if none is specified). If allNamespaces is true, it lists releases across all namespaces.
+func (h *Helm) List(namespace string, allNamespaces bool) (string, error) { + cfg, err := h.newAction(namespace, allNamespaces) + if err != nil { + return "", err + } + list := action.NewList(cfg) + list.AllNamespaces = allNamespaces + releases, err := list.Run() + if err != nil { + return "", err + } else if len(releases) == 0 { + return "No Helm releases found", nil + } + ret, err := yaml.Marshal(simplify(releases...)) + if err != nil { + return "", err + } + return string(ret), nil +} + +func (h *Helm) Uninstall(name string, namespace string) (string, error) { + cfg, err := h.newAction(h.kubernetes.NamespaceOrDefault(namespace), false) + if err != nil { + return "", err + } + uninstall := action.NewUninstall(cfg) + uninstall.IgnoreNotFound = true + uninstall.Wait = true + uninstall.Timeout = 5 * time.Minute + uninstalledRelease, err := uninstall.Run(name) + if uninstalledRelease == nil && err == nil { + return fmt.Sprintf("Release %s not found", name), nil + } else if err != nil { + return "", err + } + return fmt.Sprintf("Uninstalled release %s %s", uninstalledRelease.Release.Name, uninstalledRelease.Info), nil +} + +func (h *Helm) newAction(namespace string, allNamespaces bool) (*action.Configuration, error) { + cfg := new(action.Configuration) + applicableNamespace := "" + if !allNamespaces { + applicableNamespace = h.kubernetes.NamespaceOrDefault(namespace) + } + registryClient, err := registry.NewClient() + if err != nil { + return nil, err + } + cfg.RegistryClient = registryClient + return cfg, cfg.Init(h.kubernetes, applicableNamespace, "", log.Printf) +} + +func simplify(release ...*release.Release) []map[string]interface{} { + ret := make([]map[string]interface{}, len(release)) + for i, r := range release { + ret[i] = map[string]interface{}{ + "name": r.Name, + "namespace": r.Namespace, + "revision": r.Version, + } + if r.Chart != nil { + ret[i]["chart"] = r.Chart.Metadata.Name + ret[i]["chartVersion"] = r.Chart.Metadata.Version + ret[i]["appVersion"] 
= r.Chart.Metadata.AppVersion + } + if r.Info != nil { + ret[i]["status"] = r.Info.Status.String() + if !r.Info.LastDeployed.IsZero() { + ret[i]["lastDeployed"] = r.Info.LastDeployed.Format(time.RFC1123Z) + } + } + } + return ret +} diff --git a/pkg 2/http/authorization.go b/pkg 2/http/authorization.go new file mode 100644 index 00000000..2b3152b8 --- /dev/null +++ b/pkg 2/http/authorization.go @@ -0,0 +1,179 @@ +package http + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/go-jose/go-jose/v4" + "github.com/go-jose/go-jose/v4/jwt" + "k8s.io/klog/v2" + + "github.com/containers/kubernetes-mcp-server/pkg/mcp" +) + +const ( + Audience = "kubernetes-mcp-server" +) + +// AuthorizationMiddleware validates the OAuth flow using Kubernetes TokenReview API +func AuthorizationMiddleware(requireOAuth bool, serverURL string, oidcProvider *oidc.Provider, mcpServer *mcp.Server) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == healthEndpoint || r.URL.Path == oauthProtectedResourceEndpoint { + next.ServeHTTP(w, r) + return + } + if !requireOAuth { + next.ServeHTTP(w, r) + return + } + + audience := Audience + if serverURL != "" { + audience = serverURL + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" || !strings.HasPrefix(authHeader, "Bearer ") { + klog.V(1).Infof("Authentication failed - missing or invalid bearer token: %s %s from %s", r.Method, r.URL.Path, r.RemoteAddr) + + if serverURL == "" { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", error="missing_token"`, audience)) + } else { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", resource_metadata="%s%s", error="missing_token"`, audience, serverURL, oauthProtectedResourceEndpoint)) + } + http.Error(w,
"Unauthorized: Bearer token required", http.StatusUnauthorized) + return + } + + token := strings.TrimPrefix(authHeader, "Bearer ") + + // Validate the token offline for a simple sanity check. + // Tokens missing the expected audience and expired tokens must be + // rejected early. + claims, err := ParseJWTClaims(token) + if err == nil && claims != nil { + err = claims.Validate(audience) + } + if err != nil { + klog.V(1).Infof("Authentication failed - JWT validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err) + + if serverURL == "" { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", error="invalid_token"`, audience)) + } else { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", resource_metadata="%s%s", error="invalid_token"`, audience, serverURL, oauthProtectedResourceEndpoint)) + } + http.Error(w, "Unauthorized: Invalid token", http.StatusUnauthorized) + return + } + + if oidcProvider != nil { + // If OIDC Provider is configured, this token must be validated against it. + if err := validateTokenWithOIDC(r.Context(), oidcProvider, token, audience); err != nil { + klog.V(1).Infof("Authentication failed - OIDC token validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err) + + if serverURL == "" { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", error="invalid_token"`, audience)) + } else { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", resource_metadata="%s%s", error="invalid_token"`, audience, serverURL, oauthProtectedResourceEndpoint)) + } + http.Error(w, "Unauthorized: Invalid token", http.StatusUnauthorized) + return + } + } + + // Scopes are likely to be used for authorization.
+ scopes := claims.GetScopes() + klog.V(2).Infof("JWT token validated - Scopes: %v", scopes) + r = r.WithContext(context.WithValue(r.Context(), mcp.TokenScopesContextKey, scopes)) + + // Now, there are a couple of options: + // 1. If there is no authorization url configured for this MCP Server, + // that means this token will be used against the Kubernetes API Server. + // So that we need to validate the token using Kubernetes TokenReview API beforehand. + // 2. If there is an authorization url configured for this MCP Server, + // that means up to this point, the token is validated against the OIDC Provider already. + // 2. a. If this is the only token in the headers, this validated token + // is supposed to be used against the Kubernetes API Server as well. Therefore, + // TokenReview request must succeed. + // 2. b. If this is not the only token in the headers, the token in here is used + // only for authentication and authorization. Therefore, we need to send TokenReview request + // with the other token in the headers (TODO: still need to validate aud and exp of this token separately). 
_, _, err = mcpServer.VerifyTokenAPIServer(r.Context(), token, audience) + if err != nil { + klog.V(1).Infof("Authentication failed - API Server token validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err) + + if serverURL == "" { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", error="invalid_token"`, audience)) + } else { + w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Bearer realm="Kubernetes MCP Server", audience="%s", resource_metadata="%s%s", error="invalid_token"`, audience, serverURL, oauthProtectedResourceEndpoint)) + } + http.Error(w, "Unauthorized: Invalid token", http.StatusUnauthorized) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +var allSignatureAlgorithms = []jose.SignatureAlgorithm{ + jose.EdDSA, + jose.HS256, + jose.HS384, + jose.HS512, + jose.RS256, + jose.RS384, + jose.RS512, + jose.ES256, + jose.ES384, + jose.ES512, + jose.PS256, + jose.PS384, + jose.PS512, +} + +type JWTClaims struct { + jwt.Claims + Scope string `json:"scope,omitempty"` +} + +func (c *JWTClaims) GetScopes() []string { + if c.Scope == "" { + return nil + } + return strings.Fields(c.Scope) +} + +// Validate checks that the JWT claims are valid and that the audience matches the expected one.
+func (c *JWTClaims) Validate(audience string) error { + return c.Claims.Validate(jwt.Expected{ + AnyAudience: jwt.Audience{audience}, + }) +} + +func ParseJWTClaims(token string) (*JWTClaims, error) { + tkn, err := jwt.ParseSigned(token, allSignatureAlgorithms) + if err != nil { + return nil, fmt.Errorf("failed to parse JWT token: %w", err) + } + claims := &JWTClaims{} + err = tkn.UnsafeClaimsWithoutVerification(claims) + return claims, err +} + +func validateTokenWithOIDC(ctx context.Context, provider *oidc.Provider, token, audience string) error { + verifier := provider.Verifier(&oidc.Config{ + ClientID: audience, + }) + + _, err := verifier.Verify(ctx, token) + if err != nil { + return fmt.Errorf("JWT token verification failed: %v", err) + } + + return nil +} diff --git a/pkg 2/http/authorization_test.go b/pkg 2/http/authorization_test.go new file mode 100644 index 00000000..31ad8041 --- /dev/null +++ b/pkg 2/http/authorization_test.go @@ -0,0 +1,220 @@ +package http + +import ( + "strings" + "testing" + + "github.com/go-jose/go-jose/v4/jwt" +) + +const ( + // https://jwt.io/#token=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoyNTM0MDIyOTcxOTksImlhdCI6MCwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJqdGkiOiI5OTIyMmQ1Ni0zNDBlLTRlYjYtODU4OC0yNjE0MTFmMzVkMjYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoiZGVmYXVsdCIsInVpZCI6ImVhY2I2YWQyLTgwYjctNDE3OS04NDNkLTkyZWIxZTZiYmJhNiJ9fSwibmJmIjowLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0In0.0363P6xGmWpU-O9TAVkcOd95lPXxhI-_k5NKbHGNQeL--B8XMAz2vC8hpKnyC6rKOGifRTSR2XNHx_5fjd7lEA // notsecret + tokenBasicNotExpired = 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoyNTM0MDIyOTcxOTksImlhdCI6MCwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJqdGkiOiI5OTIyMmQ1Ni0zNDBlLTRlYjYtODU4OC0yNjE0MTFmMzVkMjYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoiZGVmYXVsdCIsInVpZCI6ImVhY2I2YWQyLTgwYjctNDE3OS04NDNkLTkyZWIxZTZiYmJhNiJ9fSwibmJmIjowLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0In0.0363P6xGmWpU-O9TAVkcOd95lPXxhI-_k5NKbHGNQeL--B8XMAz2vC8hpKnyC6rKOGifRTSR2XNHx_5fjd7lEA" // notsecret + // https://jwt.io/#token=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoxLCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCJ9.USsuGLsB_7MwG9i0__cFkVVZa0djtmQpc8Vwi56GrapAgVAcyTfmae3s83XMDP5AwcFnxhYxLCfiZWRJri6GTA // notsecret + tokenBasicExpired = 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoxLCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCJ9.USsuGLsB_7MwG9i0__cFkVVZa0djtmQpc8Vwi56GrapAgVAcyTfmae3s83XMDP5AwcFnxhYxLCfiZWRJri6GTA" // notsecret + // https://jwt.io/#token=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoyNTM0MDIyOTcxOTksImlhdCI6MCwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJqdGkiOiI5OTIyMmQ1Ni0zNDBlLTRlYjYtODU4OC0yNjE0MTFmMzVkMjYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoiZGVmYXVsdCIsInVpZCI6ImVhY2I2YWQyLTgwYjctNDE3OS04NDNkLTkyZWIxZTZiYmJhNiJ9fSwibmJmIjowLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0Iiwic2NvcGUiOiJyZWFkIHdyaXRlIn0.vl5se9BuxoVDhvR7M5wGfkLoyMSYUiORMZVxl0CQ7jw3x53mZfGEkU_kkIVIl9Ui371qCCVVxdvuZPcAgbM6pQ // notsecret + tokenMultipleAudienceNotExpired = 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLW1jcC1zZXJ2ZXIiXSwiZXhwIjoyNTM0MDIyOTcxOTksImlhdCI6MCwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJqdGkiOiI5OTIyMmQ1Ni0zNDBlLTRlYjYtODU4OC0yNjE0MTFmMzVkMjYiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoiZGVmYXVsdCIsInVpZCI6ImVhY2I2YWQyLTgwYjctNDE3OS04NDNkLTkyZWIxZTZiYmJhNiJ9fSwibmJmIjowLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0Iiwic2NvcGUiOiJyZWFkIHdyaXRlIn0.vl5se9BuxoVDhvR7M5wGfkLoyMSYUiORMZVxl0CQ7jw3x53mZfGEkU_kkIVIl9Ui371qCCVVxdvuZPcAgbM6pQ" // notsecret +) + +func TestParseJWTClaimsPayloadValid(t *testing.T) { + basicClaims, err := ParseJWTClaims(tokenBasicNotExpired) + t.Run("Is parseable", func(t *testing.T) { + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if basicClaims == nil { + t.Fatal("expected claims, got nil") + } + }) + t.Run("Parses issuer", func(t *testing.T) { + if basicClaims.Issuer != "https://kubernetes.default.svc.cluster.local" { + t.Errorf("expected issuer 'https://kubernetes.default.svc.cluster.local', got %s", basicClaims.Issuer) + } + }) + t.Run("Parses audience", func(t *testing.T) { + expectedAudiences := []string{"https://kubernetes.default.svc.cluster.local", "kubernetes-mcp-server"} + for _, expected := range expectedAudiences { + if !basicClaims.Audience.Contains(expected) { + t.Errorf("expected audience to contain %s", expected) + } + } + }) + t.Run("Parses expiration", func(t *testing.T) { + if *basicClaims.Expiry != jwt.NumericDate(253402297199) { + t.Errorf("expected expiration 253402297199, got %d", basicClaims.Expiry) + } + }) + t.Run("Parses scope", func(t *testing.T) { + scopeClaims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if scopeClaims == nil { + 
t.Fatal("expected claims, got nil") + } + + scopes := scopeClaims.GetScopes() + + expectedScopes := []string{"read", "write"} + if len(scopes) != len(expectedScopes) { + t.Errorf("expected %d scopes, got %d", len(expectedScopes), len(scopes)) + } + for i, expectedScope := range expectedScopes { + if scopes[i] != expectedScope { + t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) + } + } + }) + t.Run("Parses expired token", func(t *testing.T) { + expiredClaims, err := ParseJWTClaims(tokenBasicExpired) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if *expiredClaims.Expiry != jwt.NumericDate(1) { + t.Errorf("expected expiration 1, got %d", basicClaims.Expiry) + } + }) +} + +func TestParseJWTClaimsPayloadInvalid(t *testing.T) { + t.Run("invalid token segments", func(t *testing.T) { + invalidToken := "header.payload.signature.extra" + + _, err := ParseJWTClaims(invalidToken) + if err == nil { + t.Fatal("expected error for invalid token segments, got nil") + } + + if !strings.Contains(err.Error(), "compact JWS format must have three parts") { + t.Errorf("expected invalid token segments error message, got %v", err) + } + }) + t.Run("invalid base64 payload", func(t *testing.T) { + invalidPayload := "invalid_base64" + tokenBasicNotExpired + + _, err := ParseJWTClaims(invalidPayload) + if err == nil { + t.Fatal("expected error for invalid base64, got nil") + } + + if !strings.Contains(err.Error(), "illegal base64 data") { + t.Errorf("expected decode error message, got %v", err) + } + }) +} + +func TestJWTTokenValidate(t *testing.T) { + t.Run("expired token returns error", func(t *testing.T) { + claims, err := ParseJWTClaims(tokenBasicExpired) + if err != nil { + t.Fatalf("expected no error for expired token parsing, got %v", err) + } + + err = claims.Validate("kubernetes-mcp-server") + if err == nil { + t.Fatalf("expected error for expired token, got nil") + } + + if !strings.Contains(err.Error(), "token is expired 
(exp)") { + t.Errorf("expected expiration error message, got %v", err) + } + }) + + t.Run("multiple audiences with correct one", func(t *testing.T) { + claims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired) + if err != nil { + t.Fatalf("expected no error for multiple audience token parsing, got %v", err) + } + if claims == nil { + t.Fatalf("expected claims to be returned, got nil") + } + + err = claims.Validate("kubernetes-mcp-server") + if err != nil { + t.Fatalf("expected no error for valid audience, got %v", err) + } + }) + + t.Run("multiple audiences with mismatch returns error", func(t *testing.T) { + claims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired) + if err != nil { + t.Fatalf("expected no error for multiple audience token parsing, got %v", err) + } + if claims == nil { + t.Fatalf("expected claims to be returned, got nil") + } + + err = claims.Validate("missing-audience") + if err == nil { + t.Fatalf("expected error for token with wrong audience, got nil") + } + + if !strings.Contains(err.Error(), "invalid audience claim (aud)") { + t.Errorf("expected audience mismatch error, got %v", err) + } + }) +} + +func TestJWTClaimsGetScopes(t *testing.T) { + t.Run("no scopes", func(t *testing.T) { + claims, err := ParseJWTClaims(tokenBasicExpired) + if err != nil { + t.Fatalf("expected no error for parsing token, got %v", err) + } + + if scopes := claims.GetScopes(); len(scopes) != 0 { + t.Errorf("expected no scopes, got %d", len(scopes)) + } + }) + t.Run("single scope", func(t *testing.T) { + claims := &JWTClaims{ + Scope: "read", + } + scopes := claims.GetScopes() + expected := []string{"read"} + + if len(scopes) != 1 { + t.Errorf("expected 1 scope, got %d", len(scopes)) + } + if scopes[0] != expected[0] { + t.Errorf("expected scope 'read', got '%s'", scopes[0]) + } + }) + + t.Run("multiple scopes", func(t *testing.T) { + claims := &JWTClaims{ + Scope: "read write admin", + } + scopes := claims.GetScopes() + expected := []string{"read", 
"write", "admin"} + + if len(scopes) != 3 { + t.Errorf("expected 3 scopes, got %d", len(scopes)) + } + + for i, expectedScope := range expected { + if i >= len(scopes) || scopes[i] != expectedScope { + t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) + } + } + }) + + t.Run("scopes with extra whitespace", func(t *testing.T) { + claims := &JWTClaims{ + Scope: " read write admin ", + } + scopes := claims.GetScopes() + expected := []string{"read", "write", "admin"} + + if len(scopes) != 3 { + t.Errorf("expected 3 scopes, got %d", len(scopes)) + } + + for i, expectedScope := range expected { + if i >= len(scopes) || scopes[i] != expectedScope { + t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) + } + } + }) +} diff --git a/pkg 2/http/http.go b/pkg 2/http/http.go new file mode 100644 index 00000000..602a4002 --- /dev/null +++ b/pkg 2/http/http.go @@ -0,0 +1,119 @@ +package http + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + + "k8s.io/klog/v2" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/mcp" +) + +const ( + oauthProtectedResourceEndpoint = "/.well-known/oauth-protected-resource" + healthEndpoint = "/healthz" + mcpEndpoint = "/mcp" + sseEndpoint = "/sse" + sseMessageEndpoint = "/message" +) + +func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider) error { + mux := http.NewServeMux() + + wrappedMux := RequestMiddleware( + AuthorizationMiddleware(staticConfig.RequireOAuth, staticConfig.ServerURL, oidcProvider, mcpServer)(mux), + ) + + httpServer := &http.Server{ + Addr: ":" + staticConfig.Port, + Handler: wrappedMux, + } + + sseServer := mcpServer.ServeSse(staticConfig.SSEBaseURL, httpServer) + streamableHttpServer := mcpServer.ServeHTTP(httpServer) + mux.Handle(sseEndpoint, 
sseServer) + mux.Handle(sseMessageEndpoint, sseServer) + mux.Handle(mcpEndpoint, streamableHttpServer) + mux.HandleFunc(healthEndpoint, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc(oauthProtectedResourceEndpoint, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + var authServers []string + if staticConfig.AuthorizationURL != "" { + authServers = []string{staticConfig.AuthorizationURL} + } else { + // Fallback to Kubernetes API server host if authorization_server is not configured + if apiServerHost := mcpServer.GetKubernetesAPIServerHost(); apiServerHost != "" { + authServers = []string{apiServerHost} + } + } + + response := map[string]interface{}{ + "authorization_servers": authServers, + "scopes_supported": mcpServer.GetEnabledTools(), + "bearer_methods_supported": []string{"header"}, + } + // Only emit the singular (legacy) field when at least one authorization server was + // resolved: indexing authServers[0] unconditionally panics with index-out-of-range + // when neither authorization_url nor the API server host is available. + if len(authServers) > 0 { + response["authorization_server"] = authServers[0] + } + + if staticConfig.ServerURL != "" { + response["resource"] = staticConfig.ServerURL + } + + if staticConfig.JwksURL != "" { + response["jwks_uri"] = staticConfig.JwksURL + } + + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(response); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + }) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + serverErr := make(chan error, 1) + go func() { + klog.V(0).Infof("Streaming and SSE HTTP servers starting on port %s and paths /mcp, /sse, /message", staticConfig.Port) + if err := httpServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + serverErr <- err + } + }() + + select { + case sig := <-sigChan: + klog.V(0).Infof("Received signal %v, initiating graceful shutdown", sig) + cancel() + case <-ctx.Done(): + klog.V(0).Infof("Context cancelled, initiating graceful shutdown") + case err := <-serverErr: + 
klog.Errorf("HTTP server error: %v", err) + return err + } + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer shutdownCancel() + + klog.V(0).Infof("Shutting down HTTP server gracefully...") + if err := httpServer.Shutdown(shutdownCtx); err != nil { + klog.Errorf("HTTP server shutdown error: %v", err) + return err + } + + klog.V(0).Infof("HTTP server shutdown complete") + return nil +} diff --git a/pkg 2/http/http_test.go b/pkg 2/http/http_test.go new file mode 100644 index 00000000..89e091fe --- /dev/null +++ b/pkg 2/http/http_test.go @@ -0,0 +1,515 @@ +package http + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "flag" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/coreos/go-oidc/v3/oidc/oidctest" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/mcp" +) + +type httpContext struct { + klogState klog.State + LogBuffer bytes.Buffer + HttpAddress string // HTTP server address + timeoutCancel context.CancelFunc // Release resources if test completes before the timeout + StopServer context.CancelFunc + WaitForShutdown func() error + StaticConfig *config.StaticConfig + OidcProvider *oidc.Provider +} + +func (c *httpContext) beforeEach(t *testing.T) { + t.Helper() + http.DefaultClient.Timeout = 10 * time.Second + if c.StaticConfig == nil { + c.StaticConfig = &config.StaticConfig{} + } + // Fake Kubernetes configuration + fakeConfig := api.NewConfig() + fakeConfig.Clusters["fake"] = api.NewCluster() + fakeConfig.Clusters["fake"].Server = "https://example.com" + fakeConfig.Contexts["fake-context"] = api.NewContext() + 
fakeConfig.Contexts["fake-context"].Cluster = "fake" + fakeConfig.CurrentContext = "fake-context" + kubeConfig := filepath.Join(t.TempDir(), "config") + _ = clientcmd.WriteToFile(*fakeConfig, kubeConfig) + _ = os.Setenv("KUBECONFIG", kubeConfig) + // Capture logging + c.klogState = klog.CaptureState() + flags := flag.NewFlagSet("test", flag.ContinueOnError) + klog.InitFlags(flags) + _ = flags.Set("v", "5") + klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(&c.LogBuffer)))) + // Start server in random port + ln, err := net.Listen("tcp", "0.0.0.0:0") + if err != nil { + t.Fatalf("Failed to find random port for HTTP server: %v", err) + } + c.HttpAddress = ln.Addr().String() + if randomPortErr := ln.Close(); randomPortErr != nil { + t.Fatalf("Failed to close random port listener: %v", randomPortErr) + } + c.StaticConfig.Port = fmt.Sprintf("%d", ln.Addr().(*net.TCPAddr).Port) + mcpServer, err := mcp.NewServer(mcp.Configuration{ + Profile: mcp.Profiles[0], + StaticConfig: c.StaticConfig, + }) + if err != nil { + t.Fatalf("Failed to create MCP server: %v", err) + } + var timeoutCtx, cancelCtx context.Context + timeoutCtx, c.timeoutCancel = context.WithTimeout(t.Context(), 10*time.Second) + group, gc := errgroup.WithContext(timeoutCtx) + cancelCtx, c.StopServer = context.WithCancel(gc) + group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider) }) + c.WaitForShutdown = group.Wait + // Wait for HTTP server to start (using net) + for i := 0; i < 10; i++ { + conn, err := net.Dial("tcp", c.HttpAddress) + if err == nil { + _ = conn.Close() + break + } + time.Sleep(50 * time.Millisecond) // Wait before retrying + } +} + +func (c *httpContext) afterEach(t *testing.T) { + t.Helper() + c.StopServer() + err := c.WaitForShutdown() + if err != nil { + t.Errorf("HTTP server did not shut down gracefully: %v", err) + } + c.timeoutCancel() + c.klogState.Restore() + _ = os.Setenv("KUBECONFIG", "") +} + 
+func testCase(t *testing.T, test func(c *httpContext)) { + testCaseWithContext(t, &httpContext{}, test) +} + +func testCaseWithContext(t *testing.T, httpCtx *httpContext, test func(c *httpContext)) { + httpCtx.beforeEach(t) + t.Cleanup(func() { httpCtx.afterEach(t) }) + test(httpCtx) +} + +func NewOidcTestServer(t *testing.T) (privateKey *rsa.PrivateKey, oidcProvider *oidc.Provider, httpServer *httptest.Server) { + t.Helper() + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("failed to generate private key for oidc: %v", err) + } + oidcServer := &oidctest.Server{ + PublicKeys: []oidctest.PublicKey{ + { + PublicKey: privateKey.Public(), + KeyID: "test-oidc-key-id", + Algorithm: oidc.RS256, + }, + }, + } + httpServer = httptest.NewServer(oidcServer) + oidcServer.SetIssuer(httpServer.URL) + oidcProvider, err = oidc.NewProvider(t.Context(), httpServer.URL) + if err != nil { + t.Fatalf("failed to create OIDC provider: %v", err) + } + return +} + +func TestGracefulShutdown(t *testing.T) { + testCase(t, func(ctx *httpContext) { + ctx.StopServer() + err := ctx.WaitForShutdown() + t.Run("Stops gracefully", func(t *testing.T) { + if err != nil { + t.Errorf("Expected graceful shutdown, but got error: %v", err) + } + }) + t.Run("Stops on context cancel", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Context cancelled, initiating graceful shutdown") { + t.Errorf("Context cancelled, initiating graceful shutdown, got: %s", ctx.LogBuffer.String()) + } + }) + t.Run("Starts server shutdown", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Shutting down HTTP server gracefully") { + t.Errorf("Expected graceful shutdown log, got: %s", ctx.LogBuffer.String()) + } + }) + t.Run("Server shutdown completes", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "HTTP server shutdown complete") { + t.Errorf("Expected HTTP server shutdown completed log, got: %s", ctx.LogBuffer.String()) + } + }) + }) 
+} + +func TestSseTransport(t *testing.T) { + testCase(t, func(ctx *httpContext) { + sseResp, sseErr := http.Get(fmt.Sprintf("http://%s/sse", ctx.HttpAddress)) + t.Cleanup(func() { _ = sseResp.Body.Close() }) + t.Run("Exposes SSE endpoint at /sse", func(t *testing.T) { + if sseErr != nil { + t.Fatalf("Failed to get SSE endpoint: %v", sseErr) + } + if sseResp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", sseResp.StatusCode) + } + }) + t.Run("SSE endpoint returns text/event-stream content type", func(t *testing.T) { + if sseResp.Header.Get("Content-Type") != "text/event-stream" { + t.Errorf("Expected Content-Type text/event-stream, got %s", sseResp.Header.Get("Content-Type")) + } + }) + responseReader := bufio.NewReader(sseResp.Body) + event, eventErr := responseReader.ReadString('\n') + endpoint, endpointErr := responseReader.ReadString('\n') + t.Run("SSE endpoint returns stream with messages endpoint", func(t *testing.T) { + if eventErr != nil { + t.Fatalf("Failed to read SSE response body (event): %v", eventErr) + } + if event != "event: endpoint\n" { + t.Errorf("Expected SSE event 'endpoint', got %s", event) + } + if endpointErr != nil { + t.Fatalf("Failed to read SSE response body (endpoint): %v", endpointErr) + } + if !strings.HasPrefix(endpoint, "data: /message?sessionId=") { + t.Errorf("Expected SSE data: '/message', got %s", endpoint) + } + }) + messageResp, messageErr := http.Post( + fmt.Sprintf("http://%s/message?sessionId=%s", ctx.HttpAddress, strings.TrimSpace(endpoint[25:])), + "application/json", + bytes.NewBufferString("{}"), + ) + t.Cleanup(func() { _ = messageResp.Body.Close() }) + t.Run("Exposes message endpoint at /message", func(t *testing.T) { + if messageErr != nil { + t.Fatalf("Failed to get message endpoint: %v", messageErr) + } + if messageResp.StatusCode != http.StatusAccepted { + t.Errorf("Expected HTTP 202 OK, got %d", messageResp.StatusCode) + } + }) + }) +} + +func TestStreamableHttpTransport(t *testing.T) { + 
testCase(t, func(ctx *httpContext) { + mcpGetResp, mcpGetErr := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) + t.Cleanup(func() { _ = mcpGetResp.Body.Close() }) + t.Run("Exposes MCP GET endpoint at /mcp", func(t *testing.T) { + if mcpGetErr != nil { + t.Fatalf("Failed to get MCP endpoint: %v", mcpGetErr) + } + if mcpGetResp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", mcpGetResp.StatusCode) + } + }) + t.Run("MCP GET endpoint returns text/event-stream content type", func(t *testing.T) { + if mcpGetResp.Header.Get("Content-Type") != "text/event-stream" { + t.Errorf("Expected Content-Type text/event-stream (GET), got %s", mcpGetResp.Header.Get("Content-Type")) + } + }) + mcpPostResp, mcpPostErr := http.Post(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), "application/json", bytes.NewBufferString("{}")) + t.Cleanup(func() { _ = mcpPostResp.Body.Close() }) + t.Run("Exposes MCP POST endpoint at /mcp", func(t *testing.T) { + if mcpPostErr != nil { + t.Fatalf("Failed to post to MCP endpoint: %v", mcpPostErr) + } + if mcpPostResp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", mcpPostResp.StatusCode) + } + }) + t.Run("MCP POST endpoint returns application/json content type", func(t *testing.T) { + if mcpPostResp.Header.Get("Content-Type") != "application/json" { + t.Errorf("Expected Content-Type application/json (POST), got %s", mcpPostResp.Header.Get("Content-Type")) + } + }) + }) +} + +func TestHealthCheck(t *testing.T) { + testCase(t, func(ctx *httpContext) { + t.Run("Exposes health check endpoint at /healthz", func(t *testing.T) { + resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress)) + if err != nil { + t.Fatalf("Failed to get health check endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) + } + }) + }) + // Health exposed even when require Authorization + 
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}}, func(ctx *httpContext) { + resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress)) + if err != nil { + t.Fatalf("Failed to get health check endpoint with OAuth: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Health check with OAuth returns HTTP 200 OK", func(t *testing.T) { + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) + } + }) + }) +} + +func TestWellKnownOAuthProtectedResource(t *testing.T) { + testCase(t, func(ctx *httpContext) { + resp, err := http.Get(fmt.Sprintf("http://%s/.well-known/oauth-protected-resource", ctx.HttpAddress)) + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Exposes .well-known/oauth-protected-resource endpoint", func(t *testing.T) { + if err != nil { + t.Fatalf("Failed to get .well-known/oauth-protected-resource endpoint: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) + } + }) + t.Run(".well-known/oauth-protected-resource returns application/json content type", func(t *testing.T) { + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("Expected Content-Type application/json, got %s", resp.Header.Get("Content-Type")) + } + }) + }) +} + +func TestMiddlewareLogging(t *testing.T) { + testCase(t, func(ctx *httpContext) { + _, _ = http.Get(fmt.Sprintf("http://%s/.well-known/oauth-protected-resource", ctx.HttpAddress)) + t.Run("Logs HTTP requests and responses", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "GET /.well-known/oauth-protected-resource 200") { + t.Errorf("Expected log entry for GET /.well-known/oauth-protected-resource, got: %s", ctx.LogBuffer.String()) + } + }) + t.Run("Logs HTTP request duration", func(t *testing.T) { + expected := `"GET /.well-known/oauth-protected-resource 200 (.+)"` + m := 
regexp.MustCompile(expected).FindStringSubmatch(ctx.LogBuffer.String()) + if len(m) != 2 { + t.Fatalf("Expected log entry to contain duration, got %s", ctx.LogBuffer.String()) + } + duration, err := time.ParseDuration(m[1]) + if err != nil { + t.Fatalf("Failed to parse duration from log entry: %v", err) + } + if duration < 0 { + t.Errorf("Expected duration to be non-negative, got %v", duration) + } + }) + }) +} + +func TestAuthorizationUnauthorized(t *testing.T) { + // Missing Authorization header + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}}, func(ctx *httpContext) { + resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with MISSING Authorization header returns 401 - Unauthorized", func(t *testing.T) { + if resp.StatusCode != 401 { + t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) + } + }) + t.Run("Protected resource with MISSING Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="missing_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with MISSING Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { + t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) + } + }) + }) + // Authorization header without Bearer prefix + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}}, func(ctx *httpContext) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), 
nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Authorization", "Basic YWxhZGRpbjpvcGVuc2VzYW1l") + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with INCOMPATIBLE Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="missing_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with INCOMPATIBLE Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { + t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) + } + }) + }) + // Invalid Authorization header + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}}, func(ctx *httpContext) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Authorization", "Bearer invalid_base64"+tokenBasicNotExpired) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with INVALID Authorization header returns 401 - Unauthorized", func(t *testing.T) { + if resp.StatusCode != 401 { + t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) + } + }) + t.Run("Protected resource with INVALID Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer 
realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="invalid_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with INVALID Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") && + !strings.Contains(ctx.LogBuffer.String(), "error: failed to parse JWT token: illegal base64 data") { + t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) + } + }) + }) + // Expired Authorization Bearer token + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}}, func(ctx *httpContext) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Authorization", "Bearer "+tokenBasicExpired) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with EXPIRED Authorization header returns 401 - Unauthorized", func(t *testing.T) { + if resp.StatusCode != 401 { + t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) + } + }) + t.Run("Protected resource with EXPIRED Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="invalid_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with EXPIRED Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") && + 
!strings.Contains(ctx.LogBuffer.String(), "validation failed, token is expired (exp)") { + t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) + } + }) + }) + // Failed OIDC validation + key, oidcProvider, httpServer := NewOidcTestServer(t) + t.Cleanup(httpServer.Close) + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}, OidcProvider: oidcProvider}, func(ctx *httpContext) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with INVALID OIDC Authorization header returns 401 - Unauthorized", func(t *testing.T) { + if resp.StatusCode != 401 { + t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) + } + }) + t.Run("Protected resource with INVALID OIDC Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="invalid_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with INVALID OIDC Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - OIDC token validation error") && + !strings.Contains(ctx.LogBuffer.String(), "JWT token verification failed: oidc: id token issued by a different provider") { + t.Errorf("Expected log entry for OIDC validation error, got: %s", ctx.LogBuffer.String()) + } + }) + }) + // Failed Kubernetes TokenReview + rawClaims := `{ + "iss": "` + httpServer.URL + `", + 
"exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, + "aud": "kubernetes-mcp-server" + }` + validOidcToken := oidctest.SignIDToken(key, "test-oidc-key-id", oidc.RS256, rawClaims) + testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true}, OidcProvider: oidcProvider}, func(ctx *httpContext) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Authorization", "Bearer "+validOidcToken) + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get protected endpoint: %v", err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + t.Run("Protected resource with INVALID KUBERNETES Authorization header returns 401 - Unauthorized", func(t *testing.T) { + if resp.StatusCode != 401 { + t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) + } + }) + t.Run("Protected resource with INVALID KUBERNETES Authorization header returns WWW-Authenticate header", func(t *testing.T) { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="kubernetes-mcp-server", error="invalid_token"` + if authHeader != expected { + t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) + } + }) + t.Run("Protected resource with INVALID KUBERNETES Authorization header logs error", func(t *testing.T) { + if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - API Server token validation error") { + t.Errorf("Expected log entry for Kubernetes TokenReview error, got: %s", ctx.LogBuffer.String()) + } + }) + }) +} diff --git a/pkg 2/http/middleware.go b/pkg 2/http/middleware.go new file mode 100644 index 00000000..0c4c4102 --- /dev/null +++ b/pkg 2/http/middleware.go @@ -0,0 +1,66 @@ +package http + +import ( + "bufio" + "net" + "net/http" + "time" + + "k8s.io/klog/v2" +) + +func RequestMiddleware(next 
http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/healthz" { + next.ServeHTTP(w, r) + return + } + + start := time.Now() + + lrw := &loggingResponseWriter{ + ResponseWriter: w, + statusCode: http.StatusOK, + } + + next.ServeHTTP(lrw, r) + + duration := time.Since(start) + klog.V(5).Infof("%s %s %d %v", r.Method, r.URL.Path, lrw.statusCode, duration) + }) +} + +type loggingResponseWriter struct { + http.ResponseWriter + statusCode int + headerWritten bool +} + +func (lrw *loggingResponseWriter) WriteHeader(code int) { + if !lrw.headerWritten { + lrw.statusCode = code + lrw.headerWritten = true + lrw.ResponseWriter.WriteHeader(code) + } +} + +func (lrw *loggingResponseWriter) Write(b []byte) (int, error) { + if !lrw.headerWritten { + lrw.statusCode = http.StatusOK + lrw.headerWritten = true + } + return lrw.ResponseWriter.Write(b) +} + +func (lrw *loggingResponseWriter) Flush() { + if flusher, ok := lrw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if hijacker, ok := lrw.ResponseWriter.(http.Hijacker); ok { + return hijacker.Hijack() + } + return nil, nil, http.ErrNotSupported +} diff --git a/pkg 2/kubernetes/accesscontrol.go b/pkg 2/kubernetes/accesscontrol.go new file mode 100644 index 00000000..e35b5dfb --- /dev/null +++ b/pkg 2/kubernetes/accesscontrol.go @@ -0,0 +1,40 @@ +package kubernetes + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +// isAllowed checks the resource is in denied list or not. +// If it is in denied list, this function returns false. 
+func isAllowed( + staticConfig *config.StaticConfig, // TODO: maybe just use the denied resource slice + gvk *schema.GroupVersionKind, +) bool { + if staticConfig == nil { + return true + } + + for _, val := range staticConfig.DeniedResources { + // If kind is empty, that means Group/Version pair is denied entirely + if val.Kind == "" { + if gvk.Group == val.Group && gvk.Version == val.Version { + return false + } + } + if gvk.Group == val.Group && + gvk.Version == val.Version && + gvk.Kind == val.Kind { + return false + } + } + + return true +} + +func isNotAllowedError(gvk *schema.GroupVersionKind) error { + return fmt.Errorf("resource not allowed: %s", gvk.String()) +} diff --git a/pkg 2/kubernetes/accesscontrol_clientset.go b/pkg 2/kubernetes/accesscontrol_clientset.go new file mode 100644 index 00000000..ed875c64 --- /dev/null +++ b/pkg 2/kubernetes/accesscontrol_clientset.go @@ -0,0 +1,141 @@ +package kubernetes + +import ( + "context" + "fmt" + + authenticationv1api "k8s.io/api/authentication/v1" + authorizationv1api "k8s.io/api/authorization/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/metrics/pkg/apis/metrics" + metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +// AccessControlClientset is a limited clientset delegating interface to the standard kubernetes.Clientset +// Only a limited set of functions are implemented with a single point of access 
to the kubernetes API where +// apiVersion and kinds are checked for allowed access +type AccessControlClientset struct { + cfg *rest.Config + delegate kubernetes.Interface + discoveryClient discovery.DiscoveryInterface + metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client + staticConfig *config.StaticConfig // TODO: maybe just store the denied resource slice +} + +func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface { + return a.discoveryClient +} + +func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) { + gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + return a.delegate.CoreV1().Pods(namespace), nil +} + +func (a *AccessControlClientset) PodsExec(namespace, name string, podExecOptions *v1.PodExecOptions) (remotecommand.Executor, error) { + gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + // Compute URL + // https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L382-L397 + execRequest := a.delegate.CoreV1().RESTClient(). + Post(). + Resource("pods"). + Namespace(namespace). + Name(name). 
+ SubResource("exec") + execRequest.VersionedParams(podExecOptions, ParameterCodec) + spdyExec, err := remotecommand.NewSPDYExecutor(a.cfg, "POST", execRequest.URL()) + if err != nil { + return nil, err + } + webSocketExec, err := remotecommand.NewWebSocketExecutor(a.cfg, "GET", execRequest.URL().String()) + if err != nil { + return nil, err + } + return remotecommand.NewFallbackExecutor(webSocketExec, spdyExec, func(err error) bool { + return httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err) + }) +} + +func (a *AccessControlClientset) PodsMetricses(ctx context.Context, namespace, name string, listOptions metav1.ListOptions) (*metrics.PodMetricsList, error) { + gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "PodMetrics"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + versionedMetrics := &metricsv1beta1api.PodMetricsList{} + var err error + if name != "" { + m, err := a.metricsV1beta1.PodMetricses(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get metrics for pod %s/%s: %w", namespace, name, err) + } + versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m} + } else { + versionedMetrics, err = a.metricsV1beta1.PodMetricses(namespace).List(ctx, listOptions) + if err != nil { + return nil, fmt.Errorf("failed to list pod metrics in namespace %s: %w", namespace, err) + } + } + convertedMetrics := &metrics.PodMetricsList{} + return convertedMetrics, metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, convertedMetrics, nil) +} + +func (a *AccessControlClientset) Services(namespace string) (corev1.ServiceInterface, error) { + gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + return a.delegate.CoreV1().Services(namespace), nil +} + +func (a 
*AccessControlClientset) SelfSubjectAccessReviews() (authorizationv1.SelfSubjectAccessReviewInterface, error) { + gvk := &schema.GroupVersionKind{Group: authorizationv1api.GroupName, Version: authorizationv1api.SchemeGroupVersion.Version, Kind: "SelfSubjectAccessReview"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + return a.delegate.AuthorizationV1().SelfSubjectAccessReviews(), nil +} + +// TokenReview returns TokenReviewInterface +func (a *AccessControlClientset) TokenReview() (authenticationv1.TokenReviewInterface, error) { + // Use the authentication group's version to match Group (was mistakenly taken from authorizationv1api) + gvk := &schema.GroupVersionKind{Group: authenticationv1api.GroupName, Version: authenticationv1api.SchemeGroupVersion.Version, Kind: "TokenReview"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + return a.delegate.AuthenticationV1().TokenReviews(), nil +} + +func NewAccessControlClientset(cfg *rest.Config, staticConfig *config.StaticConfig) (*AccessControlClientset, error) { + clientSet, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + metricsClient, err := metricsv1beta1.NewForConfig(cfg) + if err != nil { + return nil, err + } + return &AccessControlClientset{ + cfg: cfg, + delegate: clientSet, + discoveryClient: clientSet.DiscoveryClient, + metricsV1beta1: metricsClient, + staticConfig: staticConfig, + }, nil +} diff --git a/pkg 2/kubernetes/accesscontrol_restmapper.go b/pkg 2/kubernetes/accesscontrol_restmapper.go new file mode 100644 index 00000000..06269480 --- /dev/null +++ b/pkg 2/kubernetes/accesscontrol_restmapper.go @@ -0,0 +1,80 @@ +package kubernetes + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/restmapper" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +type AccessControlRESTMapper struct { + delegate *restmapper.DeferredDiscoveryRESTMapper + staticConfig *config.StaticConfig // TODO: maybe just store the denied resource slice +} + +var _ 
meta.RESTMapper = &AccessControlRESTMapper{} + +func (a AccessControlRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + gvk, err := a.delegate.KindFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if !isAllowed(a.staticConfig, &gvk) { + return schema.GroupVersionKind{}, isNotAllowedError(&gvk) + } + return gvk, nil +} + +func (a AccessControlRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + gvks, err := a.delegate.KindsFor(resource) + if err != nil { + return nil, err + } + for i := range gvks { + if !isAllowed(a.staticConfig, &gvks[i]) { + return nil, isNotAllowedError(&gvks[i]) + } + } + return gvks, nil +} + +func (a AccessControlRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return a.delegate.ResourceFor(input) +} + +func (a AccessControlRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return a.delegate.ResourcesFor(input) +} + +func (a AccessControlRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + for _, version := range versions { + gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + } + return a.delegate.RESTMapping(gk, versions...) +} + +func (a AccessControlRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + for _, version := range versions { + gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + } + return a.delegate.RESTMappings(gk, versions...) 
+} + +func (a AccessControlRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return a.delegate.ResourceSingularizer(resource) +} + +func (a AccessControlRESTMapper) Reset() { + a.delegate.Reset() +} + +func NewAccessControlRESTMapper(delegate *restmapper.DeferredDiscoveryRESTMapper, staticConfig *config.StaticConfig) *AccessControlRESTMapper { + return &AccessControlRESTMapper{delegate: delegate, staticConfig: staticConfig} +} diff --git a/pkg 2/kubernetes/configuration.go b/pkg 2/kubernetes/configuration.go new file mode 100644 index 00000000..df88530f --- /dev/null +++ b/pkg 2/kubernetes/configuration.go @@ -0,0 +1,115 @@ +package kubernetes + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/tools/clientcmd/api/latest" +) + +// InClusterConfig is a variable that holds the function to get the in-cluster config +// Exposed for testing +var InClusterConfig = func() (*rest.Config, error) { + // TODO use kubernetes.default.svc instead of resolved server + // Currently running into: `http: server gave HTTP response to HTTPS client` + inClusterConfig, err := rest.InClusterConfig() + if inClusterConfig != nil { + inClusterConfig.Host = "https://kubernetes.default.svc" + } + return inClusterConfig, err +} + +// resolveKubernetesConfigurations resolves the required kubernetes configurations and sets them in the Kubernetes struct +func resolveKubernetesConfigurations(kubernetes *Manager) error { + // Always set clientCmdConfig + pathOptions := clientcmd.NewDefaultPathOptions() + if kubernetes.staticConfig.KubeConfig != "" { + pathOptions.LoadingRules.ExplicitPath = kubernetes.staticConfig.KubeConfig + } + kubernetes.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + pathOptions.LoadingRules, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}}) + var err error + 
if kubernetes.IsInCluster() { + kubernetes.cfg, err = InClusterConfig() + if err == nil && kubernetes.cfg != nil { + return nil + } + } + // Out of cluster + kubernetes.cfg, err = kubernetes.clientCmdConfig.ClientConfig() + if kubernetes.cfg != nil && kubernetes.cfg.UserAgent == "" { + kubernetes.cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + return err +} + +func (m *Manager) IsInCluster() bool { + if m.staticConfig.KubeConfig != "" { + return false + } + cfg, err := InClusterConfig() + return err == nil && cfg != nil +} + +func (m *Manager) configuredNamespace() string { + if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil { + return ns + } + return "" +} + +func (m *Manager) NamespaceOrDefault(namespace string) string { + if namespace == "" { + return m.configuredNamespace() + } + return namespace +} + +func (k *Kubernetes) NamespaceOrDefault(namespace string) string { + return k.manager.NamespaceOrDefault(namespace) +} + +// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter) +func (m *Manager) ToRESTConfig() (*rest.Config, error) { + return m.cfg, nil +} + +// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter) +func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return m.clientCmdConfig +} + +func (m *Manager) ConfigurationView(minify bool) (runtime.Object, error) { + var cfg clientcmdapi.Config + var err error + if m.IsInCluster() { + cfg = *clientcmdapi.NewConfig() + cfg.Clusters["cluster"] = &clientcmdapi.Cluster{ + Server: m.cfg.Host, + InsecureSkipTLSVerify: m.cfg.Insecure, + } + cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{ + Token: m.cfg.BearerToken, + } + cfg.Contexts["context"] = &clientcmdapi.Context{ + Cluster: "cluster", + AuthInfo: "user", + } + cfg.CurrentContext = "context" + } else if cfg, err = m.clientCmdConfig.RawConfig(); err != nil { + return nil, err + } + if minify { + if err = clientcmdapi.MinifyConfig(&cfg); err != nil 
{ + return nil, err + } + } + //nolint:staticcheck + if err = clientcmdapi.FlattenConfig(&cfg); err != nil { + // ignore error + //return "", err + } + return latest.Scheme.ConvertToVersion(&cfg, latest.ExternalVersion) +} diff --git a/pkg 2/kubernetes/configuration_test.go b/pkg 2/kubernetes/configuration_test.go new file mode 100644 index 00000000..084b99d7 --- /dev/null +++ b/pkg 2/kubernetes/configuration_test.go @@ -0,0 +1,155 @@ +package kubernetes + +import ( + "errors" + "os" + "path" + "runtime" + "strings" + "testing" + + "k8s.io/client-go/rest" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +func TestKubernetes_IsInCluster(t *testing.T) { + t.Run("with explicit kubeconfig", func(t *testing.T) { + m := Manager{ + staticConfig: &config.StaticConfig{ + KubeConfig: "kubeconfig", + }, + } + if m.IsInCluster() { + t.Errorf("expected not in cluster, got in cluster") + } + }) + t.Run("with empty kubeconfig and in cluster", func(t *testing.T) { + originalFunction := InClusterConfig + InClusterConfig = func() (*rest.Config, error) { + return &rest.Config{}, nil + } + defer func() { + InClusterConfig = originalFunction + }() + m := Manager{ + staticConfig: &config.StaticConfig{ + KubeConfig: "", + }, + } + if !m.IsInCluster() { + t.Errorf("expected in cluster, got not in cluster") + } + }) + t.Run("with empty kubeconfig and not in cluster (empty)", func(t *testing.T) { + originalFunction := InClusterConfig + InClusterConfig = func() (*rest.Config, error) { + return nil, nil + } + defer func() { + InClusterConfig = originalFunction + }() + m := Manager{ + staticConfig: &config.StaticConfig{ + KubeConfig: "", + }, + } + if m.IsInCluster() { + t.Errorf("expected not in cluster, got in cluster") + } + }) + t.Run("with empty kubeconfig and not in cluster (error)", func(t *testing.T) { + originalFunction := InClusterConfig + InClusterConfig = func() (*rest.Config, error) { + return nil, errors.New("error") + } + defer func() { + InClusterConfig = 
originalFunction + }() + m := Manager{ + staticConfig: &config.StaticConfig{ + KubeConfig: "", + }, + } + if m.IsInCluster() { + t.Errorf("expected not in cluster, got in cluster") + } + }) +} + +func TestKubernetes_ResolveKubernetesConfigurations_Explicit(t *testing.T) { + t.Run("with missing file", func(t *testing.T) { + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { + t.Skip("Skipping test on non-linux platforms") + } + tempDir := t.TempDir() + m := Manager{staticConfig: &config.StaticConfig{ + KubeConfig: path.Join(tempDir, "config"), + }} + err := resolveKubernetesConfigurations(&m) + if err == nil { + t.Errorf("expected error, got nil") + } + if !errors.Is(err, os.ErrNotExist) { + t.Errorf("expected file not found error, got %v", err) + } + if !strings.HasSuffix(err.Error(), ": no such file or directory") { + t.Errorf("expected file not found error, got %v", err) + } + }) + t.Run("with empty file", func(t *testing.T) { + tempDir := t.TempDir() + kubeconfigPath := path.Join(tempDir, "config") + if err := os.WriteFile(kubeconfigPath, []byte(""), 0644); err != nil { + t.Fatalf("failed to create kubeconfig file: %v", err) + } + m := Manager{staticConfig: &config.StaticConfig{ + KubeConfig: kubeconfigPath, + }} + err := resolveKubernetesConfigurations(&m) + if err == nil { + t.Errorf("expected error, got nil") + } + if !strings.Contains(err.Error(), "no configuration has been provided") { + t.Errorf("expected no kubeconfig error, got %v", err) + } + }) + t.Run("with valid file", func(t *testing.T) { + tempDir := t.TempDir() + kubeconfigPath := path.Join(tempDir, "config") + kubeconfigContent := ` +apiVersion: v1 +kind: Config +clusters: +- cluster: + server: https://example.com + name: example-cluster +contexts: +- context: + cluster: example-cluster + user: example-user + name: example-context +current-context: example-context +users: +- name: example-user + user: + token: example-token +` + if err := os.WriteFile(kubeconfigPath, 
[]byte(kubeconfigContent), 0644); err != nil { + t.Fatalf("failed to create kubeconfig file: %v", err) + } + m := Manager{staticConfig: &config.StaticConfig{ + KubeConfig: kubeconfigPath, + }} + err := resolveKubernetesConfigurations(&m) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if m.cfg == nil { + t.Errorf("expected non-nil config, got nil") + } + if m.cfg.Host != "https://example.com" { + t.Errorf("expected host https://example.com, got %s", m.cfg.Host) + } + }) +} diff --git a/pkg 2/kubernetes/events.go b/pkg 2/kubernetes/events.go new file mode 100644 index 00000000..e40720a3 --- /dev/null +++ b/pkg 2/kubernetes/events.go @@ -0,0 +1,51 @@ +package kubernetes + +import ( + "context" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "strings" +) + +func (k *Kubernetes) EventsList(ctx context.Context, namespace string) ([]map[string]any, error) { + var eventMap []map[string]any + raw, err := k.ResourcesList(ctx, &schema.GroupVersionKind{ + Group: "", Version: "v1", Kind: "Event", + }, namespace, ResourceListOptions{}) + if err != nil { + return eventMap, err + } + unstructuredList := raw.(*unstructured.UnstructuredList) + if len(unstructuredList.Items) == 0 { + return eventMap, nil + } + for _, item := range unstructuredList.Items { + event := &v1.Event{} + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(item.Object, event); err != nil { + return eventMap, err + } + timestamp := event.EventTime.Time + if timestamp.IsZero() && event.Series != nil { + timestamp = event.Series.LastObservedTime.Time + } else if timestamp.IsZero() && event.Count > 1 { + timestamp = event.LastTimestamp.Time + } else if timestamp.IsZero() { + timestamp = event.FirstTimestamp.Time + } + eventMap = append(eventMap, map[string]any{ + "Namespace": event.Namespace, + "Timestamp": timestamp.String(), + "Type": event.Type, + "Reason": 
event.Reason, + "InvolvedObject": map[string]string{ + "apiVersion": event.InvolvedObject.APIVersion, + "Kind": event.InvolvedObject.Kind, + "Name": event.InvolvedObject.Name, + }, + "Message": strings.TrimSpace(event.Message), + }) + } + return eventMap, nil +} diff --git a/pkg 2/kubernetes/impersonate_roundtripper.go b/pkg 2/kubernetes/impersonate_roundtripper.go new file mode 100644 index 00000000..a2c15bf2 --- /dev/null +++ b/pkg 2/kubernetes/impersonate_roundtripper.go @@ -0,0 +1,17 @@ +package kubernetes + +import "net/http" + +// nolint:unused +type impersonateRoundTripper struct { + delegate http.RoundTripper +} + +// nolint:unused +func (irt *impersonateRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // TODO: Solution won't work with discoveryclient which uses context.TODO() instead of the passed-in context + if v, ok := req.Context().Value(OAuthAuthorizationHeader).(string); ok { + req.Header.Set("Authorization", v) + } + return irt.delegate.RoundTrip(req) +} diff --git a/pkg 2/kubernetes/kubernetes.go b/pkg 2/kubernetes/kubernetes.go new file mode 100644 index 00000000..db0ac542 --- /dev/null +++ b/pkg 2/kubernetes/kubernetes.go @@ -0,0 +1,214 @@ +package kubernetes + +import ( + "context" + "errors" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fsnotify/fsnotify" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog/v2" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/helm" + + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" +) + +type HeaderKey string + +const ( + CustomAuthorizationHeader = HeaderKey("kubernetes-authorization") + OAuthAuthorizationHeader = 
HeaderKey("Authorization") + + CustomUserAgent = "kubernetes-mcp-server/bearer-token-auth" +) + +type CloseWatchKubeConfig func() error + +type Kubernetes struct { + manager *Manager +} + +type Manager struct { + cfg *rest.Config + clientCmdConfig clientcmd.ClientConfig + discoveryClient discovery.CachedDiscoveryInterface + accessControlClientSet *AccessControlClientset + accessControlRESTMapper *AccessControlRESTMapper + dynamicClient *dynamic.DynamicClient + + staticConfig *config.StaticConfig + CloseWatchKubeConfig CloseWatchKubeConfig +} + +var Scheme = scheme.Scheme +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +var _ helm.Kubernetes = &Manager{} + +func NewManager(config *config.StaticConfig) (*Manager, error) { + k8s := &Manager{ + staticConfig: config, + } + if err := resolveKubernetesConfigurations(k8s); err != nil { + return nil, err + } + // TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO()) + //k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper { + // return &impersonateRoundTripper{original} + //}) + var err error + k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig) + if err != nil { + return nil, err + } + k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient()) + k8s.accessControlRESTMapper = NewAccessControlRESTMapper( + restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient), + k8s.staticConfig, + ) + k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg) + if err != nil { + return nil, err + } + return k8s, nil +} + +func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) { + if m.clientCmdConfig == nil { + return + } + kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence() + if len(kubeConfigFiles) == 0 { + return + } + watcher, err := fsnotify.NewWatcher() + if err != nil { + return + } + for _, file := range kubeConfigFiles { + _ = 
watcher.Add(file) + } + go func() { + for { + select { + case _, ok := <-watcher.Events: + if !ok { + return + } + _ = onKubeConfigChange() + case _, ok := <-watcher.Errors: + if !ok { + return + } + } + } + }() + if m.CloseWatchKubeConfig != nil { + _ = m.CloseWatchKubeConfig() + } + m.CloseWatchKubeConfig = watcher.Close +} + +func (m *Manager) Close() { + if m.CloseWatchKubeConfig != nil { + _ = m.CloseWatchKubeConfig() + } +} + +func (m *Manager) GetAPIServerHost() string { + if m.cfg == nil { + return "" + } + return m.cfg.Host +} + +func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return m.discoveryClient, nil +} + +func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) { + return m.accessControlRESTMapper, nil +} + +func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) { + authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string) + if !ok || !strings.HasPrefix(authorization, "Bearer ") { + if m.staticConfig.RequireOAuth { + return nil, errors.New("oauth token required") + } + return &Kubernetes{manager: m}, nil + } + klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader) + derivedCfg := &rest.Config{ + Host: m.cfg.Host, + APIPath: m.cfg.APIPath, + // Copy only server verification TLS settings (CA bundle and server name) + TLSClientConfig: rest.TLSClientConfig{ + Insecure: m.cfg.Insecure, + ServerName: m.cfg.ServerName, + CAFile: m.cfg.CAFile, + CAData: m.cfg.CAData, + }, + BearerToken: strings.TrimPrefix(authorization, "Bearer "), + // pass custom UserAgent to identify the client + UserAgent: CustomUserAgent, + QPS: m.cfg.QPS, + Burst: m.cfg.Burst, + Timeout: m.cfg.Timeout, + Impersonate: rest.ImpersonationConfig{}, + } + clientCmdApiConfig, err := m.clientCmdConfig.RawConfig() + if err != nil { + if m.staticConfig.RequireOAuth { + klog.Errorf("failed to get kubeconfig: %v", err) + return nil, errors.New("failed to get kubeconfig") + } + return 
&Kubernetes{manager: m}, nil + } + clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo) + derived := &Kubernetes{manager: &Manager{ + clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil), + cfg: derivedCfg, + staticConfig: m.staticConfig, + }} + derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig) + if err != nil { + if m.staticConfig.RequireOAuth { + klog.Errorf("failed to get kubeconfig: %v", err) + return nil, errors.New("failed to get kubeconfig") + } + return &Kubernetes{manager: m}, nil + } + derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient()) + derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper( + restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient), + derived.manager.staticConfig, + ) + derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg) + if err != nil { + if m.staticConfig.RequireOAuth { + klog.Errorf("failed to initialize dynamic client: %v", err) + return nil, errors.New("failed to initialize dynamic client") + } + return &Kubernetes{manager: m}, nil + } + return derived, nil +} + +func (k *Kubernetes) NewHelm() *helm.Helm { + // This is a derived Kubernetes, so it already has the Helm initialized + return helm.NewHelm(k.manager) +} diff --git a/pkg 2/kubernetes/kubernetes_test.go b/pkg 2/kubernetes/kubernetes_test.go new file mode 100644 index 00000000..2051ed48 --- /dev/null +++ b/pkg 2/kubernetes/kubernetes_test.go @@ -0,0 +1,316 @@ +package kubernetes + +import ( + "context" + "os" + "path" + "testing" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +func TestManager_Derived(t *testing.T) { + // Create a temporary kubeconfig file for testing + tempDir := t.TempDir() + kubeconfigPath := path.Join(tempDir, "config") + kubeconfigContent := ` +apiVersion: v1 +kind: Config +clusters: +- cluster: + 
server: https://test-cluster.example.com + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +users: +- name: test-user + user: + username: test-username + password: test-password +` + if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil { + t.Fatalf("failed to create kubeconfig file: %v", err) + } + + t.Run("without authorization header returns original manager", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + ctx := context.Background() + derived, err := testManager.Derived(ctx) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + + if derived.manager != testManager { + t.Errorf("expected original manager, got different manager") + } + }) + + t.Run("with invalid authorization header returns original manager", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token") + derived, err := testManager.Derived(ctx) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + + if derived.manager != testManager { + t.Errorf("expected original manager, got different manager") + } + }) + + t.Run("with valid bearer token creates derived manager with 
correct configuration", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + testBearerToken := "test-bearer-token-123" + ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken) + derived, err := testManager.Derived(ctx) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + + if derived.manager == testManager { + t.Errorf("expected new derived manager, got original manager") + } + + if derived.manager.staticConfig != testStaticConfig { + t.Errorf("staticConfig not properly wired to derived manager") + } + + derivedCfg := derived.manager.cfg + if derivedCfg == nil { + t.Fatalf("derived config is nil") + } + + originalCfg := testManager.cfg + if derivedCfg.Host != originalCfg.Host { + t.Errorf("expected Host %s, got %s", originalCfg.Host, derivedCfg.Host) + } + if derivedCfg.APIPath != originalCfg.APIPath { + t.Errorf("expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath) + } + if derivedCfg.QPS != originalCfg.QPS { + t.Errorf("expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS) + } + if derivedCfg.Burst != originalCfg.Burst { + t.Errorf("expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst) + } + if derivedCfg.Timeout != originalCfg.Timeout { + t.Errorf("expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout) + } + + if derivedCfg.Insecure != originalCfg.Insecure { + t.Errorf("expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure) + } + if derivedCfg.ServerName != originalCfg.ServerName { + t.Errorf("expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName) + } 
+ if derivedCfg.CAFile != originalCfg.CAFile { + t.Errorf("expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile) + } + if string(derivedCfg.CAData) != string(originalCfg.CAData) { + t.Errorf("expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData)) + } + + if derivedCfg.BearerToken != testBearerToken { + t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken) + } + if derivedCfg.UserAgent != CustomUserAgent { + t.Errorf("expected UserAgent %s, got %s", CustomUserAgent, derivedCfg.UserAgent) + } + + // Verify that sensitive fields are NOT copied to prevent credential leakage + // The derived config should only use the bearer token from the Authorization header + // and not inherit any authentication credentials from the original kubeconfig + if derivedCfg.CertFile != "" { + t.Errorf("expected TLS CertFile to be empty, got %s", derivedCfg.CertFile) + } + if derivedCfg.KeyFile != "" { + t.Errorf("expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile) + } + if len(derivedCfg.CertData) != 0 { + t.Errorf("expected TLS CertData to be empty, got %v", derivedCfg.CertData) + } + if len(derivedCfg.KeyData) != 0 { + t.Errorf("expected TLS KeyData to be empty, got %v", derivedCfg.KeyData) + } + + if derivedCfg.Username != "" { + t.Errorf("expected Username to be empty, got %s", derivedCfg.Username) + } + if derivedCfg.Password != "" { + t.Errorf("expected Password to be empty, got %s", derivedCfg.Password) + } + if derivedCfg.AuthProvider != nil { + t.Errorf("expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider) + } + if derivedCfg.ExecProvider != nil { + t.Errorf("expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider) + } + if derivedCfg.BearerTokenFile != "" { + t.Errorf("expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile) + } + if derivedCfg.Impersonate.UserName != "" { + t.Errorf("expected Impersonate.UserName to be empty, got %s", 
derivedCfg.Impersonate.UserName) + } + + // Verify that the original manager still has the sensitive data + if originalCfg.Username == "" && originalCfg.Password == "" { + t.Logf("original kubeconfig shouldn't be modified") + } + + // Verify that the derived manager has proper clients initialized + if derived.manager.accessControlClientSet == nil { + t.Error("expected accessControlClientSet to be initialized") + } + if derived.manager.accessControlClientSet.staticConfig != testStaticConfig { + t.Errorf("staticConfig not properly wired to derived manager") + } + if derived.manager.discoveryClient == nil { + t.Error("expected discoveryClient to be initialized") + } + if derived.manager.accessControlRESTMapper == nil { + t.Error("expected accessControlRESTMapper to be initialized") + } + if derived.manager.accessControlRESTMapper.staticConfig != testStaticConfig { + t.Errorf("staticConfig not properly wired to derived manager") + } + if derived.manager.dynamicClient == nil { + t.Error("expected dynamicClient to be initialized") + } + }) + + t.Run("with RequireOAuth=true and no authorization header returns oauth token required error", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + RequireOAuth: true, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + ctx := context.Background() + derived, err := testManager.Derived(ctx) + if err == nil { + t.Fatal("expected error for missing oauth token, got nil") + } + if err.Error() != "oauth token required" { + t.Fatalf("expected error 'oauth token required', got %s", err.Error()) + } + if derived != nil { + t.Error("expected nil derived manager when oauth token required") + } + }) + + t.Run("with RequireOAuth=true and invalid 
authorization header returns oauth token required error", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + RequireOAuth: true, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token") + derived, err := testManager.Derived(ctx) + if err == nil { + t.Fatal("expected error for invalid oauth token, got nil") + } + if err.Error() != "oauth token required" { + t.Fatalf("expected error 'oauth token required', got %s", err.Error()) + } + if derived != nil { + t.Error("expected nil derived manager when oauth token required") + } + }) + + t.Run("with RequireOAuth=true and valid bearer token creates derived manager", func(t *testing.T) { + testStaticConfig := &config.StaticConfig{ + KubeConfig: kubeconfigPath, + RequireOAuth: true, + DisabledTools: []string{"configuration_view"}, + DeniedResources: []config.GroupVersionKind{ + {Group: "apps", Version: "v1", Kind: "Deployment"}, + }, + } + + testManager, err := NewManager(testStaticConfig) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + defer testManager.Close() + testBearerToken := "test-bearer-token-123" + ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken) + derived, err := testManager.Derived(ctx) + if err != nil { + t.Fatalf("failed to create manager: %v", err) + } + + if derived.manager == testManager { + t.Error("expected new derived manager, got original manager") + } + + if derived.manager.staticConfig != testStaticConfig { + t.Error("staticConfig not properly wired to derived manager") + } + + derivedCfg := derived.manager.cfg + if derivedCfg == nil { + 
t.Fatal("derived config is nil") + } + + if derivedCfg.BearerToken != testBearerToken { + t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken) + } + }) +} diff --git a/pkg 2/kubernetes/namespaces.go b/pkg 2/kubernetes/namespaces.go new file mode 100644 index 00000000..8c191c1e --- /dev/null +++ b/pkg 2/kubernetes/namespaces.go @@ -0,0 +1,19 @@ +package kubernetes + +import ( + "context" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (k *Kubernetes) NamespacesList(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { + return k.ResourcesList(ctx, &schema.GroupVersionKind{ + Group: "", Version: "v1", Kind: "Namespace", + }, "", options) +} + +func (k *Kubernetes) ProjectsList(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { + return k.ResourcesList(ctx, &schema.GroupVersionKind{ + Group: "project.openshift.io", Version: "v1", Kind: "Project", + }, "", options) +} diff --git a/pkg 2/kubernetes/openshift.go b/pkg 2/kubernetes/openshift.go new file mode 100644 index 00000000..e94f9875 --- /dev/null +++ b/pkg 2/kubernetes/openshift.go @@ -0,0 +1,16 @@ +package kubernetes + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (m *Manager) IsOpenShift(_ context.Context) bool { + // This method should be fast and not block (it's called at startup) + _, err := m.discoveryClient.ServerResourcesForGroupVersion(schema.GroupVersion{ + Group: "project.openshift.io", + Version: "v1", + }.String()) + return err == nil +} diff --git a/pkg 2/kubernetes/pods.go b/pkg 2/kubernetes/pods.go new file mode 100644 index 00000000..b6a12447 --- /dev/null +++ b/pkg 2/kubernetes/pods.go @@ -0,0 +1,251 @@ +package kubernetes + +import ( + "bytes" + "context" + "errors" + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + labelutil 
"k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/metrics/pkg/apis/metrics" + metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" + + "github.com/containers/kubernetes-mcp-server/pkg/version" +) + +type PodsTopOptions struct { + metav1.ListOptions + AllNamespaces bool + Namespace string + Name string +} + +func (k *Kubernetes) PodsListInAllNamespaces(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { + return k.ResourcesList(ctx, &schema.GroupVersionKind{ + Group: "", Version: "v1", Kind: "Pod", + }, "", options) +} + +func (k *Kubernetes) PodsListInNamespace(ctx context.Context, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { + return k.ResourcesList(ctx, &schema.GroupVersionKind{ + Group: "", Version: "v1", Kind: "Pod", + }, namespace, options) +} + +func (k *Kubernetes) PodsGet(ctx context.Context, namespace, name string) (*unstructured.Unstructured, error) { + return k.ResourcesGet(ctx, &schema.GroupVersionKind{ + Group: "", Version: "v1", Kind: "Pod", + }, k.NamespaceOrDefault(namespace), name) +} + +func (k *Kubernetes) PodsDelete(ctx context.Context, namespace, name string) (string, error) { + namespace = k.NamespaceOrDefault(namespace) + pod, err := k.ResourcesGet(ctx, &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, namespace, name) + if err != nil { + return "", err + } + + isManaged := pod.GetLabels()[AppKubernetesManagedBy] == version.BinaryName + managedLabelSelector := labelutil.Set{ + AppKubernetesManagedBy: version.BinaryName, + AppKubernetesName: pod.GetLabels()[AppKubernetesName], + }.AsSelector() + + // Delete managed service + if isManaged { + services, err := k.manager.accessControlClientSet.Services(namespace) + if err != nil { + return "", err + } + if sl, _ := 
services.List(ctx, metav1.ListOptions{ + LabelSelector: managedLabelSelector.String(), + }); sl != nil { + for _, svc := range sl.Items { + _ = services.Delete(ctx, svc.Name, metav1.DeleteOptions{}) + } + } + } + + // Delete managed Route + if isManaged && k.supportsGroupVersion("route.openshift.io/v1") { + routeResources := k.manager.dynamicClient. + Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). + Namespace(namespace) + if rl, _ := routeResources.List(ctx, metav1.ListOptions{ + LabelSelector: managedLabelSelector.String(), + }); rl != nil { + for _, route := range rl.Items { + _ = routeResources.Delete(ctx, route.GetName(), metav1.DeleteOptions{}) + } + } + + } + return "Pod deleted successfully", + k.ResourcesDelete(ctx, &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, namespace, name) +} + +func (k *Kubernetes) PodsLog(ctx context.Context, namespace, name, container string) (string, error) { + tailLines := int64(256) + pods, err := k.manager.accessControlClientSet.Pods(k.NamespaceOrDefault(namespace)) + if err != nil { + return "", err + } + req := pods.GetLogs(name, &v1.PodLogOptions{ + TailLines: &tailLines, + Container: container, + }) + res := req.Do(ctx) + if res.Error() != nil { + return "", res.Error() + } + rawData, err := res.Raw() + if err != nil { + return "", err + } + return string(rawData), nil +} + +func (k *Kubernetes) PodsRun(ctx context.Context, namespace, name, image string, port int32) ([]*unstructured.Unstructured, error) { + if name == "" { + name = version.BinaryName + "-run-" + rand.String(5) + } + labels := map[string]string{ + AppKubernetesName: name, + AppKubernetesComponent: name, + AppKubernetesManagedBy: version.BinaryName, + AppKubernetesPartOf: version.BinaryName + "-run-sandbox", + } + // NewPod + var resources []any + pod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: 
k.NamespaceOrDefault(namespace), Labels: labels}, + Spec: v1.PodSpec{Containers: []v1.Container{{ + Name: name, + Image: image, + ImagePullPolicy: v1.PullAlways, + }}}, + } + resources = append(resources, pod) + if port > 0 { + pod.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: port}} + resources = append(resources, &v1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Service"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: k.NamespaceOrDefault(namespace), Labels: labels}, + Spec: v1.ServiceSpec{ + Selector: labels, + Type: v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{{Port: port, TargetPort: intstr.FromInt32(port)}}, + }, + }) + } + if port > 0 && k.supportsGroupVersion("route.openshift.io/v1") { + resources = append(resources, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": map[string]interface{}{ + "name": name, + "namespace": k.NamespaceOrDefault(namespace), + "labels": labels, + }, + "spec": map[string]interface{}{ + "to": map[string]interface{}{ + "kind": "Service", + "name": name, + "weight": 100, + }, + "port": map[string]interface{}{ + "targetPort": intstr.FromInt32(port), + }, + "tls": map[string]interface{}{ + "termination": "edge", + "insecureEdgeTerminationPolicy": "Redirect", + }, + }, + }, + }) + + } + + // Convert the objects to Unstructured and reuse resourcesCreateOrUpdate functionality + converter := runtime.DefaultUnstructuredConverter + var toCreate []*unstructured.Unstructured + for _, obj := range resources { + m, err := converter.ToUnstructured(obj) + if err != nil { + return nil, err + } + u := &unstructured.Unstructured{} + if err = converter.FromUnstructured(m, u); err != nil { + return nil, err + } + toCreate = append(toCreate, u) + } + return k.resourcesCreateOrUpdate(ctx, toCreate) +} + +func (k *Kubernetes) PodsTop(ctx context.Context, options PodsTopOptions) (*metrics.PodMetricsList, error) { + // TODO, 
maybe move to mcp Tools setup and omit in case metrics aren't available in the target cluster + if !k.supportsGroupVersion(metrics.GroupName + "/" + metricsv1beta1api.SchemeGroupVersion.Version) { + return nil, errors.New("metrics API is not available") + } + namespace := options.Namespace + if options.AllNamespaces && namespace == "" { + namespace = "" + } else { + namespace = k.NamespaceOrDefault(namespace) + } + return k.manager.accessControlClientSet.PodsMetricses(ctx, namespace, options.Name, options.ListOptions) +} + +func (k *Kubernetes) PodsExec(ctx context.Context, namespace, name, container string, command []string) (string, error) { + namespace = k.NamespaceOrDefault(namespace) + pods, err := k.manager.accessControlClientSet.Pods(namespace) + if err != nil { + return "", err + } + pod, err := pods.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return "", err + } + // https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L350-L352 + if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { + return "", fmt.Errorf("cannot exec into a container in a completed pod; current phase is %s", pod.Status.Phase) + } + if container == "" { + container = pod.Spec.Containers[0].Name + } + podExecOptions := &v1.PodExecOptions{ + Container: container, + Command: command, + Stdout: true, + Stderr: true, + } + executor, err := k.manager.accessControlClientSet.PodsExec(namespace, name, podExecOptions) + if err != nil { + return "", err + } + stdout := bytes.NewBuffer(make([]byte, 0)) + stderr := bytes.NewBuffer(make([]byte, 0)) + if err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdout: stdout, Stderr: stderr, Tty: false, + }); err != nil { + return "", err + } + if stdout.Len() > 0 { + return stdout.String(), nil + } + if stderr.Len() > 0 { + return stderr.String(), nil + } + return "", nil +} diff --git a/pkg 2/kubernetes/resources.go b/pkg 2/kubernetes/resources.go new 
file mode 100644 index 00000000..1f559e12 --- /dev/null +++ b/pkg 2/kubernetes/resources.go @@ -0,0 +1,209 @@ +package kubernetes + +import ( + "context" + "fmt" + "k8s.io/apimachinery/pkg/runtime" + "regexp" + "strings" + + "github.com/containers/kubernetes-mcp-server/pkg/version" + authv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/yaml" +) + +const ( + AppKubernetesComponent = "app.kubernetes.io/component" + AppKubernetesManagedBy = "app.kubernetes.io/managed-by" + AppKubernetesName = "app.kubernetes.io/name" + AppKubernetesPartOf = "app.kubernetes.io/part-of" +) + +type ResourceListOptions struct { + metav1.ListOptions + AsTable bool +} + +func (k *Kubernetes) ResourcesList(ctx context.Context, gvk *schema.GroupVersionKind, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { + gvr, err := k.resourceFor(gvk) + if err != nil { + return nil, err + } + + // Check if operation is allowed for all namespaces (applicable for namespaced resources) + isNamespaced, _ := k.isNamespaced(gvk) + if isNamespaced && !k.canIUse(ctx, gvr, namespace, "list") && namespace == "" { + namespace = k.manager.configuredNamespace() + } + if options.AsTable { + return k.resourcesListAsTable(ctx, gvk, gvr, namespace, options) + } + return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).List(ctx, options.ListOptions) +} + +func (k *Kubernetes) ResourcesGet(ctx context.Context, gvk *schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) { + gvr, err := k.resourceFor(gvk) + if err != nil { + return nil, err + } + + // If it's a namespaced resource and namespace wasn't provided, try to use the default configured one + if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced { + namespace = 
k.NamespaceOrDefault(namespace) + } + return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +func (k *Kubernetes) ResourcesCreateOrUpdate(ctx context.Context, resource string) ([]*unstructured.Unstructured, error) { + separator := regexp.MustCompile(`\r?\n---\r?\n`) + resources := separator.Split(resource, -1) + var parsedResources []*unstructured.Unstructured + for _, r := range resources { + var obj unstructured.Unstructured + if err := yaml.NewYAMLToJSONDecoder(strings.NewReader(r)).Decode(&obj); err != nil { + return nil, err + } + parsedResources = append(parsedResources, &obj) + } + return k.resourcesCreateOrUpdate(ctx, parsedResources) +} + +func (k *Kubernetes) ResourcesDelete(ctx context.Context, gvk *schema.GroupVersionKind, namespace, name string) error { + gvr, err := k.resourceFor(gvk) + if err != nil { + return err + } + + // If it's a namespaced resource and namespace wasn't provided, try to use the default configured one + if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced { + namespace = k.NamespaceOrDefault(namespace) + } + return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{}) +} + +// resourcesListAsTable retrieves a list of resources in a table format. +// It's almost identical to the dynamic.DynamicClient implementation, but it uses a specific Accept header to request the table format. 
+// dynamic.DynamicClient does not provide a way to set the HTTP header (TODO: create an issue to request this feature) +func (k *Kubernetes) resourcesListAsTable(ctx context.Context, gvk *schema.GroupVersionKind, gvr *schema.GroupVersionResource, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { + var url []string + if len(gvr.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", gvr.Group) + } + url = append(url, gvr.Version) + if len(namespace) > 0 { + url = append(url, "namespaces", namespace) + } + url = append(url, gvr.Resource) + var table metav1.Table + err := k.manager.discoveryClient.RESTClient(). + Get(). + SetHeader("Accept", strings.Join([]string{ + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName), + "application/json", + }, ",")). + AbsPath(url...). + SpecificallyVersionedParams(&options.ListOptions, ParameterCodec, schema.GroupVersion{Version: "v1"}). + Do(ctx).Into(&table) + if err != nil { + return nil, err + } + // Add metav1.Table apiVersion and kind to the unstructured object (server may not return these fields) + table.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("Table")) + // Add additional columns for fields that aren't returned by the server + table.ColumnDefinitions = append([]metav1.TableColumnDefinition{ + {Name: "apiVersion", Type: "string"}, + {Name: "kind", Type: "string"}, + }, table.ColumnDefinitions...) + for i := range table.Rows { + row := &table.Rows[i] + row.Cells = append([]interface{}{ + gvr.GroupVersion().String(), + gvk.Kind, + }, row.Cells...) 
+ } + unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&table) + return &unstructured.Unstructured{Object: unstructuredObject}, err +} + +func (k *Kubernetes) resourcesCreateOrUpdate(ctx context.Context, resources []*unstructured.Unstructured) ([]*unstructured.Unstructured, error) { + for i, obj := range resources { + gvk := obj.GroupVersionKind() + gvr, rErr := k.resourceFor(&gvk) + if rErr != nil { + return nil, rErr + } + + namespace := obj.GetNamespace() + // If it's a namespaced resource and namespace wasn't provided, try to use the default configured one + if namespaced, nsErr := k.isNamespaced(&gvk); nsErr == nil && namespaced { + namespace = k.NamespaceOrDefault(namespace) + } + resources[i], rErr = k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{ + FieldManager: version.BinaryName, + }) + if rErr != nil { + return nil, rErr + } + // Clear the cache to ensure the next operation is performed on the latest exposed APIs (will change after the CRD creation) + if gvk.Kind == "CustomResourceDefinition" { + k.manager.accessControlRESTMapper.Reset() + } + } + return resources, nil +} + +func (k *Kubernetes) resourceFor(gvk *schema.GroupVersionKind) (*schema.GroupVersionResource, error) { + m, err := k.manager.accessControlRESTMapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) + if err != nil { + return nil, err + } + return &m.Resource, nil +} + +func (k *Kubernetes) isNamespaced(gvk *schema.GroupVersionKind) (bool, error) { + apiResourceList, err := k.manager.discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + return false, err + } + for _, apiResource := range apiResourceList.APIResources { + if apiResource.Kind == gvk.Kind { + return apiResource.Namespaced, nil + } + } + return false, nil +} + +func (k *Kubernetes) supportsGroupVersion(groupVersion string) bool { + if _, err := 
k.manager.discoveryClient.ServerResourcesForGroupVersion(groupVersion); err != nil { + return false + } + return true +} + +func (k *Kubernetes) canIUse(ctx context.Context, gvr *schema.GroupVersionResource, namespace, verb string) bool { + accessReviews, err := k.manager.accessControlClientSet.SelfSubjectAccessReviews() + if err != nil { + return false + } + response, err := accessReviews.Create(ctx, &authv1.SelfSubjectAccessReview{ + Spec: authv1.SelfSubjectAccessReviewSpec{ResourceAttributes: &authv1.ResourceAttributes{ + Namespace: namespace, + Verb: verb, + Group: gvr.Group, + Version: gvr.Version, + Resource: gvr.Resource, + }}, + }, metav1.CreateOptions{}) + if err != nil { + // TODO: maybe return the error too + return false + } + return response.Status.Allowed +} diff --git a/pkg 2/kubernetes/token.go b/pkg 2/kubernetes/token.go new file mode 100644 index 00000000..bac697c8 --- /dev/null +++ b/pkg 2/kubernetes/token.go @@ -0,0 +1,39 @@ +package kubernetes + +import ( + "context" + "fmt" + authenticationv1api "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) { + tokenReviewClient, err := m.accessControlClientSet.TokenReview() + if err != nil { + return nil, nil, err + } + tokenReview := &authenticationv1api.TokenReview{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "authentication.k8s.io/v1", + Kind: "TokenReview", + }, + Spec: authenticationv1api.TokenReviewSpec{ + Token: token, + Audiences: []string{audience}, + }, + } + + result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to create token review: %v", err) + } + + if !result.Status.Authenticated { + if result.Status.Error != "" { + return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error) + } + return nil, nil, fmt.Errorf("token 
authentication failed") + } + + return &result.Status.User, result.Status.Audiences, nil +} diff --git a/pkg 2/mcp/common_test.go b/pkg 2/mcp/common_test.go new file mode 100644 index 00000000..8e4e49d3 --- /dev/null +++ b/pkg 2/mcp/common_test.go @@ -0,0 +1,421 @@ +package mcp + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "net/http/httptest" + "os" + "path/filepath" + "runtime" + "strconv" + "testing" + "time" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/output" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/pkg/errors" + "github.com/spf13/afero" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1spec "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" + toolswatch "k8s.io/client-go/tools/watch" + "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +// envTest has an expensive setup, so we only want to do it once per entire test run. 
+var envTest *envtest.Environment +var envTestRestConfig *rest.Config +var envTestUser = envtest.User{Name: "test-user", Groups: []string{"test:users"}} + +func TestMain(m *testing.M) { + // Set up + envTestDir, err := store.DefaultStoreDir() + if err != nil { + panic(err) + } + envTestEnv := &env.Env{ + FS: afero.Afero{Fs: afero.NewOsFs()}, + Out: os.Stdout, + Client: &remote.HTTPClient{ + IndexURL: remote.DefaultIndexURL, + }, + Platform: versions.PlatformItem{ + Platform: versions.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + }, + Version: versions.AnyVersion, + Store: store.NewAt(envTestDir), + } + envTestEnv.CheckCoherence() + workflows.Use{}.Do(envTestEnv) + versionDir := envTestEnv.Platform.BaseName(*envTestEnv.Version.AsConcrete()) + envTest = &envtest.Environment{ + BinaryAssetsDirectory: filepath.Join(envTestDir, "k8s", versionDir), + } + adminSystemMasterBaseConfig, _ := envTest.Start() + au, err := envTest.AddUser(envTestUser, adminSystemMasterBaseConfig) + if err != nil { + panic(err) + } + envTestRestConfig = au.Config() + + //Create test data as administrator + ctx := context.Background() + restoreAuth(ctx) + createTestData(ctx) + + // Test! 
+ code := m.Run() + + // Tear down + if envTest != nil { + _ = envTest.Stop() + } + os.Exit(code) +} + +type mcpContext struct { + profile Profile + listOutput output.Output + logLevel int + + staticConfig *config.StaticConfig + clientOptions []transport.ClientOption + before func(*mcpContext) + after func(*mcpContext) + ctx context.Context + tempDir string + cancel context.CancelFunc + mcpServer *Server + mcpHttpServer *httptest.Server + mcpClient *client.Client + klogState klog.State + logBuffer bytes.Buffer +} + +func (c *mcpContext) beforeEach(t *testing.T) { + var err error + c.ctx, c.cancel = context.WithCancel(t.Context()) + c.tempDir = t.TempDir() + c.withKubeConfig(nil) + if c.profile == nil { + c.profile = &FullProfile{} + } + if c.listOutput == nil { + c.listOutput = output.Yaml + } + if c.staticConfig == nil { + c.staticConfig = &config.StaticConfig{ + ReadOnly: false, + DisableDestructive: false, + } + } + if c.before != nil { + c.before(c) + } + // Set up logging + c.klogState = klog.CaptureState() + flags := flag.NewFlagSet("test", flag.ContinueOnError) + klog.InitFlags(flags) + _ = flags.Set("v", strconv.Itoa(c.logLevel)) + klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(c.logLevel), textlogger.Output(&c.logBuffer)))) + // MCP Server + if c.mcpServer, err = NewServer(Configuration{ + Profile: c.profile, + ListOutput: c.listOutput, + StaticConfig: c.staticConfig, + }); err != nil { + t.Fatal(err) + return + } + c.mcpHttpServer = server.NewTestServer(c.mcpServer.server, server.WithSSEContextFunc(contextFunc)) + if c.mcpClient, err = client.NewSSEMCPClient(c.mcpHttpServer.URL+"/sse", c.clientOptions...); err != nil { + t.Fatal(err) + return + } + // MCP Client + if err = c.mcpClient.Start(c.ctx); err != nil { + t.Fatal(err) + return + } + initRequest := mcp.InitializeRequest{} + initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION + initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: 
"1.33.7"} + _, err = c.mcpClient.Initialize(c.ctx, initRequest) + if err != nil { + t.Fatal(err) + return + } +} + +func (c *mcpContext) afterEach() { + if c.after != nil { + c.after(c) + } + c.cancel() + c.mcpServer.Close() + _ = c.mcpClient.Close() + c.mcpHttpServer.Close() + c.klogState.Restore() +} + +func testCase(t *testing.T, test func(c *mcpContext)) { + testCaseWithContext(t, &mcpContext{profile: &FullProfile{}}, test) +} + +func testCaseWithContext(t *testing.T, mcpCtx *mcpContext, test func(c *mcpContext)) { + mcpCtx.beforeEach(t) + defer mcpCtx.afterEach() + test(mcpCtx) +} + +// withKubeConfig sets up a fake kubeconfig in the temp directory based on the provided rest.Config +func (c *mcpContext) withKubeConfig(rc *rest.Config) *api.Config { + fakeConfig := api.NewConfig() + fakeConfig.Clusters["fake"] = api.NewCluster() + fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443" + fakeConfig.Clusters["additional-cluster"] = api.NewCluster() + fakeConfig.AuthInfos["fake"] = api.NewAuthInfo() + fakeConfig.AuthInfos["additional-auth"] = api.NewAuthInfo() + if rc != nil { + fakeConfig.Clusters["fake"].Server = rc.Host + fakeConfig.Clusters["fake"].CertificateAuthorityData = rc.CAData + fakeConfig.AuthInfos["fake"].ClientKeyData = rc.KeyData + fakeConfig.AuthInfos["fake"].ClientCertificateData = rc.CertData + } + fakeConfig.Contexts["fake-context"] = api.NewContext() + fakeConfig.Contexts["fake-context"].Cluster = "fake" + fakeConfig.Contexts["fake-context"].AuthInfo = "fake" + fakeConfig.Contexts["additional-context"] = api.NewContext() + fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster" + fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth" + fakeConfig.CurrentContext = "fake-context" + kubeConfig := filepath.Join(c.tempDir, "config") + _ = clientcmd.WriteToFile(*fakeConfig, kubeConfig) + _ = os.Setenv("KUBECONFIG", kubeConfig) + if c.mcpServer != nil { + if err := c.mcpServer.reloadKubernetesClient(); err != 
nil { + panic(err) + } + } + return fakeConfig +} + +// withEnvTest sets up the environment for kubeconfig to be used with envTest +func (c *mcpContext) withEnvTest() { + c.withKubeConfig(envTestRestConfig) +} + +// inOpenShift sets up the kubernetes environment to seem to be running OpenShift +func inOpenShift(c *mcpContext) { + c.withEnvTest() + crdTemplate := ` + { + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": {"name": "%s"}, + "spec": { + "group": "%s", + "versions": [{ + "name": "v1","served": true,"storage": true, + "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}} + }], + "scope": "%s", + "names": {"plural": "%s","singular": "%s","kind": "%s"} + } + }` + tasks, _ := errgroup.WithContext(c.ctx) + tasks.Go(func() error { + return c.crdApply(fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io", + "Cluster", "projects", "project", "Project")) + }) + tasks.Go(func() error { + return c.crdApply(fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io", + "Namespaced", "routes", "route", "Route")) + }) + if err := tasks.Wait(); err != nil { + panic(err) + } +} + +// inOpenShiftClear clears the kubernetes environment so it no longer seems to be running OpenShift +func inOpenShiftClear(c *mcpContext) { + tasks, _ := errgroup.WithContext(c.ctx) + tasks.Go(func() error { return c.crdDelete("projects.project.openshift.io") }) + tasks.Go(func() error { return c.crdDelete("routes.route.openshift.io") }) + if err := tasks.Wait(); err != nil { + panic(err) + } +} + +// newKubernetesClient creates a new Kubernetes client with the envTest kubeconfig +func (c *mcpContext) newKubernetesClient() *kubernetes.Clientset { + return kubernetes.NewForConfigOrDie(envTestRestConfig) +} + +// newApiExtensionsClient creates a new ApiExtensions client with the envTest kubeconfig +func (c *mcpContext) newApiExtensionsClient() 
*apiextensionsv1.ApiextensionsV1Client { + return apiextensionsv1.NewForConfigOrDie(envTestRestConfig) +} + +// crdApply creates a CRD from the provided resource string and waits for it to be established +func (c *mcpContext) crdApply(resource string) error { + apiExtensionsV1Client := c.newApiExtensionsClient() + var crd = &apiextensionsv1spec.CustomResourceDefinition{} + err := json.Unmarshal([]byte(resource), crd) + if err != nil { + return fmt.Errorf("failed to create CRD %v", err) + } + _, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(c.ctx, crd, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create CRD %v", err) + } + c.crdWaitUntilReady(crd.Name) + return nil +} + +// crdDelete deletes a CRD by name and waits for it to be removed +func (c *mcpContext) crdDelete(name string) error { + apiExtensionsV1Client := c.newApiExtensionsClient() + err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(c.ctx, name, metav1.DeleteOptions{ + GracePeriodSeconds: ptr.To(int64(0)), + }) + iteration := 0 + for iteration < 100 { + if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, name, metav1.GetOptions{}); derr != nil { + break + } + time.Sleep(5 * time.Millisecond) + iteration++ + } + if err != nil { + return errors.Wrap(err, "failed to delete CRD") + } + return nil +} + +// crdWaitUntilReady waits for a CRD to be established +func (c *mcpContext) crdWaitUntilReady(name string) { + watcher, err := c.newApiExtensionsClient().CustomResourceDefinitions().Watch(c.ctx, metav1.ListOptions{ + FieldSelector: "metadata.name=" + name, + }) + if err != nil { + panic(fmt.Errorf("failed to watch CRD %v", err)) + } + _, err = toolswatch.UntilWithoutRetry(c.ctx, watcher, func(event watch.Event) (bool, error) { + for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions { + if c.Type == apiextensionsv1spec.Established && c.Status == apiextensionsv1spec.ConditionTrue { + return 
true, nil + } + } + return false, nil + }) + if err != nil { + panic(fmt.Errorf("failed to wait for CRD %v", err)) + } +} + +// callTool helper function to call a tool by name with arguments +func (c *mcpContext) callTool(name string, args map[string]interface{}) (*mcp.CallToolResult, error) { + callToolRequest := mcp.CallToolRequest{} + callToolRequest.Params.Name = name + callToolRequest.Params.Arguments = args + return c.mcpClient.CallTool(c.ctx, callToolRequest) +} + +func restoreAuth(ctx context.Context) { + kubernetesAdmin := kubernetes.NewForConfigOrDie(envTest.Config) + // Authorization + _, _ = kubernetesAdmin.RbacV1().ClusterRoles().Update(ctx, &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-all"}, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"*"}, + APIGroups: []string{"*"}, + Resources: []string{"*"}, + }}, + }, metav1.UpdateOptions{}) + _, _ = kubernetesAdmin.RbacV1().ClusterRoleBindings().Update(ctx, &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-all"}, + Subjects: []rbacv1.Subject{{Kind: "Group", Name: envTestUser.Groups[0]}}, + RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "allow-all"}, + }, metav1.UpdateOptions{}) +} + +func createTestData(ctx context.Context) { + kubernetesAdmin := kubernetes.NewForConfigOrDie(envTestRestConfig) + // Namespaces + _, _ = kubernetesAdmin.CoreV1().Namespaces(). + Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-1"}}, metav1.CreateOptions{}) + _, _ = kubernetesAdmin.CoreV1().Namespaces(). + Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-2"}}, metav1.CreateOptions{}) + _, _ = kubernetesAdmin.CoreV1().Namespaces(). 
+ Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-to-delete"}}, metav1.CreateOptions{}) + _, _ = kubernetesAdmin.CoreV1().Pods("default").Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-pod-in-default", + Labels: map[string]string{"app": "nginx"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, metav1.CreateOptions{}) + // Pods for listing + _, _ = kubernetesAdmin.CoreV1().Pods("ns-1").Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-pod-in-ns-1", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, metav1.CreateOptions{}) + _, _ = kubernetesAdmin.CoreV1().Pods("ns-2").Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-pod-in-ns-2", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, metav1.CreateOptions{}) + _, _ = kubernetesAdmin.CoreV1().ConfigMaps("default"). + Create(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-delete"}}, metav1.CreateOptions{}) +} diff --git a/pkg 2/mcp/configuration.go b/pkg 2/mcp/configuration.go new file mode 100644 index 00000000..79ebaef4 --- /dev/null +++ b/pkg 2/mcp/configuration.go @@ -0,0 +1,46 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + + "github.com/containers/kubernetes-mcp-server/pkg/output" +) + +func (s *Server) initConfiguration() []server.ServerTool { + tools := []server.ServerTool{ + {Tool: mcp.NewTool("configuration_view", + mcp.WithDescription("Get the current Kubernetes configuration content as a kubeconfig YAML"), + mcp.WithBoolean("minified", mcp.Description("Return a minified version of the configuration. "+ + "If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. 
"+ + "If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. "+ + "(Optional, default true)")), + // Tool annotations + mcp.WithTitleAnnotation("Configuration: View"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.configurationView}, + } + return tools +} + +func (s *Server) configurationView(_ context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + minify := true + minified := ctr.GetArguments()["minified"] + if _, ok := minified.(bool); ok { + minify = minified.(bool) + } + ret, err := s.k.ConfigurationView(minify) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get configuration: %v", err)), nil + } + configurationYaml, err := output.MarshalYaml(ret) + if err != nil { + err = fmt.Errorf("failed to get configuration: %v", err) + } + return NewTextResult(configurationYaml, err), nil +} diff --git a/pkg 2/mcp/configuration_test.go b/pkg 2/mcp/configuration_test.go new file mode 100644 index 00000000..57fea486 --- /dev/null +++ b/pkg 2/mcp/configuration_test.go @@ -0,0 +1,178 @@ +package mcp + +import ( + "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" + "github.com/mark3labs/mcp-go/mcp" + "k8s.io/client-go/rest" + v1 "k8s.io/client-go/tools/clientcmd/api/v1" + "sigs.k8s.io/yaml" + "testing" +) + +func TestConfigurationView(t *testing.T) { + testCase(t, func(c *mcpContext) { + toolResult, err := c.callTool("configuration_view", map[string]interface{}{}) + t.Run("configuration_view returns configuration", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + }) + var decoded *v1.Config + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("configuration_view has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("configuration_view returns 
current-context", func(t *testing.T) { + if decoded.CurrentContext != "fake-context" { + t.Errorf("fake-context not found: %v", decoded.CurrentContext) + } + }) + t.Run("configuration_view returns context info", func(t *testing.T) { + if len(decoded.Contexts) != 1 { + t.Errorf("invalid context count, expected 1, got %v", len(decoded.Contexts)) + } + if decoded.Contexts[0].Name != "fake-context" { + t.Errorf("fake-context not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.Cluster != "fake" { + t.Errorf("fake-cluster not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.AuthInfo != "fake" { + t.Errorf("fake-auth not found: %v", decoded.Contexts) + } + }) + t.Run("configuration_view returns cluster info", func(t *testing.T) { + if len(decoded.Clusters) != 1 { + t.Errorf("invalid cluster count, expected 1, got %v", len(decoded.Clusters)) + } + if decoded.Clusters[0].Name != "fake" { + t.Errorf("fake-cluster not found: %v", decoded.Clusters) + } + if decoded.Clusters[0].Cluster.Server != "https://127.0.0.1:6443" { + t.Errorf("fake-server not found: %v", decoded.Clusters) + } + }) + t.Run("configuration_view returns auth info", func(t *testing.T) { + if len(decoded.AuthInfos) != 1 { + t.Errorf("invalid auth info count, expected 1, got %v", len(decoded.AuthInfos)) + } + if decoded.AuthInfos[0].Name != "fake" { + t.Errorf("fake-auth not found: %v", decoded.AuthInfos) + } + }) + toolResult, err = c.callTool("configuration_view", map[string]interface{}{ + "minified": false, + }) + t.Run("configuration_view with minified=false returns configuration", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + }) + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("configuration_view with minified=false has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("configuration_view with minified=false returns 
additional context info", func(t *testing.T) { + if len(decoded.Contexts) != 2 { + t.Errorf("invalid context count, expected2, got %v", len(decoded.Contexts)) + } + if decoded.Contexts[0].Name != "additional-context" { + t.Errorf("additional-context not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.Cluster != "additional-cluster" { + t.Errorf("additional-cluster not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.AuthInfo != "additional-auth" { + t.Errorf("additional-auth not found: %v", decoded.Contexts) + } + if decoded.Contexts[1].Name != "fake-context" { + t.Errorf("fake-context not found: %v", decoded.Contexts) + } + }) + t.Run("configuration_view with minified=false returns cluster info", func(t *testing.T) { + if len(decoded.Clusters) != 2 { + t.Errorf("invalid cluster count, expected 2, got %v", len(decoded.Clusters)) + } + if decoded.Clusters[0].Name != "additional-cluster" { + t.Errorf("additional-cluster not found: %v", decoded.Clusters) + } + }) + t.Run("configuration_view with minified=false returns auth info", func(t *testing.T) { + if len(decoded.AuthInfos) != 2 { + t.Errorf("invalid auth info count, expected 2, got %v", len(decoded.AuthInfos)) + } + if decoded.AuthInfos[0].Name != "additional-auth" { + t.Errorf("additional-auth not found: %v", decoded.AuthInfos) + } + }) + }) +} + +func TestConfigurationViewInCluster(t *testing.T) { + kubernetes.InClusterConfig = func() (*rest.Config, error) { + return &rest.Config{ + Host: "https://kubernetes.default.svc", + BearerToken: "fake-token", + }, nil + } + defer func() { + kubernetes.InClusterConfig = rest.InClusterConfig + }() + testCase(t, func(c *mcpContext) { + toolResult, err := c.callTool("configuration_view", map[string]interface{}{}) + t.Run("configuration_view returns configuration", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + }) + var decoded *v1.Config + err = 
yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("configuration_view has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("configuration_view returns current-context", func(t *testing.T) { + if decoded.CurrentContext != "context" { + t.Fatalf("context not found: %v", decoded.CurrentContext) + } + }) + t.Run("configuration_view returns context info", func(t *testing.T) { + if len(decoded.Contexts) != 1 { + t.Fatalf("invalid context count, expected 1, got %v", len(decoded.Contexts)) + } + if decoded.Contexts[0].Name != "context" { + t.Fatalf("context not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.Cluster != "cluster" { + t.Fatalf("cluster not found: %v", decoded.Contexts) + } + if decoded.Contexts[0].Context.AuthInfo != "user" { + t.Fatalf("user not found: %v", decoded.Contexts) + } + }) + t.Run("configuration_view returns cluster info", func(t *testing.T) { + if len(decoded.Clusters) != 1 { + t.Fatalf("invalid cluster count, expected 1, got %v", len(decoded.Clusters)) + } + if decoded.Clusters[0].Name != "cluster" { + t.Fatalf("cluster not found: %v", decoded.Clusters) + } + if decoded.Clusters[0].Cluster.Server != "https://kubernetes.default.svc" { + t.Fatalf("server not found: %v", decoded.Clusters) + } + }) + t.Run("configuration_view returns auth info", func(t *testing.T) { + if len(decoded.AuthInfos) != 1 { + t.Fatalf("invalid auth info count, expected 1, got %v", len(decoded.AuthInfos)) + } + if decoded.AuthInfos[0].Name != "user" { + t.Fatalf("user not found: %v", decoded.AuthInfos) + } + }) + }) +} diff --git a/pkg 2/mcp/events.go b/pkg 2/mcp/events.go new file mode 100644 index 00000000..4429a6cd --- /dev/null +++ b/pkg 2/mcp/events.go @@ -0,0 +1,49 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + + 
"github.com/containers/kubernetes-mcp-server/pkg/output" +) + +func (s *Server) initEvents() []server.ServerTool { + return []server.ServerTool{ + {Tool: mcp.NewTool("events_list", + mcp.WithDescription("List all the Kubernetes events in the current cluster from all namespaces"), + mcp.WithString("namespace", + mcp.Description("Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces")), + // Tool annotations + mcp.WithTitleAnnotation("Events: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.eventsList}, + } +} + +func (s *Server) eventsList(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace := ctr.GetArguments()["namespace"] + if namespace == nil { + namespace = "" + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + eventMap, err := derived.EventsList(ctx, namespace.(string)) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list events in all namespaces: %v", err)), nil + } + if len(eventMap) == 0 { + return NewTextResult("No events found", nil), nil + } + yamlEvents, err := output.MarshalYaml(eventMap) + if err != nil { + err = fmt.Errorf("failed to list events in all namespaces: %v", err) + } + return NewTextResult(fmt.Sprintf("The following events (YAML format) were found:\n%s", yamlEvents), err), nil +} diff --git a/pkg 2/mcp/events_test.go b/pkg 2/mcp/events_test.go new file mode 100644 index 00000000..f6609ed4 --- /dev/null +++ b/pkg 2/mcp/events_test.go @@ -0,0 +1,115 @@ +package mcp + +import ( + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/mark3labs/mcp-go/mcp" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" +) + +func TestEventsList(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + toolResult, err := c.callTool("events_list", 
map[string]interface{}{}) + t.Run("events_list with no events returns OK", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "No events found" { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + client := c.newKubernetesClient() + for _, ns := range []string{"default", "ns-1"} { + _, _ = client.CoreV1().Events(ns).Create(c.ctx, &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: "an-event-in-" + ns, + }, + InvolvedObject: v1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Name: "a-pod", + Namespace: ns, + }, + Type: "Normal", + Message: "The event message", + }, metav1.CreateOptions{}) + } + toolResult, err = c.callTool("events_list", map[string]interface{}{}) + t.Run("events_list with events returns all OK", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "The following events (YAML format) were found:\n"+ + "- InvolvedObject:\n"+ + " Kind: Pod\n"+ + " Name: a-pod\n"+ + " apiVersion: v1\n"+ + " Message: The event message\n"+ + " Namespace: default\n"+ + " Reason: \"\"\n"+ + " Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+ + " Type: Normal\n"+ + "- InvolvedObject:\n"+ + " Kind: Pod\n"+ + " Name: a-pod\n"+ + " apiVersion: v1\n"+ + " Message: The event message\n"+ + " Namespace: ns-1\n"+ + " Reason: \"\"\n"+ + " Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+ + " Type: Normal\n" { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + toolResult, err = c.callTool("events_list", map[string]interface{}{ + "namespace": "ns-1", + }) + t.Run("events_list in namespace with events returns from namespace OK", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if 
toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "The following events (YAML format) were found:\n"+ + "- InvolvedObject:\n"+ + " Kind: Pod\n"+ + " Name: a-pod\n"+ + " apiVersion: v1\n"+ + " Message: The event message\n"+ + " Namespace: ns-1\n"+ + " Reason: \"\"\n"+ + " Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+ + " Type: Normal\n" { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestEventsListDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Event"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + eventList, _ := c.callTool("events_list", map[string]interface{}{}) + t.Run("events_list has error", func(t *testing.T) { + if !eventList.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("events_list describes denial", func(t *testing.T) { + expectedMessage := "failed to list events in all namespaces: resource not allowed: /v1, Kind=Event" + if eventList.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, eventList.Content[0].(mcp.TextContent).Text) + } + }) + }) +} diff --git a/pkg 2/mcp/helm.go b/pkg 2/mcp/helm.go new file mode 100644 index 00000000..e2659653 --- /dev/null +++ b/pkg 2/mcp/helm.go @@ -0,0 +1,118 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" +) + +func (s *Server) initHelm() []server.ServerTool { + return []server.ServerTool{ + {Tool: mcp.NewTool("helm_install", + mcp.WithDescription("Install a Helm chart in the current or provided namespace"), + mcp.WithString("chart", mcp.Description("Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress)"), mcp.Required()), + 
mcp.WithObject("values", mcp.Description("Values to pass to the Helm chart (Optional)")), + mcp.WithString("name", mcp.Description("Name of the Helm release (Optional, random name if not provided)")), + mcp.WithString("namespace", mcp.Description("Namespace to install the Helm chart in (Optional, current namespace if not provided)")), + // Tool annotations + mcp.WithTitleAnnotation("Helm: Install"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithIdempotentHintAnnotation(false), // TODO: consider replacing implementation with equivalent to: helm upgrade --install + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.helmInstall}, + {Tool: mcp.NewTool("helm_list", + mcp.WithDescription("List all the Helm releases in the current or provided namespace (or in all namespaces if specified)"), + mcp.WithString("namespace", mcp.Description("Namespace to list Helm releases from (Optional, all namespaces if not provided)")), + mcp.WithBoolean("all_namespaces", mcp.Description("If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional)")), + // Tool annotations + mcp.WithTitleAnnotation("Helm: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.helmList}, + {Tool: mcp.NewTool("helm_uninstall", + mcp.WithDescription("Uninstall a Helm release in the current or provided namespace"), + mcp.WithString("name", mcp.Description("Name of the Helm release to uninstall"), mcp.Required()), + mcp.WithString("namespace", mcp.Description("Namespace to uninstall the Helm release from (Optional, current namespace if not provided)")), + // Tool annotations + mcp.WithTitleAnnotation("Helm: Uninstall"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.helmUninstall}, + } +} + +func (s 
*Server) helmInstall(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var chart string + ok := false + if chart, ok = ctr.GetArguments()["chart"].(string); !ok { + return NewTextResult("", fmt.Errorf("failed to install helm chart, missing argument chart")), nil + } + values := map[string]interface{}{} + if v, ok := ctr.GetArguments()["values"].(map[string]interface{}); ok { + values = v + } + name := "" + if v, ok := ctr.GetArguments()["name"].(string); ok { + name = v + } + namespace := "" + if v, ok := ctr.GetArguments()["namespace"].(string); ok { + namespace = v + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.NewHelm().Install(ctx, chart, values, name, namespace) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to install helm chart '%s': %w", chart, err)), nil + } + return NewTextResult(ret, err), nil +} + +func (s *Server) helmList(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + allNamespaces := false + if v, ok := ctr.GetArguments()["all_namespaces"].(bool); ok { + allNamespaces = v + } + namespace := "" + if v, ok := ctr.GetArguments()["namespace"].(string); ok { + namespace = v + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.NewHelm().List(namespace, allNamespaces) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list helm releases in namespace '%s': %w", namespace, err)), nil + } + return NewTextResult(ret, err), nil +} + +func (s *Server) helmUninstall(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var name string + ok := false + if name, ok = ctr.GetArguments()["name"].(string); !ok { + return NewTextResult("", fmt.Errorf("failed to uninstall helm chart, missing argument name")), nil + } + namespace := "" + if v, ok := ctr.GetArguments()["namespace"].(string); ok { + namespace = v + } + derived, err := s.k.Derived(ctx) + if 
err != nil { + return nil, err + } + ret, err := derived.NewHelm().Uninstall(name, namespace) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to uninstall helm chart '%s': %w", name, err)), nil + } + return NewTextResult(ret, err), nil +} diff --git a/pkg 2/mcp/helm_test.go b/pkg 2/mcp/helm_test.go new file mode 100644 index 00000000..2195b20a --- /dev/null +++ b/pkg 2/mcp/helm_test.go @@ -0,0 +1,264 @@ +package mcp + +import ( + "context" + "encoding/base64" + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/mark3labs/mcp-go/mcp" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "path/filepath" + "runtime" + "sigs.k8s.io/yaml" + "strings" + "testing" +) + +func TestHelmInstall(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + _, file, _, _ := runtime.Caller(0) + chartPath := filepath.Join(filepath.Dir(file), "testdata", "helm-chart-no-op") + toolResult, err := c.callTool("helm_install", map[string]interface{}{ + "chart": chartPath, + }) + t.Run("helm_install with local chart and no release name, returns installed chart", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + var decoded []map[string]interface{} + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + if !strings.HasPrefix(decoded[0]["name"].(string), "helm-chart-no-op-") { + t.Fatalf("invalid helm install name, expected no-op-*, got %v", decoded[0]["name"]) + } + if decoded[0]["namespace"] != "default" { + t.Fatalf("invalid helm install namespace, expected default, got %v", decoded[0]["namespace"]) + } + if decoded[0]["chart"] != "no-op" { + t.Fatalf("invalid helm install name, expected release name, got empty") + } + if decoded[0]["chartVersion"] 
!= "1.33.7" { + t.Fatalf("invalid helm install version, expected 1.33.7, got empty") + } + if decoded[0]["status"] != "deployed" { + t.Fatalf("invalid helm install status, expected deployed, got %v", decoded[0]["status"]) + } + if decoded[0]["revision"] != float64(1) { + t.Fatalf("invalid helm install revision, expected 1, got %v", decoded[0]["revision"]) + } + }) + }) +} + +func TestHelmInstallDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Secret"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + _, file, _, _ := runtime.Caller(0) + chartPath := filepath.Join(filepath.Dir(file), "testdata", "helm-chart-secret") + helmInstall, _ := c.callTool("helm_install", map[string]interface{}{ + "chart": chartPath, + }) + t.Run("helm_install has error", func(t *testing.T) { + if !helmInstall.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("helm_install describes denial", func(t *testing.T) { + toolOutput := helmInstall.Content[0].(mcp.TextContent).Text + expectedMessage := ": resource not allowed: /v1, Kind=Secret" + if !strings.HasPrefix(toolOutput, "failed to install helm chart") || !strings.HasSuffix(toolOutput, expectedMessage) { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, helmInstall.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestHelmList(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + clearHelmReleases(c.ctx, kc) + toolResult, err := c.callTool("helm_list", map[string]interface{}{}) + t.Run("helm_list with no releases, returns not found", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "No Helm releases found" { + t.Fatalf("unexpected result %v", 
toolResult.Content[0].(mcp.TextContent).Text) + } + }) + _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sh.helm.release.v1.release-to-list", + Labels: map[string]string{"owner": "helm", "name": "release-to-list"}, + }, + Data: map[string][]byte{ + "release": []byte(base64.StdEncoding.EncodeToString([]byte("{" + + "\"name\":\"release-to-list\"," + + "\"info\":{\"status\":\"deployed\"}" + + "}"))), + }, + }, metav1.CreateOptions{}) + toolResult, err = c.callTool("helm_list", map[string]interface{}{}) + t.Run("helm_list with deployed release, returns release", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + var decoded []map[string]interface{} + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + if len(decoded) != 1 { + t.Fatalf("invalid helm list count, expected 1, got %v", len(decoded)) + } + if decoded[0]["name"] != "release-to-list" { + t.Fatalf("invalid helm list name, expected release-to-list, got %v", decoded[0]["name"]) + } + if decoded[0]["status"] != "deployed" { + t.Fatalf("invalid helm list status, expected deployed, got %v", decoded[0]["status"]) + } + }) + toolResult, err = c.callTool("helm_list", map[string]interface{}{"namespace": "ns-1"}) + t.Run("helm_list with deployed release in other namespaces, returns not found", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "No Helm releases found" { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + toolResult, err = c.callTool("helm_list", map[string]interface{}{"namespace": "ns-1", "all_namespaces": true}) + t.Run("helm_list with deployed release in all 
namespaces, returns release", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + var decoded []map[string]interface{} + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + if len(decoded) != 1 { + t.Fatalf("invalid helm list count, expected 1, got %v", len(decoded)) + } + if decoded[0]["name"] != "release-to-list" { + t.Fatalf("invalid helm list name, expected release-to-list, got %v", decoded[0]["name"]) + } + if decoded[0]["status"] != "deployed" { + t.Fatalf("invalid helm list status, expected deployed, got %v", decoded[0]["status"]) + } + }) + }) +} + +func TestHelmUninstall(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + clearHelmReleases(c.ctx, kc) + toolResult, err := c.callTool("helm_uninstall", map[string]interface{}{ + "name": "release-to-uninstall", + }) + t.Run("helm_uninstall with no releases, returns not found", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if toolResult.Content[0].(mcp.TextContent).Text != "Release release-to-uninstall not found" { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sh.helm.release.v1.existent-release-to-uninstall.v0", + Labels: map[string]string{"owner": "helm", "name": "existent-release-to-uninstall"}, + }, + Data: map[string][]byte{ + "release": []byte(base64.StdEncoding.EncodeToString([]byte("{" + + "\"name\":\"existent-release-to-uninstall\"," + + "\"info\":{\"status\":\"deployed\"}" + + "}"))), + }, + }, metav1.CreateOptions{}) + toolResult, err = c.callTool("helm_uninstall", map[string]interface{}{ + "name": 
"existent-release-to-uninstall", + }) + t.Run("helm_uninstall with deployed release, returns uninstalled", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + if !strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "Uninstalled release existent-release-to-uninstall") { + t.Fatalf("unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) + } + _, err = kc.CoreV1().Secrets("default").Get(c.ctx, "sh.helm.release.v1.existent-release-to-uninstall.v0", metav1.GetOptions{}) + if !errors.IsNotFound(err) { + t.Fatalf("expected release to be deleted, but it still exists") + } + }) + }) +} + +func TestHelmUninstallDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Secret"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + clearHelmReleases(c.ctx, kc) + _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sh.helm.release.v1.existent-release-to-uninstall.v0", + Labels: map[string]string{"owner": "helm", "name": "existent-release-to-uninstall"}, + }, + Data: map[string][]byte{ + "release": []byte(base64.StdEncoding.EncodeToString([]byte("{" + + "\"name\":\"existent-release-to-uninstall\"," + + "\"info\":{\"status\":\"deployed\"}," + + "\"manifest\":\"apiVersion: v1\\nkind: Secret\\nmetadata:\\n name: secret-to-deny\\n namespace: default\\n\"" + + "}"))), + }, + }, metav1.CreateOptions{}) + helmUninstall, _ := c.callTool("helm_uninstall", map[string]interface{}{ + "name": "existent-release-to-uninstall", + }) + t.Run("helm_uninstall has error", func(t *testing.T) { + if !helmUninstall.IsError { + t.Fatalf("call tool should fail") + } + }) + }) +} + +func clearHelmReleases(ctx context.Context, kc *kubernetes.Clientset) { + 
secrets, _ := kc.CoreV1().Secrets("default").List(ctx, metav1.ListOptions{}) + for _, secret := range secrets.Items { + if strings.HasPrefix(secret.Name, "sh.helm.release.v1.") { + _ = kc.CoreV1().Secrets("default").Delete(ctx, secret.Name, metav1.DeleteOptions{}) + } + } +} diff --git a/pkg 2/mcp/mcp.go b/pkg 2/mcp/mcp.go new file mode 100644 index 00000000..cf7676a3 --- /dev/null +++ b/pkg 2/mcp/mcp.go @@ -0,0 +1,212 @@ +package mcp + +import ( + "bytes" + "context" + "fmt" + "k8s.io/klog/v2" + "net/http" + "slices" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + authenticationapiv1 "k8s.io/api/authentication/v1" + "k8s.io/utils/ptr" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" + "github.com/containers/kubernetes-mcp-server/pkg/output" + "github.com/containers/kubernetes-mcp-server/pkg/version" +) + +const TokenScopesContextKey = "TokenScopesContextKey" + +type Configuration struct { + Profile Profile + ListOutput output.Output + + StaticConfig *config.StaticConfig +} + +func (c *Configuration) isToolApplicable(tool server.ServerTool) bool { + if c.StaticConfig.ReadOnly && !ptr.Deref(tool.Tool.Annotations.ReadOnlyHint, false) { + return false + } + if c.StaticConfig.DisableDestructive && ptr.Deref(tool.Tool.Annotations.DestructiveHint, false) { + return false + } + if c.StaticConfig.EnabledTools != nil && !slices.Contains(c.StaticConfig.EnabledTools, tool.Tool.Name) { + return false + } + if c.StaticConfig.DisabledTools != nil && slices.Contains(c.StaticConfig.DisabledTools, tool.Tool.Name) { + return false + } + return true +} + +type Server struct { + configuration *Configuration + server *server.MCPServer + enabledTools []string + k *internalk8s.Manager +} + +func NewServer(configuration Configuration) (*Server, error) { + var serverOptions []server.ServerOption + serverOptions = append(serverOptions, + server.WithResourceCapabilities(true, 
true), + server.WithPromptCapabilities(true), + server.WithToolCapabilities(true), + server.WithLogging(), + server.WithToolHandlerMiddleware(toolCallLoggingMiddleware), + ) + if configuration.StaticConfig.RequireOAuth { + serverOptions = append(serverOptions, server.WithToolHandlerMiddleware(toolScopedAuthorizationMiddleware)) + } + + s := &Server{ + configuration: &configuration, + server: server.NewMCPServer( + version.BinaryName, + version.Version, + serverOptions..., + ), + } + if err := s.reloadKubernetesClient(); err != nil { + return nil, err + } + s.k.WatchKubeConfig(s.reloadKubernetesClient) + + return s, nil +} + +func (s *Server) reloadKubernetesClient() error { + k, err := internalk8s.NewManager(s.configuration.StaticConfig) + if err != nil { + return err + } + s.k = k + applicableTools := make([]server.ServerTool, 0) + for _, tool := range s.configuration.Profile.GetTools(s) { + if !s.configuration.isToolApplicable(tool) { + continue + } + applicableTools = append(applicableTools, tool) + s.enabledTools = append(s.enabledTools, tool.Tool.Name) + } + s.server.SetTools(applicableTools...) + return nil +} + +func (s *Server) ServeStdio() error { + return server.ServeStdio(s.server) +} + +func (s *Server) ServeSse(baseUrl string, httpServer *http.Server) *server.SSEServer { + options := make([]server.SSEOption, 0) + options = append(options, server.WithSSEContextFunc(contextFunc), server.WithHTTPServer(httpServer)) + if baseUrl != "" { + options = append(options, server.WithBaseURL(baseUrl)) + } + return server.NewSSEServer(s.server, options...) +} + +func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer { + options := []server.StreamableHTTPOption{ + server.WithHTTPContextFunc(contextFunc), + server.WithStreamableHTTPServer(httpServer), + server.WithStateLess(true), + } + return server.NewStreamableHTTPServer(s.server, options...) 
+} + +// VerifyTokenAPIServer verifies the given token with the audience by +// sending an TokenReview request to API Server. +func (s *Server) VerifyTokenAPIServer(ctx context.Context, token string, audience string) (*authenticationapiv1.UserInfo, []string, error) { + if s.k == nil { + return nil, nil, fmt.Errorf("kubernetes manager is not initialized") + } + return s.k.VerifyToken(ctx, token, audience) +} + +// GetKubernetesAPIServerHost returns the Kubernetes API server host from the configuration. +func (s *Server) GetKubernetesAPIServerHost() string { + if s.k == nil { + return "" + } + return s.k.GetAPIServerHost() +} + +func (s *Server) GetEnabledTools() []string { + return s.enabledTools +} + +func (s *Server) Close() { + if s.k != nil { + s.k.Close() + } +} + +func NewTextResult(content string, err error) *mcp.CallToolResult { + if err != nil { + return &mcp.CallToolResult{ + IsError: true, + Content: []mcp.Content{ + mcp.TextContent{ + Type: "text", + Text: err.Error(), + }, + }, + } + } + return &mcp.CallToolResult{ + Content: []mcp.Content{ + mcp.TextContent{ + Type: "text", + Text: content, + }, + }, + } +} + +func contextFunc(ctx context.Context, r *http.Request) context.Context { + // Get the standard Authorization header (OAuth compliant) + authHeader := r.Header.Get(string(internalk8s.OAuthAuthorizationHeader)) + if authHeader != "" { + return context.WithValue(ctx, internalk8s.OAuthAuthorizationHeader, authHeader) + } + + // Fallback to custom header for backward compatibility + customAuthHeader := r.Header.Get(string(internalk8s.CustomAuthorizationHeader)) + if customAuthHeader != "" { + return context.WithValue(ctx, internalk8s.OAuthAuthorizationHeader, customAuthHeader) + } + + return ctx +} + +func toolCallLoggingMiddleware(next server.ToolHandlerFunc) server.ToolHandlerFunc { + return func(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + klog.V(5).Infof("mcp tool call: %s(%v)", ctr.Params.Name, 
ctr.Params.Arguments) + if ctr.Header != nil { + buffer := bytes.NewBuffer(make([]byte, 0)) + if err := ctr.Header.WriteSubset(buffer, map[string]bool{"Authorization": true, "authorization": true}); err == nil { + klog.V(7).Infof("mcp tool call headers: %s", buffer) + } + } + return next(ctx, ctr) + } +} + +func toolScopedAuthorizationMiddleware(next server.ToolHandlerFunc) server.ToolHandlerFunc { + return func(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + scopes, ok := ctx.Value(TokenScopesContextKey).([]string) + if !ok { + return NewTextResult("", fmt.Errorf("Authorization failed: Access denied: Tool '%s' requires scope 'mcp:%s' but no scope is available", ctr.Params.Name, ctr.Params.Name)), nil + } + if !slices.Contains(scopes, "mcp:"+ctr.Params.Name) && !slices.Contains(scopes, ctr.Params.Name) { + return NewTextResult("", fmt.Errorf("Authorization failed: Access denied: Tool '%s' requires scope 'mcp:%s' but only scopes %s are available", ctr.Params.Name, ctr.Params.Name, scopes)), nil + } + return next(ctx, ctr) + } +} diff --git a/pkg 2/mcp/mcp_test.go b/pkg 2/mcp/mcp_test.go new file mode 100644 index 00000000..9b2c78ef --- /dev/null +++ b/pkg 2/mcp/mcp_test.go @@ -0,0 +1,126 @@ +package mcp + +import ( + "context" + "net/http" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/mcp" +) + +func TestWatchKubeConfig(t *testing.T) { + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { + t.Skip("Skipping test on non-Unix-like platforms") + } + testCase(t, func(c *mcpContext) { + // Given + withTimeout, cancel := context.WithTimeout(c.ctx, 5*time.Second) + defer cancel() + var notification *mcp.JSONRPCNotification + c.mcpClient.OnNotification(func(n mcp.JSONRPCNotification) { + notification = &n + }) + // When + f, _ := os.OpenFile(filepath.Join(c.tempDir, "config"), os.O_APPEND|os.O_WRONLY, 0644) + _, _ = f.WriteString("\n") + for notification 
== nil { + select { + case <-withTimeout.Done(): + default: + time.Sleep(100 * time.Millisecond) + } + } + // Then + t.Run("WatchKubeConfig notifies tools change", func(t *testing.T) { + if notification == nil { + t.Fatalf("WatchKubeConfig did not notify") + } + if notification.Method != "notifications/tools/list_changed" { + t.Fatalf("WatchKubeConfig did not notify tools change, got %s", notification.Method) + } + }) + }) +} + +func TestSseHeaders(t *testing.T) { + mockServer := NewMockServer() + defer mockServer.Close() + before := func(c *mcpContext) { + c.withKubeConfig(mockServer.config) + c.clientOptions = append(c.clientOptions, client.WithHeaders(map[string]string{"kubernetes-authorization": "Bearer a-token-from-mcp-client"})) + } + pathHeaders := make(map[string]http.Header, 0) + mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + pathHeaders[req.URL.Path] = req.Header.Clone() + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["v1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + w.Header().Set("Content-Type", "application/json") + //w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[{"name":"apps","versions":[{"groupVersion":"apps/v1","version":"v1"}],"preferredVersion":{"groupVersion":"apps/v1","version":"v1"}}]}`)) + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/api/v1" { + w.Header().Set("Content-Type", "application/json") + _, _ = 
w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"Pod","verbs":["get","list","watch","create","update","patch","delete"]}]}`)) + return + } + // Request Performed by DynamicClient + if req.URL.Path == "/api/v1/namespaces/default/pods" { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(`{"kind":"PodList","apiVersion":"v1","items":[]}`)) + return + } + // Request Performed by kubernetes.Interface + if req.URL.Path == "/api/v1/namespaces/default/pods/a-pod-to-delete" { + w.WriteHeader(200) + return + } + w.WriteHeader(404) + })) + testCaseWithContext(t, &mcpContext{before: before}, func(c *mcpContext) { + _, _ = c.callTool("pods_list", map[string]interface{}{}) + t.Run("DiscoveryClient propagates headers to Kube API", func(t *testing.T) { + if len(pathHeaders) == 0 { + t.Fatalf("No requests were made to Kube API") + } + if pathHeaders["/api"] == nil || pathHeaders["/api"].Get("Authorization") != "Bearer a-token-from-mcp-client" { + t.Fatalf("Overridden header Authorization not found in request to /api") + } + if pathHeaders["/apis"] == nil || pathHeaders["/apis"].Get("Authorization") != "Bearer a-token-from-mcp-client" { + t.Fatalf("Overridden header Authorization not found in request to /apis") + } + if pathHeaders["/api/v1"] == nil || pathHeaders["/api/v1"].Get("Authorization") != "Bearer a-token-from-mcp-client" { + t.Fatalf("Overridden header Authorization not found in request to /api/v1") + } + }) + t.Run("DynamicClient propagates headers to Kube API", func(t *testing.T) { + if len(pathHeaders) == 0 { + t.Fatalf("No requests were made to Kube API") + } + if pathHeaders["/api/v1/namespaces/default/pods"] == nil || pathHeaders["/api/v1/namespaces/default/pods"].Get("Authorization") != "Bearer a-token-from-mcp-client" { + t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods") + } + }) + _, _ = 
c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-to-delete"}) + t.Run("kubernetes.Interface propagates headers to Kube API", func(t *testing.T) { + if len(pathHeaders) == 0 { + t.Fatalf("No requests were made to Kube API") + } + if pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"] == nil || pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"].Get("Authorization") != "Bearer a-token-from-mcp-client" { + t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods/a-pod-to-delete") + } + }) + }) +} diff --git a/pkg 2/mcp/mcp_tools_test.go b/pkg 2/mcp/mcp_tools_test.go new file mode 100644 index 00000000..4d12d306 --- /dev/null +++ b/pkg 2/mcp/mcp_tools_test.go @@ -0,0 +1,179 @@ +package mcp + +import ( + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" + "k8s.io/utils/ptr" + "regexp" + "strings" + "testing" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +func TestUnrestricted(t *testing.T) { + testCase(t, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("Destructive tools ARE NOT read only", func(t *testing.T) { + for _, tool := range tools.Tools { + readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false) + destructive := ptr.Deref(tool.Annotations.DestructiveHint, false) + if readOnly && destructive { + t.Errorf("Tool %s is read-only and destructive, which is not allowed", tool.Name) + } + } + }) + }) +} + +func TestReadOnly(t *testing.T) { + readOnlyServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{ReadOnly: true} } + testCaseWithContext(t, &mcpContext{before: readOnlyServer}, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + 
t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("ListTools returns only read-only tools", func(t *testing.T) { + for _, tool := range tools.Tools { + if tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint { + t.Errorf("Tool %s is not read-only but should be", tool.Name) + } + if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { + t.Errorf("Tool %s is destructive but should not be in read-only mode", tool.Name) + } + } + }) + }) +} + +func TestDisableDestructive(t *testing.T) { + disableDestructiveServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{DisableDestructive: true} } + testCaseWithContext(t, &mcpContext{before: disableDestructiveServer}, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("ListTools does not return destructive tools", func(t *testing.T) { + for _, tool := range tools.Tools { + if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { + t.Errorf("Tool %s is destructive but should not be", tool.Name) + } + } + }) + }) +} + +func TestEnabledTools(t *testing.T) { + testCaseWithContext(t, &mcpContext{ + staticConfig: &config.StaticConfig{ + EnabledTools: []string{"namespaces_list", "events_list"}, + }, + }, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("ListTools returns only explicitly enabled tools", func(t *testing.T) { + if len(tools.Tools) != 2 { + t.Fatalf("ListTools should return 2 tools, got %d", len(tools.Tools)) + } + for _, tool := range tools.Tools { + if tool.Name != "namespaces_list" && tool.Name != "events_list" { + t.Errorf("Tool %s is not enabled but should be", tool.Name) + } + } 
+ }) + }) +} + +func TestDisabledTools(t *testing.T) { + testCaseWithContext(t, &mcpContext{ + staticConfig: &config.StaticConfig{ + DisabledTools: []string{"namespaces_list", "events_list"}, + }, + }, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("ListTools does not return disabled tools", func(t *testing.T) { + for _, tool := range tools.Tools { + if tool.Name == "namespaces_list" || tool.Name == "events_list" { + t.Errorf("Tool %s is not disabled but should be", tool.Name) + } + } + }) + }) +} + +func TestToolCallLogging(t *testing.T) { + testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) { + _, _ = c.callTool("configuration_view", map[string]interface{}{ + "minified": false, + }) + t.Run("Logs tool name", func(t *testing.T) { + expectedLog := "mcp tool call: configuration_view(" + if !strings.Contains(c.logBuffer.String(), expectedLog) { + t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) + } + }) + t.Run("Logs tool call arguments", func(t *testing.T) { + expected := `"mcp tool call: configuration_view\((.+)\)"` + m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String()) + if len(m) != 2 { + t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String()) + } + if m[1] != "map[minified:false]" { + t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1]) + } + }) + }) + before := func(c *mcpContext) { + c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{ + "Accept-Encoding": "gzip", + "Authorization": "Bearer should-not-be-logged", + "authorization": "Bearer should-not-be-logged", + "a-loggable-header": "should-be-logged", + })) + } + testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) { + _, _ = 
c.callTool("configuration_view", map[string]interface{}{ + "minified": false, + }) + t.Run("Logs tool call headers", func(t *testing.T) { + expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged" + if !strings.Contains(c.logBuffer.String(), expectedLog) { + t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) + } + }) + sensitiveHeaders := []string{ + "Authorization", + // TODO: Add more sensitive headers as needed + } + t.Run("Does not log sensitive headers", func(t *testing.T) { + for _, header := range sensitiveHeaders { + if strings.Contains(c.logBuffer.String(), header) { + t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String()) + } + } + }) + t.Run("Does not log sensitive header values", func(t *testing.T) { + if strings.Contains(c.logBuffer.String(), "should-not-be-logged") { + t.Errorf("Log should not contain sensitive header value 'should-not-be-logged', got: %s", c.logBuffer.String()) + } + }) + }) +} diff --git a/pkg 2/mcp/mock_server_test.go b/pkg 2/mcp/mock_server_test.go new file mode 100644 index 00000000..124e5ab5 --- /dev/null +++ b/pkg 2/mcp/mock_server_test.go @@ -0,0 +1,151 @@ +package mcp + +import ( + "encoding/json" + "errors" + "io" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + "k8s.io/client-go/rest" + "net/http" + "net/http/httptest" +) + +type MockServer struct { + server *httptest.Server + config *rest.Config + restHandlers []http.HandlerFunc +} + +func NewMockServer() *MockServer { + ms := &MockServer{} + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + ms.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + for _, handler := range ms.restHandlers { + handler(w, req) + } 
+ })) + ms.config = &rest.Config{ + Host: ms.server.URL, + APIPath: "/api", + ContentConfig: rest.ContentConfig{ + NegotiatedSerializer: codecs, + ContentType: runtime.ContentTypeJSON, + GroupVersion: &v1.SchemeGroupVersion, + }, + } + ms.restHandlers = make([]http.HandlerFunc, 0) + return ms +} + +func (m *MockServer) Close() { + m.server.Close() +} + +func (m *MockServer) Handle(handler http.Handler) { + m.restHandlers = append(m.restHandlers, handler.ServeHTTP) +} + +func writeObject(w http.ResponseWriter, obj runtime.Object) { + w.Header().Set("Content-Type", runtime.ContentTypeJSON) + if err := json.NewEncoder(w).Encode(obj); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +type streamAndReply struct { + httpstream.Stream + replySent <-chan struct{} +} + +type streamContext struct { + conn io.Closer + stdinStream io.ReadCloser + stdoutStream io.WriteCloser + stderrStream io.WriteCloser + writeStatus func(status *apierrors.StatusError) error +} + +type StreamOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer +} + +func v4WriteStatusFunc(stream io.Writer) func(status *apierrors.StatusError) error { + return func(status *apierrors.StatusError) error { + bs, err := json.Marshal(status.Status()) + if err != nil { + return err + } + _, err = stream.Write(bs) + return err + } +} +func createHTTPStreams(w http.ResponseWriter, req *http.Request, opts *StreamOptions) (*streamContext, error) { + _, err := httpstream.Handshake(req, w, []string{"v4.channel.k8s.io"}) + if err != nil { + return nil, err + } + + upgrader := spdy.NewResponseUpgrader() + streamCh := make(chan streamAndReply) + conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error { + streamCh <- streamAndReply{Stream: stream, replySent: replySent} + return nil + }) + ctx := &streamContext{ + conn: conn, + } + + // wait for stream + replyChan := make(chan struct{}, 4) + defer close(replyChan) + 
receivedStreams := 0 + expectedStreams := 1 + if opts.Stdout != nil { + expectedStreams++ + } + if opts.Stdin != nil { + expectedStreams++ + } + if opts.Stderr != nil { + expectedStreams++ + } +WaitForStreams: + for { + select { + case stream := <-streamCh: + streamType := stream.Headers().Get(v1.StreamType) + switch streamType { + case v1.StreamTypeError: + replyChan <- struct{}{} + ctx.writeStatus = v4WriteStatusFunc(stream) + case v1.StreamTypeStdout: + replyChan <- struct{}{} + ctx.stdoutStream = stream + case v1.StreamTypeStdin: + replyChan <- struct{}{} + ctx.stdinStream = stream + case v1.StreamTypeStderr: + replyChan <- struct{}{} + ctx.stderrStream = stream + default: + // add other stream ... + return nil, errors.New("unimplemented stream type") + } + case <-replyChan: + receivedStreams++ + if receivedStreams == expectedStreams { + break WaitForStreams + } + } + } + + return ctx, nil +} diff --git a/pkg 2/mcp/namespaces.go b/pkg 2/mcp/namespaces.go new file mode 100644 index 00000000..a2890156 --- /dev/null +++ b/pkg 2/mcp/namespaces.go @@ -0,0 +1,62 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + + "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" +) + +func (s *Server) initNamespaces() []server.ServerTool { + ret := make([]server.ServerTool, 0) + ret = append(ret, server.ServerTool{ + Tool: mcp.NewTool("namespaces_list", + mcp.WithDescription("List all the Kubernetes namespaces in the current cluster"), + // Tool annotations + mcp.WithTitleAnnotation("Namespaces: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.namespacesList, + }) + if s.k.IsOpenShift(context.Background()) { + ret = append(ret, server.ServerTool{ + Tool: mcp.NewTool("projects_list", + mcp.WithDescription("List all the OpenShift projects in the current cluster"), + // Tool annotations + 
mcp.WithTitleAnnotation("Projects: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.projectsList, + }) + } + return ret +} + +func (s *Server) namespacesList(ctx context.Context, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.NamespacesList(ctx, kubernetes.ResourceListOptions{AsTable: s.configuration.ListOutput.AsTable()}) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list namespaces: %v", err)), nil + } + return NewTextResult(s.configuration.ListOutput.PrintObj(ret)), nil +} + +func (s *Server) projectsList(ctx context.Context, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.ProjectsList(ctx, kubernetes.ResourceListOptions{AsTable: s.configuration.ListOutput.AsTable()}) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list projects: %v", err)), nil + } + return NewTextResult(s.configuration.ListOutput.PrintObj(ret)), nil +} diff --git a/pkg 2/mcp/namespaces_test.go b/pkg 2/mcp/namespaces_test.go new file mode 100644 index 00000000..c3d5a41c --- /dev/null +++ b/pkg 2/mcp/namespaces_test.go @@ -0,0 +1,174 @@ +package mcp + +import ( + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/output" + "github.com/mark3labs/mcp-go/mcp" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "regexp" + "sigs.k8s.io/yaml" + "slices" + "testing" +) + +func TestNamespacesList(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + toolResult, err := c.callTool("namespaces_list", map[string]interface{}{}) + t.Run("namespaces_list returns namespace 
list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + }) + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("namespaces_list has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("namespaces_list returns at least 3 items", func(t *testing.T) { + if len(decoded) < 3 { + t.Errorf("invalid namespace count, expected at least 3, got %v", len(decoded)) + } + for _, expectedNamespace := range []string{"default", "ns-1", "ns-2"} { + idx := slices.IndexFunc(decoded, func(ns unstructured.Unstructured) bool { + return ns.GetName() == expectedNamespace + }) + if idx == -1 { + t.Errorf("namespace %s not found in the list", expectedNamespace) + } + } + }) + }) +} + +func TestNamespacesListDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Namespace"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + namespacesList, _ := c.callTool("namespaces_list", map[string]interface{}{}) + t.Run("namespaces_list has error", func(t *testing.T) { + if !namespacesList.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("namespaces_list describes denial", func(t *testing.T) { + expectedMessage := "failed to list namespaces: resource not allowed: /v1, Kind=Namespace" + if namespacesList.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, namespacesList.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestNamespacesListAsTable(t *testing.T) { + testCaseWithContext(t, &mcpContext{listOutput: output.Table}, func(c *mcpContext) { + c.withEnvTest() + toolResult, err := c.callTool("namespaces_list", 
map[string]interface{}{}) + t.Run("namespaces_list returns namespace list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + }) + out := toolResult.Content[0].(mcp.TextContent).Text + t.Run("namespaces_list returns column headers", func(t *testing.T) { + expectedHeaders := "APIVERSION\\s+KIND\\s+NAME\\s+STATUS\\s+AGE\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, out); !m || e != nil { + t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, out) + } + }) + t.Run("namespaces_list returns formatted row for ns-1", func(t *testing.T) { + expectedRow := "(?v1)\\s+" + + "(?Namespace)\\s+" + + "(?ns-1)\\s+" + + "(?Active)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?kubernetes.io/metadata.name=ns-1)" + if m, e := regexp.MatchString(expectedRow, out); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, out) + } + }) + t.Run("namespaces_list returns formatted row for ns-2", func(t *testing.T) { + expectedRow := "(?v1)\\s+" + + "(?Namespace)\\s+" + + "(?ns-2)\\s+" + + "(?Active)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?kubernetes.io/metadata.name=ns-2)" + if m, e := regexp.MatchString(expectedRow, out); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, out) + } + }) + }) + +} + +func TestProjectsListInOpenShift(t *testing.T) { + testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) + _, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "project.openshift.io", Version: "v1", Resource: "projects"}). 
+ Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "project.openshift.io/v1", + "kind": "Project", + "metadata": map[string]interface{}{ + "name": "an-openshift-project", + }, + }}, metav1.CreateOptions{}) + toolResult, err := c.callTool("projects_list", map[string]interface{}{}) + t.Run("projects_list returns project list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + }) + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("projects_list has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("projects_list returns at least 1 items", func(t *testing.T) { + if len(decoded) < 1 { + t.Errorf("invalid project count, expected at least 1, got %v", len(decoded)) + } + idx := slices.IndexFunc(decoded, func(ns unstructured.Unstructured) bool { + return ns.GetName() == "an-openshift-project" + }) + if idx == -1 { + t.Errorf("namespace %s not found in the list", "an-openshift-project") + } + }) + }) +} + +func TestProjectsListInOpenShiftDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Group: "project.openshift.io", Version: "v1"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + c.withEnvTest() + projectsList, _ := c.callTool("projects_list", map[string]interface{}{}) + t.Run("projects_list has error", func(t *testing.T) { + if !projectsList.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("projects_list describes denial", func(t *testing.T) { + expectedMessage := "failed to list projects: resource not allowed: project.openshift.io/v1, Kind=Project" + if projectsList.Content[0].(mcp.TextContent).Text != 
expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text) + } + }) + }) +} diff --git a/pkg 2/mcp/pods.go b/pkg 2/mcp/pods.go new file mode 100644 index 00000000..0150031d --- /dev/null +++ b/pkg 2/mcp/pods.go @@ -0,0 +1,330 @@ +package mcp + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "k8s.io/kubectl/pkg/metricsutil" + + "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" + "github.com/containers/kubernetes-mcp-server/pkg/output" +) + +func (s *Server) initPods() []server.ServerTool { + return []server.ServerTool{ + {Tool: mcp.NewTool("pods_list", + mcp.WithDescription("List all the Kubernetes pods in the current cluster from all namespaces"), + mcp.WithString("labelSelector", mcp.Description("Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label"), mcp.Pattern("([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsListInAllNamespaces}, + {Tool: mcp.NewTool("pods_list_in_namespace", + mcp.WithDescription("List all the Kubernetes pods in the specified namespace in the current cluster"), + mcp.WithString("namespace", mcp.Description("Namespace to list pods from"), mcp.Required()), + mcp.WithString("labelSelector", mcp.Description("Optional Kubernetes label selector (e.g. 
'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label"), mcp.Pattern("([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: List in Namespace"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsListInNamespace}, + {Tool: mcp.NewTool("pods_get", + mcp.WithDescription("Get a Kubernetes Pod in the current or provided namespace with the provided name"), + mcp.WithString("namespace", mcp.Description("Namespace to get the Pod from")), + mcp.WithString("name", mcp.Description("Name of the Pod"), mcp.Required()), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Get"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsGet}, + {Tool: mcp.NewTool("pods_delete", + mcp.WithDescription("Delete a Kubernetes Pod in the current or provided namespace with the provided name"), + mcp.WithString("namespace", mcp.Description("Namespace to delete the Pod from")), + mcp.WithString("name", mcp.Description("Name of the Pod to delete"), mcp.Required()), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Delete"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsDelete}, + {Tool: mcp.NewTool("pods_top", + mcp.WithDescription("List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace"), + mcp.WithBoolean("all_namespaces", mcp.Description("If true, list the resource consumption for all Pods in all namespaces. 
If false, list the resource consumption for Pods in the provided namespace or the current namespace"), mcp.DefaultBool(true)), + mcp.WithString("namespace", mcp.Description("Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false)")), + mcp.WithString("name", mcp.Description("Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided)")), + mcp.WithString("label_selector", mcp.Description("Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided)"), mcp.Pattern("([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Top"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsTop}, + {Tool: mcp.NewTool("pods_exec", + mcp.WithDescription("Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command"), + mcp.WithString("namespace", mcp.Description("Namespace of the Pod where the command will be executed")), + mcp.WithString("name", mcp.Description("Name of the Pod where the command will be executed"), mcp.Required()), + mcp.WithArray("command", mcp.Description("Command to execute in the Pod container. "+ + "The first item is the command to be run, and the rest are the arguments to that command. 
"+ + `Example: ["ls", "-l", "/tmp"]`), + // TODO: manual fix to ensure that the items property gets initialized (Gemini) + // https://www.googlecloudcommunity.com/gc/AI-ML/Gemini-API-400-Bad-Request-Array-fields-breaks-function-calling/m-p/769835?nobounce + func(schema map[string]interface{}) { + schema["type"] = "array" + schema["items"] = map[string]interface{}{ + "type": "string", + } + }, + mcp.Required(), + ), + mcp.WithString("container", mcp.Description("Name of the Pod container where the command will be executed (Optional)")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Exec"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(true), // Depending on the Pod's entrypoint, executing certain commands may kill the Pod + mcp.WithIdempotentHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsExec}, + {Tool: mcp.NewTool("pods_log", + mcp.WithDescription("Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name"), + mcp.WithString("namespace", mcp.Description("Namespace to get the Pod logs from")), + mcp.WithString("name", mcp.Description("Name of the Pod to get the logs from"), mcp.Required()), + mcp.WithString("container", mcp.Description("Name of the Pod container to get the logs from (Optional)")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Log"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsLog}, + {Tool: mcp.NewTool("pods_run", + mcp.WithDescription("Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name"), + mcp.WithString("namespace", mcp.Description("Namespace to run the Pod in")), + mcp.WithString("name", mcp.Description("Name of the Pod (Optional, random name if not provided)")), + mcp.WithString("image", mcp.Description("Container Image to run in the Pod"), mcp.Required()), + 
mcp.WithNumber("port", mcp.Description("TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided)")), + // Tool annotations + mcp.WithTitleAnnotation("Pods: Run"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithIdempotentHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.podsRun}, + } +} + +func (s *Server) podsListInAllNamespaces(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + labelSelector := ctr.GetArguments()["labelSelector"] + resourceListOptions := kubernetes.ResourceListOptions{ + AsTable: s.configuration.ListOutput.AsTable(), + } + if labelSelector != nil { + resourceListOptions.LabelSelector = labelSelector.(string) + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsListInAllNamespaces(ctx, resourceListOptions) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list pods in all namespaces: %v", err)), nil + } + return NewTextResult(s.configuration.ListOutput.PrintObj(ret)), nil +} + +func (s *Server) podsListInNamespace(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + return NewTextResult("", errors.New("failed to list pods in namespace, missing argument namespace")), nil + } + resourceListOptions := kubernetes.ResourceListOptions{ + AsTable: s.configuration.ListOutput.AsTable(), + } + labelSelector := ctr.GetArguments()["labelSelector"] + if labelSelector != nil { + resourceListOptions.LabelSelector = labelSelector.(string) + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsListInNamespace(ctx, ns.(string), resourceListOptions) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list pods in namespace %s: %v", ns, err)), nil + } + return NewTextResult(s.configuration.ListOutput.PrintObj(ret)), nil 
+} + +func (s *Server) podsGet(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + ns = "" + } + name := ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to get pod, missing argument name")), nil + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsGet(ctx, ns.(string), name.(string)) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get pod %s in namespace %s: %v", name, ns, err)), nil + } + return NewTextResult(output.MarshalYaml(ret)), nil +} + +func (s *Server) podsDelete(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + ns = "" + } + name := ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to delete pod, missing argument name")), nil + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsDelete(ctx, ns.(string), name.(string)) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to delete pod %s in namespace %s: %v", name, ns, err)), nil + } + return NewTextResult(ret, err), nil +} + +func (s *Server) podsTop(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + podsTopOptions := kubernetes.PodsTopOptions{AllNamespaces: true} + if v, ok := ctr.GetArguments()["namespace"].(string); ok { + podsTopOptions.Namespace = v + } + if v, ok := ctr.GetArguments()["all_namespaces"].(bool); ok { + podsTopOptions.AllNamespaces = v + } + if v, ok := ctr.GetArguments()["name"].(string); ok { + podsTopOptions.Name = v + } + if v, ok := ctr.GetArguments()["label_selector"].(string); ok { + podsTopOptions.LabelSelector = v + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsTop(ctx, podsTopOptions) + if err != nil { + 
return NewTextResult("", fmt.Errorf("failed to get pods top: %v", err)), nil + } + buf := new(bytes.Buffer) + printer := metricsutil.NewTopCmdPrinter(buf) + err = printer.PrintPodMetrics(ret.Items, true, true, false, "", true) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get pods top: %v", err)), nil + } + return NewTextResult(buf.String(), nil), nil +} + +func (s *Server) podsExec(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + ns = "" + } + name := ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to exec in pod, missing argument name")), nil + } + container := ctr.GetArguments()["container"] + if container == nil { + container = "" + } + commandArg := ctr.GetArguments()["command"] + command := make([]string, 0) + if _, ok := commandArg.([]interface{}); ok { + for _, cmd := range commandArg.([]interface{}) { + if _, ok := cmd.(string); ok { + command = append(command, cmd.(string)) + } + } + } else { + return NewTextResult("", errors.New("failed to exec in pod, invalid command argument")), nil + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsExec(ctx, ns.(string), name.(string), container.(string), command) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to exec in pod %s in namespace %s: %v", name, ns, err)), nil + } else if ret == "" { + ret = fmt.Sprintf("The executed command in pod %s in namespace %s has not produced any output", name, ns) + } + return NewTextResult(ret, err), nil +} + +func (s *Server) podsLog(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + ns = "" + } + name := ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to get pod log, missing argument name")), nil + } + container := 
ctr.GetArguments()["container"] + if container == nil { + container = "" + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.PodsLog(ctx, ns.(string), name.(string), container.(string)) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get pod %s log in namespace %s: %v", name, ns, err)), nil + } else if ret == "" { + ret = fmt.Sprintf("The pod %s in namespace %s has not logged any message yet", name, ns) + } + return NewTextResult(ret, err), nil +} + +func (s *Server) podsRun(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + ns := ctr.GetArguments()["namespace"] + if ns == nil { + ns = "" + } + name := ctr.GetArguments()["name"] + if name == nil { + name = "" + } + image := ctr.GetArguments()["image"] + if image == nil { + return NewTextResult("", errors.New("failed to run pod, missing argument image")), nil + } + port := ctr.GetArguments()["port"] + if port == nil { + port = float64(0) + } + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + resources, err := derived.PodsRun(ctx, ns.(string), name.(string), image.(string), int32(port.(float64))) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to run pod %s in namespace %s: %v", name, ns, err)), nil + } + marshalledYaml, err := output.MarshalYaml(resources) + if err != nil { + err = fmt.Errorf("failed to run pod: %v", err) + } + return NewTextResult("# The following resources (YAML) have been created or updated successfully\n"+marshalledYaml, err), nil +} diff --git a/pkg 2/mcp/pods_exec_test.go b/pkg 2/mcp/pods_exec_test.go new file mode 100644 index 00000000..919e80b2 --- /dev/null +++ b/pkg 2/mcp/pods_exec_test.go @@ -0,0 +1,126 @@ +package mcp + +import ( + "bytes" + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/mark3labs/mcp-go/mcp" + "io" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "net/http" + "strings" + "testing" +) 
+ +func TestPodsExec(t *testing.T) { + testCase(t, func(c *mcpContext) { + mockServer := NewMockServer() + defer mockServer.Close() + c.withKubeConfig(mockServer.config) + mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" { + return + } + var stdin, stdout bytes.Buffer + ctx, err := createHTTPStreams(w, req, &StreamOptions{ + Stdin: &stdin, + Stdout: &stdout, + }) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + defer func(conn io.Closer) { _ = conn.Close() }(ctx.conn) + _, _ = io.WriteString(ctx.stdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n") + _, _ = io.WriteString(ctx.stdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n") + })) + mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" { + return + } + writeObject(w, &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "pod-to-exec", + }, + Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}}, + }) + })) + podsExecNilNamespace, err := c.callTool("pods_exec", map[string]interface{}{ + "name": "pod-to-exec", + "command": []interface{}{"ls", "-l"}, + }) + t.Run("pods_exec with name and nil namespace returns command output", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if podsExecNilNamespace.IsError { + t.Fatalf("call tool failed") + } + if !strings.Contains(podsExecNilNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { + t.Errorf("unexpected result %v", podsExecNilNamespace.Content[0].(mcp.TextContent).Text) + } + }) + podsExecInNamespace, err := c.callTool("pods_exec", map[string]interface{}{ + "namespace": "default", + "name": "pod-to-exec", + "command": []interface{}{"ls", "-l"}, + }) + 
t.Run("pods_exec with name and namespace returns command output", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if podsExecInNamespace.IsError { + t.Fatalf("call tool failed") + } + if !strings.Contains(podsExecNilNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { + t.Errorf("unexpected result %v", podsExecInNamespace.Content[0].(mcp.TextContent).Text) + } + }) + podsExecInNamespaceAndContainer, err := c.callTool("pods_exec", map[string]interface{}{ + "namespace": "default", + "name": "pod-to-exec", + "command": []interface{}{"ls", "-l"}, + "container": "a-specific-container", + }) + t.Run("pods_exec with name, namespace, and container returns command output", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if podsExecInNamespaceAndContainer.IsError { + t.Fatalf("call tool failed") + } + if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { + t.Errorf("unexpected result %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) + } + if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "container:a-specific-container\n") { + t.Errorf("expected container name not found %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsExecDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsRun, _ := c.callTool("pods_exec", map[string]interface{}{ + "namespace": "default", + "name": "pod-to-exec", + "command": []interface{}{"ls", "-l"}, + "container": "a-specific-container", + }) + t.Run("pods_exec has error", func(t *testing.T) { + if !podsRun.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_exec describes 
denial", func(t *testing.T) { + expectedMessage := "failed to exec in pod pod-to-exec in namespace default: resource not allowed: /v1, Kind=Pod" + if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) + } + }) + }) +} diff --git a/pkg 2/mcp/pods_test.go b/pkg 2/mcp/pods_test.go new file mode 100644 index 00000000..de61dbe7 --- /dev/null +++ b/pkg 2/mcp/pods_test.go @@ -0,0 +1,1051 @@ +package mcp + +import ( + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/output" + "regexp" + "strings" + "testing" + + "github.com/mark3labs/mcp-go/mcp" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "sigs.k8s.io/yaml" +) + +func TestPodsListInAllNamespaces(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + toolResult, err := c.callTool("pods_list", map[string]interface{}{}) + t.Run("pods_list returns pods list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + }) + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("pods_list has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("pods_list returns 3 items", func(t *testing.T) { + if len(decoded) != 3 { + t.Fatalf("invalid pods count, expected 3, got %v", len(decoded)) + } + }) + t.Run("pods_list returns pod in ns-1", func(t *testing.T) { + if decoded[1].GetName() != "a-pod-in-ns-1" { + t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[1].GetName()) + } + if 
decoded[1].GetNamespace() != "ns-1" { + t.Fatalf("invalid pod namespace, expected ns-1, got %v", decoded[1].GetNamespace()) + } + }) + t.Run("pods_list returns pod in ns-2", func(t *testing.T) { + if decoded[2].GetName() != "a-pod-in-ns-2" { + t.Fatalf("invalid pod name, expected a-pod-in-ns-2, got %v", decoded[2].GetName()) + } + if decoded[2].GetNamespace() != "ns-2" { + t.Fatalf("invalid pod namespace, expected ns-2, got %v", decoded[2].GetNamespace()) + } + }) + t.Run("pods_list omits managed fields", func(t *testing.T) { + if decoded[1].GetManagedFields() != nil { + t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields()) + } + }) + }) +} + +func TestPodsListInAllNamespacesUnauthorized(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + defer restoreAuth(c.ctx) + client := c.newKubernetesClient() + // Authorize user only for default/configured namespace + r, _ := client.RbacV1().Roles("default").Create(c.ctx, &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get", "list"}, + APIGroups: []string{""}, + Resources: []string{"pods"}, + }}, + }, metav1.CreateOptions{}) + _, _ = client.RbacV1().RoleBindings("default").Create(c.ctx, &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, + Subjects: []rbacv1.Subject{{Kind: "User", Name: envTestUser.Name}}, + RoleRef: rbacv1.RoleRef{Kind: "Role", Name: r.Name}, + }, metav1.CreateOptions{}) + // Deny cluster by removing cluster rule + _ = client.RbacV1().ClusterRoles().Delete(c.ctx, "allow-all", metav1.DeleteOptions{}) + toolResult, err := c.callTool("pods_list", map[string]interface{}{}) + t.Run("pods_list returns pods list for default namespace only", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if toolResult.IsError { + t.Fatalf("call tool failed") + return + } + }) + var decoded []unstructured.Unstructured + err = 
yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("pods_list has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + }) + t.Run("pods_list returns 1 items", func(t *testing.T) { + if len(decoded) != 1 { + t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) + return + } + }) + t.Run("pods_list returns pod in default", func(t *testing.T) { + if decoded[0].GetName() != "a-pod-in-default" { + t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decoded[0].GetName()) + return + } + if decoded[0].GetNamespace() != "default" { + t.Fatalf("invalid pod namespace, expected default, got %v", decoded[0].GetNamespace()) + return + } + }) + }) +} + +func TestPodsListInNamespace(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("pods_list_in_namespace with nil namespace returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to list pods in namespace, missing argument namespace" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ + "namespace": "ns-1", + }) + t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if toolResult.IsError { + t.Fatalf("call tool failed") + } + }) + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + t.Run("pods_list_in_namespace has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("pods_list_in_namespace returns 1 items", func(t 
*testing.T) { + if len(decoded) != 1 { + t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) + } + }) + t.Run("pods_list_in_namespace returns pod in ns-1", func(t *testing.T) { + if decoded[0].GetName() != "a-pod-in-ns-1" { + t.Errorf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[0].GetName()) + } + if decoded[0].GetNamespace() != "ns-1" { + t.Errorf("invalid pod namespace, expected ns-1, got %v", decoded[0].GetNamespace()) + } + }) + t.Run("pods_list_in_namespace omits managed fields", func(t *testing.T) { + if decoded[0].GetManagedFields() != nil { + t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields()) + } + }) + }) +} + +func TestPodsListDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsList, _ := c.callTool("pods_list", map[string]interface{}{}) + t.Run("pods_list has error", func(t *testing.T) { + if !podsList.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_list describes denial", func(t *testing.T) { + expectedMessage := "failed to list pods in all namespaces: resource not allowed: /v1, Kind=Pod" + if podsList.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsList.Content[0].(mcp.TextContent).Text) + } + }) + podsListInNamespace, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{"namespace": "ns-1"}) + t.Run("pods_list_in_namespace has error", func(t *testing.T) { + if !podsListInNamespace.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_list_in_namespace describes denial", func(t *testing.T) { + expectedMessage := "failed to list pods in namespace ns-1: resource not allowed: /v1, Kind=Pod" + if podsListInNamespace.Content[0].(mcp.TextContent).Text != 
expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsListAsTable(t *testing.T) { + testCaseWithContext(t, &mcpContext{listOutput: output.Table}, func(c *mcpContext) { + c.withEnvTest() + podsList, err := c.callTool("pods_list", map[string]interface{}{}) + t.Run("pods_list returns pods list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if podsList.IsError { + t.Fatalf("call tool failed") + } + }) + outPodsList := podsList.Content[0].(mcp.TextContent).Text + t.Run("pods_list returns table with 1 header and 3 rows", func(t *testing.T) { + lines := strings.Count(outPodsList, "\n") + if lines != 4 { + t.Fatalf("invalid line count, expected 4 (1 header, 3 row), got %v", lines) + } + }) + t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) { + expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, outPodsList); !m || e != nil { + t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsList) + } + }) + t.Run("pods_list_in_namespace returns formatted row for a-pod-in-ns-1", func(t *testing.T) { + expectedRow := "(?ns-1)\\s+" + + "(?v1)\\s+" + + "(?Pod)\\s+" + + "(?a-pod-in-ns-1)\\s+" + + "(?0\\/1)\\s+" + + "(?Pending)\\s+" + + "(?0)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)" + if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) + } + }) + t.Run("pods_list_in_namespace returns formatted row for a-pod-in-default", func(t *testing.T) { + expectedRow := "(?default)\\s+" + + "(?v1)\\s+" + + "(?Pod)\\s+" + + "(?a-pod-in-default)\\s+" + + "(?0\\/1)\\s+" + + 
"(?Pending)\\s+" + + "(?0)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?app=nginx)" + if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) + } + }) + podsListInNamespace, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ + "namespace": "ns-1", + }) + t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsListInNamespace.IsError { + t.Fatalf("call tool failed") + } + }) + outPodsListInNamespace := podsListInNamespace.Content[0].(mcp.TextContent).Text + t.Run("pods_list_in_namespace returns table with 1 header and 1 row", func(t *testing.T) { + lines := strings.Count(outPodsListInNamespace, "\n") + if lines != 2 { + t.Fatalf("invalid line count, expected 2 (1 header, 1 row), got %v", lines) + } + }) + t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) { + expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, outPodsListInNamespace); !m || e != nil { + t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsListInNamespace) + } + }) + t.Run("pods_list_in_namespace returns formatted row", func(t *testing.T) { + expectedRow := "(?ns-1)\\s+" + + "(?v1)\\s+" + + "(?Pod)\\s+" + + "(?a-pod-in-ns-1)\\s+" + + "(?0\\/1)\\s+" + + "(?Pending)\\s+" + + "(?0)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)\\s+" + + "(?)" + if m, e := regexp.MatchString(expectedRow, outPodsListInNamespace); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsListInNamespace) + } + }) + }) +} + +func TestPodsGet(t *testing.T) { + testCase(t, func(c 
*mcpContext) { + c.withEnvTest() + t.Run("pods_get with nil name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_get", map[string]interface{}{}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod, missing argument name" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_get with not found name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_get", map[string]interface{}{"name": "not-found"}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found in namespace : pods \"not-found\" not found" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + podsGetNilNamespace, err := c.callTool("pods_get", map[string]interface{}{ + "name": "a-pod-in-default", + }) + t.Run("pods_get with name and nil namespace returns pod", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsGetNilNamespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + var decodedNilNamespace unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsGetNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace) + t.Run("pods_get with name and nil namespace has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + }) + t.Run("pods_get with name and nil namespace returns pod in default", func(t *testing.T) { + if decodedNilNamespace.GetName() != "a-pod-in-default" { + t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decodedNilNamespace.GetName()) + return + } + if decodedNilNamespace.GetNamespace() != "default" { + t.Fatalf("invalid pod namespace, expected default, got %v", 
decodedNilNamespace.GetNamespace()) + return + } + }) + t.Run("pods_get with name and nil namespace omits managed fields", func(t *testing.T) { + if decodedNilNamespace.GetManagedFields() != nil { + t.Fatalf("managed fields should be omitted, got %v", decodedNilNamespace.GetManagedFields()) + return + } + }) + podsGetInNamespace, err := c.callTool("pods_get", map[string]interface{}{ + "namespace": "ns-1", + "name": "a-pod-in-ns-1", + }) + t.Run("pods_get with name and namespace returns pod", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsGetInNamespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + var decodedInNamespace unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsGetInNamespace.Content[0].(mcp.TextContent).Text), &decodedInNamespace) + t.Run("pods_get with name and namespace has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + }) + t.Run("pods_get with name and namespace returns pod in ns-1", func(t *testing.T) { + if decodedInNamespace.GetName() != "a-pod-in-ns-1" { + t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decodedInNamespace.GetName()) + return + } + if decodedInNamespace.GetNamespace() != "ns-1" { + t.Fatalf("invalid pod namespace, ns-1 ns-1, got %v", decodedInNamespace.GetNamespace()) + return + } + }) + }) +} + +func TestPodsGetDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsGet, _ := c.callTool("pods_get", map[string]interface{}{"name": "a-pod-in-default"}) + t.Run("pods_get has error", func(t *testing.T) { + if !podsGet.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_get describes denial", func(t *testing.T) { + expectedMessage := "failed to get pod 
a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod" + if podsGet.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsGet.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsDelete(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + // Errors + t.Run("pods_delete with nil name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_delete", map[string]interface{}{}) + if toolResult.IsError != true { + t.Errorf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod, missing argument name" { + t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_delete with not found name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_delete", map[string]interface{}{"name": "not-found"}) + if toolResult.IsError != true { + t.Errorf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod not-found in namespace : pods \"not-found\" not found" { + t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + // Default/nil Namespace + kc := c.newKubernetesClient() + _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete"}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + podsDeleteNilNamespace, err := c.callTool("pods_delete", map[string]interface{}{ + "name": "a-pod-to-delete", + }) + t.Run("pods_delete with name and nil namespace returns success", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsDeleteNilNamespace.IsError { + t.Errorf("call tool failed") + return + } + if 
podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { + t.Errorf("invalid tool result content, got %v", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_delete with name and nil namespace deletes Pod", func(t *testing.T) { + p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-pod-to-delete", metav1.GetOptions{}) + if pErr == nil && p != nil && p.DeletionTimestamp == nil { + t.Errorf("Pod not deleted") + return + } + }) + // Provided Namespace + _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete-in-ns-1"}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + podsDeleteInNamespace, err := c.callTool("pods_delete", map[string]interface{}{ + "namespace": "ns-1", + "name": "a-pod-to-delete-in-ns-1", + }) + t.Run("pods_delete with name and namespace returns success", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsDeleteInNamespace.IsError { + t.Errorf("call tool failed") + return + } + if podsDeleteInNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { + t.Errorf("invalid tool result content, got %v", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_delete with name and namespace deletes Pod", func(t *testing.T) { + p, pErr := kc.CoreV1().Pods("ns-1").Get(c.ctx, "a-pod-to-delete-in-ns-1", metav1.GetOptions{}) + if pErr == nil && p != nil && p.DeletionTimestamp == nil { + t.Errorf("Pod not deleted") + return + } + }) + // Managed Pod + managedLabels := map[string]string{ + "app.kubernetes.io/managed-by": "kubernetes-mcp-server", + "app.kubernetes.io/name": "a-manged-pod-to-delete", + } + _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete", Labels: managedLabels}, + Spec: 
corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + _, _ = kc.CoreV1().Services("default").Create(c.ctx, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "a-managed-service-to-delete", Labels: managedLabels}, + Spec: corev1.ServiceSpec{Selector: managedLabels, Ports: []corev1.ServicePort{{Port: 80}}}, + }, metav1.CreateOptions{}) + podsDeleteManaged, err := c.callTool("pods_delete", map[string]interface{}{ + "name": "a-managed-pod-to-delete", + }) + t.Run("pods_delete with managed pod returns success", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsDeleteManaged.IsError { + t.Errorf("call tool failed") + return + } + if podsDeleteManaged.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { + t.Errorf("invalid tool result content, got %v", podsDeleteManaged.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_delete with managed pod deletes Pod and Service", func(t *testing.T) { + p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete", metav1.GetOptions{}) + if pErr == nil && p != nil && p.DeletionTimestamp == nil { + t.Errorf("Pod not deleted") + return + } + s, sErr := kc.CoreV1().Services("default").Get(c.ctx, "a-managed-service-to-delete", metav1.GetOptions{}) + if sErr == nil && s != nil && s.DeletionTimestamp == nil { + t.Errorf("Service not deleted") + return + } + }) + }) +} + +func TestPodsDeleteDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsDelete, _ := c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-in-default"}) + t.Run("pods_delete has error", func(t *testing.T) { + if !podsDelete.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_delete 
describes denial", func(t *testing.T) { + expectedMessage := "failed to delete pod a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod" + if podsDelete.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsDeleteInOpenShift(t *testing.T) { + testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + managedLabels := map[string]string{ + "app.kubernetes.io/managed-by": "kubernetes-mcp-server", + "app.kubernetes.io/name": "a-manged-pod-to-delete", + } + kc := c.newKubernetesClient() + _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete-in-openshift", Labels: managedLabels}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) + _, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). 
+ Namespace("default").Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": map[string]interface{}{ + "name": "a-managed-route-to-delete", + "labels": managedLabels, + }, + }}, metav1.CreateOptions{}) + podsDeleteManagedOpenShift, err := c.callTool("pods_delete", map[string]interface{}{ + "name": "a-managed-pod-to-delete-in-openshift", + }) + t.Run("pods_delete with managed pod in OpenShift returns success", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsDeleteManagedOpenShift.IsError { + t.Errorf("call tool failed") + return + } + if podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { + t.Errorf("invalid tool result content, got %v", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_delete with managed pod in OpenShift deletes Pod and Route", func(t *testing.T) { + p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete-in-openshift", metav1.GetOptions{}) + if pErr == nil && p != nil && p.DeletionTimestamp == nil { + t.Errorf("Pod not deleted") + return + } + r, rErr := dynamicClient. + Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). 
+ Namespace("default").Get(c.ctx, "a-managed-route-to-delete", metav1.GetOptions{}) + if rErr == nil && r != nil && r.GetDeletionTimestamp() == nil { + t.Errorf("Route not deleted") + return + } + }) + }) +} + +func TestPodsLog(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("pods_log with nil name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_log", map[string]interface{}{}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod log, missing argument name" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("pods_log with not found name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("pods_log", map[string]interface{}{"name": "not-found"}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found log in namespace : pods \"not-found\" not found" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + podsLogNilNamespace, err := c.callTool("pods_log", map[string]interface{}{ + "name": "a-pod-in-default", + }) + t.Run("pods_log with name and nil namespace returns pod log", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsLogNilNamespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + podsLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{ + "namespace": "ns-1", + "name": "a-pod-in-ns-1", + }) + t.Run("pods_log with name and namespace returns pod log", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsLogInNamespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + podsContainerLogInNamespace, err := c.callTool("pods_log", 
map[string]interface{}{ + "namespace": "ns-1", + "name": "a-pod-in-ns-1", + "container": "nginx", + }) + t.Run("pods_log with name, container and namespace returns pod log", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if podsContainerLogInNamespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + toolResult, err := c.callTool("pods_log", map[string]interface{}{ + "namespace": "ns-1", + "name": "a-pod-in-ns-1", + "container": "a-not-existing-container", + }) + t.Run("pods_log with non existing container returns error", func(t *testing.T) { + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod a-pod-in-ns-1 log in namespace ns-1: container a-not-existing-container is not valid for pod a-pod-in-ns-1" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + }) +} + +func TestPodsLogDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsLog, _ := c.callTool("pods_log", map[string]interface{}{"name": "a-pod-in-default"}) + t.Run("pods_log has error", func(t *testing.T) { + if !podsLog.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_log describes denial", func(t *testing.T) { + expectedMessage := "failed to get pod a-pod-in-default log in namespace : resource not allowed: /v1, Kind=Pod" + if podsLog.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsLog.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsRun(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("pods_run with nil image returns error", func(t 
*testing.T) { + toolResult, _ := c.callTool("pods_run", map[string]interface{}{}) + if toolResult.IsError != true { + t.Errorf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to run pod, missing argument image" { + t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + podsRunNilNamespace, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx"}) + t.Run("pods_run with image and nil namespace runs pod", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsRunNilNamespace.IsError { + t.Errorf("call tool failed") + return + } + }) + var decodedNilNamespace []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace) + t.Run("pods_run with image and nil namespace has yaml content", func(t *testing.T) { + if err != nil { + t.Errorf("invalid tool result content %v", err) + return + } + }) + t.Run("pods_run with image and nil namespace returns 1 item (Pod)", func(t *testing.T) { + if len(decodedNilNamespace) != 1 { + t.Errorf("invalid pods count, expected 1, got %v", len(decodedNilNamespace)) + return + } + if decodedNilNamespace[0].GetKind() != "Pod" { + t.Errorf("invalid pod kind, expected Pod, got %v", decodedNilNamespace[0].GetKind()) + return + } + }) + t.Run("pods_run with image and nil namespace returns pod in default", func(t *testing.T) { + if decodedNilNamespace[0].GetNamespace() != "default" { + t.Errorf("invalid pod namespace, expected default, got %v", decodedNilNamespace[0].GetNamespace()) + return + } + }) + t.Run("pods_run with image and nil namespace returns pod with random name", func(t *testing.T) { + if !strings.HasPrefix(decodedNilNamespace[0].GetName(), "kubernetes-mcp-server-run-") { + t.Errorf("invalid pod name, expected random, got %v", decodedNilNamespace[0].GetName()) + return + } + }) + t.Run("pods_run with 
image and nil namespace returns pod with labels", func(t *testing.T) { + labels := decodedNilNamespace[0].Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{}) + if labels["app.kubernetes.io/name"] == "" { + t.Errorf("invalid labels, expected app.kubernetes.io/name, got %v", labels) + return + } + if labels["app.kubernetes.io/component"] == "" { + t.Errorf("invalid labels, expected app.kubernetes.io/component, got %v", labels) + return + } + if labels["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" { + t.Errorf("invalid labels, expected app.kubernetes.io/managed-by, got %v", labels) + return + } + if labels["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" { + t.Errorf("invalid labels, expected app.kubernetes.io/part-of, got %v", labels) + return + } + }) + t.Run("pods_run with image and nil namespace returns pod with nginx container", func(t *testing.T) { + containers := decodedNilNamespace[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) + if containers[0].(map[string]interface{})["image"] != "nginx" { + t.Errorf("invalid container name, expected nginx, got %v", containers[0].(map[string]interface{})["image"]) + return + } + }) + + podsRunNamespaceAndPort, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) + t.Run("pods_run with image, namespace, and port runs pod", func(t *testing.T) { + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsRunNamespaceAndPort.IsError { + t.Errorf("call tool failed") + return + } + }) + var decodedNamespaceAndPort []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunNamespaceAndPort.Content[0].(mcp.TextContent).Text), &decodedNamespaceAndPort) + t.Run("pods_run with image, namespace, and port has yaml content", func(t *testing.T) { + if err != nil { + t.Errorf("invalid tool result content %v", err) + return + } + }) + t.Run("pods_run with image, namespace, and port returns 2 items (Pod 
+ Service)", func(t *testing.T) { + if len(decodedNamespaceAndPort) != 2 { + t.Errorf("invalid pods count, expected 2, got %v", len(decodedNamespaceAndPort)) + return + } + if decodedNamespaceAndPort[0].GetKind() != "Pod" { + t.Errorf("invalid pod kind, expected Pod, got %v", decodedNamespaceAndPort[0].GetKind()) + return + } + if decodedNamespaceAndPort[1].GetKind() != "Service" { + t.Errorf("invalid service kind, expected Service, got %v", decodedNamespaceAndPort[1].GetKind()) + return + } + }) + t.Run("pods_run with image, namespace, and port returns pod with port", func(t *testing.T) { + containers := decodedNamespaceAndPort[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) + ports := containers[0].(map[string]interface{})["ports"].([]interface{}) + if ports[0].(map[string]interface{})["containerPort"] != int64(80) { + t.Errorf("invalid container port, expected 80, got %v", ports[0].(map[string]interface{})["containerPort"]) + return + } + }) + t.Run("pods_run with image, namespace, and port returns service with port and selector", func(t *testing.T) { + ports := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["ports"].([]interface{}) + if ports[0].(map[string]interface{})["port"] != int64(80) { + t.Errorf("invalid service port, expected 80, got %v", ports[0].(map[string]interface{})["port"]) + return + } + if ports[0].(map[string]interface{})["targetPort"] != int64(80) { + t.Errorf("invalid service target port, expected 80, got %v", ports[0].(map[string]interface{})["targetPort"]) + return + } + selector := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["selector"].(map[string]interface{}) + if selector["app.kubernetes.io/name"] == "" { + t.Errorf("invalid service selector, expected app.kubernetes.io/name, got %v", selector) + return + } + if selector["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" { + t.Errorf("invalid service selector, expected app.kubernetes.io/managed-by, got %v", 
selector) + return + } + if selector["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" { + t.Errorf("invalid service selector, expected app.kubernetes.io/part-of, got %v", selector) + return + } + }) + }) +} + +func TestPodsRunDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Version: "v1", Kind: "Pod"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + podsRun, _ := c.callTool("pods_run", map[string]interface{}{"image": "nginx"}) + t.Run("pods_run has error", func(t *testing.T) { + if !podsRun.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_run describes denial", func(t *testing.T) { + expectedMessage := "failed to run pod in namespace : resource not allowed: /v1, Kind=Pod" + if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsRunInOpenShift(t *testing.T) { + testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + t.Run("pods_run with image, namespace, and port returns route with port", func(t *testing.T) { + podsRunInOpenShift, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) + if err != nil { + t.Errorf("call tool failed %v", err) + return + } + if podsRunInOpenShift.IsError { + t.Errorf("call tool failed") + return + } + var decodedPodServiceRoute []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunInOpenShift.Content[0].(mcp.TextContent).Text), &decodedPodServiceRoute) + if err != nil { + t.Errorf("invalid tool result content %v", err) + return + } + if len(decodedPodServiceRoute) != 3 { + t.Errorf("invalid pods count, expected 3, got %v", len(decodedPodServiceRoute)) + return + } + if decodedPodServiceRoute[2].GetKind() != "Route" { + 
t.Errorf("invalid route kind, expected Route, got %v", decodedPodServiceRoute[2].GetKind()) + return + } + targetPort := decodedPodServiceRoute[2].Object["spec"].(map[string]interface{})["port"].(map[string]interface{})["targetPort"].(int64) + if targetPort != 80 { + t.Errorf("invalid route target port, expected 80, got %v", targetPort) + return + } + }) + }) +} + +func TestPodsListWithLabelSelector(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + // Create pods with labels + _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-with-labels", + Labels: map[string]string{"app": "test", "env": "dev"}, + }, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "another-pod-with-labels", + Labels: map[string]string{"app": "test", "env": "prod"}, + }, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + + // Test pods_list with label selector + t.Run("pods_list with label selector returns filtered pods", func(t *testing.T) { + toolResult, err := c.callTool("pods_list", map[string]interface{}{ + "labelSelector": "app=test", + }) + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if toolResult.IsError { + t.Fatalf("call tool failed") + return + } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + if len(decoded) != 2 { + t.Fatalf("invalid pods count, expected 2, got %v", len(decoded)) + return + } + }) + + // Test pods_list_in_namespace with label selector + t.Run("pods_list_in_namespace with label selector returns filtered pods", func(t *testing.T) { + 
toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ + "namespace": "ns-1", + "labelSelector": "env=prod", + }) + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if toolResult.IsError { + t.Fatalf("call tool failed") + return + } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + if len(decoded) != 1 { + t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) + return + } + if decoded[0].GetName() != "another-pod-with-labels" { + t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) + return + } + }) + + // Test multiple label selectors + t.Run("pods_list with multiple label selectors returns filtered pods", func(t *testing.T) { + toolResult, err := c.callTool("pods_list", map[string]interface{}{ + "labelSelector": "app=test,env=prod", + }) + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if toolResult.IsError { + t.Fatalf("call tool failed") + return + } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + if len(decoded) != 1 { + t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) + return + } + if decoded[0].GetName() != "another-pod-with-labels" { + t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) + return + } + }) + }) +} diff --git a/pkg 2/mcp/pods_top_test.go b/pkg 2/mcp/pods_top_test.go new file mode 100644 index 00000000..0b63cac8 --- /dev/null +++ b/pkg 2/mcp/pods_top_test.go @@ -0,0 +1,248 @@ +package mcp + +import ( + "net/http" + "regexp" + "testing" + + "github.com/mark3labs/mcp-go/mcp" + + "github.com/containers/kubernetes-mcp-server/pkg/config" +) + +func 
TestPodsTopMetricsUnavailable(t *testing.T) { + testCase(t, func(c *mcpContext) { + mockServer := NewMockServer() + defer mockServer.Close() + c.withKubeConfig(mockServer.config) + mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + })) + podsTopMetricsApiUnavailable, err := c.callTool("pods_top", map[string]interface{}{}) + t.Run("pods_top with metrics API not available", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if !podsTopMetricsApiUnavailable.IsError { + t.Errorf("call tool should have returned an error") + } + if podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text != "failed to get pods top: metrics API is not available" { + t.Errorf("call tool returned unexpected content: %s", podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text) + } + }) + }) +} + +func TestPodsTopMetricsAvailable(t *testing.T) { + testCase(t, func(c *mcpContext) { + mockServer := NewMockServer() + defer mockServer.Close() + c.withKubeConfig(mockServer.config) + mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // NOTE(review): removed leftover debug println (was marked "TODO: REMOVE LINE") + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = 
w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { + _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) + return + } + // Pod Metrics from all namespaces + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" { + if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + `{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi"}}]}` + + `]}`)) + } else { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi"}}]},` + + `{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi"}}]}` + + `]}`)) + + } + return + } + // Pod Metrics from configured namespace + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + 
`{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi"}}]}` + + `]}`)) + return + } + // Pod Metrics from ns-5 namespace + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + `{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi"}}]}` + + `]}`)) + return + } + // Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" { + _, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` + + `"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` + + `"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi"}}]` + + `}`)) + } + })) + podsTopDefaults, err := c.callTool("pods_top", map[string]interface{}{}) + t.Run("pods_top defaults returns pod metrics from all namespaces", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + textContent := podsTopDefaults.Content[0].(mcp.TextContent).Text + if podsTopDefaults.IsError { + t.Fatalf("call tool failed %s", textContent) + } + expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s*$`) + if !expectedHeaders.MatchString(textContent) { + t.Errorf("Expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent) + } + expectedRows := []string{ + "default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi", + "default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi", + "ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi", + } + for _, row := range expectedRows { + if !regexp.MustCompile(row).MatchString(textContent) { + t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) + } + } + 
expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s*$`) + if !expectedTotal.MatchString(textContent) { + t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } + }) + podsTopConfiguredNamespace, err := c.callTool("pods_top", map[string]interface{}{ + "all_namespaces": false, + }) + t.Run("pods_top[allNamespaces=false] returns pod metrics from configured namespace", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + textContent := podsTopConfiguredNamespace.Content[0].(mcp.TextContent).Text + expectedRows := []string{ + "default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi", + "default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi", + } + for _, row := range expectedRows { + if !regexp.MustCompile(row).MatchString(textContent) { + t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) + } + } + expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s*$`) + if !expectedTotal.MatchString(textContent) { + t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } + }) + podsTopNamespace, err := c.callTool("pods_top", map[string]interface{}{ + "namespace": "ns-5", + }) + t.Run("pods_top[namespace=ns-5] returns pod metrics from provided namespace", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + textContent := podsTopNamespace.Content[0].(mcp.TextContent).Text + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi`) + if !expectedRow.MatchString(textContent) { + t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + } + expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s*$`) + if !expectedTotal.MatchString(textContent) { + t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } + }) + podsTopNamespaceName, err := c.callTool("pods_top", map[string]interface{}{ + "namespace": "ns-5", + 
"name": "pod-ns-5-5", + }) + t.Run("pods_top[namespace=ns-5,name=pod-ns-5-5] returns pod metrics from provided namespace and name", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + textContent := podsTopNamespaceName.Content[0].(mcp.TextContent).Text + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi`) + if !expectedRow.MatchString(textContent) { + t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + } + expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s*$`) + if !expectedTotal.MatchString(textContent) { + t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } + }) + podsTopNamespaceLabelSelector, err := c.callTool("pods_top", map[string]interface{}{ + "label_selector": "app=pod-ns-5-42", + }) + t.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + textContent := podsTopNamespaceLabelSelector.Content[0].(mcp.TextContent).Text + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`) + if !expectedRow.MatchString(textContent) { + t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + } + expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s*$`) + if !expectedTotal.MatchString(textContent) { + t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } + }) + }) +} + +func TestPodsTopDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{DeniedResources: []config.GroupVersionKind{{Group: "metrics.k8s.io", Version: "v1beta1"}}} + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + mockServer := NewMockServer() + defer mockServer.Close() + c.withKubeConfig(mockServer.config) + mockServer.Handle(http.HandlerFunc(func(w 
http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { + _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) + return + } + })) + podsTop, _ := c.callTool("pods_top", map[string]interface{}{}) + t.Run("pods_top has error", func(t *testing.T) { + if !podsTop.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("pods_top describes denial", func(t *testing.T) { + expectedMessage := "failed to get pods top: resource not allowed: metrics.k8s.io/v1beta1, Kind=PodMetrics" + if podsTop.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsTop.Content[0].(mcp.TextContent).Text) + } + }) + }) +} diff --git a/pkg 2/mcp/profiles.go b/pkg 2/mcp/profiles.go new file mode 100644 index 00000000..6c0d9741 --- /dev/null +++ b/pkg 2/mcp/profiles.go @@ -0,0 +1,54 @@ +package mcp + +import ( + "slices" + + "github.com/mark3labs/mcp-go/server" +) + +type Profile interface { + GetName() string + GetDescription() string + GetTools(s *Server) []server.ServerTool +} + +var Profiles = []Profile{ + &FullProfile{}, +} + +var ProfileNames []string + +func ProfileFromString(name string) Profile { + for _, profile := 
range Profiles { + if profile.GetName() == name { + return profile + } + } + return nil +} + +type FullProfile struct{} + +func (p *FullProfile) GetName() string { + return "full" +} +func (p *FullProfile) GetDescription() string { + return "Complete profile with all tools and extended outputs" +} +func (p *FullProfile) GetTools(s *Server) []server.ServerTool { + return slices.Concat( + s.initConfiguration(), + s.initEvents(), + s.initNamespaces(), + s.initPods(), + s.initResources(), + s.initHelm(), + ) +} + +func init() { + ProfileNames = make([]string, 0) + for _, profile := range Profiles { + ProfileNames = append(ProfileNames, profile.GetName()) + } +} diff --git a/pkg 2/mcp/profiles_test.go b/pkg 2/mcp/profiles_test.go new file mode 100644 index 00000000..4973595c --- /dev/null +++ b/pkg 2/mcp/profiles_test.go @@ -0,0 +1,88 @@ +package mcp + +import ( + "github.com/mark3labs/mcp-go/mcp" + "slices" + "strings" + "testing" +) + +func TestFullProfileTools(t *testing.T) { + expectedNames := []string{ + "configuration_view", + "events_list", + "helm_install", + "helm_list", + "helm_uninstall", + "namespaces_list", + "pods_list", + "pods_list_in_namespace", + "pods_get", + "pods_delete", + "pods_top", + "pods_log", + "pods_run", + "pods_exec", + "resources_list", + "resources_get", + "resources_create_or_update", + "resources_delete", + } + mcpCtx := &mcpContext{profile: &FullProfile{}} + testCaseWithContext(t, mcpCtx, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + return + } + }) + nameSet := make(map[string]bool) + for _, tool := range tools.Tools { + nameSet[tool.Name] = true + } + for _, name := range expectedNames { + t.Run("ListTools has "+name+" tool", func(t *testing.T) { + if nameSet[name] != true { + t.Fatalf("tool %s not found", name) + return + } + }) + } + }) +} + +func 
TestFullProfileToolsInOpenShift(t *testing.T) { + mcpCtx := &mcpContext{ + profile: &FullProfile{}, + before: inOpenShift, + after: inOpenShiftClear, + } + testCaseWithContext(t, mcpCtx, func(c *mcpContext) { + tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) + t.Run("ListTools returns tools", func(t *testing.T) { + if err != nil { + t.Fatalf("call ListTools failed %v", err) + } + }) + t.Run("ListTools contains projects_list tool", func(t *testing.T) { + idx := slices.IndexFunc(tools.Tools, func(tool mcp.Tool) bool { + return tool.Name == "projects_list" + }) + if idx == -1 { + t.Fatalf("tool projects_list not found") + } + }) + t.Run("ListTools has resources_list tool with OpenShift hint", func(t *testing.T) { + idx := slices.IndexFunc(tools.Tools, func(tool mcp.Tool) bool { + return tool.Name == "resources_list" + }) + if idx == -1 { + t.Fatalf("tool resources_list not found") + } + if !strings.Contains(tools.Tools[idx].Description, ", route.openshift.io/v1 Route") { + t.Fatalf("tool resources_list does not have OpenShift hint, got %s", tools.Tools[idx].Description) + } + }) + }) +} diff --git a/pkg 2/mcp/resources.go b/pkg 2/mcp/resources.go new file mode 100644 index 00000000..dfb99dd9 --- /dev/null +++ b/pkg 2/mcp/resources.go @@ -0,0 +1,258 @@ +package mcp + +import ( + "context" + "errors" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" + "github.com/containers/kubernetes-mcp-server/pkg/output" +) + +func (s *Server) initResources() []server.ServerTool { + commonApiVersion := "v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress" + if s.k.IsOpenShift(context.Background()) { + commonApiVersion += ", route.openshift.io/v1 Route" + } + commonApiVersion = fmt.Sprintf("(common apiVersion and kind include: %s)", commonApiVersion) + return []server.ServerTool{ + {Tool: 
mcp.NewTool("resources_list", + mcp.WithDescription("List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n"+ + commonApiVersion), + mcp.WithString("apiVersion", + mcp.Description("apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)"), + mcp.Required(), + ), + mcp.WithString("kind", + mcp.Description("kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress)"), + mcp.Required(), + ), + mcp.WithString("namespace", + mcp.Description("Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces")), + mcp.WithString("labelSelector", + mcp.Description("Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label"), mcp.Pattern("([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]")), + // Tool annotations + mcp.WithTitleAnnotation("Resources: List"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.resourcesList}, + {Tool: mcp.NewTool("resources_get", + mcp.WithDescription("Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n"+ + commonApiVersion), + mcp.WithString("apiVersion", + mcp.Description("apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)"), + mcp.Required(), + ), + mcp.WithString("kind", + mcp.Description("kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)"), + mcp.Required(), + ), + mcp.WithString("namespace", + mcp.Description("Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). 
If not provided, will get resource from configured namespace"), + ), + mcp.WithString("name", mcp.Description("Name of the resource"), mcp.Required()), + // Tool annotations + mcp.WithTitleAnnotation("Resources: Get"), + mcp.WithReadOnlyHintAnnotation(true), + mcp.WithDestructiveHintAnnotation(false), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.resourcesGet}, + {Tool: mcp.NewTool("resources_create_or_update", + mcp.WithDescription("Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource\n"+ + commonApiVersion), + mcp.WithString("resource", + mcp.Description("A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec"), + mcp.Required(), + ), + // Tool annotations + mcp.WithTitleAnnotation("Resources: Create or Update"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.resourcesCreateOrUpdate}, + {Tool: mcp.NewTool("resources_delete", + mcp.WithDescription("Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n"+ + commonApiVersion), + mcp.WithString("apiVersion", + mcp.Description("apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)"), + mcp.Required(), + ), + mcp.WithString("kind", + mcp.Description("kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)"), + mcp.Required(), + ), + mcp.WithString("namespace", + mcp.Description("Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). 
If not provided, will delete resource from configured namespace"), + ), + mcp.WithString("name", mcp.Description("Name of the resource"), mcp.Required()), + // Tool annotations + mcp.WithTitleAnnotation("Resources: Delete"), + mcp.WithReadOnlyHintAnnotation(false), + mcp.WithDestructiveHintAnnotation(true), + mcp.WithIdempotentHintAnnotation(true), + mcp.WithOpenWorldHintAnnotation(true), + ), Handler: s.resourcesDelete}, + } +} + +func (s *Server) resourcesList(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace := ctr.GetArguments()["namespace"] + if namespace == nil { + namespace = "" + } + labelSelector := ctr.GetArguments()["labelSelector"] + resourceListOptions := kubernetes.ResourceListOptions{ + AsTable: s.configuration.ListOutput.AsTable(), + } + + if labelSelector != nil { + l, ok := labelSelector.(string) + if !ok { + return NewTextResult("", fmt.Errorf("labelSelector is not a string")), nil + } + resourceListOptions.LabelSelector = l + } + gvk, err := parseGroupVersionKind(ctr.GetArguments()) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list resources, %s", err)), nil + } + + ns, ok := namespace.(string) + if !ok { + return NewTextResult("", fmt.Errorf("namespace is not a string")), nil + } + + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.ResourcesList(ctx, gvk, ns, resourceListOptions) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to list resources: %v", err)), nil + } + return NewTextResult(s.configuration.ListOutput.PrintObj(ret)), nil +} + +func (s *Server) resourcesGet(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace := ctr.GetArguments()["namespace"] + if namespace == nil { + namespace = "" + } + gvk, err := parseGroupVersionKind(ctr.GetArguments()) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get resource, %s", err)), nil + } + name := 
ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to get resource, missing argument name")), nil + } + + ns, ok := namespace.(string) + if !ok { + return NewTextResult("", fmt.Errorf("namespace is not a string")), nil + } + + n, ok := name.(string) + if !ok { + return NewTextResult("", fmt.Errorf("name is not a string")), nil + } + + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + ret, err := derived.ResourcesGet(ctx, gvk, ns, n) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to get resource: %v", err)), nil + } + return NewTextResult(output.MarshalYaml(ret)), nil +} + +func (s *Server) resourcesCreateOrUpdate(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + resource := ctr.GetArguments()["resource"] + if resource == nil || resource == "" { + return NewTextResult("", errors.New("failed to create or update resources, missing argument resource")), nil + } + + r, ok := resource.(string) + if !ok { + return NewTextResult("", fmt.Errorf("resource is not a string")), nil + } + + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + resources, err := derived.ResourcesCreateOrUpdate(ctx, r) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to create or update resources: %v", err)), nil + } + marshalledYaml, err := output.MarshalYaml(resources) + if err != nil { + err = fmt.Errorf("failed to create or update resources: %v", err) + } + return NewTextResult("# The following resources (YAML) have been created or updated successfully\n"+marshalledYaml, err), nil +} + +func (s *Server) resourcesDelete(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace := ctr.GetArguments()["namespace"] + if namespace == nil { + namespace = "" + } + gvk, err := parseGroupVersionKind(ctr.GetArguments()) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to delete resource, %s", err)), nil + } + 
name := ctr.GetArguments()["name"] + if name == nil { + return NewTextResult("", errors.New("failed to delete resource, missing argument name")), nil + } + + ns, ok := namespace.(string) + if !ok { + return NewTextResult("", fmt.Errorf("namespace is not a string")), nil + } + + n, ok := name.(string) + if !ok { + return NewTextResult("", fmt.Errorf("name is not a string")), nil + } + + derived, err := s.k.Derived(ctx) + if err != nil { + return nil, err + } + err = derived.ResourcesDelete(ctx, gvk, ns, n) + if err != nil { + return NewTextResult("", fmt.Errorf("failed to delete resource: %v", err)), nil + } + return NewTextResult("Resource deleted successfully", err), nil +} + +func parseGroupVersionKind(arguments map[string]interface{}) (*schema.GroupVersionKind, error) { + apiVersion := arguments["apiVersion"] + if apiVersion == nil { + return nil, errors.New("missing argument apiVersion") + } + kind := arguments["kind"] + if kind == nil { + return nil, errors.New("missing argument kind") + } + + a, ok := apiVersion.(string) + if !ok { + return nil, fmt.Errorf("apiVersion is not a string") + } + + gv, err := schema.ParseGroupVersion(a) + if err != nil { + return nil, errors.New("invalid argument apiVersion") + } + return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind.(string)}, nil +} diff --git a/pkg 2/mcp/resources_test.go b/pkg 2/mcp/resources_test.go new file mode 100644 index 00000000..ebd44195 --- /dev/null +++ b/pkg 2/mcp/resources_test.go @@ -0,0 +1,791 @@ +package mcp + +import ( + "regexp" + "strings" + "testing" + + "github.com/mark3labs/mcp-go/mcp" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "sigs.k8s.io/yaml" + + "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/containers/kubernetes-mcp-server/pkg/output" +) + +func 
TestResourcesList(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("resources_list with missing apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_list", map[string]interface{}{}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + t.Run("resources_list with missing kind returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument kind" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + t.Run("resources_list with invalid apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, invalid argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + } + }) + t.Run("resources_list with nonexistent apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + } + if toolResult.Content[0].(mcp.TextContent).Text != `failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) 
+ } + }) + namespaces, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + t.Run("resources_list returns namespaces", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if namespaces.IsError { + t.Fatalf("call tool failed") + return + } + }) + var decodedNamespaces []unstructured.Unstructured + err = yaml.Unmarshal([]byte(namespaces.Content[0].(mcp.TextContent).Text), &decodedNamespaces) + t.Run("resources_list has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + } + }) + t.Run("resources_list returns more than 2 items", func(t *testing.T) { + if len(decodedNamespaces) < 3 { + t.Fatalf("invalid namespace count, expected >2, got %v", len(decodedNamespaces)) + } + }) + + // Test label selector functionality + t.Run("resources_list with label selector returns filtered pods", func(t *testing.T) { + + // List pods with label selector + result, err := c.callTool("resources_list", map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "namespace": "default", + "labelSelector": "app=nginx", + }) + + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if result.IsError { + t.Fatalf("call tool failed") + return + } + + var decodedPods []unstructured.Unstructured + err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + + // Verify only the pod with matching label is returned + if len(decodedPods) != 1 { + t.Fatalf("expected 1 pod, got %d", len(decodedPods)) + return + } + + if decodedPods[0].GetName() != "a-pod-in-default" { + t.Fatalf("expected pod-with-label, got %s", decodedPods[0].GetName()) + return + } + + // Test that multiple label selectors work + result, err = c.callTool("resources_list", map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "namespace": "default", + 
"labelSelector": "test-label=test-value,another=value", + }) + + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if result.IsError { + t.Fatalf("call tool failed") + return + } + + err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + + // Verify no pods match multiple label selector + if len(decodedPods) != 0 { + t.Fatalf("expected 0 pods, got %d", len(decodedPods)) + return + } + }) + }) +} + +func TestResourcesListDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{ + DeniedResources: []config.GroupVersionKind{ + {Version: "v1", Kind: "Secret"}, + {Group: "rbac.authorization.k8s.io", Version: "v1"}, + }, + } + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + deniedByKind, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"}) + t.Run("resources_list (denied by kind) has error", func(t *testing.T) { + if !deniedByKind.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_list (denied by kind) describes denial", func(t *testing.T) { + expectedMessage := "failed to list resources: resource not allowed: /v1, Kind=Secret" + if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) + } + }) + deniedByGroup, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"}) + t.Run("resources_list (denied by group) has error", func(t *testing.T) { + if !deniedByGroup.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_list (denied by group) describes denial", func(t *testing.T) { + expectedMessage := "failed to list resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" 
+ if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) + } + }) + allowedResource, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + t.Run("resources_list (not denied) returns list", func(t *testing.T) { + if allowedResource.IsError { + t.Fatalf("call tool should not fail") + } + }) + }) +} + +func TestResourcesListAsTable(t *testing.T) { + testCaseWithContext(t, &mcpContext{listOutput: output.Table, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + _, _ = kc.CoreV1().ConfigMaps("default").Create(t.Context(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-list-as-table", Labels: map[string]string{"resource": "config-map"}}, + Data: map[string]string{"key": "value"}, + }, metav1.CreateOptions{}) + configMapList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"}) + t.Run("resources_list returns ConfigMap list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if configMapList.IsError { + t.Fatalf("call tool failed") + } + }) + outConfigMapList := configMapList.Content[0].(mcp.TextContent).Text + t.Run("resources_list returns column headers for ConfigMap list", func(t *testing.T) { + expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+DATA\\s+AGE\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, outConfigMapList); !m || e != nil { + t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList) + } + }) + t.Run("resources_list returns formatted row for a-configmap-to-list-as-table", func(t *testing.T) { + expectedRow := "(?default)\\s+" + + "(?v1)\\s+" + + "(?ConfigMap)\\s+" + + "(?a-configmap-to-list-as-table)\\s+" + + "(?1)\\s+" + + 
"(?(\\d+m)?(\\d+s)?)\\s+" + + "(?resource=config-map)" + if m, e := regexp.MatchString(expectedRow, outConfigMapList); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList) + } + }) + // Custom Resource List + _, _ = dynamic.NewForConfigOrDie(envTestRestConfig). + Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). + Namespace("default"). + Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": map[string]interface{}{ + "name": "an-openshift-route-to-list-as-table", + }, + }}, metav1.CreateOptions{}) + routeList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"}) + t.Run("resources_list returns Route list", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + } + if routeList.IsError { + t.Fatalf("call tool failed") + } + }) + outRouteList := routeList.Content[0].(mcp.TextContent).Text + t.Run("resources_list returns column headers for Route list", func(t *testing.T) { + expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+AGE\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, outRouteList); !m || e != nil { + t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList) + } + }) + t.Run("resources_list returns formatted row for an-openshift-route-to-list-as-table", func(t *testing.T) { + expectedRow := "(?default)\\s+" + + "(?route.openshift.io/v1)\\s+" + + "(?Route)\\s+" + + "(?an-openshift-route-to-list-as-table)\\s+" + + "(?(\\d+m)?(\\d+s)?)\\s+" + + "(?)" + if m, e := regexp.MatchString(expectedRow, outRouteList); !m || e != nil { + t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outRouteList) + } + }) + }) +} + +func TestResourcesGet(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + 
t.Run("resources_get with missing apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_get", map[string]interface{}{}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_get with missing kind returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument kind" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_get with invalid apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, invalid argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_get with nonexistent apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != `failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + 
return + } + }) + t.Run("resources_get with missing name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument name" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + namespace, err := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) + t.Run("resources_get returns namespace", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if namespace.IsError { + t.Fatalf("call tool failed") + return + } + }) + var decodedNamespace unstructured.Unstructured + err = yaml.Unmarshal([]byte(namespace.Content[0].(mcp.TextContent).Text), &decodedNamespace) + t.Run("resources_get has yaml content", func(t *testing.T) { + if err != nil { + t.Fatalf("invalid tool result content %v", err) + return + } + }) + t.Run("resources_get returns default namespace", func(t *testing.T) { + if decodedNamespace.GetName() != "default" { + t.Fatalf("invalid namespace name, expected default, got %v", decodedNamespace.GetName()) + return + } + }) + }) +} + +func TestResourcesGetDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{ + DeniedResources: []config.GroupVersionKind{ + {Version: "v1", Kind: "Secret"}, + {Group: "rbac.authorization.k8s.io", Version: "v1"}, + }, + } + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"}, + }, metav1.CreateOptions{}) + _, _ = kc.RbacV1().Roles("default").Create(c.ctx, &v1.Role{ + ObjectMeta: 
metav1.ObjectMeta{Name: "denied-role"}, + }, metav1.CreateOptions{}) + deniedByKind, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) + t.Run("resources_get (denied by kind) has error", func(t *testing.T) { + if !deniedByKind.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_get (denied by kind) describes denial", func(t *testing.T) { + expectedMessage := "failed to get resource: resource not allowed: /v1, Kind=Secret" + if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) + } + }) + deniedByGroup, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) + t.Run("resources_get (denied by group) has error", func(t *testing.T) { + if !deniedByGroup.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_get (denied by group) describes denial", func(t *testing.T) { + expectedMessage := "failed to get resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" + if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) + } + }) + allowedResource, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) + t.Run("resources_get (not denied) returns resource", func(t *testing.T) { + if allowedResource.IsError { + t.Fatalf("call tool should not fail") + } + }) + }) +} + +func TestResourcesCreateOrUpdate(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("resources_create_or_update with nil resource returns error", func(t *testing.T) { + toolResult, _ := 
c.callTool("resources_create_or_update", map[string]interface{}{}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_create_or_update with empty resource returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": ""}) + if toolResult.IsError != true { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + client := c.newKubernetesClient() + configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n" + resourcesCreateOrUpdateCm1, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) + t.Run("resources_create_or_update with valid namespaced yaml resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesCreateOrUpdateCm1.IsError { + t.Errorf("call tool failed") + return + } + }) + var decodedCreateOrUpdateCm1 []unstructured.Unstructured + err = yaml.Unmarshal([]byte(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text), &decodedCreateOrUpdateCm1) + t.Run("resources_create_or_update with valid namespaced yaml resource returns yaml content", func(t *testing.T) { + if err != nil { + t.Errorf("invalid tool result content %v", err) + return + } + if !strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully") { + 
t.Errorf("Expected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text) + return + } + if len(decodedCreateOrUpdateCm1) != 1 { + t.Errorf("invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1)) + return + } + if decodedCreateOrUpdateCm1[0].GetName() != "a-cm-created-or-updated" { + t.Errorf("invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName()) + return + } + if decodedCreateOrUpdateCm1[0].GetUID() == "" { + t.Errorf("invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID()) + return + } + }) + t.Run("resources_create_or_update with valid namespaced yaml resource creates ConfigMap", func(t *testing.T) { + cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated", metav1.GetOptions{}) + if cm == nil { + t.Fatalf("ConfigMap not found") + return + } + }) + configMapJson := "{\"apiVersion\": \"v1\", \"kind\": \"ConfigMap\", \"metadata\": {\"name\": \"a-cm-created-or-updated-2\", \"namespace\": \"default\"}}" + resourcesCreateOrUpdateCm2, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson}) + t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesCreateOrUpdateCm2.IsError { + t.Fatalf("call tool failed") + return + } + }) + t.Run("resources_create_or_update with valid namespaced json resource creates config map", func(t *testing.T) { + cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated-2", metav1.GetOptions{}) + if cm == nil { + t.Fatalf("ConfigMap not found") + return + } + }) + customResourceDefinitionJson := ` + { + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": {"name": "customs.example.com"}, + "spec": { + "group": "example.com", + "versions": [{ + "name": "v1","served": 
true,"storage": true, + "schema": {"openAPIV3Schema": {"type": "object"}} + }], + "scope": "Namespaced", + "names": {"plural": "customs","singular": "custom","kind": "Custom"} + } + }` + resourcesCreateOrUpdateCrd, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson}) + t.Run("resources_create_or_update with valid cluster-scoped json resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesCreateOrUpdateCrd.IsError { + t.Fatalf("call tool failed") + return + } + }) + t.Run("resources_create_or_update with valid cluster-scoped json resource creates custom resource definition", func(t *testing.T) { + apiExtensionsV1Client := c.newApiExtensionsClient() + _, err = apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, "customs.example.com", metav1.GetOptions{}) + if err != nil { + t.Fatalf("custom resource definition not found") + return + } + }) + c.crdWaitUntilReady("customs.example.com") + customJson := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\"}}" + resourcesCreateOrUpdateCustom, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJson}) + t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesCreateOrUpdateCustom.IsError { + t.Fatalf("call tool failed, got: %v", resourcesCreateOrUpdateCustom.Content) + return + } + }) + t.Run("resources_create_or_update with valid namespaced json resource creates custom resource", func(t *testing.T) { + dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) + _, err = dynamicClient. + Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). + Namespace("default"). 
+ Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) + if err != nil { + t.Fatalf("custom resource not found") + return + } + }) + customJsonUpdated := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\",\"annotations\": {\"updated\": \"true\"}}}" + resourcesCreateOrUpdateCustomUpdated, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated}) + t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesCreateOrUpdateCustomUpdated.IsError { + t.Fatalf("call tool failed") + return + } + }) + t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { + dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) + customResource, _ := dynamicClient. + Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). + Namespace("default"). 
+ Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) + if customResource == nil { + t.Fatalf("custom resource not found") + return + } + annotations := customResource.GetAnnotations() + if annotations == nil || annotations["updated"] != "true" { + t.Fatalf("custom resource not updated") + return + } + }) + }) +} + +func TestResourcesCreateOrUpdateDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{ + DeniedResources: []config.GroupVersionKind{ + {Version: "v1", Kind: "Secret"}, + {Group: "rbac.authorization.k8s.io", Version: "v1"}, + }, + } + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + secretYaml := "apiVersion: v1\nkind: Secret\nmetadata:\n name: a-denied-secret\n namespace: default\n" + deniedByKind, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml}) + t.Run("resources_create_or_update (denied by kind) has error", func(t *testing.T) { + if !deniedByKind.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_create_or_update (denied by kind) describes denial", func(t *testing.T) { + expectedMessage := "failed to create or update resources: resource not allowed: /v1, Kind=Secret" + if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) + } + }) + roleYaml := "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: a-denied-role\n namespace: default\n" + deniedByGroup, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml}) + t.Run("resources_create_or_update (denied by group) has error", func(t *testing.T) { + if !deniedByGroup.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_create_or_update (denied by group) describes denial", func(t *testing.T) { + expectedMessage := "failed to create or update 
resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" + if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) + } + }) + configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n" + allowedResource, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) + t.Run("resources_create_or_update (not denied) creates or updates resource", func(t *testing.T) { + if allowedResource.IsError { + t.Fatalf("call tool should not fail") + } + }) + }) +} + +func TestResourcesDelete(t *testing.T) { + testCase(t, func(c *mcpContext) { + c.withEnvTest() + t.Run("resources_delete with missing apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with missing kind returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument kind" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with invalid apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) + if !toolResult.IsError { + t.Fatalf("call tool
should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, invalid argument apiVersion" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with nonexistent apiVersion returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with missing name returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument name" { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with nonexistent resource returns error", func(t *testing.T) { + toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"}) + if !toolResult.IsError { + t.Fatalf("call tool should fail") + return + } + if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: configmaps "nonexistent-configmap" not found` { + t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + return + } + }) + resourcesDeleteCm, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": 
"ConfigMap", "name": "a-configmap-to-delete"}) + t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesDeleteCm.IsError { + t.Fatalf("call tool failed") + return + } + if resourcesDeleteCm.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { + t.Fatalf("invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text) + return + } + }) + client := c.newKubernetesClient() + t.Run("resources_delete with valid namespaced resource deletes ConfigMap", func(t *testing.T) { + _, err := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-configmap-to-delete", metav1.GetOptions{}) + if err == nil { + t.Fatalf("ConfigMap not deleted") + return + } + }) + resourcesDeleteNamespace, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"}) + t.Run("resources_delete with valid cluster-scoped resource returns success", func(t *testing.T) { + if err != nil { + t.Fatalf("call tool failed %v", err) + return + } + if resourcesDeleteNamespace.IsError { + t.Fatalf("call tool failed") + return + } + if resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { + t.Fatalf("invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text) + return + } + }) + t.Run("resources_delete with valid cluster-scoped resource deletes Namespace", func(t *testing.T) { + ns, err := client.CoreV1().Namespaces().Get(c.ctx, "ns-to-delete", metav1.GetOptions{}) + if err == nil && ns != nil && ns.DeletionTimestamp == nil { + t.Fatalf("Namespace not deleted") + return + } + }) + }) +} + +func TestResourcesDeleteDenied(t *testing.T) { + deniedResourcesServer := &config.StaticConfig{ + DeniedResources: []config.GroupVersionKind{ + {Version: "v1", Kind: "Secret"}, + {Group: "rbac.authorization.k8s.io", Version: "v1"},
+ }, + } + testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { + c.withEnvTest() + kc := c.newKubernetesClient() + _, _ = kc.CoreV1().ConfigMaps("default").Create(c.ctx, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"}, + }, metav1.CreateOptions{}) + deniedByKind, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) + t.Run("resources_delete (denied by kind) has error", func(t *testing.T) { + if !deniedByKind.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_delete (denied by kind) describes denial", func(t *testing.T) { + expectedMessage := "failed to delete resource: resource not allowed: /v1, Kind=Secret" + if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) + } + }) + deniedByGroup, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) + t.Run("resources_delete (denied by group) has error", func(t *testing.T) { + if !deniedByGroup.IsError { + t.Fatalf("call tool should fail") + } + }) + t.Run("resources_delete (denied by group) describes denial", func(t *testing.T) { + expectedMessage := "failed to delete resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" + if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { + t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) + } + }) + allowedResource, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"}) + t.Run("resources_delete (not denied) deletes resource", func(t *testing.T) { + if
allowedResource.IsError { + t.Fatalf("call tool should not fail") + } + }) + }) +} diff --git a/pkg 2/mcp/testdata/helm-chart-no-op/Chart.yaml b/pkg 2/mcp/testdata/helm-chart-no-op/Chart.yaml new file mode 100644 index 00000000..de13c067 --- /dev/null +++ b/pkg 2/mcp/testdata/helm-chart-no-op/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v1 +name: no-op +version: 1.33.7 diff --git a/pkg 2/mcp/testdata/helm-chart-secret/Chart.yaml b/pkg 2/mcp/testdata/helm-chart-secret/Chart.yaml new file mode 100644 index 00000000..510edaf0 --- /dev/null +++ b/pkg 2/mcp/testdata/helm-chart-secret/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: secret-chart +version: 0.1.0 +type: application + diff --git a/pkg 2/mcp/testdata/helm-chart-secret/templates/secret.yaml b/pkg 2/mcp/testdata/helm-chart-secret/templates/secret.yaml new file mode 100644 index 00000000..89829fe6 --- /dev/null +++ b/pkg 2/mcp/testdata/helm-chart-secret/templates/secret.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-secret + labels: + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +type: Opaque +data: + username: {{ b64enc "aitana" }} + password: {{ b64enc "alex" }} + diff --git a/pkg 2/output/output.go b/pkg 2/output/output.go new file mode 100644 index 00000000..c558ae9d --- /dev/null +++ b/pkg 2/output/output.go @@ -0,0 +1,127 @@ +package output + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/printers" + yml "sigs.k8s.io/yaml" +) + +var Yaml = &yaml{} + +var Table = &table{} + +type Output interface { + // GetName returns the name of the output format, will be used by the CLI to identify the output format. + GetName() string + // AsTable true if the kubernetes request should be made with the `application/json;as=Table;v=0.1` header. 
+ AsTable() bool + // PrintObj prints the given object as a string. + PrintObj(obj runtime.Unstructured) (string, error) +} + +var Outputs = []Output{ + Yaml, + Table, +} + +var Names []string + +func FromString(name string) Output { + for _, output := range Outputs { + if output.GetName() == name { + return output + } + } + return nil +} + +type yaml struct{} + +func (p *yaml) GetName() string { + return "yaml" +} +func (p *yaml) AsTable() bool { + return false +} +func (p *yaml) PrintObj(obj runtime.Unstructured) (string, error) { + return MarshalYaml(obj) +} + +type table struct{} + +func (p *table) GetName() string { + return "table" +} +func (p *table) AsTable() bool { + return true +} +func (p *table) PrintObj(obj runtime.Unstructured) (string, error) { + var objectToPrint runtime.Object = obj + withNamespace := false + if obj.GetObjectKind().GroupVersionKind() == metav1.SchemeGroupVersion.WithKind("Table") { + t := &metav1.Table{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), t); err == nil { + objectToPrint = t + // Process the Raw object to retrieve the complete metadata (see kubectl/pkg/printers/table_printer.go) + for i := range t.Rows { + row := &t.Rows[i] + if row.Object.Raw == nil || row.Object.Object != nil { + continue + } + row.Object.Object, err = runtime.Decode(unstructured.UnstructuredJSONScheme, row.Object.Raw) + // Print namespace if at least one row has it (object is namespaced) + if err == nil && !withNamespace { + switch rowObject := row.Object.Object.(type) { + case *unstructured.Unstructured: + withNamespace = rowObject.GetNamespace() != "" + } + } + } + } + } + buf := new(bytes.Buffer) + // TablePrinter is mutable and not thread-safe, must create a new instance each time. 
+ printer := printers.NewTablePrinter(printers.PrintOptions{ + WithNamespace: withNamespace, + WithKind: true, + Wide: true, + ShowLabels: true, + }) + err := printer.PrintObj(objectToPrint, buf) + return buf.String(), err +} + +func MarshalYaml(v any) (string, error) { + switch t := v.(type) { + //case unstructured.UnstructuredList: + // for i := range t.Items { + // t.Items[i].SetManagedFields(nil) + // } + // v = t.Items + case *unstructured.UnstructuredList: + for i := range t.Items { + t.Items[i].SetManagedFields(nil) + } + v = t.Items + //case unstructured.Unstructured: + // t.SetManagedFields(nil) + case *unstructured.Unstructured: + t.SetManagedFields(nil) + } + ret, err := yml.Marshal(v) + if err != nil { + return "", err + } + return string(ret), nil +} + +func init() { + Names = make([]string, 0) + for _, output := range Outputs { + Names = append(Names, output.GetName()) + } +} diff --git a/pkg 2/output/output_test.go b/pkg 2/output/output_test.go new file mode 100644 index 00000000..ecad73c0 --- /dev/null +++ b/pkg 2/output/output_test.go @@ -0,0 +1,32 @@ +package output + +import ( + "encoding/json" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "regexp" + "testing" +) + +func TestPlainTextUnstructuredList(t *testing.T) { + var podList unstructured.UnstructuredList + _ = json.Unmarshal([]byte(` + { "apiVersion": "v1", "kind": "PodList", "items": [{ + "apiVersion": "v1", "kind": "Pod", + "metadata": { + "name": "pod-1", "namespace": "default", "creationTimestamp": "2023-10-01T00:00:00Z", "labels": { "app": "nginx" } + }, + "spec": { "containers": [{ "name": "container-1", "image": "marcnuri/chuck-norris" }] } } + ]}`), &podList) + out, err := Table.PrintObj(&podList) + t.Run("processes the list", func(t *testing.T) { + if err != nil { + t.Fatalf("Error printing pod list: %v", err) + } + }) + t.Run("prints headers", func(t *testing.T) { + expectedHeaders := "NAME\\s+AGE\\s+LABELS" + if m, e := regexp.MatchString(expectedHeaders, out); !m || e 
!= nil { + t.Errorf("Expected headers '%s' not found in output: %s", expectedHeaders, out) + } + }) +} diff --git a/pkg 2/version/version.go b/pkg 2/version/version.go new file mode 100644 index 00000000..da4ebbd1 --- /dev/null +++ b/pkg 2/version/version.go @@ -0,0 +1,6 @@ +package version + +var CommitHash = "unknown" +var BuildTime = "1970-01-01T00:00:00Z" +var Version = "0.0.0" +var BinaryName = "kubernetes-mcp-server"