diff --git a/.changes/unreleased/operator-Fixed-20260423-224719.yaml b/.changes/unreleased/operator-Fixed-20260423-224719.yaml new file mode 100644 index 000000000..47a906644 --- /dev/null +++ b/.changes/unreleased/operator-Fixed-20260423-224719.yaml @@ -0,0 +1,4 @@ +project: operator +kind: Fixed +body: Fixed an issue where in-use features from an enterprise cluster did not have deterministic sort order and could cause reconciliation storms due to status changes. +time: 2026-04-23T22:47:19.373877-04:00 diff --git a/ci/rp-controller-gen.nix b/ci/rp-controller-gen.nix index ea3176028..d68b7ec32 100644 --- a/ci/rp-controller-gen.nix +++ b/ci/rp-controller-gen.nix @@ -6,13 +6,13 @@ buildGo126Module rec { pname = "rp-controller-gen"; - version = "59451d668eb28f01f91354a2463d766866148ef4"; + version = "f56efc4bf824e209eba669c8f3607c04f7e25ce9"; src = fetchFromGitHub { owner = "redpanda-data"; repo = "common-go"; rev = "${version}"; - hash = "sha256-KoVBYGE0pkXpcPb+8145Wmu0m8lex8hc8c0QzFVzhho="; + hash = "sha256-gcSvonPNqEKUqT+s7/a/KG7xqLhW8s9bj951w2x09rk="; }; vendorHash = "sha256-PIKAvpLy0tTYkkzxg1UvHhDMhQGysPQ06k1J+5llN84="; diff --git a/gen/go.mod b/gen/go.mod index 03c079156..b8c47f057 100644 --- a/gen/go.mod +++ b/gen/go.mod @@ -304,6 +304,7 @@ require ( sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/mcs-api v0.4.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect ) diff --git a/go.work.sum b/go.work.sum index ce1afa94f..d478afbd7 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1107,6 +1107,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp 
v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= diff --git a/operator/chart/chart.go b/operator/chart/chart.go index 0aff44291..648ad1eac 100644 --- a/operator/chart/chart.go +++ b/operator/chart/chart.go @@ -54,6 +54,8 @@ func render(dot *helmette.Dot) []kube.Object { ConfigMap(dot), MetricsService(dot), WebhookService(dot), + OperatorService(dot), + OperatorServiceExport(dot), MutatingWebhookConfiguration(dot), ValidatingWebhookConfiguration(dot), ServiceAccount(dot), @@ -65,10 +67,14 @@ func render(dot *helmette.Dot) []kube.Object { MigrationJobServiceAccount(dot), } - for _, svc := range StretchClusterService(dot) { + for _, svc := range OperatorPeerServices(dot) { manifests = append(manifests, &svc) } + for _, si := range OperatorServiceImports(dot) { + manifests = append(manifests, &si) + } + for _, cr := range ClusterRoles(dot) { manifests = append(manifests, &cr) } @@ -77,10 +83,6 @@ func render(dot *helmette.Dot) []kube.Object { manifests = append(manifests, &crb) } - for _, svc := range StretchClusterService(dot) { - manifests = append(manifests, &svc) - } - // NB: This slice may contain nil interfaces! // Filtering happens elsewhere, don't call this function directly if you // can avoid it. 
diff --git a/operator/chart/service.go b/operator/chart/service.go index afc390c71..09d1bb61c 100644 --- a/operator/chart/service.go +++ b/operator/chart/service.go @@ -18,47 +18,229 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" "github.com/redpanda-data/redpanda-operator/gotohelm/helmette" ) -func StretchClusterService(dot *helmette.Dot) []corev1.Service { +// OperatorServicePort is the gRPC port the operator listens on for +// cross-cluster raft traffic. Matches the --raft-address default. +const OperatorServicePort = 9443 + +// OperatorService renders the peer-facing gRPC Service in front of the +// operator Deployment when multicluster.service.enabled is true. The +// Service type and annotations come straight from values — any mesh +// that configures routing via annotations (Cilium ClusterMesh, +// Submariner, …) or any cloud-specific LoadBalancer knob fits without +// the chart hard-coding any particular implementation. +// +// The Service name equals the helm fullname so peers can address it as +// `..svc.cluster.local:9443`. That matches the +// naming convention the bootstrap tooling already uses for the TLS +// secret prefix. 
+func OperatorService(dot *helmette.Dot) *corev1.Service { values := helmette.Unwrap[Values](dot.Values) - if !values.Multicluster.ServicePerOperatorDeployment { + if !values.Multicluster.Enabled || !values.Multicluster.Service.Enabled { return nil } - var svcs []corev1.Service - annotations := helmette.Default(map[string]string{}, values.Annotations) + svcType := values.Multicluster.Service.Type + if svcType == "" { + svcType = corev1.ServiceTypeClusterIP + } + + annotations := helmette.Merge( + map[string]string{}, + helmette.Default(map[string]string{}, values.Annotations), + helmette.Default(map[string]string{}, values.Multicluster.Service.Annotations), + ) + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: Fullname(dot), + Namespace: dot.Release.Namespace, + Labels: Labels(dot), + Annotations: annotations, + }, + Spec: corev1.ServiceSpec{ + Type: svcType, + Selector: SelectorLabels(dot), + Ports: []corev1.ServicePort{ + { + Name: "raft", + Port: int32(OperatorServicePort), + TargetPort: intstr.FromInt32(int32(OperatorServicePort)), + Protocol: corev1.ProtocolTCP, + }, + }, + // Peers need to reach this Service before the local operator + // is Ready (Ready is gated on raft quorum forming), so don't + // wait for readiness to publish endpoint addresses. + PublishNotReadyAddresses: true, + }, + } +} + +// OperatorPeerServices renders a selectorless placeholder Service for +// every remote peer listed in multicluster.peers (excluding the local +// cluster itself). Rendered only when multicluster.service.mesh=true. +// +// Why: with Cilium ClusterMesh "global services", a Service named X +// on cluster A merges its endpoints into a Service also named X on +// cluster B — but only if both Services exist. Without the placeholder +// on cluster B, a pod on B looking up `X..svc.cluster.local` gets +// NXDOMAIN. 
This function emits the placeholders so every cluster can +// resolve every peer's operator Service. The real endpoints come from +// the OperatorService on the peer's own cluster; ClusterMesh merges +// them in via the matching name. +// +// Placeholders are always ClusterIP — they carry no selector and +// therefore no local endpoints, so a LoadBalancer type here would +// provision a cloud LB with nothing behind it. Only the local +// OperatorService respects Multicluster.Service.Type. +// +// Annotations are Multicluster.Service.Annotations merged with the +// peer's own Peer.Annotations (peer wins on conflict), so a user can +// set mesh-wide defaults and peer-specific overrides like Cilium +// `service.cilium.io/affinity: `. +func OperatorPeerServices(dot *helmette.Dot) []corev1.Service { + values := helmette.Unwrap[Values](dot.Values) + if !values.Multicluster.Enabled || !values.Multicluster.Service.Enabled { + return nil + } + if !values.Multicluster.Service.Mesh { + return nil + } + + self := values.Multicluster.Name + var svcs []corev1.Service for _, p := range values.Multicluster.Peers { + if p.Name == self { + continue + } + annotations := helmette.Merge( + map[string]string{}, + helmette.Default(map[string]string{}, values.Annotations), + helmette.Default(map[string]string{}, values.Multicluster.Service.Annotations), + helmette.Default(map[string]string{}, p.Annotations), + ) svcs = append(svcs, corev1.Service{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ - Name: cleanForK8sWithSuffix(fmt.Sprintf("%s-%s", p.Name, helmette.Default(dot.Chart.Name, values.NameOverride)), "raft-service"), + Name: p.Name, Namespace: dot.Release.Namespace, Labels: Labels(dot), - Annotations: helmette.Merge(annotations, helmette.Default(map[string]string{}, p.AdditionalAnnotation)), + Annotations: annotations, }, Spec: corev1.ServiceSpec{ - Selector: p.SelectorOverwrite, + Type: corev1.ServiceTypeClusterIP, + // No selector → 
no local endpoints. Cilium ClusterMesh + // merges in the remote peer's endpoints. Ports: []corev1.ServicePort{ { - Port: int32(9443), - TargetPort: intstr.FromInt32(9443), + Name: "raft", + Port: int32(OperatorServicePort), + TargetPort: intstr.FromInt32(int32(OperatorServicePort)), + Protocol: corev1.ProtocolTCP, }, }, - PublishNotReadyAddresses: true, }, }) } return svcs } +// OperatorServiceExport renders an MCS ServiceExport for the operator +// Service when multicluster.service.mcs is true. A compliant MCS +// controller in the cluster mirrors the Service into every peer cluster +// under `..svc.clusterset.local`. +func OperatorServiceExport(dot *helmette.Dot) *mcsv1alpha1.ServiceExport { + values := helmette.Unwrap[Values](dot.Values) + + if !values.Multicluster.Enabled || + !values.Multicluster.Service.Enabled || + !values.Multicluster.Service.MCS { + return nil + } + + return &mcsv1alpha1.ServiceExport{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "multicluster.x-k8s.io/v1alpha1", + Kind: "ServiceExport", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: Fullname(dot), + Namespace: dot.Release.Namespace, + Labels: Labels(dot), + Annotations: helmette.Default(map[string]string{}, values.Annotations), + }, + } +} + +// OperatorServiceImports renders one ServiceImport per remote peer +// when multicluster.service.mcs is true. Each import gives this +// cluster a local clusterset-scoped entry point for that peer's +// exported operator Service, resolvable at +// `..svc.clusterset.local`. +// +// The local cluster is skipped: its own ServiceExport causes the MCS +// controller to auto-create a matching ServiceImport on every cluster +// in the clusterset including this one, so a chart-managed import for +// self would collide with the controller-managed one. +// +// Peers are trusted to be named after each remote operator's helm +// fullname — the same convention the chart uses elsewhere for matching +// peer.Name against the remote cluster's multicluster.name. 
+func OperatorServiceImports(dot *helmette.Dot) []mcsv1alpha1.ServiceImport { + values := helmette.Unwrap[Values](dot.Values) + + if !values.Multicluster.Enabled || + !values.Multicluster.Service.Enabled || + !values.Multicluster.Service.MCS { + return nil + } + + self := values.Multicluster.Name + var imports []mcsv1alpha1.ServiceImport + for _, p := range values.Multicluster.Peers { + if p.Name == self { + continue + } + imports = append(imports, mcsv1alpha1.ServiceImport{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "multicluster.x-k8s.io/v1alpha1", + Kind: "ServiceImport", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: p.Name, + Namespace: dot.Release.Namespace, + Labels: Labels(dot), + Annotations: helmette.Default(map[string]string{}, values.Annotations), + }, + Spec: mcsv1alpha1.ServiceImportSpec{ + Type: mcsv1alpha1.ClusterSetIP, + Ports: []mcsv1alpha1.ServicePort{ + { + Name: "raft", + Protocol: corev1.ProtocolTCP, + Port: int32(OperatorServicePort), + }, + }, + }, + }) + } + return imports +} + func WebhookService(dot *helmette.Dot) *corev1.Service { values := helmette.Unwrap[Values](dot.Values) diff --git a/operator/chart/templates/_chart.go.tpl b/operator/chart/templates/_chart.go.tpl index c247802ff..0b59db926 100644 --- a/operator/chart/templates/_chart.go.tpl +++ b/operator/chart/templates/_chart.go.tpl @@ -5,27 +5,27 @@ {{- $dot := (index .a 0) -}} {{- range $_ := (list 1) -}} {{- $_is_returning := false -}} -{{- $manifests := (list (get (fromJson (include "operator.Issuer" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.Certificate" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ConfigMap" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MetricsService" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.WebhookService" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MutatingWebhookConfiguration" (dict "a" (list $dot)))) "r") (get (fromJson (include 
"operator.ValidatingWebhookConfiguration" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ServiceAccount" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ServiceMonitor" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.Deployment" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.PreInstallCRDJob" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.CRDJobServiceAccount" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.PostUpgradeMigrationJob" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MigrationJobServiceAccount" (dict "a" (list $dot)))) "r")) -}} -{{- range $_, $svc := (get (fromJson (include "operator.StretchClusterService" (dict "a" (list $dot)))) "r") -}} +{{- $manifests := (list (get (fromJson (include "operator.Issuer" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.Certificate" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ConfigMap" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MetricsService" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.WebhookService" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.OperatorService" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.OperatorServiceExport" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MutatingWebhookConfiguration" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ValidatingWebhookConfiguration" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ServiceAccount" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.ServiceMonitor" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.Deployment" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.PreInstallCRDJob" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.CRDJobServiceAccount" (dict "a" (list $dot)))) "r") (get (fromJson (include 
"operator.PostUpgradeMigrationJob" (dict "a" (list $dot)))) "r") (get (fromJson (include "operator.MigrationJobServiceAccount" (dict "a" (list $dot)))) "r")) -}} +{{- range $_, $svc := (get (fromJson (include "operator.OperatorPeerServices" (dict "a" (list $dot)))) "r") -}} {{- $manifests = (concat (default (list) $manifests) (list $svc)) -}} {{- end -}} {{- if $_is_returning -}} {{- break -}} {{- end -}} -{{- range $_, $cr := (get (fromJson (include "operator.ClusterRoles" (dict "a" (list $dot)))) "r") -}} -{{- $manifests = (concat (default (list) $manifests) (list $cr)) -}} +{{- range $_, $si := (get (fromJson (include "operator.OperatorServiceImports" (dict "a" (list $dot)))) "r") -}} +{{- $manifests = (concat (default (list) $manifests) (list $si)) -}} {{- end -}} {{- if $_is_returning -}} {{- break -}} {{- end -}} -{{- range $_, $crb := (get (fromJson (include "operator.ClusterRoleBindings" (dict "a" (list $dot)))) "r") -}} -{{- $manifests = (concat (default (list) $manifests) (list $crb)) -}} +{{- range $_, $cr := (get (fromJson (include "operator.ClusterRoles" (dict "a" (list $dot)))) "r") -}} +{{- $manifests = (concat (default (list) $manifests) (list $cr)) -}} {{- end -}} {{- if $_is_returning -}} {{- break -}} {{- end -}} -{{- range $_, $svc := (get (fromJson (include "operator.StretchClusterService" (dict "a" (list $dot)))) "r") -}} -{{- $manifests = (concat (default (list) $manifests) (list $svc)) -}} +{{- range $_, $crb := (get (fromJson (include "operator.ClusterRoleBindings" (dict "a" (list $dot)))) "r") -}} +{{- $manifests = (concat (default (list) $manifests) (list $crb)) -}} {{- end -}} {{- if $_is_returning -}} {{- break -}} diff --git a/operator/chart/templates/_service.go.tpl b/operator/chart/templates/_service.go.tpl index f754fccc7..264f4cc78 100644 --- a/operator/chart/templates/_service.go.tpl +++ b/operator/chart/templates/_service.go.tpl @@ -1,20 +1,50 @@ {{- /* GENERATED FILE DO NOT EDIT */ -}} {{- /* Transpiled by gotohelm from 
"github.com/redpanda-data/redpanda-operator/operator/chart/service.go" */ -}} -{{- define "operator.StretchClusterService" -}} +{{- define "operator.OperatorService" -}} {{- $dot := (index .a 0) -}} {{- range $_ := (list 1) -}} {{- $_is_returning := false -}} {{- $values := $dot.Values.AsMap -}} -{{- if (not $values.multicluster.servicePerOperatorDeployment) -}} +{{- if (or (not $values.multicluster.enabled) (not $values.multicluster.service.enabled)) -}} {{- $_is_returning = true -}} {{- (dict "r" (coalesce nil)) | toJson -}} {{- break -}} {{- end -}} +{{- $svcType := $values.multicluster.service.type -}} +{{- if (eq $svcType "") -}} +{{- $svcType = "ClusterIP" -}} +{{- end -}} +{{- $annotations := (merge (dict) (dict) (default (dict) $values.annotations) (default (dict) $values.multicluster.service.annotations)) -}} +{{- $_is_returning = true -}} +{{- (dict "r" (mustMergeOverwrite (dict "metadata" (dict) "spec" (dict) "status" (dict "loadBalancer" (dict))) (mustMergeOverwrite (dict) (dict "apiVersion" "v1" "kind" "Service")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" (get (fromJson (include "operator.Fullname" (dict "a" (list $dot)))) "r") "namespace" $dot.Release.Namespace "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" $annotations)) "spec" (mustMergeOverwrite (dict) (dict "type" $svcType "selector" (get (fromJson (include "operator.SelectorLabels" (dict "a" (list $dot)))) "r") "ports" (list (mustMergeOverwrite (dict "port" 0 "targetPort" 0) (dict "name" "raft" "port" ((9443 | int) | int) "targetPort" ((9443 | int) | int) "protocol" "TCP"))) "publishNotReadyAddresses" true))))) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "operator.OperatorPeerServices" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $_is_returning := false -}} +{{- $values := $dot.Values.AsMap -}} +{{- if (or (not $values.multicluster.enabled) (not $values.multicluster.service.enabled)) -}} 
+{{- $_is_returning = true -}} +{{- (dict "r" (coalesce nil)) | toJson -}} +{{- break -}} +{{- end -}} +{{- if (not $values.multicluster.service.mesh) -}} +{{- $_is_returning = true -}} +{{- (dict "r" (coalesce nil)) | toJson -}} +{{- break -}} +{{- end -}} +{{- $self := $values.multicluster.name -}} {{- $svcs := (coalesce nil) -}} -{{- $annotations := (default (dict) $values.annotations) -}} {{- range $_, $p := $values.multicluster.peers -}} -{{- $svcs = (concat (default (list) $svcs) (list (mustMergeOverwrite (dict "metadata" (dict) "spec" (dict) "status" (dict "loadBalancer" (dict))) (mustMergeOverwrite (dict) (dict "apiVersion" "v1" "kind" "Service")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" (get (fromJson (include "operator.cleanForK8sWithSuffix" (dict "a" (list (printf "%s-%s" $p.name (default $dot.Chart.Name $values.nameOverride)) "raft-service")))) "r") "namespace" $dot.Release.Namespace "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" (merge (dict) $annotations (default (dict) $p.additionalAnnotation)))) "spec" (mustMergeOverwrite (dict) (dict "selector" $p.selectorOverwrite "ports" (list (mustMergeOverwrite (dict "port" 0 "targetPort" 0) (dict "port" ((9443 | int) | int) "targetPort" (9443 | int)))) "publishNotReadyAddresses" true)))))) -}} +{{- if (eq $p.name $self) -}} +{{- continue -}} +{{- end -}} +{{- $annotations := (merge (dict) (dict) (default (dict) $values.annotations) (default (dict) $values.multicluster.service.annotations) (default (dict) $p.annotations)) -}} +{{- $svcs = (concat (default (list) $svcs) (list (mustMergeOverwrite (dict "metadata" (dict) "spec" (dict) "status" (dict "loadBalancer" (dict))) (mustMergeOverwrite (dict) (dict "apiVersion" "v1" "kind" "Service")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" $p.name "namespace" $dot.Release.Namespace "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" $annotations)) 
"spec" (mustMergeOverwrite (dict) (dict "type" "ClusterIP" "ports" (list (mustMergeOverwrite (dict "port" 0 "targetPort" 0) (dict "name" "raft" "port" ((9443 | int) | int) "targetPort" ((9443 | int) | int) "protocol" "TCP"))))))))) -}} {{- end -}} {{- if $_is_returning -}} {{- break -}} @@ -25,6 +55,49 @@ {{- end -}} {{- end -}} +{{- define "operator.OperatorServiceExport" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $_is_returning := false -}} +{{- $values := $dot.Values.AsMap -}} +{{- if (or (or (not $values.multicluster.enabled) (not $values.multicluster.service.enabled)) (not $values.multicluster.service.mcs)) -}} +{{- $_is_returning = true -}} +{{- (dict "r" (coalesce nil)) | toJson -}} +{{- break -}} +{{- end -}} +{{- $_is_returning = true -}} +{{- (dict "r" (mustMergeOverwrite (dict "metadata" (dict) "spec" (dict) "status" (dict)) (mustMergeOverwrite (dict) (dict "apiVersion" "multicluster.x-k8s.io/v1alpha1" "kind" "ServiceExport")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" (get (fromJson (include "operator.Fullname" (dict "a" (list $dot)))) "r") "namespace" $dot.Release.Namespace "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" (default (dict) $values.annotations)))))) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + +{{- define "operator.OperatorServiceImports" -}} +{{- $dot := (index .a 0) -}} +{{- range $_ := (list 1) -}} +{{- $_is_returning := false -}} +{{- $values := $dot.Values.AsMap -}} +{{- if (or (or (not $values.multicluster.enabled) (not $values.multicluster.service.enabled)) (not $values.multicluster.service.mcs)) -}} +{{- $_is_returning = true -}} +{{- (dict "r" (coalesce nil)) | toJson -}} +{{- break -}} +{{- end -}} +{{- $self := $values.multicluster.name -}} +{{- $imports := (coalesce nil) -}} +{{- range $_, $p := $values.multicluster.peers -}} +{{- if (eq $p.name $self) -}} +{{- continue -}} +{{- end -}} +{{- $imports = (concat (default (list) 
$imports) (list (mustMergeOverwrite (dict "metadata" (dict) "spec" (dict "ports" (coalesce nil) "type" "") "status" (dict)) (mustMergeOverwrite (dict) (dict "apiVersion" "multicluster.x-k8s.io/v1alpha1" "kind" "ServiceImport")) (dict "metadata" (mustMergeOverwrite (dict) (dict "name" $p.name "namespace" $dot.Release.Namespace "labels" (get (fromJson (include "operator.Labels" (dict "a" (list $dot)))) "r") "annotations" (default (dict) $values.annotations))) "spec" (mustMergeOverwrite (dict "ports" (coalesce nil) "type" "") (dict "type" "ClusterSetIP" "ports" (list (mustMergeOverwrite (dict "port" 0) (dict "name" "raft" "protocol" "TCP" "port" ((9443 | int) | int)))))))))) -}} +{{- end -}} +{{- if $_is_returning -}} +{{- break -}} +{{- end -}} +{{- $_is_returning = true -}} +{{- (dict "r" $imports) | toJson -}} +{{- break -}} +{{- end -}} +{{- end -}} + {{- define "operator.WebhookService" -}} {{- $dot := (index .a 0) -}} {{- range $_ := (list 1) -}} diff --git a/operator/chart/testdata/template-cases.golden.txtar b/operator/chart/testdata/template-cases.golden.txtar index 2e6d6f1d9..fa00b60e9 100644 --- a/operator/chart/testdata/template-cases.golden.txtar +++ b/operator/chart/testdata/template-cases.golden.txtar @@ -84186,6 +84186,4838 @@ subjects: # Source: operator/templates/entry-point.yaml apiVersion: batch/v1 kind: Job +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-4" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - migration + command: + - /redpanda-operator + 
image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: migration + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-migration-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +-- testdata/multicluster service loadbalancer.yaml.golden -- +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +data: + controller_manager_config.yaml: |- + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + health: + healthProbeBindAddress: :8081 + kind: ControllerManagerConfig + leaderElection: + leaderElect: true + resourceName: aa9fc693.vectorized.io + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 +kind: ConfigMap +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-config + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + 
- create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update 
+ - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + 
- patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-additional-controllers-default +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + - nodes + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - 
redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-additional-controllers-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-additional-controllers-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-metrics-service + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + 
service.beta.kubernetes.io/aws-load-balancer-type: nlb + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + targetPort: 9443 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + type: LoadBalancer +--- +# Source: operator/templates/entry-point.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + strategy: + type: RollingUpdate + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - multicluster + - --base-image=docker.redpanda.com/redpandadata/redpanda-operator + - --base-tag=v26.1.1 + - --ca-file=/tls/ca.crt + - --certificate-file=/tls/tls.crt + - --health-probe-bind-address=:8081 + - --kubeconfig-name=operator + - --kubeconfig-namespace=default + - --kubernetes-api-address=https://dns.address.for.my.kubernetes.api.server:8080 + - --log-level=info + - --metrics-bind-address=:8443 + - --name=blue + - --private-key-file=/tls/tls.key + - --raft-address=0.0.0.0:9443 + - --peer=blue://blue.example.com:9443 + - --peer=west://west.example.com:9443 + command: + - /redpanda-operator + env: [] + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz/ + port: 8081 + 
initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + - mountPath: /tls + name: operator-multicluster-certificates + readOnly: true + ephemeralContainers: null + imagePullSecrets: [] + initContainers: [] + nodeSelector: {} + securityContext: + runAsUser: 65532 + serviceAccountName: operator + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + - name: operator-multicluster-certificates + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + secretName: operator-multicluster-certificates +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + 
annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - patch + - update +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - 
horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + 
- configmaps + - endpoints + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + 
resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-crd-job-default +subjects: +- kind: ServiceAccount + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: 
before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-migration-job-default +subjects: +- kind: ServiceAccount + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-5" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crds + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - crd + - --multicluster + command: + - /redpanda-operator + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: crd-installation + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-crd-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + 
apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +--- +# Source: operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-4" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - migration + command: + - /redpanda-operator + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: migration + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-migration-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +-- testdata/multicluster service mcs.yaml.golden -- +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: 
operator-26.1.1 + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +data: + controller_manager_config.yaml: |- + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + health: + healthProbeBindAddress: :8081 + kind: ControllerManagerConfig + leaderElection: + leaderElect: true + resourceName: aa9fc693.vectorized.io + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 +kind: ConfigMap +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-config + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create 
+ - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - pods + - 
secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - 
delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-additional-controllers-default +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + - nodes + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - 
persistentvolumes + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-additional-controllers-default +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: operator-additional-controllers-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-metrics-service + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + targetPort: 9443 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + type: ClusterIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + strategy: + type: RollingUpdate + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - multicluster + - 
--base-image=docker.redpanda.com/redpandadata/redpanda-operator + - --base-tag=v26.1.1 + - --ca-file=/tls/ca.crt + - --certificate-file=/tls/tls.crt + - --health-probe-bind-address=:8081 + - --kubeconfig-name=operator + - --kubeconfig-namespace=default + - --kubernetes-api-address=https://dns.address.for.my.kubernetes.api.server:8080 + - --log-level=info + - --metrics-bind-address=:8443 + - --name=blue + - --private-key-file=/tls/tls.key + - --raft-address=0.0.0.0:9443 + - --peer=blue://blue.example.com:9443 + - --peer=west://west.example.com:9443 + - --peer=east://east.example.com:9443 + command: + - /redpanda-operator + env: [] + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz/ + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + - mountPath: /tls + name: operator-multicluster-certificates + readOnly: true + ephemeralContainers: null + imagePullSecrets: [] + initContainers: [] + nodeSelector: {} + securityContext: + runAsUser: 65532 + serviceAccountName: operator + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + - name: operator-multicluster-certificates + secret: + items: + - key: tls.crt + path: tls.crt + 
- key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + secretName: operator-multicluster-certificates +--- +# Source: operator/templates/entry-point.yaml +apiVersion: multicluster.x-k8s.io/v1alpha1 +kind: ServiceExport +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: {} +--- +# Source: operator/templates/entry-point.yaml +apiVersion: multicluster.x-k8s.io/v1alpha1 +kind: ServiceImport +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: west + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + type: ClusterSetIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: multicluster.x-k8s.io/v1alpha1 +kind: ServiceImport +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: east + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + type: ClusterSetIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml 
+apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - patch + - update +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list 
+ - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - 
get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + 
verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-crd-job-default +subjects: +- kind: ServiceAccount + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + 
helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-migration-job-default +subjects: +- kind: ServiceAccount + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-5" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crds + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - crd + - --multicluster + command: + - /redpanda-operator + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: crd-installation + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-crd-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: 
kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +--- +# Source: operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-4" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - migration + command: + - /redpanda-operator + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: migration + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-migration-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +-- testdata/multicluster service mesh.yaml.golden -- +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + 
app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +data: + controller_manager_config.yaml: |- + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + health: + healthProbeBindAddress: :8081 + kind: ControllerManagerConfig + leaderElection: + leaderElect: true + resourceName: aa9fc693.vectorized.io + metrics: + bindAddress: 127.0.0.1:8080 + webhook: + port: 9443 +kind: ConfigMap +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-config + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + 
resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" 
+ resources: + - configmaps + - endpoints + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - 
coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-additional-controllers-default +rules: +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + - nodes + - secrets + 
verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - patch + - update +- apiGroups: + - cluster.redpanda.com + resources: + - redpandas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 
+ name: operator-additional-controllers-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-additional-controllers-default +subjects: +- kind: ServiceAccount + name: operator + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-metrics-service + namespace: default +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + service.cilium.io/global: "true" + service.cilium.io/shared: "false" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + targetPort: 9443 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + type: ClusterIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + service.cilium.io/affinity: west + service.cilium.io/global: "true" + service.cilium.io/shared: "false" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: west + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + targetPort: 9443 + type: ClusterIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +kind: Service 
+metadata: + annotations: + service.cilium.io/global: "true" + service.cilium.io/shared: "false" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: east + namespace: default +spec: + ports: + - name: raft + port: 9443 + protocol: TCP + targetPort: 9443 + type: ClusterIP +--- +# Source: operator/templates/entry-point.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: null + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + strategy: + type: RollingUpdate + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - multicluster + - --base-image=docker.redpanda.com/redpandadata/redpanda-operator + - --base-tag=v26.1.1 + - --ca-file=/tls/ca.crt + - --certificate-file=/tls/tls.crt + - --health-probe-bind-address=:8081 + - --kubeconfig-name=operator + - --kubeconfig-namespace=default + - --kubernetes-api-address=https://dns.address.for.my.kubernetes.api.server:8080 + - --log-level=info + - --metrics-bind-address=:8443 + - --name=blue + - --private-key-file=/tls/tls.key + - --raft-address=0.0.0.0:9443 + - --peer=blue://blue.example.com:9443 + - --peer=west://west.example.com:9443 + - --peer=east://east.example.com:9443 + command: + - /redpanda-operator + env: [] + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz/ + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: 
manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + - mountPath: /tls + name: operator-multicluster-certificates + readOnly: true + ephemeralContainers: null + imagePullSecrets: [] + initContainers: [] + nodeSelector: {} + securityContext: + runAsUser: 65532 + serviceAccountName: operator + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace + - name: operator-multicluster-certificates + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + secretName: operator-multicluster-certificates +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: v1 +automountServiceAccountToken: false +kind: ServiceAccount +metadata: + annotations: + helm.sh/hook: post-upgrade + 
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - patch + - update +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - 
delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - secrets + - serviceaccounts + verbs: + - create + - get + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + verbs: + - delete + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - limitranges + - persistentvolumeclaims + - pods + - pods/log + - replicationcontrollers + - resourcequotas + - serviceaccounts + - services + verbs: + - get + - list +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - pods + - secrets + 
- serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - controllerrevisions + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + - issuers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - consoles + - nodepools + - redpandas + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups + - redpandaroles + - schemas + - shadowlinks + - stretchclusters + - topics + - users + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - cluster.redpanda.com + resources: + - groups/finalizers + - nodepools/finalizers + - redpandaroles/finalizers + - redpandas/finalizers + - schemas/finalizers + - shadowlinks/finalizers + - stretchclusters/finalizers + - topics/finalizers + - users/finalizers + verbs: + - update +- apiGroups: + - cluster.redpanda.com + resources: + - groups/status + - nodepools/status + - redpandaroles/status + - redpandas/status + - schemas/status + - shadowlinks/status + - stretchclusters/status + - topics/status + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - 
get + - list + - patch + - update + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceexports + - serviceimports + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crd-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-crd-job-default +subjects: +- kind: ServiceAccount + name: operator-crd-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + helm.sh/hook: post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-10" + 
labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-migration-job-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-migration-job-default +subjects: +- kind: ServiceAccount + name: operator-migration-job + namespace: default +--- +# Source: operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded,hook-failed + helm.sh/hook-weight: "-5" + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: operator + app.kubernetes.io/version: v26.1.1 + helm.sh/chart: operator-26.1.1 + name: operator-crds + namespace: default +spec: + template: + metadata: + annotations: {} + labels: + app.kubernetes.io/instance: operator + app.kubernetes.io/name: operator + spec: + automountServiceAccountToken: false + containers: + - args: + - crd + - --multicluster + command: + - /redpanda-operator + image: docker.redpanda.com/redpandadata/redpanda-operator:v26.1.1 + imagePullPolicy: IfNotPresent + name: crd-installation + resources: {} + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: kube-api-access + readOnly: true + imagePullSecrets: [] + nodeSelector: {} + restartPolicy: OnFailure + serviceAccountName: operator-crd-job + terminationGracePeriodSeconds: 10 + tolerations: [] + volumes: + - name: kube-api-access + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + expirationSeconds: 3607 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +--- +# Source: 
operator/templates/entry-point.yaml +apiVersion: batch/v1 +kind: Job metadata: annotations: helm.sh/hook: post-upgrade diff --git a/operator/chart/testdata/template-cases.txtar b/operator/chart/testdata/template-cases.txtar index 599602f33..6cdb0489d 100644 --- a/operator/chart/testdata/template-cases.txtar +++ b/operator/chart/testdata/template-cases.txtar @@ -136,3 +136,64 @@ multicluster: enterprise: licenseSecretRef: name: my-secret + +-- multicluster service mesh -- +crds: + enabled: true +multicluster: + enabled: true + name: blue + apiServerExternalAddress: https://dns.address.for.my.kubernetes.api.server:8080 + peers: + - name: blue + address: blue.example.com + - name: west + address: west.example.com + annotations: + service.cilium.io/affinity: west + - name: east + address: east.example.com + service: + enabled: true + type: ClusterIP + mesh: true + annotations: + service.cilium.io/global: "true" + service.cilium.io/shared: "false" + +-- multicluster service mcs -- +crds: + enabled: true +multicluster: + enabled: true + name: blue + apiServerExternalAddress: https://dns.address.for.my.kubernetes.api.server:8080 + peers: + - name: blue + address: blue.example.com + - name: west + address: west.example.com + - name: east + address: east.example.com + service: + enabled: true + type: ClusterIP + mcs: true + +-- multicluster service loadbalancer -- +crds: + enabled: true +multicluster: + enabled: true + name: blue + apiServerExternalAddress: https://dns.address.for.my.kubernetes.api.server:8080 + peers: + - name: blue + address: blue.example.com + - name: west + address: west.example.com + service: + enabled: true + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb diff --git a/operator/chart/values.go b/operator/chart/values.go index 9f5b8832c..3e697e920 100644 --- a/operator/chart/values.go +++ b/operator/chart/values.go @@ -13,6 +13,8 @@ package operator import ( _ "embed" + "github.com/invopop/jsonschema" + 
orderedmap "github.com/wk8/go-ordered-map/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" ) @@ -26,18 +28,72 @@ var ( ) type Peer struct { - Name string `json:"name,omitempty" jsonschema:"required"` - Address string `json:"address,omitempty" jsonschema:"required"` - AdditionalAnnotation map[string]string `json:"additionalAnnotation,omitempty"` - SelectorOverwrite map[string]string `json:"selectorOverwrite,omitempty"` + Name string `json:"name,omitempty" jsonschema:"required"` + Address string `json:"address,omitempty" jsonschema:"required"` + // Annotations are merged on top of Multicluster.Service.Annotations + // on the peer placeholder Service rendered for this peer. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// MulticlusterService configures an optional peer-facing Service in +// front of the operator Deployment. The chart does not attempt to +// manage flat-network pod-IP endpoint syncing — that has a chicken-and- +// egg problem where operators must be reachable before they can learn +// peer addresses — so only Service-level exposure is supported here. +type MulticlusterService struct { + // Enabled renders a per-operator Service alongside the Deployment. + // Defaults to false so existing installs that provision their own + // Service keep working unchanged. + Enabled bool `json:"enabled"` + // Type is the Kubernetes Service type for the local operator + // Service. Only ClusterIP and LoadBalancer are supported — ClusterIP + // for in-cluster mesh routing (Cilium ClusterMesh, Submariner, + // Istio, MCS) and LoadBalancer for cross-cloud deployments without + // a service mesh. Headless (ClusterIP=None) is intentionally not + // offered: the operator runs as a single-replica Deployment, so + // per-pod DNS is useless and would actively misroute when multiple + // replicas exist. Only the local Service is load-balanced; peer + // placeholders (see Mesh) are always ClusterIP since they carry no + // endpoints. 
+ Type corev1.ServiceType `json:"type" jsonschema:"pattern=^(ClusterIP|LoadBalancer)$"` + // Annotations are merged onto the generated Service metadata. Use + // them to inject mesh-specific hints or cloud LB tuning — for + // example, {"service.cilium.io/global":"true"} for Cilium ClusterMesh + // or {"service.beta.kubernetes.io/aws-load-balancer-type":"nlb"} + // for AWS NLB. + Annotations map[string]string `json:"annotations"` + // Mesh renders one selectorless ClusterIP "placeholder" Service per + // remote peer so a mesh like Cilium ClusterMesh that merges Services + // by name has something to merge into on every cluster. Without the + // placeholder, DNS lookups for `..svc.cluster.local` on + // the local cluster return NXDOMAIN. Mutually exclusive with MCS. + Mesh bool `json:"mesh"` + // MCS additionally renders a Multi-Cluster Services ServiceExport + // for the local Service and one ServiceImport per remote peer, so + // the operator becomes reachable at + // `..svc.clusterset.local` via a compliant MCS + // controller (Submariner Lighthouse, GKE MCS, …). Mutually + // exclusive with Mesh. 
+ MCS bool `json:"mcs"` +} + +// +gotohelm:ignore=true +func (MulticlusterService) JSONSchemaExtend(schema *jsonschema.Schema) { + props := orderedmap.New[string, *jsonschema.Schema]() + props.Set("mesh", &jsonschema.Schema{Const: true}) + props.Set("mcs", &jsonschema.Schema{Const: true}) + schema.Not = &jsonschema.Schema{ + Properties: props, + Required: []string{"mesh", "mcs"}, + } } type Multicluster struct { - Enabled bool `json:"enabled"` - ServicePerOperatorDeployment bool `json:"servicePerOperatorDeployment"` - Name string `json:"name"` - KubernetesAPIExternalAddress string `json:"apiServerExternalAddress"` - Peers []Peer `json:"peers"` + Enabled bool `json:"enabled"` + Service MulticlusterService `json:"service"` + Name string `json:"name"` + KubernetesAPIExternalAddress string `json:"apiServerExternalAddress"` + Peers []Peer `json:"peers"` } type Enterprise struct { diff --git a/operator/chart/values.schema.json b/operator/chart/values.schema.json index 02062f8b1..0d1c189a4 100644 --- a/operator/chart/values.schema.json +++ b/operator/chart/values.schema.json @@ -897,23 +897,17 @@ "items": { "additionalProperties": false, "properties": { - "additionalAnnotation": { - "additionalProperties": { - "type": "string" - }, - "type": "object" - }, "address": { "type": "string" }, - "name": { - "type": "string" - }, - "selectorOverwrite": { + "annotations": { "additionalProperties": { "type": "string" }, "type": "object" + }, + "name": { + "type": "string" } }, "required": [ @@ -929,8 +923,44 @@ } ] }, - "servicePerOperatorDeployment": { - "type": "boolean" + "service": { + "additionalProperties": false, + "not": { + "properties": { + "mcs": { + "const": true + }, + "mesh": { + "const": true + } + }, + "required": [ + "mesh", + "mcs" + ] + }, + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "mcs": { + "type": "boolean" + }, + "mesh": { + "type": "boolean" + }, 
+ "type": { + "pattern": "^(ClusterIP|LoadBalancer)$", + "type": "string" + } + }, + "type": "object" } }, "type": "object" diff --git a/operator/chart/values.yaml b/operator/chart/values.yaml index 3e92bb877..c32d0d2e8 100644 --- a/operator/chart/values.yaml +++ b/operator/chart/values.yaml @@ -201,5 +201,50 @@ multicluster: # peers follows the form: # - name: peer-1 # address: ip/dns of operator host + # # annotations (optional) are merged on top of + # # multicluster.service.annotations on the placeholder Service + # # rendered for this peer — useful for per-peer hints like + # # Cilium's service.cilium.io/affinity: . + # annotations: {} peers: [] - servicePerOperatorDeployment: false + # service controls whether the chart renders a peer-facing Service in + # front of the operator Deployment. Headless isn't offered — the + # operator runs single-replica, so per-pod DNS has no value here. + service: + # enabled turns the feature on. Defaults to false so existing + # installs that provision their own Service keep working unchanged. + enabled: false + # type is the Kubernetes Service type for the local operator + # Service. ClusterIP for in-cluster mesh routing (Cilium + # ClusterMesh, Submariner, Istio, MCS). LoadBalancer for cross- + # cloud deployments without a service mesh. Peer placeholders (see + # mesh) are always ClusterIP — they have no endpoints so a + # LoadBalancer would allocate cloud LBs for nothing. + type: ClusterIP + # annotations are merged onto the local operator Service and, if + # mesh=true, onto each peer placeholder Service. Use them to inject + # mesh-specific hints or cloud LB tuning. 
Examples: + # + # # Cilium ClusterMesh + # annotations: + # service.cilium.io/global: "true" + # service.cilium.io/shared: "false" + # + # # AWS NLB + # annotations: + # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + annotations: {} + # mesh renders a selectorless ClusterIP "placeholder" Service per + # remote peer so a mesh like Cilium ClusterMesh that merges + # Services by name has something to merge into on every cluster. + # Without the placeholder, local DNS lookups for + # ..svc.cluster.local return NXDOMAIN. Mutually exclusive + # with mcs. + mesh: false + # mcs additionally renders a Multi-Cluster Services ServiceExport + # for the local Service and one ServiceImport per remote peer, so + # the operator becomes reachable at + # ..svc.clusterset.local via a compliant MCS + # controller (Submariner Lighthouse, GKE MCS, …). Mutually + # exclusive with mesh. + mcs: false diff --git a/operator/chart/values_partial.gen.go b/operator/chart/values_partial.gen.go index fa10c0110..70f0b3ada 100644 --- a/operator/chart/values_partial.gen.go +++ b/operator/chart/values_partial.gen.go @@ -96,11 +96,11 @@ type PartialVectorizedControllers struct { } type PartialMulticluster struct { - Enabled *bool "json:\"enabled,omitempty\"" - ServicePerOperatorDeployment *bool "json:\"servicePerOperatorDeployment,omitempty\"" - Name *string "json:\"name,omitempty\"" - KubernetesAPIExternalAddress *string "json:\"apiServerExternalAddress,omitempty\"" - Peers []PartialPeer "json:\"peers,omitempty\"" + Enabled *bool "json:\"enabled,omitempty\"" + Service *PartialMulticlusterService "json:\"service,omitempty\"" + Name *string "json:\"name,omitempty\"" + KubernetesAPIExternalAddress *string "json:\"apiServerExternalAddress,omitempty\"" + Peers []PartialPeer "json:\"peers,omitempty\"" } type PartialEnterprise struct { @@ -129,14 +129,21 @@ type PartialLeaderElectionConfig struct { ResourceName *string "json:\"resourceName,omitempty\"" } +type PartialMulticlusterService struct { 
+ Enabled *bool "json:\"enabled,omitempty\"" + Type *corev1.ServiceType "json:\"type,omitempty\" jsonschema:\"pattern=^(ClusterIP|LoadBalancer)$\"" + Annotations map[string]string "json:\"annotations,omitempty\"" + Mesh *bool "json:\"mesh,omitempty\"" + MCS *bool "json:\"mcs,omitempty\"" +} + type PartialMetadata struct { Labels map[string]string "json:\"labels,omitempty\"" Annotations map[string]string "json:\"annotations,omitempty\"" } type PartialPeer struct { - Name *string "json:\"name,omitempty\" jsonschema:\"required\"" - Address *string "json:\"address,omitempty\" jsonschema:\"required\"" - AdditionalAnnotation map[string]string "json:\"additionalAnnotation,omitempty\"" - SelectorOverwrite map[string]string "json:\"selectorOverwrite,omitempty\"" + Name *string "json:\"name,omitempty\" jsonschema:\"required\"" + Address *string "json:\"address,omitempty\" jsonschema:\"required\"" + Annotations map[string]string "json:\"annotations,omitempty\"" } diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/bootstrap.go b/operator/cmd/rpk-k8s/k8s/multicluster/bootstrap.go index afffff668..ba0eac17b 100644 --- a/operator/cmd/rpk-k8s/k8s/multicluster/bootstrap.go +++ b/operator/cmd/rpk-k8s/k8s/multicluster/bootstrap.go @@ -14,6 +14,7 @@ import ( "fmt" "io" "strings" + "time" "github.com/spf13/cobra" @@ -24,12 +25,14 @@ import ( // It can be populated from CLI flags via the cobra command or set // programmatically for testing. type BootstrapConfig struct { - Connection ConnectionConfig - Organization string - DNSOverrides []string - TLS bool - Kubeconfigs bool - CreateNS bool + Connection ConnectionConfig + Organization string + DNSOverrides []string + TLS bool + Kubeconfigs bool + CreateNS bool + ProvisionLoadBalancers bool + LoadBalancerTimeout time.Duration } // Run executes the bootstrap operation. 
@@ -63,6 +66,33 @@ func (c *BootstrapConfig) Run(ctx context.Context, out io.Writer) error { RemoteClusters: remoteClusters, } + // Provision peer LoadBalancers BEFORE signing TLS certs so the + // external address each cluster publishes ends up in the cert's + // SAN list. Running this at the CLI layer (rather than through + // BootstrapClusterConfiguration.ProvisionLoadBalancers) keeps the + // interactive spinner out of the library and lets programmatic + // callers drive their own progress UI. + if c.ProvisionLoadBalancers { + lbCfg := bootstrap.PeerLoadBalancerConfig{ + ProvisionTimeout: c.LoadBalancerTimeout, + } + fmt.Fprintf(out, "Provisioning peer LoadBalancers for %d clusters...\n", len(remoteClusters)) + for i := range config.RemoteClusters { + cluster := config.RemoteClusters[i] + if cluster.ServiceAddress != "" { + fmt.Fprintf(out, " [%s] using provided address %s (skipping LoadBalancer)\n", + cluster.ContextName, cluster.ServiceAddress) + continue + } + address, err := provisionWithSpinner(ctx, out, cluster, config, lbCfg) + if err != nil { + return fmt.Errorf("provisioning LoadBalancer on %s: %w", cluster.ContextName, err) + } + config.RemoteClusters[i].ServiceAddress = address + } + fmt.Fprintln(out) + } + fmt.Fprintf(out, "Bootstrapping %d clusters...\n", len(remoteClusters)) if err := bootstrap.BootstrapKubernetesClusters(ctx, c.Organization, config); err != nil { @@ -70,9 +100,55 @@ func (c *BootstrapConfig) Run(ctx context.Context, out io.Writer) error { } fmt.Fprintln(out, "Bootstrap complete.") + + if c.ProvisionLoadBalancers { + printPeersBlock(out, config.RemoteClusters) + } return nil } +// provisionWithSpinner calls bootstrap.EnsurePeerLoadBalancer with an +// interactive spinner attached. On success the spinner is replaced with +// a "✓ ->
" line; on failure it's replaced with "✗". +func provisionWithSpinner( + ctx context.Context, + out io.Writer, + cluster bootstrap.RemoteConfiguration, + config bootstrap.BootstrapClusterConfiguration, + lbCfg bootstrap.PeerLoadBalancerConfig, +) (string, error) { + sp := newSpinner(out, fmt.Sprintf("[%s] waiting for LoadBalancer", cluster.ContextName)) + sp.Start() + + address, err := bootstrap.EnsurePeerLoadBalancer(ctx, cluster, config, lbCfg) + if err != nil { + sp.Stop(fmt.Sprintf("✗ [%s] %v", cluster.ContextName, err)) + return "", err + } + sp.Stop(fmt.Sprintf("✓ [%s] %s", cluster.ContextName, address)) + return address, nil +} + +// printPeersBlock writes a ready-to-paste helm peers block using the +// provisioned addresses. The block matches the shape the operator chart +// expects under multicluster.peers so the user can copy it straight into +// their values file. +func printPeersBlock(out io.Writer, clusters []bootstrap.RemoteConfiguration) { + fmt.Fprintln(out) + fmt.Fprintln(out, "Use these as multicluster.peers in your helm values:") + fmt.Fprintln(out) + fmt.Fprintln(out, " multicluster:") + fmt.Fprintln(out, " peers:") + for _, c := range clusters { + name := c.Name + if name == "" { + name = c.ContextName + } + fmt.Fprintf(out, " - name: %s\n", name) + fmt.Fprintf(out, " address: %s\n", c.ServiceAddress) + } +} + func bootstrapCommand() *cobra.Command { cfg := BootstrapConfig{ Organization: "Redpanda", @@ -93,7 +169,14 @@ secrets so that the multicluster operator can communicate across clusters. If --kubeconfig is provided, all contexts in the file are used automatically and --context flags are not required. If both are provided, only the specified -contexts from the kubeconfig file are used.`, +contexts from the kubeconfig file are used. + +--loadbalancer provisions a standalone LoadBalancer Service on each cluster +before signing certificates, waits for the cloud provider to assign an +external address, and bakes that address into the cert SANs. 
This resolves +the deploy/redeploy cycle that otherwise forces a first helm install just +to learn each cluster's external IP/hostname. The resulting peer list is +printed on success for pasting into helm values.`, Example: ` # Bootstrap all clusters from a kubeconfig file rpk k8s multicluster bootstrap \ --kubeconfig /path/to/kubeconfig \ @@ -119,6 +202,12 @@ contexts from the kubeconfig file are used.`, --dns-override cluster-b=cluster-b.example.com \ --namespace redpanda + # Provision LoadBalancer Services and use their addresses for cert SANs + rpk k8s multicluster bootstrap \ + --kubeconfig /path/to/kubeconfig \ + --namespace redpanda \ + --loadbalancer + # Bootstrap only TLS certificates rpk k8s multicluster bootstrap \ --kubeconfig /path/to/kubeconfig \ @@ -135,6 +224,8 @@ contexts from the kubeconfig file are used.`, cmd.Flags().BoolVar(&cfg.TLS, "tls", cfg.TLS, "Bootstrap TLS certificates") cmd.Flags().BoolVar(&cfg.Kubeconfigs, "kubeconfigs", cfg.Kubeconfigs, "Bootstrap kubeconfig secrets") cmd.Flags().BoolVar(&cfg.CreateNS, "create-namespace", cfg.CreateNS, "Create the namespace if it does not exist") + cmd.Flags().BoolVar(&cfg.ProvisionLoadBalancers, "loadbalancer", cfg.ProvisionLoadBalancers, "Provision a standalone LoadBalancer Service per cluster and use its external address for TLS SANs") + cmd.Flags().DurationVar(&cfg.LoadBalancerTimeout, "loadbalancer-timeout", 0, "Per-cluster timeout waiting for a LoadBalancer address (0 = default of 10m)") return cmd } diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls_san.go b/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls_san.go index 799ceb021..fb3edf78e 100644 --- a/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls_san.go +++ b/operator/cmd/rpk-k8s/k8s/multicluster/checks/cluster_tls_san.go @@ -12,28 +12,66 @@ package checks import ( "context" "fmt" + "net" "strings" ) -// TLSSANCheck re-validates TLS certificate SANs after raft status is available. 
-// This runs after both TLSCheck and RaftCheck have populated CheckContext. +// TLSSANCheck re-validates TLS certificate SANs after raft status and the +// Deployment have been inspected. It checks that the cert would accept a +// TLS handshake for the address this cluster publishes to its peers via +// the Deployment's --peer=://:9443 flag — delegating to +// crypto/x509.Certificate.VerifyHostname so DNS wildcards and IP SANs +// are handled the same way Go's TLS stack would handle them during an +// actual peer dial. type TLSSANCheck struct{} func (c *TLSSANCheck) Name() string { return "tls-san" } func (c *TLSSANCheck) Run(_ context.Context, cc *CheckContext) []Result { - if cc.TLSCert == nil || cc.RaftStatus == nil || cc.RaftStatus.Name == "" { + if cc.TLSCert == nil || cc.RaftStatus == nil || cc.RaftStatus.Name == "" || cc.DeployArgs == nil { return nil } - expectedName := cc.RaftStatus.Name - for _, dns := range cc.TLSCert.DNSNames { - if strings.Contains(dns, expectedName) { - return []Result{Pass(c.Name(), fmt.Sprintf("tls.crt SAN matches expected name %q", expectedName))} + + // Find the --peer entry that names this cluster itself. + selfName := cc.RaftStatus.Name + var selfAddr string + for _, p := range ExtractFlagAll(cc.DeployArgs, "--peer") { + // Peer format: name://host-or-ip:port + name, rest, ok := strings.Cut(p, "://") + if !ok || name != selfName { + continue + } + // Strip the port. SplitHostPort handles IPv6 brackets; fall + // back to the raw value if there's no port present. + if host, _, err := net.SplitHostPort(rest); err == nil { + selfAddr = host + } else { + selfAddr = rest } + break } + secretName := "" if cc.TLSSecret != nil { secretName = cc.TLSSecret.Name } - return []Result{Fail(c.Name(), fmt.Sprintf("secret %s: tls.crt SANs %v do not contain expected name %q", secretName, cc.TLSCert.DNSNames, expectedName))} + + if selfAddr == "" { + return []Result{Fail(c.Name(), fmt.Sprintf( + "no --peer=%s://... 
flag on Deployment — cannot determine expected SAN", + selfName, + ))} + } + + if err := cc.TLSCert.VerifyHostname(selfAddr); err != nil { + return []Result{Fail(c.Name(), fmt.Sprintf( + "secret %s: tls.crt does not authenticate peer address %q: %v (DNS SANs: %v, IP SANs: %v)", + secretName, selfAddr, err, cc.TLSCert.DNSNames, cc.TLSCert.IPAddresses, + ))} + } + + return []Result{Pass(c.Name(), fmt.Sprintf( + "tls.crt authenticates peer address %s (DNS SANs: %v, IP SANs: %v)", + selfAddr, cc.TLSCert.DNSNames, cc.TLSCert.IPAddresses, + ))} } diff --git a/operator/cmd/rpk-k8s/k8s/multicluster/spinner.go b/operator/cmd/rpk-k8s/k8s/multicluster/spinner.go new file mode 100644 index 000000000..531e2afaa --- /dev/null +++ b/operator/cmd/rpk-k8s/k8s/multicluster/spinner.go @@ -0,0 +1,110 @@ +// Copyright 2026 Redpanda Data, Inc. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package multicluster + +import ( + "fmt" + "io" + "os" + "sync" + "time" + + "github.com/mattn/go-isatty" +) + +// spinner renders a minimal progress indicator for long-running +// interactive operations (LoadBalancer provisioning). When the output +// isn't a TTY it falls back to plain-text lines so log capture stays +// readable. Safe to UpdateMessage from any goroutine; all writes go +// through the embedded mutex. +type spinner struct { + out io.Writer + isTTY bool + msg string + mu sync.Mutex + stop chan struct{} + done chan struct{} + startedAt time.Time +} + +// newSpinner constructs a spinner bound to out. Call Start to begin +// animating and Stop(finalMsg) to tear it down and emit a final line. 
+func newSpinner(out io.Writer, msg string) *spinner { + s := &spinner{ + out: out, + msg: msg, + stop: make(chan struct{}), + done: make(chan struct{}), + } + // Only animate when out is our own stdout AND stdout is a TTY. + // Every other case (redirected, piped, tests with bytes.Buffer) + // renders plain-text events. + if f, ok := out.(*os.File); ok && f == os.Stdout && isatty.IsTerminal(f.Fd()) { + s.isTTY = true + } + return s +} + +// Start begins animating. Noop-idempotent guard omitted intentionally — +// callers pair Start/Stop once. +func (s *spinner) Start() { + s.startedAt = time.Now() + if !s.isTTY { + fmt.Fprintf(s.out, " %s...\n", s.msg) + close(s.done) + return + } + go s.run() +} + +// UpdateMessage changes the text shown next to the spinner. Effective +// on the next tick (~120ms). +func (s *spinner) UpdateMessage(msg string) { + s.mu.Lock() + s.msg = msg + s.mu.Unlock() +} + +// Stop halts animation, clears the current line, and prints finalMsg +// on its own line. finalMsg should include any success/failure marker. +func (s *spinner) Stop(finalMsg string) { + if s.isTTY { + close(s.stop) + } + <-s.done + if s.isTTY { + fmt.Fprintf(s.out, "\r\033[K%s\n", finalMsg) + } else { + fmt.Fprintf(s.out, " %s\n", finalMsg) + } +} + +func (s *spinner) run() { + defer close(s.done) + // Braille-dots cycle — renders the same in common monospace + // terminals (iTerm2, Alacritty, Ghostty, Terminal.app). 
+ frames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} + ticker := time.NewTicker(120 * time.Millisecond) + defer ticker.Stop() + i := 0 + for { + select { + case <-s.stop: + return + case <-ticker.C: + s.mu.Lock() + msg := s.msg + s.mu.Unlock() + elapsed := time.Since(s.startedAt).Round(time.Second) + fmt.Fprintf(s.out, "\r\033[K%s %s (%s)", frames[i%len(frames)], msg, elapsed) + i++ + } + } +} diff --git a/operator/go.mod b/operator/go.mod index c3256be88..258bd9f74 100644 --- a/operator/go.mod +++ b/operator/go.mod @@ -15,7 +15,9 @@ require ( github.com/go-logr/logr v1.4.3 github.com/google/gofuzz v1.2.0 github.com/imdario/mergo v0.3.16 + github.com/invopop/jsonschema v0.12.0 github.com/json-iterator/go v1.1.12 + github.com/mattn/go-isatty v0.0.20 github.com/moby/moby v24.0.7+incompatible github.com/moby/sys/mountinfo v0.7.2 github.com/onsi/ginkgo/v2 v2.27.2 @@ -47,6 +49,7 @@ require ( github.com/twmb/franz-go/pkg/kadm v1.17.2 github.com/twmb/franz-go/pkg/kmsg v1.12.0 github.com/twmb/franz-go/pkg/sr v1.7.0 + github.com/wk8/go-ordered-map/v2 v2.1.8 go.opentelemetry.io/otel v1.43.0 go.uber.org/zap v1.27.1 golang.org/x/crypto v0.49.0 @@ -209,7 +212,6 @@ require ( github.com/homeport/dyff v1.7.1 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/jsonschema v0.12.0 // indirect github.com/itchyny/gojq v0.12.17 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -246,7 +248,6 @@ require ( github.com/mailru/easyjson v0.9.1 // indirect github.com/mattn/go-ciede2000 v0.0.0-20170301095244-782e8c62fec3 // indirect github.com/mattn/go-colorable v0.1.14 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -303,7 +304,6 @@ require ( 
github.com/twmb/franz-go/pkg/sasl/kerberos v1.1.0 // indirect github.com/twmb/tlscfg v1.2.1 // indirect github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/operator/internal/controller/redpanda/multicluster_controller.go b/operator/internal/controller/redpanda/multicluster_controller.go index 4ba1b827e..0c0da81e3 100644 --- a/operator/internal/controller/redpanda/multicluster_controller.go +++ b/operator/internal/controller/redpanda/multicluster_controller.go @@ -15,6 +15,7 @@ import ( "fmt" "net" "reflect" + "sort" "strings" "time" @@ -427,7 +428,7 @@ func (r *MulticlusterReconciler) fetchInitialState(ctx context.Context, sc *redp // Populate pod endpoints on the cluster struct so the renderer can // produce Endpoints/EndpointSlices for flat network mode. - sccluster.PodEndpoints = pools.PodEndpoints() + sccluster.PodEndpoints = pools.PodEndpoints(ctx) return &stretchClusterReconciliationState{ cluster: sccluster, @@ -1034,6 +1035,12 @@ func (r *MulticlusterReconciler) reconcileLicense(ctx context.Context, state *st inUseFeatures = append(inUseFeatures, feature.Name) } } + // features.Features iterates a map under the hood; sort the + // resulting slice so repeated reconciles produce byte-identical + // status. Without this, the slice order flips on every write, + // bumps resourceVersion, and triggers a watch-driven hot + // reconcile loop on the StretchCluster controller. 
+ sort.Strings(inUseFeatures) status := &redpandav1alpha2.RedpandaLicenseStatus{ InUseFeatures: inUseFeatures, diff --git a/operator/internal/controller/redpanda/redpanda_controller.go b/operator/internal/controller/redpanda/redpanda_controller.go index 267d3130c..35e808dcc 100644 --- a/operator/internal/controller/redpanda/redpanda_controller.go +++ b/operator/internal/controller/redpanda/redpanda_controller.go @@ -14,6 +14,7 @@ import ( "bytes" "context" "fmt" + "sort" "strings" "time" @@ -684,6 +685,7 @@ func (r *RedpandaReconciler) reconcileLicense(ctx context.Context, state *cluste inUseFeatures = append(inUseFeatures, feature.Name) } } + sort.Strings(inUseFeatures) status := &redpandav1alpha2.RedpandaLicenseStatus{ InUseFeatures: inUseFeatures, diff --git a/operator/internal/lifecycle/client.go b/operator/internal/lifecycle/client.go index 765ee78fa..f75158d09 100644 --- a/operator/internal/lifecycle/client.go +++ b/operator/internal/lifecycle/client.go @@ -378,19 +378,35 @@ func (r *ResourceClient[T, U]) FetchExistingAndDesiredPools(ctx context.Context, pools := NewPoolTracker(cluster.GetGeneration()) logger := log.FromContext(ctx) for _, clusterName := range r.clusterList(cluster) { + canonical := CanonicalClusterName(clusterName, r.manager.GetLocalClusterName) existingPools, err := r.fetchExistingPools(ctx, cluster, clusterName) if err != nil { return nil, fmt.Errorf("fetching existing pools: %w", err) } + totalPods := 0 + for _, ep := range existingPools { + totalPods += len(ep.pods) + } + logger.V(log.TraceLevel).Info( + "fetched existing pools", + "cluster", canonical, + "pools", len(existingPools), + "totalPods", totalPods, + ) desired, err := r.nodePoolRenderer.Render(ctx, cluster, clusterName) if err != nil { if clusterName != mcmanager.LocalCluster { - logger.Info("remote cluster unreachable during pool render, skipping", "cluster", CanonicalClusterName(clusterName, r.manager.GetLocalClusterName), "error", err) + logger.Info("remote cluster 
unreachable during pool render, skipping", "cluster", canonical, "error", err) continue } return nil, fmt.Errorf("constructing desired pools: %w", err) } + logger.V(log.TraceLevel).Info( + "rendered desired pools", + "cluster", canonical, + "desiredCount", len(desired), + ) wrapped := []*MulticlusterStatefulSet{} for _, set := range desired { @@ -624,33 +640,42 @@ func (r *ResourceClient[T, U]) DeleteAll(ctx context.Context, owner U) (bool, er // reconciliation is not blocked by an unreachable peer. // Errors on the local cluster are always propagated. func (r *ResourceClient[T, U]) fetchExistingPools(ctx context.Context, cluster U, clusterName string) ([]*poolWithOrdinals, error) { - logger := log.FromContext(ctx) + logger := log.FromContext(ctx).WithName("fetchExistingPools") + canonical := CanonicalClusterName(clusterName, r.manager.GetLocalClusterName) ctl, err := r.ctl(ctx, clusterName) if err != nil { if clusterName != mcmanager.LocalCluster { - logger.Info("remote cluster unreachable in fetchExistingPools, treating as empty", "cluster", CanonicalClusterName(clusterName, r.manager.GetLocalClusterName), "error", err) + logger.Info("remote cluster unreachable in fetchExistingPools, treating as empty", "cluster", canonical, "error", err) return nil, nil } return nil, err } - sets, err := kube.List[appsv1.StatefulSetList](ctx, ctl, cluster.GetNamespace(), client.MatchingLabels(r.ownershipResolver.GetOwnerLabels(cluster))) + ownerLabels := r.ownershipResolver.GetOwnerLabels(cluster) + sets, err := kube.List[appsv1.StatefulSetList](ctx, ctl, cluster.GetNamespace(), client.MatchingLabels(ownerLabels)) if err != nil { if clusterName != mcmanager.LocalCluster { - logger.Info("could not list StatefulSets on remote cluster in fetchExistingPools, treating as empty", "cluster", CanonicalClusterName(clusterName, r.manager.GetLocalClusterName), "error", err) + logger.Info("could not list StatefulSets on remote cluster in fetchExistingPools, treating as empty", "cluster", 
canonical, "error", err) return nil, nil } return nil, errors.Wrapf(err, "listing StatefulSets") } + logger.V(log.TraceLevel).Info( + "listed StatefulSets", + "cluster", canonical, + "ownerLabels", ownerLabels, + "setsFound", len(sets.Items), + ) expectedOwner, err := r.ownershipResolver.ResolveOwnerReference(ctx, cluster, clusterName, ctl) if err != nil { // If the cluster object doesn't exist on this cluster yet (e.g. during // initial rollout), there can't be any owned StatefulSets either. if apierrors.IsNotFound(err) { + logger.V(log.TraceLevel).Info("owner reference not found, returning empty pool list", "cluster", canonical) return nil, nil } if clusterName != mcmanager.LocalCluster { - logger.Info("could not resolve owner reference on remote cluster in fetchExistingPools, treating as empty", "cluster", CanonicalClusterName(clusterName, r.manager.GetLocalClusterName), "error", err) + logger.Info("could not resolve owner reference on remote cluster in fetchExistingPools, treating as empty", "cluster", canonical, "error", err) return nil, nil } return nil, errors.Wrapf(err, "resolving owner reference") @@ -658,6 +683,7 @@ func (r *ResourceClient[T, U]) fetchExistingPools(ctx context.Context, cluster U // swap cluster to correct one cluster = expectedOwner + totalBeforeFilter := len(sets.Items) i := 0 for _, set := range sets.Items { isOwned := slices.ContainsFunc(set.OwnerReferences, func(ref metav1.OwnerReference) bool { @@ -669,6 +695,13 @@ func (r *ResourceClient[T, U]) fetchExistingPools(ctx context.Context, cluster U } } sets.Items = sets.Items[:i] + logger.V(log.TraceLevel).Info( + "filtered StatefulSets by owner UID", + "cluster", canonical, + "expectedOwnerUID", cluster.GetUID(), + "beforeFilter", totalBeforeFilter, + "afterFilter", len(sets.Items), + ) existing := []*poolWithOrdinals{} for _, statefulSet := range sets.Items { diff --git a/operator/internal/lifecycle/pool.go b/operator/internal/lifecycle/pool.go index 84f389c29..e9e1c8bac 100644 --- 
a/operator/internal/lifecycle/pool.go +++ b/operator/internal/lifecycle/pool.go @@ -484,11 +484,26 @@ type PodEndpoint struct { // PodEndpoints returns the IP and cluster information for all existing pods // across all clusters. This is used by the controller to manage EndpointSlices // for cross-cluster per-pod Services in flat network mode. -func (p *PoolTracker) PodEndpoints() []PodEndpoint { +func (p *PoolTracker) PodEndpoints(ctx context.Context) []PodEndpoint { + logger := log.FromContext(ctx).WithName("PoolTracker.PodEndpoints") var endpoints []PodEndpoint + // Per-cluster tallies so we can tell at a glance which cluster is + // contributing zero pods to the rendered state — the usual cause of + // flat-mode cross-cluster Endpoints churn. + clusterPodCount := map[string]int{} + clusterReadyCount := map[string]int{} + clusterNoIPCount := map[string]int{} for _, pool := range p.existingPools { + clusterPodCount[pool.set.clusterName] += len(pool.pods) for _, pod := range pool.pods { if pod.pod.Status.PodIP == "" { + clusterNoIPCount[pool.set.clusterName]++ + logger.V(log.TraceLevel).Info( + "pod has no PodIP yet, skipping", + "cluster", pool.set.clusterName, + "pod", pod.pod.Name, + "phase", pod.pod.Status.Phase, + ) continue } ready := false @@ -498,6 +513,9 @@ func (p *PoolTracker) PodEndpoints() []PodEndpoint { break } } + if ready { + clusterReadyCount[pool.set.clusterName]++ + } endpoints = append(endpoints, PodEndpoint{ Name: pod.pod.Name, IP: pod.pod.Status.PodIP, @@ -506,6 +524,14 @@ func (p *PoolTracker) PodEndpoints() []PodEndpoint { }) } } + logger.V(log.TraceLevel).Info( + "gathered pod endpoints", + "total", len(endpoints), + "existingPools", len(p.existingPools), + "clusterPodCount", clusterPodCount, + "clusterReadyCount", clusterReadyCount, + "clusterNoIPCount", clusterNoIPCount, + ) return endpoints } diff --git a/operator/internal/lifecycle/stretch_cluster_ownership.go b/operator/internal/lifecycle/stretch_cluster_ownership.go index 
d4a5c7459..a9090ed0b 100644 --- a/operator/internal/lifecycle/stretch_cluster_ownership.go +++ b/operator/internal/lifecycle/stretch_cluster_ownership.go @@ -87,6 +87,15 @@ func (m *StretchClusterOwnershipResolver) ResolveOwnerReference(ctx context.Cont newOwner := NewStretchClusterWithPools(sc.DeepCopy(), owner.clusters, owner.NodePools...) newOwner.Kind = "StretchCluster" newOwner.APIVersion = redpandav1alpha2.GroupVersion.String() + // Preserve the cross-cluster pod endpoint view computed by the + // caller. NewStretchClusterWithPools rebuilds the wrapper around + // the freshly-fetched StretchCluster CR and only carries forward + // NodePools; without re-plumbing PodEndpoints here, SyncAll + // iterations that target peer clusters would render flat-mode + // per-pod Endpoints with an empty IP list, causing the Syncer + // to garbage collect the cross-cluster Endpoints/EndpointSlices + // on every reconcile cycle. + newOwner.PodEndpoints = owner.PodEndpoints return newOwner, nil } // Ensure GVK is set on the returned owner so that downstream callers diff --git a/operator/internal/lifecycle/stretch_cluster_simple_resources.go b/operator/internal/lifecycle/stretch_cluster_simple_resources.go index 5619e4a17..e55e4e91c 100644 --- a/operator/internal/lifecycle/stretch_cluster_simple_resources.go +++ b/operator/internal/lifecycle/stretch_cluster_simple_resources.go @@ -73,6 +73,9 @@ func (m *StretchClusterSimpleResourceRenderer) Render(ctx context.Context, clust if err != nil { return nil, errors.WithStack(err) } + // Thread the reconcile context so render helpers can emit contextual + // (trace/debug) logs tied to the invoking reconcile ID. + state.WithContext(ctx) // Pass pod endpoints for flat network Endpoints/EndpointSlice rendering. 
if len(cluster.PodEndpoints) > 0 { diff --git a/operator/internal/statuses/zz_generated_status.go b/operator/internal/statuses/zz_generated_status.go index e3fece7d0..c6553333b 100644 --- a/operator/internal/statuses/zz_generated_status.go +++ b/operator/internal/statuses/zz_generated_status.go @@ -712,9 +712,9 @@ func (s *ClusterStatus) StatusConditionConfigs(o client.Object) []*applymetav1.C func (s *ClusterStatus) getRateLimit(conditionType string) time.Duration { switch conditionType { case ClusterLicenseValid: - return time.Minute + return 5 * time.Minute case ClusterConfigurationApplied: - return time.Minute + return 5 * time.Minute } return 0 } @@ -1128,9 +1128,9 @@ func (s *StretchClusterStatus) StatusConditionConfigs(o client.Object) []*applym func (s *StretchClusterStatus) getRateLimit(conditionType string) time.Duration { switch conditionType { case StretchClusterLicenseValid: - return time.Minute + return 5 * time.Minute case StretchClusterConfigurationApplied: - return time.Minute + return 5 * time.Minute } return 0 } diff --git a/operator/multicluster/endpoints.go b/operator/multicluster/endpoints.go index 88dfd6f6d..1061b411a 100644 --- a/operator/multicluster/endpoints.go +++ b/operator/multicluster/endpoints.go @@ -11,6 +11,7 @@ package multicluster import ( "github.com/redpanda-data/common-go/kube" + "github.com/redpanda-data/common-go/otelutil/log" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,12 +23,26 @@ import ( // Endpoints object (for CoreDNS) and an EndpointSlice (for kube-proxy/mesh) // pointing to the actual pod IP. 
func perPodEndpoints(state *RenderState) []kube.Object { + logger := log.FromContext(state.Context()).WithName("perPodEndpoints") if !state.Spec().Networking.IsFlatNetwork() { + logger.V(log.TraceLevel).Info("not flat-network, returning no endpoints") return nil } if len(state.podEndpoints) == 0 { + // This is the failure mode that causes the cross-cluster + // Endpoints/EndpointSlices to be GC'd by the Syncer — the + // renderer produced nothing, so the Syncer sees existing objects + // as "not in desired state" and deletes them. If we see this log + // fire at reconcile boundaries, the upstream PoolTracker needs + // investigation. + logger.Info("flat-mode: podEndpoints is empty, rendering no per-pod Endpoints (Syncer will GC existing)") return nil } + logger.V(log.TraceLevel).Info( + "flat-mode: rendering per-pod Endpoints", + "podEndpointsCount", len(state.podEndpoints), + "pools", len(state.pools), + ) spec := state.Spec() ports := perPodServicePorts(spec) @@ -47,12 +62,17 @@ func perPodEndpoints(state *RenderState) []kube.Object { } } if !found { + logger.V(log.TraceLevel).Info( + "no PodEndpoint entry for per-pod service, skipping (its Endpoints will be GC'd)", + "svcName", svcName, + ) continue } objects = append(objects, endpointsForService(state, svcName, ep, ports)...) } } + logger.V(log.TraceLevel).Info("rendered per-pod endpoints", "objectCount", len(objects)) return objects } diff --git a/operator/multicluster/render_state.go b/operator/multicluster/render_state.go index c97e27341..49f835fad 100644 --- a/operator/multicluster/render_state.go +++ b/operator/multicluster/render_state.go @@ -51,12 +51,34 @@ type RenderState struct { client *kube.Ctl + // ctx carries reconciliation-scoped values (logger, trace span) so that + // render helpers can emit structured logs bound to the reconcile that + // invoked them. Set via WithContext(); nil is fine for unit tests and + // yields a background-context logger. 
+ ctx context.Context + seedServers []string bootstrapUserSecret *corev1.Secret statefulSetPodLabels map[string]string statefulSetSelector map[string]string } +// WithContext stores the reconciliation context so render helpers can emit +// contextual logs. Safe to call after NewRenderState. +func (r *RenderState) WithContext(ctx context.Context) *RenderState { + r.ctx = ctx + return r +} + +// Context returns the stored reconciliation context, falling back to a +// background context if WithContext was never called (e.g. in tests). +func (r *RenderState) Context() context.Context { + if r.ctx == nil { + return context.Background() + } + return r.ctx +} + func seedServersFromNodePools(cluster *redpandav1alpha2.StretchCluster, pools []*redpandav1alpha2.NodePool) []string { // In MCS mode, use the clusterset.local domain so DNS resolves via the // MCS controller across cluster boundaries. diff --git a/operator/statuses.yaml b/operator/statuses.yaml index 647ca6a6f..027aa13ac 100644 --- a/operator/statuses.yaml +++ b/operator/statuses.yaml @@ -98,7 +98,7 @@ This reason is used with the "Healthy" condition when it evaluates to False because a cluster's health endpoint says the cluster is not healthy. - name: LicenseValid - rateLimit: time.Minute + rateLimit: 5 * time.Minute printerColumns: - name: License message: true @@ -146,7 +146,7 @@ This condition defaults to "False" with a reason of "NotReconciled" and must be set by a controller when it subsequently reconciles a cluster. - rateLimit: time.Minute + rateLimit: 5 * time.Minute reasons: - name: Applied message: Cluster configuration successfully applied @@ -272,7 +272,7 @@ This reason is used with the "Healthy" condition when it evaluates to False because a cluster's health endpoint says the cluster is not healthy. 
- name: LicenseValid - rateLimit: time.Minute + rateLimit: 5 * time.Minute printerColumns: - name: License message: true @@ -320,7 +320,7 @@ This condition defaults to "False" with a reason of "NotReconciled" and must be set by a controller when it subsequently reconciles a cluster. - rateLimit: time.Minute + rateLimit: 5 * time.Minute reasons: - name: Applied message: Cluster configuration successfully applied diff --git a/pkg/multicluster/bootstrap/bootstrapper.go b/pkg/multicluster/bootstrap/bootstrapper.go index b6c00521e..e28d10c5f 100644 --- a/pkg/multicluster/bootstrap/bootstrapper.go +++ b/pkg/multicluster/bootstrap/bootstrapper.go @@ -11,6 +11,7 @@ package bootstrap import ( "context" + "fmt" "strings" "k8s.io/client-go/rest" @@ -74,12 +75,43 @@ type BootstrapClusterConfiguration struct { BootstrapTLS bool BootstrapKubeconfigs bool EnsureNamespace bool - OperatorNamespace string - ServiceName string - RemoteClusters []RemoteConfiguration + // ProvisionLoadBalancers, if true, creates a standalone + // LoadBalancer Service per cluster BEFORE TLS certificates are + // signed, waits for an external address, and uses that address as + // each cluster's ServiceAddress for cert signing. Solves the + // deploy/redeploy cycle that otherwise forces callers to install + // the operator twice: once to provision the LB, then again with + // the now-known peer addresses baked into --peer flags and + // cert SANs. + ProvisionLoadBalancers bool + // LoadBalancer tunes the LoadBalancer provisioning behavior when + // ProvisionLoadBalancers is true. Ignored otherwise. 
+ LoadBalancer PeerLoadBalancerConfig + OperatorNamespace string + ServiceName string + RemoteClusters []RemoteConfiguration } func BootstrapKubernetesClusters(ctx context.Context, organization string, configuration BootstrapClusterConfiguration) error { + if configuration.ProvisionLoadBalancers { + // Provision first and mutate in place so that downstream cert + // signing and the caller (which reads configuration back to + // emit the helm peers block) both see the resolved addresses. + for i := range configuration.RemoteClusters { + cluster := configuration.RemoteClusters[i] + if cluster.ServiceAddress != "" { + // Caller already supplied an address (e.g. via + // --dns-override) — trust it, don't reprovision. + continue + } + address, err := EnsurePeerLoadBalancer(ctx, cluster, configuration, configuration.LoadBalancer) + if err != nil { + return fmt.Errorf("provisioning peer LoadBalancer for %s: %w", cluster.ContextName, err) + } + configuration.RemoteClusters[i].ServiceAddress = address + } + } + caCertificate, err := GenerateCA(organization, "Root CA", nil) if err != nil { return err diff --git a/pkg/multicluster/bootstrap/loadbalancer.go b/pkg/multicluster/bootstrap/loadbalancer.go new file mode 100644 index 000000000..75f16da9e --- /dev/null +++ b/pkg/multicluster/bootstrap/loadbalancer.go @@ -0,0 +1,224 @@ +// Copyright 2026 Redpanda Data, Inc. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.md +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0 + +package bootstrap + +import ( + "context" + "errors" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + // PeerLoadBalancerPort is the port peers dial on another cluster's + // operator for raft gRPC. + PeerLoadBalancerPort = 9443 + + // peerLoadBalancerSuffix is the suffix appended to the operator's helm + // fullname to form the bootstrap-managed peer Service name. Kept + // distinct from the helm chart's own Service (which uses the + // fullname unsuffixed) so the two don't collide when + // multicluster.service.enabled is flipped on/off. + peerLoadBalancerSuffix = "-multicluster-peer" + + // defaultLoadBalancerProvisionTimeout is how long we wait for the + // cloud provider to assign an external IP/hostname to a newly + // created LoadBalancer Service before giving up. + defaultLoadBalancerProvisionTimeout = 10 * time.Minute + + // defaultLoadBalancerPollInterval is how frequently we re-read the + // Service to check for a provisioned ingress. + defaultLoadBalancerPollInterval = 5 * time.Second +) + +// LoadBalancerLabelKey marks Services created by the bootstrap so callers +// can identify them without inferring from the name. +const LoadBalancerLabelKey = "operator.redpanda.com/bootstrap-managed" + +// PeerLoadBalancerConfig tunes EnsurePeerLoadBalancer. Zero values use the +// defaults above. 
+type PeerLoadBalancerConfig struct { + // ProvisionTimeout bounds how long to wait for the cloud provider + // to assign an ingress address. + ProvisionTimeout time.Duration + + // PollInterval controls how often the Service is re-read while + // waiting for provisioning. + PollInterval time.Duration + + // Annotations is merged onto the Service at creation time (e.g. + // "service.beta.kubernetes.io/aws-load-balancer-type: nlb" for + // EKS NLB, or an internal-LB annotation for cloud-native private + // peering). Ignored when a Service already exists — we don't + // reshape an already-provisioned LB. + Annotations map[string]string +} + +// peerLoadBalancerName returns the Service name the bootstrap uses for a +// cluster's peer-facing LoadBalancer. Prefers the per-cluster helm fullname +// (cluster.Name) and falls back to the shared ServiceName so the result is +// stable regardless of whether the caller passes a per-cluster override. +func peerLoadBalancerName(cluster RemoteConfiguration, config BootstrapClusterConfiguration) string { + base := cluster.Name + if base == "" { + base = config.ServiceName + } + return base + peerLoadBalancerSuffix +} + +// EnsurePeerLoadBalancer creates (or reuses) a LoadBalancer Service that +// fronts the operator pod on a single remote cluster and waits for the +// cloud provider to assign an external address. The returned address is a +// hostname (AWS ELB) or an IP literal (Azure/GCP) — whichever the provider +// publishes. Callers feed this back into the operator's --peer list so +// peers dial a stable endpoint. +// +// The Service is labelled with LoadBalancerLabelKey so it's distinguishable +// from the helm-managed operator Service. The selector matches the +// operator pods that a helm install with release name == cluster.Name will +// later create, so the LB is provisioned before the pods exist and starts +// forwarding as soon as they come up. 
+// +// Idempotent — repeated calls with the same configuration reuse the +// existing Service and re-read the already-provisioned address. +func EnsurePeerLoadBalancer( + ctx context.Context, + cluster RemoteConfiguration, + config BootstrapClusterConfiguration, + lbConfig PeerLoadBalancerConfig, +) (string, error) { + restConfig, err := cluster.Config() + if err != nil { + return "", fmt.Errorf("getting REST config for %s: %w", cluster.ContextName, err) + } + + cl, err := client.New(restConfig, client.Options{}) + if err != nil { + return "", fmt.Errorf("initializing client for %s: %w", cluster.ContextName, err) + } + + if config.EnsureNamespace { + if err := EnsureNamespace(ctx, config.OperatorNamespace, cl); err != nil { + return "", fmt.Errorf("ensuring namespace exists on %s: %w", cluster.ContextName, err) + } + } + + instance := cluster.Name + if instance == "" { + instance = config.ServiceName + } + + name := peerLoadBalancerName(cluster, config) + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: config.OperatorNamespace, + }, + } + + if _, err := controllerutil.CreateOrUpdate(ctx, cl, svc, func() error { + // Labels are refreshed on every call so we don't orphan + // Services that were created before the label was introduced. + if svc.Labels == nil { + svc.Labels = map[string]string{} + } + svc.Labels[LoadBalancerLabelKey] = "true" + svc.Labels["app.kubernetes.io/name"] = config.ServiceName + svc.Labels["app.kubernetes.io/instance"] = instance + + // Only seed annotations on create — once the LB exists, the + // cloud provider may have rewritten annotations (e.g. AWS + // stamps the ELB ID), and we don't want to clobber those on + // a re-run. 
+		if len(svc.Annotations) == 0 && len(lbConfig.Annotations) > 0 {
+			svc.Annotations = map[string]string{}
+			for k, v := range lbConfig.Annotations {
+				svc.Annotations[k] = v
+			}
+		}
+
+		svc.Spec.Type = corev1.ServiceTypeLoadBalancer
+		svc.Spec.Selector = map[string]string{
+			"app.kubernetes.io/name":     config.ServiceName,
+			"app.kubernetes.io/instance": instance,
+		}
+		// Peer dials happen over TLS before pods report Ready (health
+		// checks run on the same port), so publish not-ready
+		// addresses: the LB becomes addressable as soon as the pod
+		// attaches rather than after readiness.
+		svc.Spec.PublishNotReadyAddresses = true
+
+		// Reconcile the raft port. Preserve NodePort when one is
+		// already assigned so we don't cause the cloud provider to
+		// re-provision the LB on every run.
+		var nodePort int32
+		for _, p := range svc.Spec.Ports {
+			if p.Name == "raft" {
+				nodePort = p.NodePort
+				break
+			}
+		}
+		svc.Spec.Ports = []corev1.ServicePort{{
+			Name:       "raft",
+			Protocol:   corev1.ProtocolTCP,
+			Port:       PeerLoadBalancerPort,
+			TargetPort: intstr.FromInt(PeerLoadBalancerPort),
+			NodePort:   nodePort,
+		}}
+		return nil
+	}); err != nil {
+		return "", fmt.Errorf("creating/updating Service %s/%s on %s: %w",
+			config.OperatorNamespace, name, cluster.ContextName, err)
+	}
+
+	provisionTimeout := lbConfig.ProvisionTimeout
+	if provisionTimeout == 0 {
+		provisionTimeout = defaultLoadBalancerProvisionTimeout
+	}
+	pollInterval := lbConfig.PollInterval
+	if pollInterval == 0 {
+		pollInterval = defaultLoadBalancerPollInterval
+	}
+
+	var address string
+	err = wait.PollUntilContextTimeout(ctx, pollInterval, provisionTimeout, true, func(ctx context.Context) (bool, error) {
+		var current corev1.Service
+		if err := cl.Get(ctx, client.ObjectKeyFromObject(svc), &current); err != nil {
+			return false, fmt.Errorf("reading Service %s/%s: %w",
+				config.OperatorNamespace, name, err)
+		}
+		for _, ing := range current.Status.LoadBalancer.Ingress {
+			if ing.Hostname != "" {
+				address = ing.Hostname
+				return true, nil
+			}
+			if ing.IP != "" {
+				address = ing.IP
+				return true, nil
+			}
+		}
+		return false, nil
+	})
+	if err != nil {
+		return "", fmt.Errorf("waiting for LoadBalancer ingress on %s (Service %s/%s): %w",
+			cluster.ContextName, config.OperatorNamespace, name, err)
+	}
+	if address == "" {
+		return "", errors.New("LoadBalancer reported ready but published no address")
+	}
+	return address, nil
+}