diff --git a/bootstrap/api/v1beta1/condition_consts.go b/bootstrap/api/v1beta1/condition_consts.go index 15c7c6c5..3000fd21 100644 --- a/bootstrap/api/v1beta1/condition_consts.go +++ b/bootstrap/api/v1beta1/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the KThreesConfig object. @@ -26,7 +26,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. - DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1beta1.ConditionType = "DataSecretAvailable" // WaitingForClusterInfrastructureReason (Severity=Info) document a bootstrap secret generation process // waiting for the cluster infrastructure to be ready. @@ -43,7 +43,7 @@ const ( // Deprecated: This has been deprecated in v1beta1 and will be removed in a future version. // Switch to WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1beta1` // package. - WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason + WaitingForControlPlaneAvailableReason = clusterv1beta1.WaitingForControlPlaneAvailableReason // DataSecretGenerationFailedReason (Severity=Warning) documents a KThreesConfig controller detecting // an error while generating a data secret; those kind of errors are usually due to misconfigurations @@ -58,7 +58,7 @@ const ( // machine, if the cluster is not using a control plane ref object, if the certificates are not provided // by the users. // IMPORTANT: This condition won't be re-created after clusterctl move. 
- CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1beta1.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KThreesConfig controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller diff --git a/bootstrap/api/v1beta1/kthreesconfig_types.go b/bootstrap/api/v1beta1/kthreesconfig_types.go index f9436f37..14cd46c0 100644 --- a/bootstrap/api/v1beta1/kthreesconfig_types.go +++ b/bootstrap/api/v1beta1/kthreesconfig_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -173,7 +173,7 @@ type KThreesConfigStatus struct { // Conditions defines current service state of the KThreesConfig. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -188,11 +188,11 @@ type KThreesConfig struct { Status KThreesConfigStatus `json:"status,omitempty"` } -func (c *KThreesConfig) GetConditions() clusterv1.Conditions { +func (c *KThreesConfig) GetConditions() clusterv1beta1.Conditions { return c.Status.Conditions } -func (c *KThreesConfig) SetConditions(conditions clusterv1.Conditions) { +func (c *KThreesConfig) SetConditions(conditions clusterv1beta1.Conditions) { c.Status.Conditions = conditions } diff --git a/bootstrap/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/api/v1beta1/zz_generated.deepcopy.go index 5b7c52d6..c76562bf 100644 --- a/bootstrap/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -204,7 +204,7 @@ func (in *KThreesConfigStatus) DeepCopyInto(out *KThreesConfigStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/bootstrap/api/v1beta2/condition_consts.go b/bootstrap/api/v1beta2/condition_consts.go index 187eeda0..e7b66629 100644 --- a/bootstrap/api/v1beta2/condition_consts.go +++ b/bootstrap/api/v1beta2/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the KThreesConfig object. 
@@ -26,7 +26,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. - DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1beta1.ConditionType = "DataSecretAvailable" // WaitingForClusterInfrastructureReason (Severity=Info) document a bootstrap secret generation process // waiting for the cluster infrastructure to be ready. @@ -43,7 +43,7 @@ const ( // Deprecated: This has been deprecated in v1beta1 and will be removed in a future version. // Switch to WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1beta1` // package. - WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason + WaitingForControlPlaneAvailableReason = clusterv1beta1.WaitingForControlPlaneAvailableReason // DataSecretGenerationFailedReason (Severity=Warning) documents a KThreesConfig controller detecting // an error while generating a data secret; those kind of errors are usually due to misconfigurations @@ -58,7 +58,7 @@ const ( // machine, if the cluster is not using a control plane ref object, if the certificates are not provided // by the users. // IMPORTANT: This condition won't be re-created after clusterctl move. 
- CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1beta1.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KThreesConfig controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller diff --git a/bootstrap/api/v1beta2/kthreesconfig_types.go b/bootstrap/api/v1beta2/kthreesconfig_types.go index 9fb711d9..166bb1a7 100644 --- a/bootstrap/api/v1beta2/kthreesconfig_types.go +++ b/bootstrap/api/v1beta2/kthreesconfig_types.go @@ -18,7 +18,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -195,7 +195,36 @@ type KThreesConfigStatus struct { // Conditions defines current service state of the KThreesConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` + + // v1beta2 groups all the fields that will be added or modified in KThreesConfig's status with the V1Beta2 version. + // +optional + V1Beta2 *KThreesConfigV1Beta2Status `json:"v1beta2,omitempty"` + + // initialization provides observations of the KThreesConfig initialization process. + // NOTE: Fields in this struct are part of the Cluster API contract and are used to orchestrate initial Machine provisioning. + // +optional + Initialization KThreesConfigInitializationStatus `json:"initialization,omitempty,omitzero"` +} + +// KThreesConfigInitializationStatus provides observations of the KThreesConfig initialization process. +type KThreesConfigInitializationStatus struct { + // dataSecretCreated is true when the Machine's bootstrap secret is created. 
+ // NOTE: this field is part of the Cluster API contract, and it is used to orchestrate initial Machine provisioning. + // +optional + DataSecretCreated *bool `json:"dataSecretCreated,omitempty"` +} + +// KThreesConfigV1Beta2Status groups all the fields that will be added or modified in KThreesConfigStatus with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. +type KThreesConfigV1Beta2Status struct { + // conditions represents the observations of a KThreesConfig's current state. + // Known condition types are Ready, Paused. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -211,14 +240,30 @@ type KThreesConfig struct { Status KThreesConfigStatus `json:"status,omitempty"` } -func (c *KThreesConfig) GetConditions() clusterv1.Conditions { +func (c *KThreesConfig) GetConditions() clusterv1beta1.Conditions { return c.Status.Conditions } -func (c *KThreesConfig) SetConditions(conditions clusterv1.Conditions) { +func (c *KThreesConfig) SetConditions(conditions clusterv1beta1.Conditions) { c.Status.Conditions = conditions } +// GetV1Beta2Conditions returns the set of conditions for this object. +func (c *KThreesConfig) GetV1Beta2Conditions() []metav1.Condition { + if c.Status.V1Beta2 == nil { + return nil + } + return c.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for an API object. +func (c *KThreesConfig) SetV1Beta2Conditions(conditions []metav1.Condition) { + if c.Status.V1Beta2 == nil { + c.Status.V1Beta2 = &KThreesConfigV1Beta2Status{} + } + c.Status.V1Beta2.Conditions = conditions +} + // +kubebuilder:object:root=true // KThreesConfigList contains a list of KThreesConfig. 
diff --git a/bootstrap/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/api/v1beta2/zz_generated.deepcopy.go index d4bc8ca7..a9ecda51 100644 --- a/bootstrap/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/api/v1beta2/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta2 import ( "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/controllers/kthreesconfig_controller.go b/bootstrap/controllers/kthreesconfig_controller.go index 772db147..845d4d57 100644 --- a/bootstrap/controllers/kthreesconfig_controller.go +++ b/bootstrap/controllers/kthreesconfig_controller.go @@ -32,11 +32,13 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" @@ -153,8 +155,8 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques defer func() { // always update the readyCondition; the summary is represented using the "1 of x completed" notation. 
- conditions.SetSummary(config, - conditions.WithConditions( + v1beta1conditions.SetSummary(config, + v1beta1conditions.WithConditions( bootstrapv1.DataSecretAvailableCondition, bootstrapv1.CertificatesAvailableCondition, ), @@ -175,16 +177,16 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques switch { // Wait for the infrastructure to be ready. - case !cluster.Status.InfrastructureReady: + case !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false): log.Info("Cluster infrastructure is not ready, waiting") - conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil // Reconcile status for machines that already have a secret reference, but our status isn't up to date. // This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects. - case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil): + case configOwner.DataSecretName() != nil && (!ptr.Deref(config.Status.Initialization.DataSecretCreated, false) || config.Status.DataSecretName == nil): config.Status.Ready = true config.Status.DataSecretName = configOwner.DataSecretName() - conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition) return ctrl.Result{}, nil // Status is ready means a config has been generated. case config.Status.Ready: @@ -193,7 +195,7 @@ func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
- if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { return r.handleClusterNotInitialized(ctx, scope) } @@ -225,7 +227,7 @@ func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *S tokn, err := token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return err } @@ -247,14 +249,14 @@ func (r *KThreesConfigReconciler) joinControlplane(ctx context.Context, scope *S files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return err } if scope.Config.Spec.IsEtcdEmbedded() { etcdProxyFile, err := r.resolveEtcdProxyFile(scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return fmt.Errorf("failed to resolve etcd proxy file: %w", err) } @@ -298,7 +300,7 @@ func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) tokn, err := 
token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return err } @@ -318,7 +320,7 @@ func (r *KThreesConfigReconciler) joinWorker(ctx context.Context, scope *Scope) files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return err } @@ -421,8 +423,8 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex // initialize the DataSecretAvailableCondition if missing. 
// this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing // using the DataSecretGeneratedFailedReason - if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + if v1beta1conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason { + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1beta1.WaitingForControlPlaneAvailableReason, clusterv1beta1.ConditionSeverityInfo, "") } // if it's NOT a control plane machine, requeue @@ -465,10 +467,10 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex *metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("KThreesConfig")), ) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } - conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition) token, err := token.Lookup(ctx, r.Client, client.ObjectKeyFromObject(scope.Cluster)) if err != nil { @@ -497,14 +499,14 @@ func (r *KThreesConfigReconciler) handleClusterNotInitialized(ctx context.Contex files, err := r.resolveFiles(ctx, scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, 
clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } if scope.Config.Spec.IsEtcdEmbedded() { etcdProxyFile, err := r.resolveEtcdProxyFile(scope.Config) if err != nil { - conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, fmt.Errorf("failed to resolve etcd proxy file: %w", err) } files = append(files, *etcdProxyFile) @@ -590,7 +592,7 @@ func (r *KThreesConfigReconciler) storeBootstrapData(ctx context.Context, scope scope.Config.Status.DataSecretName = ptr.To[string](secret.Name) scope.Config.Status.Ready = true - conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition) return nil } @@ -598,8 +600,8 @@ func (r *KThreesConfigReconciler) reconcileTopLevelObjectSettings(_ *clusterv1.C log := r.Log.WithValues("kthreesconfig", fmt.Sprintf("%s/%s", config.Namespace, config.Name)) // If there are no Version settings defined in Config, use Version from machine, if defined - if config.Spec.Version == "" && machine.Spec.Version != nil { - config.Spec.Version = *machine.Spec.Version + if config.Spec.Version == "" && machine.Spec.Version != "" { + config.Spec.Version = machine.Spec.Version log.Info("Altering Config", "Version", config.Spec.Version) } } diff --git a/bootstrap/main.go b/bootstrap/main.go index 29aa35ba..17353bff 100644 --- a/bootstrap/main.go +++ b/bootstrap/main.go @@ -24,8 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1beta1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -44,8 +43,7 @@ var ( func init() { _ = clientgoscheme.AddToScheme(scheme) - _ = clusterv1beta1.AddToScheme(scheme) - _ = expv1beta1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) _ = bootstrapv1beta1.AddToScheme(scheme) _ = bootstrapv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme diff --git a/controlplane/api/v1beta1/condition_consts.go b/controlplane/api/v1beta1/condition_consts.go index a9d7c70e..f19b36e0 100644 --- a/controlplane/api/v1beta1/condition_consts.go +++ b/controlplane/api/v1beta1/condition_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the KThreesControlPlane object. const ( // MachinesReady reports an aggregate of current status of the machines controlled by the KThreesControlPlane. - MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" + MachinesReadyCondition clusterv1beta1.ConditionType = "MachinesReady" ) const ( // CertificatesAvailableCondition documents that cluster certificates were generated as part of the // processing of a KThreesControlPlane object. 
- CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1beta1.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KThreesControlPlane controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller @@ -39,7 +39,7 @@ const ( const ( // AvailableCondition documents that the first control plane instance has completed the server init operation // and so the control plane is available and an API server instance is ready for processing requests. - AvailableCondition clusterv1.ConditionType = "Available" + AvailableCondition clusterv1beta1.ConditionType = "Available" // WaitingForKthreesServerReason (Severity=Info) documents a KThreesControlPlane object waiting for the first // control plane instance to complete the k3s server operation. @@ -49,7 +49,7 @@ const ( const ( // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the KThreesControlPlane // is up to date. Whe this condition is false, the KThreesControlPlane is executing a rolling upgrade. - MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + MachinesSpecUpToDateCondition clusterv1beta1.ConditionType = "MachinesSpecUpToDate" // RollingUpdateInProgressReason (Severity=Warning) documents a KThreesControlPlane object executing a // rolling upgrade for aligning the machines spec to the desired state. @@ -58,7 +58,7 @@ const ( const ( // ResizedCondition documents a KThreesControlPlane that is resizing the set of controlled machines. - ResizedCondition clusterv1.ConditionType = "Resized" + ResizedCondition clusterv1beta1.ConditionType = "Resized" // ScalingUpReason (Severity=Info) documents a KThreesControlPlane that is increasing the number of replicas. 
ScalingUpReason = "ScalingUp" @@ -69,7 +69,7 @@ const ( const ( // ControlPlaneComponentsHealthyCondition reports the overall status of the k3s server. - ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy" + ControlPlaneComponentsHealthyCondition clusterv1beta1.ConditionType = "ControlPlaneComponentsHealthy" // ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy. ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy" @@ -81,7 +81,7 @@ const ( ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed" // MachineAgentHealthyCondition reports a machine's k3s agent's operational status. - MachineAgentHealthyCondition clusterv1.ConditionType = "AgentHealthy" + MachineAgentHealthyCondition clusterv1beta1.ConditionType = "AgentHealthy" // PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned i.e., Pod is in "Pending" phase. PodProvisioningReason = "PodProvisioning" @@ -99,7 +99,7 @@ const ( const ( // EtcdClusterHealthyCondition documents the overall etcd cluster's health. - EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition" + EtcdClusterHealthyCondition clusterv1beta1.ConditionType = "EtcdClusterHealthyCondition" // EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status. EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed" @@ -112,7 +112,7 @@ const ( // MachineEtcdMemberHealthyCondition report the machine's etcd member's health status. // NOTE: This conditions exists only if a stacked etcd cluster is used. - MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy" + MachineEtcdMemberHealthyCondition clusterv1beta1.ConditionType = "EtcdMemberHealthy" // EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status. 
EtcdMemberInspectionFailedReason = "MemberInspectionFailed" @@ -123,7 +123,7 @@ const ( const ( // TokenAvailableCondition documents whether the token required for nodes to join the cluster is available. - TokenAvailableCondition clusterv1.ConditionType = "TokenAvailable" + TokenAvailableCondition clusterv1beta1.ConditionType = "TokenAvailable" // TokenGenerationFailedReason documents that the token required for nodes to join the cluster could not be generated. TokenGenerationFailedReason = "TokenGenerationFailed" diff --git a/controlplane/api/v1beta1/kthreescontrolplane_types.go b/controlplane/api/v1beta1/kthreescontrolplane_types.go index 18ed34d4..2ad68aa2 100644 --- a/controlplane/api/v1beta1/kthreescontrolplane_types.go +++ b/controlplane/api/v1beta1/kthreescontrolplane_types.go @@ -21,7 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bootstrapv1beta1 "github.com/k3s-io/cluster-api-k3s/bootstrap/api/v1beta1" "github.com/k3s-io/cluster-api-k3s/pkg/errors" @@ -107,7 +107,7 @@ type KThreesControlPlaneMachineTemplate struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` } // RemediationStrategy allows to define how control plane machine remediation happens. @@ -212,7 +212,7 @@ type KThreesControlPlaneStatus struct { // Conditions defines current service state of the KThreesControlPlane. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // LastRemediation stores info about last remediation performed. 
// +optional @@ -254,11 +254,11 @@ type KThreesControlPlane struct { Status KThreesControlPlaneStatus `json:"status,omitempty"` } -func (in *KThreesControlPlane) GetConditions() clusterv1.Conditions { +func (in *KThreesControlPlane) GetConditions() clusterv1beta1.Conditions { return in.Status.Conditions } -func (in *KThreesControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (in *KThreesControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { in.Status.Conditions = conditions } diff --git a/controlplane/api/v1beta2/condition_consts.go b/controlplane/api/v1beta2/condition_consts.go index b181cf78..2cefe5b4 100644 --- a/controlplane/api/v1beta2/condition_consts.go +++ b/controlplane/api/v1beta2/condition_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the KThreesControlPlane object. const ( // MachinesReady reports an aggregate of current status of the machines controlled by the KThreesControlPlane. - MachinesReadyCondition clusterv1.ConditionType = "MachinesReady" + MachinesReadyCondition clusterv1beta1.ConditionType = "MachinesReady" ) const ( // CertificatesAvailableCondition documents that cluster certificates were generated as part of the // processing of a KThreesControlPlane object. 
- CertificatesAvailableCondition clusterv1.ConditionType = "CertificatesAvailable" + CertificatesAvailableCondition clusterv1beta1.ConditionType = "CertificatesAvailable" // CertificatesGenerationFailedReason (Severity=Warning) documents a KThreesControlPlane controller detecting // an error while generating certificates; those kind of errors are usually temporary and the controller @@ -39,7 +39,7 @@ const ( const ( // AvailableCondition documents that the first control plane instance has completed the server init operation // and so the control plane is available and an API server instance is ready for processing requests. - AvailableCondition clusterv1.ConditionType = "Available" + AvailableCondition clusterv1beta1.ConditionType = "Available" // WaitingForKthreesServerReason (Severity=Info) documents a KThreesControlPlane object waiting for the first // control plane instance to complete the k3s server operation. @@ -49,7 +49,7 @@ const ( const ( // MachinesSpecUpToDateCondition documents that the spec of the machines controlled by the KThreesControlPlane // is up to date. Whe this condition is false, the KThreesControlPlane is executing a rolling upgrade. - MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate" + MachinesSpecUpToDateCondition clusterv1beta1.ConditionType = "MachinesSpecUpToDate" // RollingUpdateInProgressReason (Severity=Warning) documents a KThreesControlPlane object executing a // rolling upgrade for aligning the machines spec to the desired state. @@ -58,7 +58,7 @@ const ( const ( // ResizedCondition documents a KThreesControlPlane that is resizing the set of controlled machines. - ResizedCondition clusterv1.ConditionType = "Resized" + ResizedCondition clusterv1beta1.ConditionType = "Resized" // ScalingUpReason (Severity=Info) documents a KThreesControlPlane that is increasing the number of replicas. 
ScalingUpReason = "ScalingUp" @@ -69,7 +69,7 @@ const ( const ( // ControlPlaneComponentsHealthyCondition reports the overall status of the k3s server. - ControlPlaneComponentsHealthyCondition clusterv1.ConditionType = "ControlPlaneComponentsHealthy" + ControlPlaneComponentsHealthyCondition clusterv1beta1.ConditionType = "ControlPlaneComponentsHealthy" // ControlPlaneComponentsUnhealthyReason (Severity=Error) documents a control plane component not healthy. ControlPlaneComponentsUnhealthyReason = "ControlPlaneComponentsUnhealthy" @@ -81,7 +81,7 @@ const ( ControlPlaneComponentsInspectionFailedReason = "ControlPlaneComponentsInspectionFailed" // MachineAgentHealthyCondition reports a machine's k3s agent's operational status. - MachineAgentHealthyCondition clusterv1.ConditionType = "AgentHealthy" + MachineAgentHealthyCondition clusterv1beta1.ConditionType = "AgentHealthy" // PodProvisioningReason (Severity=Info) documents a pod waiting to be provisioned i.e., Pod is in "Pending" phase. PodProvisioningReason = "PodProvisioning" @@ -99,7 +99,7 @@ const ( const ( // EtcdClusterHealthyCondition documents the overall etcd cluster's health. - EtcdClusterHealthyCondition clusterv1.ConditionType = "EtcdClusterHealthyCondition" + EtcdClusterHealthyCondition clusterv1beta1.ConditionType = "EtcdClusterHealthyCondition" // EtcdClusterInspectionFailedReason documents a failure in inspecting the etcd cluster status. EtcdClusterInspectionFailedReason = "EtcdClusterInspectionFailed" @@ -112,7 +112,7 @@ const ( // MachineEtcdMemberHealthyCondition report the machine's etcd member's health status. // NOTE: This conditions exists only if a stacked etcd cluster is used. - MachineEtcdMemberHealthyCondition clusterv1.ConditionType = "EtcdMemberHealthy" + MachineEtcdMemberHealthyCondition clusterv1beta1.ConditionType = "EtcdMemberHealthy" // EtcdMemberInspectionFailedReason documents a failure in inspecting the etcd member status. 
EtcdMemberInspectionFailedReason = "MemberInspectionFailed" @@ -123,7 +123,7 @@ const ( const ( // TokenAvailableCondition documents whether the token required for nodes to join the cluster is available. - TokenAvailableCondition clusterv1.ConditionType = "TokenAvailable" + TokenAvailableCondition clusterv1beta1.ConditionType = "TokenAvailable" // TokenGenerationFailedReason documents that the token required for nodes to join the cluster could not be generated. TokenGenerationFailedReason = "TokenGenerationFailed" diff --git a/controlplane/api/v1beta2/kthreescontrolplane_types.go b/controlplane/api/v1beta2/kthreescontrolplane_types.go index 78ce9f88..5c29a017 100644 --- a/controlplane/api/v1beta2/kthreescontrolplane_types.go +++ b/controlplane/api/v1beta2/kthreescontrolplane_types.go @@ -21,7 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bootstrapv1beta2 "github.com/k3s-io/cluster-api-k3s/bootstrap/api/v1beta2" "github.com/k3s-io/cluster-api-k3s/pkg/errors" @@ -92,7 +92,7 @@ type KThreesControlPlaneMachineTemplate struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` // InfrastructureRef is a required reference to a custom resource // offered by an infrastructure provider. InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` @@ -219,11 +219,27 @@ type KThreesControlPlaneStatus struct { // Conditions defines current service state of the KThreesControlPlane. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // LastRemediation stores info about last remediation performed. 
// +optional LastRemediation *LastRemediationStatus `json:"lastRemediation,omitempty"` + + // v1beta2 groups all the fields that will be added or modified in KThreesControlPlane's status with the V1Beta2 version. + // +optional + V1Beta2 *KThreesControlPlaneV1Beta2Status `json:"v1beta2,omitempty"` +} + +// KThreesControlPlaneV1Beta2Status groups all the fields that will be added or modified in KThreesControlPlaneStatus with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. +type KThreesControlPlaneV1Beta2Status struct { + // conditions represents the observations of a KThreesControlPlane's current state. + // Known condition types are Ready, Paused. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } // LastRemediationStatus stores info about last remediation performed. @@ -262,14 +278,30 @@ type KThreesControlPlane struct { Status KThreesControlPlaneStatus `json:"status,omitempty"` } -func (in *KThreesControlPlane) GetConditions() clusterv1.Conditions { +func (in *KThreesControlPlane) GetConditions() clusterv1beta1.Conditions { return in.Status.Conditions } -func (in *KThreesControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (in *KThreesControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { in.Status.Conditions = conditions } +// GetV1Beta2Conditions returns the set of conditions for this object. +func (c *KThreesControlPlane) GetV1Beta2Conditions() []metav1.Condition { + if c.Status.V1Beta2 == nil { + return nil + } + return c.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for an API object. 
+func (c *KThreesControlPlane) SetV1Beta2Conditions(conditions []metav1.Condition) { + if c.Status.V1Beta2 == nil { + c.Status.V1Beta2 = &KThreesControlPlaneV1Beta2Status{} + } + c.Status.V1Beta2.Conditions = conditions +} + // +kubebuilder:object:root=true // KThreesControlPlaneList contains a list of KThreesControlPlane. diff --git a/controlplane/controllers/kthreescontrolplane_controller.go b/controlplane/controllers/kthreescontrolplane_controller.go index a21f8554..e2ff38bb 100644 --- a/controlplane/controllers/kthreescontrolplane_controller.go +++ b/controlplane/controllers/kthreescontrolplane_controller.go @@ -31,13 +31,14 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" @@ -111,7 +112,7 @@ func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. } // Wait for the cluster infrastructure to be ready before creating machines - if !cluster.Status.InfrastructureReady { + if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) { return reconcile.Result{}, nil } @@ -179,7 +180,7 @@ func (r *KThreesControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. // status without waiting for a full resync (by default 10 minutes). // Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled. 
// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive. - if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) { + if v1beta1conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) { res = ctrl.Result{RequeueAfter: 20 * time.Second} } } @@ -223,12 +224,12 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. // However, during delete we are hiding the counter (1 of x) because it does not make sense given that // all the machines are deleted in parallel. - conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) + v1beta1conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), v1beta1conditions.AddSourceRef(), v1beta1conditions.WithStepCounterIf(false)) // Verify that only control plane machines remain if len(allMachines) != len(ownedMachines) { logger.Info("Waiting for worker nodes to be deleted first") - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") + v1beta1conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first") return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } @@ -249,14 +250,14 @@ func (r *KThreesControlPlaneReconciler) reconcileDelete(ctx context.Context, clu "Failed to delete control plane Machines for cluster %s/%s control plane: %v", cluster.Namespace, cluster.Name, err) return reconcile.Result{}, err } - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, 
clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil } func patchKThreesControlPlane(ctx context.Context, patchHelper *patch.Helper, kcp *controlplanev1.KThreesControlPlane) error { // Always update the readyCondition by summarizing the state of other conditions. - conditions.SetSummary(kcp, - conditions.WithConditions( + v1beta1conditions.SetSummary(kcp, + v1beta1conditions.WithConditions( controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.ResizedCondition, controlplanev1.MachinesReadyCondition, @@ -295,7 +296,7 @@ func (r *KThreesControlPlaneReconciler) SetupWithManager(ctx context.Context, mg &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.ClusterToKThreesControlPlane(ctx, log)), builder.WithPredicates( - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), r.Log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), r.Log), ), ).Build(r) if err != nil { @@ -382,24 +383,24 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c machinesWithAgentHealthy := controlPlane.Machines.Filter(machinefilters.AgentHealthy()) lowestVersion := machinesWithAgentHealthy.LowestVersion() - if lowestVersion != nil { - controlPlane.KCP.Status.Version = lowestVersion + if lowestVersion != "" { + controlPlane.KCP.Status.Version = ptr.To(lowestVersion) } switch { // We are scaling up case replicas < desiredReplicas: - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", desiredReplicas, replicas) + v1beta1conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up control plane to %d replicas (actual %d)", 
desiredReplicas, replicas) // We are scaling down case replicas > desiredReplicas: - conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) + v1beta1conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down control plane to %d replicas (actual %d)", desiredReplicas, replicas) default: // make sure last resize operation is marked as completed. // NOTE: we are checking the number of machines ready so we report resize completed only when the machines // are actually provisioned (vs reporting completed immediately after the last machine object is created). readyMachines := ownedMachines.Filter(collections.IsReady()) if int32(len(readyMachines)) == replicas { - conditions.MarkTrue(kcp, controlplanev1.ResizedCondition) + v1beta1conditions.MarkTrue(kcp, controlplanev1.ResizedCondition) } } @@ -423,7 +424,7 @@ func (r *KThreesControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c if kcp.Status.ReadyReplicas > 0 { kcp.Status.Ready = true - conditions.MarkTrue(kcp, controlplanev1.AvailableCondition) + v1beta1conditions.MarkTrue(kcp, controlplanev1.AvailableCondition) } // Surface lastRemediation data in status. 
@@ -472,16 +473,16 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * controllerRef := metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KThreesControlPlane")) if err := certificates.LookupOrGenerate(ctx, r.Client, util.ObjectKey(cluster), *controllerRef); err != nil { logger.Error(err, "unable to lookup or create cluster certificates") - conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(kcp, controlplanev1.CertificatesAvailableCondition, controlplanev1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return reconcile.Result{}, err } - conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition) + v1beta1conditions.MarkTrue(kcp, controlplanev1.CertificatesAvailableCondition) if err := token.Reconcile(ctx, r.Client, client.ObjectKeyFromObject(cluster), kcp); err != nil { - conditions.MarkFalse(kcp, controlplanev1.TokenAvailableCondition, controlplanev1.TokenGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + v1beta1conditions.MarkFalse(kcp, controlplanev1.TokenAvailableCondition, controlplanev1.TokenGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return reconcile.Result{}, err } - conditions.MarkTrue(kcp, controlplanev1.TokenAvailableCondition) + v1beta1conditions.MarkTrue(kcp, controlplanev1.TokenAvailableCondition) // If ControlPlaneEndpoint is not set, return early if !cluster.Spec.ControlPlaneEndpoint.IsValid() { @@ -526,7 +527,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * // Aggregate the operational state of all the machines; while aggregating we are adding the // source ref (reason@machine/name) so the problem can be easily tracked down to its source machine. 
- conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false)) + v1beta1conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), v1beta1conditions.AddSourceRef(), v1beta1conditions.WithStepCounterIf(false)) // Updates conditions reporting the status of static pods and the status of the etcd cluster. // NOTE: Conditions reporting KCP operation progress like e.g. Resized or SpecUpToDate are inlined with the rest of the execution. @@ -551,14 +552,14 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * switch { case len(needRollout) > 0: logger.Info("Rolling out Control Plane machines", "needRollout", needRollout.Names()) - conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(controlPlane.Machines)-len(needRollout)) + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(controlPlane.Machines)-len(needRollout)) return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needRollout) default: // make sure last upgrade operation is marked as completed. // NOTE: we are checking the condition already exists in order to avoid to set this condition at the first // reconciliation/before a rolling upgrade actually starts. 
- if conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) { - conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) + if v1beta1conditions.Has(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) { + v1beta1conditions.MarkTrue(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition) } } @@ -571,7 +572,7 @@ func (r *KThreesControlPlaneReconciler) reconcile(ctx context.Context, cluster * case numMachines < desiredReplicas && numMachines == 0: // Create new Machine w/ init logger.Info("Initializing control plane", "Desired", desiredReplicas, "Existing", numMachines) - conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKthreesServerReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(controlPlane.KCP, controlplanev1.AvailableCondition, controlplanev1.WaitingForKthreesServerReason, clusterv1.ConditionSeverityInfo, "") return r.initializeControlPlane(ctx, cluster, kcp, controlPlane) // We are scaling up case numMachines < desiredReplicas && numMachines > 0: @@ -816,7 +817,7 @@ func (r *KThreesControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context // Collect all the node names. nodeNames := []string{} for _, machine := range controlPlane.Machines { - if machine.Status.NodeRef == nil { + if !machine.Status.NodeRef.IsDefined() { // If there are provisioning machines (machines without a node yet), return. return nil } @@ -825,7 +826,7 @@ func (r *KThreesControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context // Potential inconsistencies between the list of members and the list of machines/nodes are // surfaced using the EtcdClusterHealthyCondition; if this condition is true, meaning no inconsistencies exists, return early. 
- if conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) { + if v1beta1conditions.IsTrue(controlPlane.KCP, controlplanev1.EtcdClusterHealthyCondition) { return nil } diff --git a/controlplane/controllers/remediation.go b/controlplane/controllers/remediation.go index 21a3bb1e..0136048a 100644 --- a/controlplane/controllers/remediation.go +++ b/controlplane/controllers/remediation.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/collections" "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" @@ -145,21 +146,21 @@ func (r *KThreesControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance. if controlPlane.Machines.Len() <= 1 { log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "Replicas", controlPlane.Machines.Len()) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") return ctrl.Result{}, nil } // The cluster MUST NOT have healthy machines still being provisioned. This rule prevents KCP taking actions while the cluster is in a transitional state. 
if controlPlane.HasHealthyMachineStillProvisioning() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") return ctrl.Result{}, nil } // The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasDeletingMachine() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation") return ctrl.Result{}, nil } @@ -168,12 +169,12 @@ func (r *KThreesControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if controlPlane.IsEtcdManaged() { canSafelyRemediate, err := r.canSafelyRemoveEtcdMember(ctx, controlPlane, machineToBeRemediated) if err != nil { - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } if !canSafelyRemediate { log.Info("A control plane machine needs remediation, but removing this machine could result in etcd quorum loss. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") return ctrl.Result{}, nil } } @@ -195,13 +196,13 @@ func (r *KThreesControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C etcdLeaderCandidate := controlPlane.HealthyMachines().Newest() if etcdLeaderCandidate == nil { log.Info("A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. 
Skipping remediation") return ctrl.Result{}, nil } if err := workloadCluster.ForwardEtcdLeadership(ctx, machineToBeRemediated, etcdLeaderCandidate); err != nil { log.Error(err, "Failed to move etcd leadership to candidate machine", "candidate", klog.KObj(etcdLeaderCandidate)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, err } @@ -222,13 +223,13 @@ func (r *KThreesControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // Delete the machine if err := r.Client.Delete(ctx, machineToBeRemediated); err != nil { - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error()) return ctrl.Result{}, errors.Wrapf(err, "failed to delete unhealthy machine %s", machineToBeRemediated.Name) } // Surface the operation is in progress. log.Info("Remediating unhealthy machine") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") // Prepare the info for tracking the remediation progress into the RemediationInProgressAnnotation. 
remediationInProgressValue, err := remediationInProgressData.Marshal() @@ -324,7 +325,7 @@ func (r *KThreesControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin // Check if remediation can happen because retryPeriod is passed. if lastRemediationTime.Add(retryPeriod).After(reconciliationTime) { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed in the latest %s. Skipping remediation", retryPeriod)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) return remediationInProgressData, false, nil } @@ -333,7 +334,7 @@ func (r *KThreesControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin maxRetry := int(*controlPlane.KCP.Spec.RemediationStrategy.MaxRetry) if remediationInProgressData.RetryCount >= maxRetry { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed %d times (MaxRetry %d). 
Skipping remediation", remediationInProgressData.RetryCount, maxRetry)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) + v1beta1conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) return remediationInProgressData, false, nil } } @@ -408,7 +409,7 @@ func (r *KThreesControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co // Search for the machine corresponding to the etcd member. var machine *clusterv1.Machine for _, m := range controlPlane.Machines { - if m.Status.NodeRef != nil && m.Status.NodeRef.Name == etcdMember { + if m.Status.NodeRef.IsDefined() && m.Status.NodeRef.Name == etcdMember { machine = m break } @@ -426,7 +427,7 @@ func (r *KThreesControlPlaneReconciler) canSafelyRemoveEtcdMember(ctx context.Co } // Check member health as reported by machine's health conditions - if !conditions.IsTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) { + if !v1beta1conditions.IsTrue(machine, controlplanev1.MachineEtcdMemberHealthyCondition) { targetUnhealthyMembers++ unhealthyMembers = append(unhealthyMembers, fmt.Sprintf("%s (%s)", etcdMember, machine.Name)) continue diff --git a/controlplane/controllers/scale.go b/controlplane/controllers/scale.go index 1fe1afd4..cfdd5bad 100644 --- a/controlplane/controllers/scale.go +++ b/controlplane/controllers/scale.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "encoding/json" "fmt" "strings" @@ -27,20 +26,20 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - 
"k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/storage/names" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/conditions" //nolint:staticcheck "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" bootstrapv1 "github.com/k3s-io/cluster-api-k3s/bootstrap/api/v1beta2" controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2" + "github.com/k3s-io/cluster-api-k3s/controlplane/internal/desiredstate" k3s "github.com/k3s-io/cluster-api-k3s/pkg/k3s" "github.com/k3s-io/cluster-api-k3s/pkg/util/ssa" ) @@ -225,16 +224,16 @@ loopmachines: return ctrl.Result{}, nil } -func preflightCheckCondition(kind string, obj conditions.Getter, condition clusterv1.ConditionType) error { - c := conditions.Get(obj, condition) +func preflightCheckCondition(kind string, obj clusterv1.Machine, conditionType string) error { + c := conditions.Get(obj, conditionType) if c == nil { - return fmt.Errorf("%s %s does not have %s condition: %w", kind, obj.GetName(), condition, ErrPreConditionFailed) + return fmt.Errorf("%s %s does not have %s condition: %w", kind, obj.GetName(), conditionType, ErrPreConditionFailed) } - if c.Status == corev1.ConditionFalse { - return fmt.Errorf("%s %s reports %s condition is false (%s, %s): %w", kind, obj.GetName(), condition, c.Severity, c.Message, ErrPreConditionFailed) + if c.Status == metav1.ConditionFalse { + return errors.Errorf("%s %s reports %s condition is false (%s)", kind, obj.GetName(), conditionType, c.Message) } - if c.Status == corev1.ConditionUnknown { - return fmt.Errorf("%s %s reports %s condition is unknown (%s): %w", kind, obj.GetName(), condition, c.Message, ErrPreConditionFailed) + if c.Status == metav1.ConditionUnknown { 
+ return errors.Errorf("%s %s reports %s condition is unknown (%s)", kind, obj.GetName(), conditionType, c.Message) } return nil @@ -253,11 +252,11 @@ func selectMachineForScaleDown(ctx context.Context, controlPlane *k3s.ControlPla return controlPlane.MachineInFailureDomainWithMostMachines(ctx, machines) } -func (r *KThreesControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, bootstrapSpec *bootstrapv1.KThreesConfigSpec, failureDomain *string) error { +func (r *KThreesControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KThreesControlPlane, bootstrapSpec *bootstrapv1.KThreesConfigSpec, failureDomain string) error { var errs []error // Compute desired Machine - machine, err := r.computeDesiredMachine(kcp, cluster, failureDomain, nil) + machine, err := desiredstate.ComputeDesiredMachine(kcp, cluster, failureDomain, nil) if err != nil { return errors.Wrap(err, "failed to create Machine: failed to compute desired Machine") } @@ -332,6 +331,7 @@ func (r *KThreesControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex return kerrors.NewAggregate(errs) } + func (r *KThreesControlPlaneReconciler) generateKThreesConfig(ctx context.Context, kcp *controlplanev1.KThreesControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KThreesConfigSpec) (*corev1.ObjectReference, error) { // Create an owner reference without a controller reference because the owning controller is the machine controller owner := metav1.OwnerReference{ @@ -409,92 +409,3 @@ func (r *KThreesControlPlaneReconciler) updateMachine(ctx context.Context, machi } return updatedMachine, nil } - -// computeDesiredMachine computes the desired Machine. 
-// This Machine will be used during reconciliation to: -// * create a new Machine -// * update an existing Machine -// Because we are using Server-Side-Apply we always have to calculate the full object. -// There are small differences in how we calculate the Machine depending on if it -// is a create or update. Example: for a new Machine we have to calculate a new name, -// while for an existing Machine we have to use the name of the existing Machine. -// Also, for an existing Machine, we will not copy its labels, as they are not managed by the KThreesControlPlane controller. -func (r *KThreesControlPlaneReconciler) computeDesiredMachine(kcp *controlplanev1.KThreesControlPlane, cluster *clusterv1.Cluster, failureDomain *string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { - var machineName string - var machineUID types.UID - var version *string - annotations := map[string]string{} - if existingMachine == nil { - // Creating a new machine - machineName = names.SimpleNameGenerator.GenerateName(kcp.Name + "-") - version = &kcp.Spec.Version - - // Machine's bootstrap config may be missing ClusterConfiguration if it is not the first machine in the control plane. - // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. - serverConfig, err := json.Marshal(kcp.Spec.KThreesConfigSpec.ServerConfig) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal cluster configuration") - } - annotations[controlplanev1.KThreesServerConfigurationAnnotation] = string(serverConfig) - - // In case this machine is being created as a consequence of a remediation, then add an annotation - // tracking remediating data. - // NOTE: This is required in order to track remediation retries. 
- if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } else { - // Updating an existing machine - machineName = existingMachine.Name - machineUID = existingMachine.UID - version = existingMachine.Spec.Version - - // For existing machine only set the ClusterConfiguration annotation if the machine already has it. - // We should not add the annotation if it was missing in the first place because we do not have enough - // information. - if serverConfig, ok := existingMachine.Annotations[controlplanev1.KThreesServerConfigurationAnnotation]; ok { - annotations[controlplanev1.KThreesServerConfigurationAnnotation] = serverConfig - } - - // If the machine already has remediation data then preserve it. - // NOTE: This is required in order to track remediation retries. - if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { - annotations[controlplanev1.RemediationForAnnotation] = remediationData - } - } - - // Construct the basic Machine. 
- desiredMachine := &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: machineName, - Namespace: kcp.Namespace, - UID: machineUID, - Labels: k3s.ControlPlaneLabelsForCluster(cluster.Name, kcp.Spec.MachineTemplate), - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KThreesControlPlane")), - }, - }, - Spec: clusterv1.MachineSpec{ - ClusterName: cluster.Name, - Version: version, - FailureDomain: failureDomain, - NodeDrainTimeout: kcp.Spec.MachineTemplate.NodeDrainTimeout, - NodeVolumeDetachTimeout: kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout, - NodeDeletionTimeout: kcp.Spec.MachineTemplate.NodeDeletionTimeout, - }, - } - - // Set annotations - for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { - annotations[k] = v - } - - desiredMachine.SetAnnotations(annotations) - - if existingMachine != nil { - desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef - desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef - } - - return desiredMachine, nil -} diff --git a/controlplane/internal/desiredstate/desired_state.go b/controlplane/internal/desiredstate/desired_state.go new file mode 100644 index 00000000..0901c5a4 --- /dev/null +++ b/controlplane/internal/desiredstate/desired_state.go @@ -0,0 +1,106 @@ +package desiredstate + +import ( + "encoding/json" + + "github.com/k3s-io/cluster-api-k3s/pkg/k3s" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + + controlplanev1 "github.com/k3s-io/cluster-api-k3s/controlplane/api/v1beta2" + "github.com/k3s-io/cluster-api-k3s/controlplane/internal/names" +) + +// ComputeDesiredMachine computes the desired Machine. 
+// This Machine will be used during reconciliation to: +// * create a new Machine +// * update an existing Machine +// Because we are using Server-Side-Apply we always have to calculate the full object. +// There are small differences in how we calculate the Machine depending on if it +// is a create or update. Example: for a new Machine we have to calculate a new name, +// while for an existing Machine we have to use the name of the existing Machine. +// Also, for an existing Machine, we will not copy its labels, as they are not managed by the KThreesControlPlane controller. +func ComputeDesiredMachine(kcp *controlplanev1.KThreesControlPlane, cluster *clusterv1.Cluster, failureDomain string, existingMachine *clusterv1.Machine) (*clusterv1.Machine, error) { + var machineName string + var machineUID types.UID + var version string + annotations := map[string]string{} + if existingMachine == nil { + // Creating a new machine + nameTemplate := "{{.kthreesControlPlane.name}}-{{.random}}" + generatedMachineName, err := names.KCPMachineNameGenerator(nameTemplate, cluster.Name, kcp.Name).GenerateName() + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired Machine: failed to generate Machine name") + } + machineName = generatedMachineName + version = kcp.Spec.Version + + // Machine's bootstrap config may be missing ClusterConfiguration if it is not the first machine in the control plane. + // We store ClusterConfiguration as annotation here to detect any changes in KCP ClusterConfiguration and rollout the machine if any. + serverConfig, err := json.Marshal(kcp.Spec.KThreesConfigSpec.ServerConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal cluster configuration") + } + annotations[controlplanev1.KThreesServerConfigurationAnnotation] = string(serverConfig) + + // In case this machine is being created as a consequence of a remediation, then add an annotation + // tracking remediating data. 
+ // NOTE: This is required in order to track remediation retries. + if remediationData, ok := kcp.Annotations[controlplanev1.RemediationInProgressAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } else { + // Updating an existing machine + machineName = existingMachine.Name + machineUID = existingMachine.UID + version = existingMachine.Spec.Version + + // For existing machine only set the ClusterConfiguration annotation if the machine already has it. + // We should not add the annotation if it was missing in the first place because we do not have enough + // information. + if serverConfig, ok := existingMachine.Annotations[controlplanev1.KThreesServerConfigurationAnnotation]; ok { + annotations[controlplanev1.KThreesServerConfigurationAnnotation] = serverConfig + } + + // If the machine already has remediation data then preserve it. + // NOTE: This is required in order to track remediation retries. + if remediationData, ok := existingMachine.Annotations[controlplanev1.RemediationForAnnotation]; ok { + annotations[controlplanev1.RemediationForAnnotation] = remediationData + } + } + + // Construct the basic Machine. 
+ desiredMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: machineName, + Namespace: kcp.Namespace, + UID: machineUID, + Labels: k3s.ControlPlaneLabelsForCluster(cluster.Name, kcp.Spec.MachineTemplate), + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KThreesControlPlane")), + }, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: cluster.Name, + Version: version, + FailureDomain: failureDomain, + }, + } + + // Set annotations + for k, v := range kcp.Spec.MachineTemplate.ObjectMeta.Annotations { + annotations[k] = v + } + + desiredMachine.SetAnnotations(annotations) + + if existingMachine != nil { + desiredMachine.Spec.InfrastructureRef = existingMachine.Spec.InfrastructureRef + desiredMachine.Spec.Bootstrap.ConfigRef = existingMachine.Spec.Bootstrap.ConfigRef + } + + return desiredMachine, nil +} diff --git a/controlplane/internal/names/namesp.go b/controlplane/internal/names/namesp.go new file mode 100644 index 00000000..69546fcf --- /dev/null +++ b/controlplane/internal/names/namesp.go @@ -0,0 +1,179 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package names implements name generators for managed topology. +package names + +import ( + "bytes" + "fmt" + "text/template" + + "github.com/pkg/errors" + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// This is a copy of the constants at k8s.io/apiserver/pkg/storage/names. 
+const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +type simpleNameGenerator struct { + base string +} + +func (s *simpleNameGenerator) GenerateName() (string, error) { + base := s.base + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)), nil +} + +// NameGenerator generates names for objects. +type NameGenerator interface { + // GenerateName generates a valid name. The generator is responsible for + // knowing the maximum valid name length. + GenerateName() (string, error) +} + +// SimpleNameGenerator returns a NameGenerator which is based on +// k8s.io/apiserver/pkg/storage/names.SimpleNameGenerator. +func SimpleNameGenerator(base string) NameGenerator { + return &simpleNameGenerator{ + base: base, + } +} + +// ControlPlaneNameGenerator returns a generator for creating a control plane name. +func ControlPlaneNameGenerator(templateString, clusterName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{}) +} + +// MachineDeploymentNameGenerator returns a generator for creating a machinedeployment name. +func MachineDeploymentNameGenerator(templateString, clusterName, topologyName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{ + "machineDeployment": map[string]interface{}{ + "topologyName": topologyName, + }, + }) +} + +// MachinePoolNameGenerator returns a generator for creating a machinepool name. +func MachinePoolNameGenerator(templateString, clusterName, topologyName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{ + "machinePool": map[string]interface{}{ + "topologyName": topologyName, + }, + }) +} + +// KCPMachineNameGenerator returns a generator for creating a kcp machine name. 
+func KCPMachineNameGenerator(templateString, clusterName, kthreesControlPlaneName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{ + "kthreesControlPlane": map[string]interface{}{ + "name": kthreesControlPlaneName, + }, + }) +} + +// MachineSetMachineNameGenerator returns a generator for creating a machineSet machine name. +func MachineSetMachineNameGenerator(templateString, clusterName, machineSetName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{ + "machineSet": map[string]interface{}{ + "name": machineSetName, + }, + }) +} + +// InfraClusterNameGenerator returns a generator for creating an infrastructure cluster name. +func InfraClusterNameGenerator(templateString, clusterName string) NameGenerator { + return newTemplateGenerator(templateString, clusterName, + map[string]interface{}{}) +} + +// templateGenerator parses the template string as text/template and executes it using +// the passed data to generate a name. +type templateGenerator struct { + template string + data map[string]interface{} +} + +func newTemplateGenerator(template, clusterName string, data map[string]interface{}) NameGenerator { + data["cluster"] = map[string]interface{}{ + "name": clusterName, + } + data["random"] = utilrand.String(randomLength) + + return &templateGenerator{ + template: template, + data: data, + } +} + +func (g *templateGenerator) GenerateName() (string, error) { + tpl, err := template.New("template name generator").Option("missingkey=error").Parse(g.template) + if err != nil { + return "", errors.Wrapf(err, "parsing template %q", g.template) + } + + var buf bytes.Buffer + if err := tpl.Execute(&buf, g.data); err != nil { + return "", errors.Wrap(err, "rendering template") + } + + name := buf.String() + + // If the name exceeds the maxNameLength: trim to maxGeneratedNameLength and add + // a random suffix. 
+ if len(name) > maxNameLength { + name = name[:maxGeneratedNameLength] + utilrand.String(randomLength) + } + + return name, nil +} + +// BootstrapTemplateNamePrefix calculates the name prefix for a BootstrapTemplate. +func BootstrapTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string { + return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName) +} + +// InfrastructureMachineTemplateNamePrefix calculates the name prefix for an InfrastructureMachineTemplate. +func InfrastructureMachineTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string { + return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName) +} + +// BootstrapConfigNamePrefix calculates the name prefix for a BootstrapConfig. +func BootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string { + return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) +} + +// InfrastructureMachinePoolNamePrefix calculates the name prefix for an InfrastructureMachinePool. +func InfrastructureMachinePoolNamePrefix(clusterName, machinePoolTopologyName string) string { + return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) +} + +// ControlPlaneInfrastructureMachineTemplateNamePrefix calculates the name prefix for an InfrastructureMachineTemplate. 
+func ControlPlaneInfrastructureMachineTemplateNamePrefix(clusterName string) string { + return fmt.Sprintf("%s-", clusterName) +} diff --git a/devbox.json b/devbox.json index 8b1ab99d..c06bfe61 100644 --- a/devbox.json +++ b/devbox.json @@ -1,7 +1,7 @@ { "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.14.2/.schema/devbox.schema.json", "packages": [ - "go@1.23.0", + "go@1.24.0", "kind@latest", "tilt@latest", "envsubst@latest" diff --git a/devbox.lock b/devbox.lock index 28c92bf3..1398f099 100644 --- a/devbox.lock +++ b/devbox.lock @@ -53,51 +53,51 @@ "last_modified": "2025-05-02T17:15:29Z", "resolved": "github:NixOS/nixpkgs/9a7caecf30a0494c88b7daeeed29244cd9a52e7d?lastModified=1746206129&narHash=sha256-JA4DynBKhY7t4DdJZTuomRLAiXFDUgCGGwxgt%2BXGiik%3D" }, - "go@1.23.0": { - "last_modified": "2024-08-31T10:12:23Z", - "resolved": "github:NixOS/nixpkgs/5629520edecb69630a3f4d17d3d33fc96c13f6fe#go_1_23", + "go@1.24.0": { + "last_modified": "2025-02-23T09:42:26Z", + "resolved": "github:NixOS/nixpkgs/2d068ae5c6516b2d04562de50a58c682540de9bf#go_1_24", "source": "devbox-search", - "version": "1.23.0", + "version": "1.24.0", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/4cijk6gwv59c84h1l9yhxzsaz93f67mz-go-1.23.0", + "path": "/nix/store/v495d2fb3ffi08ri6jffvhzr08p104pk-go-1.24.0", "default": true } ], - "store_path": "/nix/store/4cijk6gwv59c84h1l9yhxzsaz93f67mz-go-1.23.0" + "store_path": "/nix/store/v495d2fb3ffi08ri6jffvhzr08p104pk-go-1.24.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/mhwsyzk9v43q67ic34c02sxnsnbj7qbh-go-1.23.0", + "path": "/nix/store/fy5xhvha2ha7jcyqp73haqrpg8npw63b-go-1.24.0", "default": true } ], - "store_path": "/nix/store/mhwsyzk9v43q67ic34c02sxnsnbj7qbh-go-1.23.0" + "store_path": "/nix/store/fy5xhvha2ha7jcyqp73haqrpg8npw63b-go-1.24.0" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/vbcqda38ha9gqsbwjw4q0swpwlvmnb1i-go-1.23.0", + "path": 
"/nix/store/gg6947k6wwxq7ld2f90i62fkcf3kdd55-go-1.24.0", "default": true } ], - "store_path": "/nix/store/vbcqda38ha9gqsbwjw4q0swpwlvmnb1i-go-1.23.0" + "store_path": "/nix/store/gg6947k6wwxq7ld2f90i62fkcf3kdd55-go-1.24.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/h5wkf711ql98c59n7yxa146jbjf9vrj5-go-1.23.0", + "path": "/nix/store/wk1vg9ksvmqwxhgj7cmvdv1g62v9kff0-go-1.24.0", "default": true } ], - "store_path": "/nix/store/h5wkf711ql98c59n7yxa146jbjf9vrj5-go-1.23.0" + "store_path": "/nix/store/wk1vg9ksvmqwxhgj7cmvdv1g62v9kff0-go-1.24.0" } } }, diff --git a/go.mod b/go.mod index 1541485a..0854d544 100644 --- a/go.mod +++ b/go.mod @@ -1,36 +1,36 @@ module github.com/k3s-io/cluster-api-k3s -go 1.23.0 +go 1.24.0 require ( - github.com/coredns/corefile-migration v1.0.26 + github.com/coredns/corefile-migration v1.0.28 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/google/uuid v1.6.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.3 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.38.0 github.com/pkg/errors v0.9.1 - go.etcd.io/etcd/api/v3 v3.5.20 - go.etcd.io/etcd/client/v3 v3.5.20 + go.etcd.io/etcd/api/v3 v3.5.22 + go.etcd.io/etcd/client/v3 v3.5.22 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - google.golang.org/grpc v1.67.3 - google.golang.org/protobuf v1.36.5 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/apiserver v0.32.3 - k8s.io/client-go v0.32.3 + google.golang.org/grpc v1.71.3 + google.golang.org/protobuf v1.36.6 + k8s.io/api v0.33.3 + k8s.io/apimachinery v0.33.3 + k8s.io/apiserver v0.33.3 + k8s.io/client-go v0.33.3 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/cluster-api v1.10.1 - sigs.k8s.io/cluster-api/test v1.10.1 - sigs.k8s.io/controller-runtime v0.20.4 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/cluster-api v1.11.2 + 
sigs.k8s.io/cluster-api/test v1.11.2 + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/yaml v1.6.0 ) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.1 // indirect dario.cat/mergo v1.0.1 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect @@ -51,7 +51,7 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -67,21 +67,21 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect - github.com/gorilla/websocket v1.5.3 // indirect - 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -104,9 +104,9 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect @@ -115,45 +115,47 @@ require ( github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/spf13/viper v1.20.0 // indirect + github.com/spf13/pflag v1.0.7 // indirect + github.com/spf13/viper v1.20.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - 
go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.22 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/net v0.40.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.26.0 // indirect - golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.33.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/crypto v0.40.0 // indirect + golang.org/x/net v0.42.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/term v0.33.0 // indirect + golang.org/x/text v0.27.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.34.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.3 // indirect - k8s.io/cluster-bootstrap v0.32.3 // indirect - k8s.io/component-base v0.32.3 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/apiextensions-apiserver v0.33.3 // indirect + k8s.io/cluster-bootstrap v0.33.3 // indirect + k8s.io/component-base v0.33.3 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kind v0.27.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kind v0.30.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 23a344bd..2604e026 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,7 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXy al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= @@ -46,6 +47,7 @@ github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY= 
github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= +github.com/coredns/corefile-migration v1.0.28/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -60,6 +62,7 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -88,6 +91,8 @@ github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXE github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -105,6 +110,7 
@@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -123,8 +129,11 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -142,6 +151,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -151,10 +161,13 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod 
h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -215,10 +228,13 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= +github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -234,10 +250,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -263,8 +283,11 @@ github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -288,42 +311,61 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/etcd/api/v3 v3.5.20 
h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= +go.etcd.io/etcd/api/v3 v3.5.22 h1:jRqZlcmndfKs1fO9I1Euqk3O5acEyBICyMKunxxhL94= +go.etcd.io/etcd/api/v3 v3.5.22/go.mod h1:/mQQOEMyP7nAjMKZTJSCtMmlOoNAG5s7IjKZGvMN9Yc= go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= +go.etcd.io/etcd/client/pkg/v3 v3.5.22 h1:dFVbXCohuH0JKo6X+IznK6aohJSWcU+SF8QZF5EsaOQ= +go.etcd.io/etcd/client/pkg/v3 v3.5.22/go.mod h1:cSURbmmXBOqyX839SdTbuO2zYtUjcZvKeQ7DEiYVOmU= go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= +go.etcd.io/etcd/client/v3 v3.5.22 h1:h+y9ocnclCPLPsoBoKwneZy7gy71VjvJ+8z4IPWxTn8= +go.etcd.io/etcd/client/v3 v3.5.22/go.mod h1:p4ROLdCiv8gwQtnozOILXkqIjjSI0MR6Ly5F9qpFg+8= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod 
h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -337,14 +379,20 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.40.0 
h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -363,15 +411,23 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -379,6 +435,7 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.33.0 
h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -387,10 +444,16 @@ gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0 gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/grpc v1.71.3 
h1:iEhneYTxOruJyZAxdAv8Y0iRZvsc5M6KoW7UA0/7jn0= +google.golang.org/grpc v1.71.3/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -399,6 +462,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -419,37 +484,66 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= +k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= 
+k8s.io/apiextensions-apiserver v0.33.3 h1:qmOcAHN6DjfD0v9kxL5udB27SRP6SG/MTopmge3MwEs= +k8s.io/apiextensions-apiserver v0.33.3/go.mod h1:oROuctgo27mUsyp9+Obahos6CWcMISSAPzQ77CAQGz8= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= +k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= +k8s.io/apiserver v0.33.3/go.mod h1:05632ifFEe6TxwjdAIrwINHWE2hLwyADFk5mBsQa15E= k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= +k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s= k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344= +k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= +k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/component-base v0.33.3 h1:mlAuyJqyPlKZM7FyaoM/LcunZaaY353RXiOd2+B5tGA= +k8s.io/component-base v0.33.3/go.mod h1:ktBVsBzkI3imDuxYXmVxZ2zxJnYTZ4HAsVj9iF09qp4= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi 
v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/cluster-api v1.10.1 h1:5vsLNgQ4SkPudJ1USK532B0SIdJxRsCNKt2DZtBf+ww= sigs.k8s.io/cluster-api v1.10.1/go.mod h1:aiPMrNPoaJc/GuJ4TCpWX8bVe11+iCJ4HI0f3c9QiJg= +sigs.k8s.io/cluster-api v1.11.2 h1:uAczaBavU5Y6aDgyoXWtq28k1kalpSZnVItwXHusw1c= +sigs.k8s.io/cluster-api v1.11.2/go.mod h1:C1gJVAjMXRG+M+djjGYNkoi5kBMhFnOUI9QqZDAtMms= sigs.k8s.io/cluster-api/test v1.10.1 h1:cSgbfROhT42+M1YQg6Wj5DQWNjRZtkFQGIDSu6JJjmk= sigs.k8s.io/cluster-api/test v1.10.1/go.mod h1:CYFajf4HhV5zhoiPl+m5Omk0a3WtnyXHtFVPqWIcUy4= +sigs.k8s.io/cluster-api/test v1.11.2/go.mod h1:COviHWIKTcip0VADeIh8Rm5bjqzyZ1LuzKBW1EqjJRc= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod 
h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= +sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=