diff --git a/apis/nodecore/v1alpha1/common.go b/apis/nodecore/v1alpha1/common.go index 4f54f321..8d74a975 100644 --- a/apis/nodecore/v1alpha1/common.go +++ b/apis/nodecore/v1alpha1/common.go @@ -78,10 +78,8 @@ type Configuration struct { // LiqoCredentials contains the credentials of a Liqo cluster to enstablish a peering. type LiqoCredentials struct { - ClusterID string `json:"clusterID"` - ClusterName string `json:"clusterName"` - Token string `json:"token"` - Endpoint string `json:"endpoint"` + ClusterID string `json:"liqoID"` + Kubeconfig string `json:"kubeconfig"` } // ParseConfiguration parses the configuration data into the correct type. diff --git a/apis/nodecore/v1alpha1/flavor_webhook.go b/apis/nodecore/v1alpha1/flavor_webhook.go index e3a7a4ca..7ea8b9e4 100644 --- a/apis/nodecore/v1alpha1/flavor_webhook.go +++ b/apis/nodecore/v1alpha1/flavor_webhook.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "context" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -28,34 +30,43 @@ var flavorlog = logf.Log.WithName("flavor-resource") // SetupWebhookWithManager setups the webhooks for the Flavor resource with the manager. func (r *Flavor) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&Flavor{}). + WithDefaulter(&Flavor{}). + WithValidator(&Flavor{}). Complete() } //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-nodecore-fluidos-eu-v1alpha1-flavor,mutating=true,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=flavors,verbs=create;update,versions=v1alpha1,name=mflavor.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &Flavor{} +var _ webhook.CustomDefaulter = &Flavor{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (r *Flavor) Default() { +func (r *Flavor) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + // Parse obj to Flavor + flavor := obj.(*Flavor) flavorlog.Info("DEFAULT WEBHOOK") - flavorlog.Info("default", "name", r.Name) + flavorlog.Info("default", "name", flavor.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-nodecore-fluidos-eu-v1alpha1-flavor,mutating=false,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=flavors,verbs=create;update,versions=v1alpha1,name=vflavor.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &Flavor{} +var _ webhook.CustomValidator = &Flavor{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
-func (r *Flavor) ValidateCreate() (admission.Warnings, error) { +func (r *Flavor) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + flavor := obj.(*Flavor) flavorlog.Info("VALIDATE CREATE WEBHOOK") - flavorlog.Info("validate create", "name", r.Name) + flavorlog.Info("validate create", "name", flavor.Name) // Validate creation of Flavor checking FlavorType->TypeIdenfier matches the struct inside the FlavorType->TypeData - typeIdenfier, _, err := ParseFlavorType(r) + typeIdenfier, _, err := ParseFlavorType(flavor) if err != nil { return nil, err } @@ -74,14 +85,16 @@ func (r *Flavor) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *Flavor) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *Flavor) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + flavor := newObj.(*Flavor) flavorlog.Info("VALIDATE UPDATE WEBHOOK") - flavorlog.Info("validate update", "name", r.Name) + flavorlog.Info("validate update", "name", flavor.Name) - flavorlog.Info("old", "old", old) + flavorlog.Info("old", "old", oldObj) // Validate creation of Flavor checking FlavorType->TypeIdenfier matches the struct inside the FlavorType->TypeData - typeIdenfier, _, err := ParseFlavorType(r) + typeIdenfier, _, err := ParseFlavorType(flavor) if err != nil { return nil, err } @@ -100,9 +113,11 @@ func (r *Flavor) ValidateUpdate(old runtime.Object) (admission.Warnings, error) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *Flavor) ValidateDelete() (admission.Warnings, error) { +func (r *Flavor) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + flavor := obj.(*Flavor) flavorlog.Info("VALIDATE DELETE WEBHOOK") - flavorlog.Info("validate delete", "name", r.Name) + flavorlog.Info("validate delete", "name", flavor.Name) // TODO(user): fill in your validation logic upon object deletion. return nil, nil diff --git a/apis/nodecore/v1alpha1/service_blueprint_webhook.go b/apis/nodecore/v1alpha1/service_blueprint_webhook.go index 0d5930e4..fc20ed77 100644 --- a/apis/nodecore/v1alpha1/service_blueprint_webhook.go +++ b/apis/nodecore/v1alpha1/service_blueprint_webhook.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "context" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -28,34 +30,42 @@ var serviceblueprintlog = logf.Log.WithName("serviceblueprint-resource") // SetupWebhookWithManager setups the webhooks for the ServiceBlueprint resource with the manager. func (r *ServiceBlueprint) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&ServiceBlueprint{}). + WithDefaulter(&ServiceBlueprint{}). + WithValidator(&ServiceBlueprint{}). Complete() } //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-nodecore-fluidos-eu-v1alpha1-serviceblueprint,mutating=true,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=serviceblueprints,verbs=create;update,versions=v1alpha1,name=mserviceblueprint.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &ServiceBlueprint{} +var _ webhook.CustomDefaulter = &ServiceBlueprint{} // Default implements webhook.Defaulter so a webhook will be registered for the type. 
-func (r *ServiceBlueprint) Default() { +func (r *ServiceBlueprint) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + serviceblueprint := obj.(*ServiceBlueprint) serviceblueprintlog.Info("DEFAULT WEBHOOK") - serviceblueprintlog.Info("default", "name", r.Name) + serviceblueprintlog.Info("default", "name", serviceblueprint.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-nodecore-fluidos-eu-v1alpha1-serviceblueprint,mutating=false,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=serviceblueprints,verbs=create;update,versions=v1alpha1,name=vserviceblueprint.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &ServiceBlueprint{} +var _ webhook.CustomValidator = &ServiceBlueprint{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *ServiceBlueprint) ValidateCreate() (admission.Warnings, error) { +func (r *ServiceBlueprint) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + serviceblueprint := obj.(*ServiceBlueprint) serviceblueprintlog.Info("VALIDATE CREATE WEBHOOK") - serviceblueprintlog.Info("validate create", "name", r.Name) + serviceblueprintlog.Info("validate create", "name", serviceblueprint.Name) // Validate ServiceBlueprint templates - manifests, err := ValidateAndExtractManifests(r.Spec.Templates) + manifests, err := ValidateAndExtractManifests(serviceblueprint.Spec.Templates) if err != nil { return nil, err } @@ -67,14 +77,16 @@ func (r *ServiceBlueprint) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *ServiceBlueprint) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *ServiceBlueprint) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + serviceblueprint := newObj.(*ServiceBlueprint) serviceblueprintlog.Info("VALIDATE UPDATE WEBHOOK") - serviceblueprintlog.Info("validate update", "name", r.Name) + serviceblueprintlog.Info("validate update", "name", serviceblueprint.Name) - serviceblueprintlog.Info("old", "old", old) + serviceblueprintlog.Info("old", "old", oldObj) // Validate ServiceBlueprint templates - manifests, err := ValidateAndExtractManifests(r.Spec.Templates) + manifests, err := ValidateAndExtractManifests(serviceblueprint.Spec.Templates) if err != nil { return nil, err } @@ -86,9 +98,11 @@ func (r *ServiceBlueprint) ValidateUpdate(old runtime.Object) (admission.Warning } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *ServiceBlueprint) ValidateDelete() (admission.Warnings, error) { +func (r *ServiceBlueprint) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + serviceblueprint := obj.(*ServiceBlueprint) serviceblueprintlog.Info("VALIDATE DELETE WEBHOOK") - serviceblueprintlog.Info("validate delete", "name", r.Name) + serviceblueprintlog.Info("validate delete", "name", serviceblueprint.Name) // TODO(user): fill in your validation logic upon object deletion. 
return nil, nil diff --git a/apis/nodecore/v1alpha1/solver_webhook.go b/apis/nodecore/v1alpha1/solver_webhook.go index 5ca436ae..597bacf1 100644 --- a/apis/nodecore/v1alpha1/solver_webhook.go +++ b/apis/nodecore/v1alpha1/solver_webhook.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "context" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -28,33 +30,41 @@ var solverlog = logf.Log.WithName("solver-resource") // SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *Solver) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&Solver{}). + WithDefaulter(&Solver{}). + WithValidator(&Solver{}). Complete() } //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-nodecore-fluidos-eu-v1alpha1-solver,mutating=true,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=solvers,verbs=create;update,versions=v1alpha1,name=msolver.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &Solver{} +var _ webhook.CustomDefaulter = &Solver{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (r *Solver) Default() { +func (r *Solver) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + solver := obj.(*Solver) solverlog.Info("DEFAULT WEBHOOK") - solverlog.Info("default", "name", r.Name) + solverlog.Info("default", "name", solver.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-nodecore-fluidos-eu-v1alpha1-solver,mutating=false,failurePolicy=fail,sideEffects=None,groups=nodecore.fluidos.eu,resources=solvers,verbs=create;update,versions=v1alpha1,name=vsolver.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &Solver{} +var _ webhook.CustomValidator = &Solver{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *Solver) ValidateCreate() (admission.Warnings, error) { +func (r *Solver) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + solver := obj.(*Solver) solverlog.Info("VALIDATE CREATE WEBHOOK") - solverlog.Info("validate create", "name", r.Name) + solverlog.Info("validate create", "name", solver.Name) - if err := validateSelector(r.Spec.Selector); err != nil { + if err := validateSelector(solver.Spec.Selector); err != nil { return nil, err } @@ -62,13 +72,15 @@ func (r *Solver) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
-func (r *Solver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *Solver) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + solver := newObj.(*Solver) solverlog.Info("VALIDATE UPDATE WEBHOOK") - solverlog.Info("validate update", "name", r.Name) + solverlog.Info("validate update", "name", solver.Name) - solverlog.Info("old", "old", old) + solverlog.Info("old", "old", oldObj) - if err := validateSelector(r.Spec.Selector); err != nil { + if err := validateSelector(solver.Spec.Selector); err != nil { return nil, err } @@ -76,9 +88,11 @@ func (r *Solver) ValidateUpdate(old runtime.Object) (admission.Warnings, error) } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *Solver) ValidateDelete() (admission.Warnings, error) { +func (r *Solver) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + solver := obj.(*Solver) solverlog.Info("VALIDATE DELETE WEBHOOK") - solverlog.Info("validate delete", "name", r.Name) + solverlog.Info("validate delete", "name", solver.Name) // TODO(user): fill in your validation logic upon object deletion. return nil, nil diff --git a/apis/reservation/v1alpha1/contract_webhook.go b/apis/reservation/v1alpha1/contract_webhook.go index 1aba310d..8153bd5d 100644 --- a/apis/reservation/v1alpha1/contract_webhook.go +++ b/apis/reservation/v1alpha1/contract_webhook.go @@ -15,6 +15,8 @@ package v1alpha1 import ( + "context" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -27,36 +29,44 @@ import ( // log is for logging in this package. var contractlog = logf.Log.WithName("contract-resource") -// SetupWebhookWithManager sets up and registers the webhook with the manager. +// SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *Contract) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&Contract{}). + WithDefaulter(&Contract{}). + WithValidator(&Contract{}). Complete() } //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-contract-fluidos-eu-v1alpha1-contract,mutating=true,failurePolicy=fail,sideEffects=None,groups=contract.fluidos.eu,resources=contracts,verbs=create;update,versions=v1alpha1,name=mcontract.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &Contract{} +var _ webhook.CustomDefaulter = &Contract{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (r *Contract) Default() { +func (r *Contract) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + contract := obj.(*Contract) contractlog.Info("CONTRACT DEFAULT WEBHOOK") - contractlog.Info("default", "name", r.Name) + contractlog.Info("default", "name", contract.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
//nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-contract-fluidos-eu-v1alpha1-contract,mutating=false,failurePolicy=fail,sideEffects=None,groups=contract.fluidos.eu,resources=contracts,verbs=create;update,versions=v1alpha1,name=vcontract.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &Contract{} +var _ webhook.CustomValidator = &Contract{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *Contract) ValidateCreate() (admission.Warnings, error) { +func (r *Contract) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + contract := obj.(*Contract) contractlog.Info("CONTRACT VALIDATE CREATE WEBHOOK") - contractlog.Info("validate create", "name", r.Name) + contractlog.Info("validate create", "name", contract.Name) - if err := validateConfiguration(r.Spec.Configuration, &r.Spec.Flavor); err != nil { + if err := validateConfiguration(contract.Spec.Configuration, &contract.Spec.Flavor); err != nil { return nil, err } @@ -64,13 +74,15 @@ func (r *Contract) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *Contract) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *Contract) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + contract := newObj.(*Contract) contractlog.Info("CONTRACT VALIDATE UPDATE WEBHOOK") - contractlog.Info("validate update", "name", r.Name) + contractlog.Info("validate update", "name", contract.Name) - contractlog.Info("old", "old", old) + contractlog.Info("old", "old", oldObj) - if err := validateConfiguration(r.Spec.Configuration, &r.Spec.Flavor); err != nil { + if err := validateConfiguration(contract.Spec.Configuration, &contract.Spec.Flavor); err != nil { return nil, err } @@ -78,9 +90,11 @@ func (r *Contract) ValidateUpdate(old runtime.Object) (admission.Warnings, error } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *Contract) ValidateDelete() (admission.Warnings, error) { +func (r *Contract) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + contract := obj.(*Contract) contractlog.Info("CONTRACT VALIDATE DELETE WEBHOOK") - contractlog.Info("validate delete", "name", r.Name) + contractlog.Info("validate delete", "name", contract.Name) // TODO(user): fill in your validation logic upon object deletion. return nil, nil diff --git a/apis/reservation/v1alpha1/reservation_webhook.go b/apis/reservation/v1alpha1/reservation_webhook.go index 9602af09..fce4f988 100644 --- a/apis/reservation/v1alpha1/reservation_webhook.go +++ b/apis/reservation/v1alpha1/reservation_webhook.go @@ -39,7 +39,9 @@ var ctxReservation context.Context // SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *Reservation) SetupWebhookWithManager(mgr ctrl.Manager) error { err := ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&Reservation{}). + WithDefaulter(&Reservation{}). + WithValidator(&Reservation{}). 
Complete() if err != nil { @@ -58,27 +60,33 @@ func (r *Reservation) SetupWebhookWithManager(mgr ctrl.Manager) error { //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-reservation-fluidos-eu-v1alpha1-reservation,mutating=true,failurePolicy=fail,sideEffects=None,groups=reservation.fluidos.eu,resources=reservations,verbs=create;update,versions=v1alpha1,name=mreservation.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &Reservation{} +var _ webhook.CustomDefaulter = &Reservation{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (r *Reservation) Default() { +func (r *Reservation) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + reservation := obj.(*Reservation) reservationlog.Info("RESERVATION DEFAULT WEBHOOK") - reservationlog.Info("default", "name", r.Name) + reservationlog.Info("default", "name", reservation.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-reservation-fluidos-eu-v1alpha1-reservation,mutating=false,failurePolicy=fail,sideEffects=None,groups=reservation.fluidos.eu,resources=reservations,verbs=create;update,versions=v1alpha1,name=vreservation.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &Reservation{} +var _ webhook.CustomValidator = &Reservation{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *Reservation) ValidateCreate() (admission.Warnings, error) { +func (r *Reservation) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + reservation := obj.(*Reservation) reservationlog.Info("RESERVATION VALIDATE CREATE WEBHOOK") - reservationlog.Info("validate create", "name", r.Name) + reservationlog.Info("validate create", "name", reservation.Name) // Validate the Reservation - if err := validateReservation(r); err != nil { + if err := validateReservation(reservation); err != nil { reservationlog.Error(err, "Error validating Reservation in create") return nil, err } @@ -87,14 +95,16 @@ func (r *Reservation) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *Reservation) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *Reservation) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + reservation := newObj.(*Reservation) reservationlog.Info("RESERVATION VALIDATE UPDATE WEBHOOK") - reservationlog.Info("validate update", "name", r.Name) + reservationlog.Info("validate update", "name", reservation.Name) - reservationlog.Info("old", "old", old) + reservationlog.Info("old", "old", oldObj) // Validate the Reservation - if err := validateReservation(r); err != nil { + if err := validateReservation(reservation); err != nil { reservationlog.Error(err, "Error validating Reservation in update") return nil, err } @@ -103,9 +113,11 @@ func (r *Reservation) ValidateUpdate(old runtime.Object) (admission.Warnings, er } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
-func (r *Reservation) ValidateDelete() (admission.Warnings, error) { +func (r *Reservation) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + reservation := obj.(*Reservation) reservationlog.Info("RESERVATION VALIDATE DELETE WEBHOOK") - reservationlog.Info("validate delete", "name", r.Name) + reservationlog.Info("validate delete", "name", reservation.Name) // TODO(user): fill in your validation logic upon object deletion. return nil, nil diff --git a/apis/reservation/v1alpha1/transaction_webhook.go b/apis/reservation/v1alpha1/transaction_webhook.go index 7d139892..e1e9d667 100644 --- a/apis/reservation/v1alpha1/transaction_webhook.go +++ b/apis/reservation/v1alpha1/transaction_webhook.go @@ -36,10 +36,12 @@ var k8sClientTransaction client.Client // Context. var ctxTransaction context.Context -// SetupWebhookWithManager sets up and registers the webhook with the manager. +// SetupWebhookWithManager sets up and registers the webhook with the manager. func (r *Transaction) SetupWebhookWithManager(mgr ctrl.Manager) error { err := ctrl.NewWebhookManagedBy(mgr). - For(r). + For(&Transaction{}). + WithDefaulter(&Transaction{}). + WithValidator(&Transaction{}). Complete() if err != nil { @@ -58,26 +60,32 @@ func (r *Transaction) SetupWebhookWithManager(mgr ctrl.Manager) error { //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/mutate-reservation-fluidos-eu-v1alpha1-transaction,mutating=true,failurePolicy=fail,sideEffects=None,groups=reservation.fluidos.eu,resources=transactions,verbs=create;update,versions=v1alpha1,name=mtransaction.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &Transaction{} +var _ webhook.CustomDefaulter = &Transaction{} // Default implements webhook.Defaulter so a webhook will be registered for the type. -func (r *Transaction) Default() { +func (r *Transaction) Default(ctx context.Context, obj runtime.Object) error { + _ = ctx + transaction := obj.(*Transaction) transactionlog.Info("TRANSACTION DEFAULT WEBHOOK") - transactionlog.Info("default", "name", r.Name) + transactionlog.Info("default", "name", transaction.Name) + + return nil } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. //nolint:lll // kubebuilder directives are too long, but they must be on the same line //+kubebuilder:webhook:path=/validate-reservation-fluidos-eu-v1alpha1-transaction,mutating=false,failurePolicy=fail,sideEffects=None,groups=reservation.fluidos.eu,resources=transactions,verbs=create;update,versions=v1alpha1,name=vtransaction.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &Transaction{} +var _ webhook.CustomValidator = &Transaction{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *Transaction) ValidateCreate() (admission.Warnings, error) { +func (r *Transaction) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + transaction := obj.(*Transaction) transactionlog.Info("TRANSACTION VALIDATE CREATE WEBHOOK") - transactionlog.Info("validate create", "name", r.Name) + transactionlog.Info("validate create", "name", transaction.Name) - if err := validateTransaction(r); err != nil { + if err := validateTransaction(transaction); err != nil { transactionlog.Error(err, "Error validating Transaction in update") return nil, err } @@ -86,13 +94,15 @@ func (r *Transaction) ValidateCreate() (admission.Warnings, error) { } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *Transaction) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (r *Transaction) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + _ = ctx + transaction := newObj.(*Transaction) transactionlog.Info("TRANSACTION VALIDATE UPDATE WEBHOOK") - transactionlog.Info("validate update", "name", r.Name) + transactionlog.Info("validate update", "name", transaction.Name) - transactionlog.Info("old", "name", old) + transactionlog.Info("old", "name", oldObj) - if err := validateTransaction(r); err != nil { + if err := validateTransaction(transaction); err != nil { transactionlog.Error(err, "Error validating Transaction in update") return nil, err } @@ -101,9 +111,11 @@ func (r *Transaction) ValidateUpdate(old runtime.Object) (admission.Warnings, er } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *Transaction) ValidateDelete() (admission.Warnings, error) { +func (r *Transaction) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + _ = ctx + transaction := obj.(*Transaction) transactionlog.Info("TRANSACTION VALIDATE DELETE WEBHOOK") - transactionlog.Info("validate delete", "name", r.Name) + transactionlog.Info("validate delete", "name", transaction.Name) // TODO(user): fill in your validation logic upon object deletion. 
return nil, nil diff --git a/build/common/Dockerfile b/build/common/Dockerfile index 11d28da0..401fea00 100644 --- a/build/common/Dockerfile +++ b/build/common/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21 as builder +FROM golang:1.23 as builder WORKDIR /tmp/builder COPY go.mod ./go.mod diff --git a/cmd/local-resource-manager/main.go b/cmd/local-resource-manager/main.go index 21ff641a..54946911 100644 --- a/cmd/local-resource-manager/main.go +++ b/cmd/local-resource-manager/main.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" @@ -97,8 +98,10 @@ func main() { } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, + Scheme: scheme, + Metrics: server.Options{ + BindAddress: metricsAddr, + }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, diff --git a/cmd/network-manager/main.go b/cmd/network-manager/main.go index 62bdbe0e..8a00899a 100644 --- a/cmd/network-manager/main.go +++ b/cmd/network-manager/main.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" networkv1alpha1 "github.com/fluidos-project/node/apis/network/v1alpha1" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" @@ -72,8 +73,10 @@ func main() { } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, + Scheme: scheme, + Metrics: server.Options{ + BindAddress: metricsAddr, + }, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "a0b0c1d1.fluidos.eu", diff --git a/cmd/rear-controller/main.go b/cmd/rear-controller/main.go index e5f10c31..1c23c546 100644 --- a/cmd/rear-controller/main.go +++ b/cmd/rear-controller/main.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" advertisementv1alpha1 "github.com/fluidos-project/node/apis/advertisement/v1alpha1" @@ -40,7 +41,6 @@ import ( contractmanager "github.com/fluidos-project/node/pkg/rear-controller/contract-manager" discoverymanager "github.com/fluidos-project/node/pkg/rear-controller/discovery-manager" gateway "github.com/fluidos-project/node/pkg/rear-controller/gateway" - "github.com/fluidos-project/node/pkg/rear-controller/grpc" "github.com/fluidos-project/node/pkg/utils/flags" ) @@ -64,7 +64,6 @@ func main() { var probeAddr string flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.StringVar(&flags.GRPCPort, "grpc-port", "2710", "Port of the HTTP server") flag.StringVar(&flags.HTTPPort, "http-port", "3004", "Port of the HTTP server") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") @@ -86,8 +85,10 @@ func main() { } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, + Scheme: scheme, + Metrics: server.Options{ + BindAddress: metricsAddr, + }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, @@ -168,8 +169,7 @@ func main() { os.Exit(1) } - gw := gateway.NewGateway(mgr.GetClient()) - grpcServer := grpc.NewGrpcServer(mgr.GetClient()) + gw := gateway.NewGateway(mgr.GetClient(), mgr.GetConfig()) if err = (&discoverymanager.DiscoveryReconciler{ Client: mgr.GetClient(), @@ -181,9 +181,10 @@ func main() { } if err = (&contractmanager.ReservationReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Gateway: gw, + Client: mgr.GetClient(), + RestConfig: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + Gateway: gw, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Reservation") os.Exit(1) @@ -242,12 +243,6 @@ func main() { os.Exit(1) } - // Start the REAR GRPC server - if err := mgr.Add(manager.RunnableFunc(grpcServer.Start)); err != nil { - klog.Errorf("Unable to set up Gateway GRPC server: %s", err) - os.Exit(1) - } - setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") diff --git a/cmd/rear-manager/main.go b/cmd/rear-manager/main.go index 2867a3a8..38cbf03c 100644 --- a/cmd/rear-manager/main.go +++ b/cmd/rear-manager/main.go @@ -19,10 +19,11 @@ import ( "flag" "os" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. 
- liqodiscovery "github.com/liqotech/liqo/apis/discovery/v1alpha1" - liqooffloading "github.com/liqotech/liqo/apis/offloading/v1alpha1" + liqoauthentication "github.com/liqotech/liqo/apis/authentication/v1beta1" + liqocore "github.com/liqotech/liqo/apis/core/v1beta1" + liqoipam "github.com/liqotech/liqo/apis/ipam/v1alpha1" + liqonetworking "github.com/liqotech/liqo/apis/networking/v1beta1" + liqooffloading "github.com/liqotech/liqo/apis/offloading/v1beta1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -31,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" advertisementv1alpha1 "github.com/fluidos-project/node/apis/advertisement/v1alpha1" @@ -49,8 +51,11 @@ func init() { utilruntime.Must(nodecorev1alpha1.AddToScheme(scheme)) utilruntime.Must(advertisementv1alpha1.AddToScheme(scheme)) utilruntime.Must(reservationv1alpha1.AddToScheme(scheme)) - utilruntime.Must(liqodiscovery.AddToScheme(scheme)) + utilruntime.Must(liqocore.AddToScheme(scheme)) utilruntime.Must(liqooffloading.AddToScheme(scheme)) + utilruntime.Must(liqoipam.AddToScheme(scheme)) + utilruntime.Must(liqonetworking.AddToScheme(scheme)) + utilruntime.Must(liqoauthentication.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -81,8 +86,10 @@ func main() { } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, + Scheme: scheme, + Metrics: server.Options{ + BindAddress: metricsAddr, + }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, @@ -115,9 +122,10 @@ func main() { } if err = (&rearmanager.AllocationReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Manager: mgr, + Client: mgr.GetClient(), + RestConfig: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + Manager: mgr, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Allocation") os.Exit(1) diff --git a/deployments/node/crds/reservation.fluidos.eu_contracts.yaml b/deployments/node/crds/reservation.fluidos.eu_contracts.yaml index f2381bab..545c98e0 100644 --- a/deployments/node/crds/reservation.fluidos.eu_contracts.yaml +++ b/deployments/node/crds/reservation.fluidos.eu_contracts.yaml @@ -289,19 +289,13 @@ spec: description: This credentials will be used by the customer to connect and enstablish a peering with the seller FLUIDOS Node through Liqo. properties: - clusterID: + kubeconfig: type: string - clusterName: - type: string - endpoint: - type: string - token: + liqoID: type: string required: - - clusterID - - clusterName - - endpoint - - token + - kubeconfig + - liqoID type: object seller: description: This is the Node identity of the seller FLUIDOS Node. 
diff --git a/deployments/node/files/node-rear-controller-ClusterRole.yaml b/deployments/node/files/node-rear-controller-ClusterRole.yaml index 7584d016..6984767e 100644 --- a/deployments/node/files/node-rear-controller-ClusterRole.yaml +++ b/deployments/node/files/node-rear-controller-ClusterRole.yaml @@ -51,13 +51,29 @@ rules: - get - patch - update +- apiGroups: + - authentication.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: - '*' verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - "" @@ -67,6 +83,30 @@ rules: - get - list - watch +- apiGroups: + - core.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ipam.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - network.fluidos.eu resources: @@ -75,6 +115,18 @@ rules: - get - list - watch +- apiGroups: + - networking.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - nodecore.fluidos.eu resources: @@ -127,6 +179,44 @@ rules: - get - patch - update +- apiGroups: + - offloading.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - bind + - create + - delete + - escalate + - get + - list + - patch + - update + - watch - apiGroups: - reservation.fluidos.eu resources: diff --git a/deployments/node/files/node-rear-manager-ClusterRole.yaml b/deployments/node/files/node-rear-manager-ClusterRole.yaml index 883f4da7..0ed48363 100644 --- a/deployments/node/files/node-rear-manager-ClusterRole.yaml +++ b/deployments/node/files/node-rear-manager-ClusterRole.yaml @@ -181,6 +181,18 @@ rules: - patch - update - watch +- apiGroups: + - authentication.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - batch resources: @@ -231,6 +243,38 @@ rules: - patch - update - watch +- apiGroups: + - core.liqo.io + resources: + - foreignclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - core.liqo.io + resources: + - foreignclusters/finalizers + verbs: + - get + - patch + - update +- apiGroups: + - core.liqo.io + resources: + - foreignclusters/status + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - discovery.liqo.io resources: @@ -263,6 +307,18 @@ rules: - patch - update - watch +- apiGroups: + - ipam.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - networking.k8s.io resources: @@ -276,6 +332,18 @@ rules: - patch - update - watch +- apiGroups: + - networking.liqo.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - nodecore.fluidos.eu resources: @@ -389,11 +457,15 @@ rules: - apiGroups: - offloading.liqo.io resources: - - namespaceoffloadings/status + - virtualnodes verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - reservation.fluidos.eu resources: diff --git 
a/deployments/node/templates/fluidos-post-install-hook.yaml b/deployments/node/templates/fluidos-post-install-hook.yaml deleted file mode 100644 index 997b71e0..00000000 --- a/deployments/node/templates/fluidos-post-install-hook.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: configure-liqo - namespace: {{ .Release.Namespace }} - labels: - app: liqo - annotations: - "helm.sh/hook": post-install - "helm.sh/hook-delete-policy": hook-succeeded -spec: - template: - spec: - serviceAccountName: helm-service-account - hostNetwork: true - containers: - - name: configure-liqo - image: dtzar/helm-kubectl:latest - command: ["/bin/sh", "-c", "cp /scripts/configure-liqo.sh /tmp/configure-liqo.sh && chmod +x /tmp/configure-liqo.sh && /tmp/configure-liqo.sh"] - volumeMounts: - - name: script - mountPath: /scripts - - name: tmp-scripts - mountPath: /tmp - restartPolicy: OnFailure - volumes: - - name: script - configMap: - name: configure-liqo-script - - name: tmp-scripts - emptyDir: {} \ No newline at end of file diff --git a/deployments/node/templates/fluidos-rear-controller-deployment.yaml b/deployments/node/templates/fluidos-rear-controller-deployment.yaml index c8c497ad..94badc76 100644 --- a/deployments/node/templates/fluidos-rear-controller-deployment.yaml +++ b/deployments/node/templates/fluidos-rear-controller-deployment.yaml @@ -46,7 +46,6 @@ spec: name: {{ $rearControllerConfig.name }} command: ["/usr/bin/rear-controller"] args: - - --grpc-port={{ .Values.rearController.service.grpc.port }} - --http-port={{ .Values.rearController.service.gateway.port }} resources: {{- toYaml .Values.rearController.pod.resources | nindent 10 }} ports: diff --git a/deployments/node/templates/fuidos-configure-liqo-script-cm.yaml b/deployments/node/templates/fuidos-configure-liqo-script-cm.yaml deleted file mode 100644 index e6e737fe..00000000 --- a/deployments/node/templates/fuidos-configure-liqo-script-cm.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: configure-liqo-script - labels: - app: liqo -data: - configure-liqo.sh: | - #!/bin/sh - kubectl patch deployment liqo-controller-manager -n liqo --type='json' -p='[ - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--enable-resource-enforcement" - }, - { - "op": "add", - "path": "/spec/template/spec/containers/0/args/-", - "value": "--resource-plugin-address=node-rear-controller-grpc.fluidos:2710" - } - ]' \ No newline at end of file diff --git a/go.mod b/go.mod index bbf2b748..dcc853f4 100644 --- a/go.mod +++ b/go.mod @@ -1,95 +1,92 @@ module github.com/fluidos-project/node //node -go 1.21.0 +go 1.23.3 require ( - github.com/gorilla/mux v1.8.0 - github.com/liqotech/liqo v0.9.4 - google.golang.org/grpc v1.59.0-dev - k8s.io/api v0.28.2 - k8s.io/apimachinery v0.28.2 - k8s.io/client-go v0.28.2 - k8s.io/klog/v2 v2.100.1 - k8s.io/metrics v0.28.2 - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 - sigs.k8s.io/controller-runtime v0.15.1 + github.com/gorilla/mux v1.8.1 + github.com/liqotech/liqo v1.0.0-rc.3 + k8s.io/api v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/client-go v0.32.1 + k8s.io/klog/v2 v2.130.1 + k8s.io/metrics v0.32.1 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + sigs.k8s.io/controller-runtime v0.20.0 ) require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/huandu/xstrings v1.4.0 // indirect + github.com/aws/aws-sdk-go v1.54.6 // indirect + github.com/fxamacker/cbor/v2 
v2.7.0 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + golang.org/x/sync v0.10.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + sigs.k8s.io/aws-iam-authenticator v0.6.27 // indirect ) require ( github.com/Masterminds/sprig v2.22.0+incompatible - github.com/aws/aws-sdk-go v1.44.213 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/zapr v1.2.4 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.1 // indirect - github.com/imdario/mergo v0.3.15 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + 
github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.9.0 // indirect - github.com/virtual-kubelet/virtual-kubelet v1.10.0 // indirect github.com/xeipuuv/gojsonschema v1.2.0 - go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20221114191408-850992195362 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.19.0 // indirect - golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.28.2 // indirect - k8s.io/component-base v0.28.2 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/kubectl v0.28.2 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/apiextensions-apiserver v0.32.0 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index e34ffa15..677cf86a 100644 --- a/go.sum +++ b/go.sum @@ -4,69 +4,68 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/aws/aws-sdk-go v1.44.213 h1:WahquyWs7cQdz0vpDVWyWETEemgSoORx0PbWL9oz2WA= -github.com/aws/aws-sdk-go v1.44.213/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/aws/aws-sdk-go v1.54.6 h1:HEYUib3yTt8E6vxjMWM3yAq5b+qjj/6aKA62mkgux9g= +github.com/aws/aws-sdk-go v1.54.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk= +github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= 
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz 
v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -77,26 +76,26 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text 
v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/liqotech/liqo v0.9.4 h1:LwKDSO/0E5uCXd6vfxoEQ59bFVJ1jb1JfR1AEB8jjK8= -github.com/liqotech/liqo v0.9.4/go.mod h1:SYbULfgspXacnmyIY7PLbYtHsUnXozRbCvPVGf65N/w= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liqotech/liqo v1.0.0-rc.3 h1:1tagrX3RUiJM8iVU8RHEdHV0aBz8/efI439UdfbnSoI= +github.com/liqotech/liqo v1.0.0-rc.3/go.mod h1:7+XC+3HPFBTKoCLqRU6zspiE9WijLHAR00f2fLTkW0Y= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -104,39 +103,42 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= -github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= 
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.0 h1:q4oq0KX1vPbvXGUwEXN4D3mFzvQR/WmWK4lfIj0K5oI= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.67.0/go.mod h1:KZHvrby65G+rA4V/vMTUXDV22TI+GgLIrCigYClpjzk= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/virtual-kubelet/virtual-kubelet v1.10.0 h1:eV/mFFqThOJLz7Gjn1Ev8LchanGKGA2qZlsW6wipb4g= -github.com/virtual-kubelet/virtual-kubelet v1.10.0/go.mod h1:7Pvdei1p82C9uWS1VzLrnXbHTwQcGBoqShahChpacgI= +github.com/virtual-kubelet/virtual-kubelet v1.11.0 h1:LOMcZQfP083xmYH9mYtyHAR+ybFbK1uMaRA+EtDcd1I= +github.com/virtual-kubelet/virtual-kubelet v1.11.0/go.mod h1:WQfPHbIlzfhMNYkh6hFXF1ctGfNM8UJCYLYpLa/trxc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -146,112 +148,67 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20221114191408-850992195362 h1:NoHlPRbyl1VFI6FjwHtPQCN7wAMXI6cKcqrmXhOOfBQ= -golang.org/x/exp v0.0.0-20221114191408-850992195362/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b h1:9JncmKXcUwE918my+H6xmjBdhK2jM/UTUNXxhRG1BAk= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b/go.mod h1:yp4gl6zOlnDGOZeWeDfMwQcsdOIQnMdhuPx9mwwWBL4= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v1.59.0-dev h1:kGGaLrcUHIrrkQ82zIKBy3ERlPAO3DpVN3gbA8NShgg= -google.golang.org/grpc v1.59.0-dev/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 
h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -260,31 +217,29 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= -k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= -k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= -k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= -k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= -k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= -k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= -k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= -k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= -k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/kubectl v0.28.2 h1:fOWOtU6S0smdNjG1PB9WFbqEIMlkzU5ahyHkc7ESHgM= -k8s.io/kubectl v0.28.2/go.mod h1:6EQWTPySF1fn7yKoQZHYf9TPwIl2AygHEcJoxFekr64= -k8s.io/metrics v0.28.2 h1:Z/oMk5SmiT/Ji1SaWOPfW2l9W831BLO9/XxDq9iS3ak= -k8s.io/metrics v0.28.2/go.mod h1:QTIIdjMrq+KodO+rmp6R9Pr1LZO8kTArNtkWoQXw0sw= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= -sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= 
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/metrics v0.32.1 h1:Ou4nrEtZS2vFf7OJCf9z3+2kr0A00kQzfoSwxg0gXps= +k8s.io/metrics v0.32.1/go.mod h1:cLnai9XKYby1tNMX+xe8p9VLzTqrxYPcmqfCBoWObcM= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/aws-iam-authenticator v0.6.27 h1:uzSwFYh+hrrbpv7goZ+2FN/2oCQddiKpb8l5vBbY1i4= +sigs.k8s.io/aws-iam-authenticator v0.6.27/go.mod h1:8CAmUtqsLmv5QvnhXQ2+byy1EL+TCDyyYTGFXDyt0sk= +sigs.k8s.io/controller-runtime v0.20.0 h1:jjkMo29xEXH+02Md9qaVXfEIaMESSpy3TBWPrsfQkQs= +sigs.k8s.io/controller-runtime v0.20.0/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/local-resource-manager/node_controller.go b/pkg/local-resource-manager/node_controller.go index 679c376a..cf3342a8 100644 --- a/pkg/local-resource-manager/node_controller.go +++ b/pkg/local-resource-manager/node_controller.go @@ -16,6 +16,7 @@ package localresourcemanager import ( "context" + "fmt" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -165,6 +166,12 @@ func (r *NodeReconciler) createFlavor(ctx context.Context, nodeInfo *models.Node // Forge the Flavor from the NodeInfo and NodeIdentity flavorResult := resourceforge.ForgeK8SliceFlavorFromMetrics(nodeInfo, nodeIdentity, ownerReferences) + if flavorResult == nil { + klog.Error("Error forging Flavor") + return nil, fmt.Errorf("error forging Flavor, Flavor is nil") + } + klog.Infof("Ready to create Flavor %s of type %s", flavorResult.Name, flavorResult.Spec.FlavorType.TypeIdentifier) + // Create the Flavor err = r.Create(ctx, flavorResult) if err != nil { diff --git a/pkg/rear-controller/contract-manager/reservation_controller.go b/pkg/rear-controller/contract-manager/reservation_controller.go index 692c4de7..c1ec1a25 100644 --- a/pkg/rear-controller/contract-manager/reservation_controller.go +++ b/pkg/rear-controller/contract-manager/reservation_controller.go @@ -19,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,11 +37,11 @@ import ( // ReservationReconciler reconciles a Reservation object. 
type ReservationReconciler struct { client.Client - Scheme *runtime.Scheme - Gateway *gateway.Gateway + RestConfig *rest.Config + Scheme *runtime.Scheme + Gateway *gateway.Gateway } -// clusterRole //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/status,verbs=get;update;patch //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=reservations/finalizers,verbs=update @@ -50,7 +51,17 @@ type ReservationReconciler struct { //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=transactions,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=transactions/status,verbs=get;update;patch //+kubebuilder:rbac:groups=reservation.fluidos.eu,resources=transactions/finalizers,verbs=update -//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch + +//+kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch;create;update;patch;delete + +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;update;patch;delete;bind;escalate +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;patch;delete + +//+kubebuilder:rbac:groups=authentication.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=networking.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=offloading.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=ipam.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -316,7 +327,7 @@ func (r *ReservationReconciler) handlePurchase(ctx context.Context, case nodecorev1alpha1.TypeK8Slice: contract, err = r.Gateway.PurchaseFlavor(ctx, transactionID, reservation.Spec.Seller, nil) case nodecorev1alpha1.TypeService: - liqoCredentials, err = getters.GetLiqoCredentials(context.Background(), r.Client) + liqoCredentials, err = getters.GetLiqoCredentials(context.Background(), r.Client, r.RestConfig) if err != nil { klog.Errorf("Error getting Liqo Credentials: %s", err) return ctrl.Result{}, err diff --git a/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go b/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go deleted file mode 100644 index f532d575..00000000 --- a/pkg/rear-controller/discovery-manager/peeringcandidate_wh.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022-2024 FLUIDOS Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// TODO: Transport this file to the apis section following the kubebuilder conventions - -package discoverymanager - -import ( - "context" - "fmt" - "net/http" - - admissionv1 "k8s.io/api/admission/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - - advertisementv1alpha1 "github.com/fluidos-project/node/apis/advertisement/v1alpha1" -) - -//nolint:lll // This is a long line -// clusterRole -//+kubebuilder:webhook:path=/validate/peeringcandidate,mutating=false,failurePolicy=ignore,groups=advertisement.node.fluidos.io,resources=peeringcandidates,verbs=create;update;delete,versions=v1alpha1,name=pc.validate.fluidos.eu,sideEffects=None,admissionReviewVersions={v1,v1beta1} - -// PCValidator is the PeerinCandidate validator. -type PCValidator struct { - client client.Client - decoder *admission.Decoder -} - -// NewPCValidator creates a new PCValidator. -func NewPCValidator(c client.Client) *PCValidator { - return &PCValidator{client: c, decoder: admission.NewDecoder(runtime.NewScheme())} -} - -// Handle manages the validation of the PeeringCandidate. -// -//nolint:gocritic // This function cannot be changed -func (v *PCValidator) Handle(ctx context.Context, req admission.Request) admission.Response { - switch req.Operation { - case admissionv1.Create: - return v.HandleCreate(ctx, req) - case admissionv1.Delete: - return v.HandleDelete(ctx, req) - case admissionv1.Update: - return v.HandleUpdate(ctx, req) - default: - return admission.Errored(http.StatusBadRequest, fmt.Errorf("unsupported operation %q", req.Operation)) - } -} - -// HandleCreate manages the validation of the PeeringCandidate creation. -// -//nolint:gocritic // This function cannot be changed -func (v *PCValidator) HandleCreate(_ context.Context, req admission.Request) admission.Response { - pc, err := v.DecodePeeringCandidate(req.Object) - if err != nil { - klog.Errorf("Failed to decode peering candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %w", err)) - } - - if pc.Spec.Available && len(pc.Spec.InterestedSolverIDs) == 0 { - return admission.Denied("Can't create a peering candidate wihout a triggering solver") - } - - if !pc.Spec.Available { - return admission.Denied("Can't create a peering candidate with Available flag set to false") - } - - return admission.Allowed("") -} - -// HandleDelete manages the validation of the PeeringCandidate deletion. -// -//nolint:gocritic // This function cannot be changed -func (v *PCValidator) HandleDelete(_ context.Context, req admission.Request) admission.Response { - // Here we could check if the peering candidate is reserved and if so,we need to check if the solver ID - // matches the one of the solver that is deleting the peering candidate - // or if the solver ID is empty, we need to check if there is a Contract that is using this peering candidate - // Maybe this is not the right logic but it need to be discussed and implemented - _ = req - return admission.Allowed("") -} - -// HandleUpdate manages the validation of the PeeringCandidate update. 
-// -//nolint:gocritic // This function cannot be changed -func (v *PCValidator) HandleUpdate(_ context.Context, req admission.Request) admission.Response { - pc, err := v.DecodePeeringCandidate(req.Object) - if err != nil { - klog.Errorf("Failed to decode peering candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering candidate: %w", err)) - } - - pcOld, err := v.DecodePeeringCandidate(req.OldObject) - if err != nil { - klog.Errorf("Failed to decode peering old candidate: %v", err) - return admission.Errored(http.StatusBadRequest, fmt.Errorf("failed to decode peering old candidate: %w", err)) - } - - if !pcOld.Spec.Available && !pc.Spec.Available { - return admission.Denied("Peering candidate can be updated if Available flag is changed from false to true") - } - - //nolint:lll // This is a long line - return admission.Allowed("") -} - -// DecodePeeringCandidate decodes the PeeringCandidate. -func (v *PCValidator) DecodePeeringCandidate(obj runtime.RawExtension) (pc *advertisementv1alpha1.PeeringCandidate, err error) { - pc = &advertisementv1alpha1.PeeringCandidate{} - err = v.decoder.DecodeRaw(obj, pc) - return -} diff --git a/pkg/rear-controller/gateway/client.go b/pkg/rear-controller/gateway/client.go index 027d9be2..e65f1d83 100644 --- a/pkg/rear-controller/gateway/client.go +++ b/pkg/rear-controller/gateway/client.go @@ -40,7 +40,7 @@ func (g *Gateway) ReserveFlavor(ctx context.Context, return nil, err } - liqoCredentials, err := getters.GetLiqoCredentials(ctx, g.client) + liqoCredentials, err := getters.GetLiqoCredentials(ctx, g.client, g.restConfig) if err != nil { klog.Errorf("Error when getting Liqo credentials: %s", err) return nil, err @@ -57,7 +57,8 @@ func (g *Gateway) ReserveFlavor(ctx context.Context, IP: g.ID.IP, Domain: g.ID.Domain, AdditionalInformation: &models.NodeIdentityAdditionalInfo{ - LiqoID: liqoCredentials.ClusterID, + LiqoID: liqoCredentials.ClusterID, + Kubeconfig: liqoCredentials.Kubeconfig, }, }, Configuration: func() *models.Configuration { diff --git a/pkg/rear-controller/gateway/gateway.go b/pkg/rear-controller/gateway/gateway.go index 14555029..1a76143c 100644 --- a/pkg/rear-controller/gateway/gateway.go +++ b/pkg/rear-controller/gateway/gateway.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -56,6 +57,9 @@ type Gateway struct { // client is the Kubernetes client client client.Client + // restConfig is the Kubernetes REST configuration + restConfig *rest.Config + // Readyness of the Gateway. It is set when liqo is installed LiqoReady bool @@ -64,9 +68,10 @@ type Gateway struct { } // NewGateway creates a new Gateway object. 
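Not part of the patch itself: both the Gateway and the ReservationReconciler now require a *rest.Config alongside the client, but the caller-side wiring is not shown in this excerpt. The following is a minimal sketch of how the new dependency might be threaded through a standard controller-runtime manager; the import paths and the SetupWithManager method are assumptions inferred from the repository layout and usual kubebuilder conventions, not from this diff.

package main // illustrative wiring sketch only; not the project's actual main.go

import (
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"

	contractmanager "github.com/fluidos-project/node/pkg/rear-controller/contract-manager" // path assumed
	"github.com/fluidos-project/node/pkg/rear-controller/gateway"                          // path assumed
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		klog.Fatalf("unable to create manager: %v", err)
	}

	// The manager's *rest.Config is now needed by both the gateway and the reconcilers.
	cfg := mgr.GetConfig()
	gw := gateway.NewGateway(mgr.GetClient(), cfg)

	if err := (&contractmanager.ReservationReconciler{
		Client:     mgr.GetClient(),
		RestConfig: cfg,
		Scheme:     mgr.GetScheme(),
		Gateway:    gw,
	}).SetupWithManager(mgr); err != nil { // SetupWithManager assumed per kubebuilder convention
		klog.Fatalf("unable to create Reservation controller: %v", err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		klog.Fatalf("manager exited: %v", err)
	}
}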
-func NewGateway(c client.Client) *Gateway { +func NewGateway(c client.Client, restConfig *rest.Config) *Gateway { return &Gateway{ client: c, + restConfig: restConfig, Transactions: make(map[string]*models.Transaction), LiqoReady: false, ClusterID: "", @@ -178,7 +183,7 @@ func (g *Gateway) checkLiqoReadiness(ctx context.Context) (bool, error) { return false, nil } - if cm.Data["CLUSTER_ID"] != "" && cm.Data["CLUSTER_NAME"] != "" { + if cm.Data["CLUSTER_ID"] != "" { klog.Infof("Liqo is ready") g.LiqoReady = true g.ClusterID = cm.Data["CLUSTER_ID"] diff --git a/pkg/rear-controller/gateway/provider.go b/pkg/rear-controller/gateway/provider.go index c9745890..4d9c508d 100644 --- a/pkg/rear-controller/gateway/provider.go +++ b/pkg/rear-controller/gateway/provider.go @@ -365,7 +365,7 @@ func (g *Gateway) reserveFlavor(w http.ResponseWriter, r *http.Request) { } if !supported { - klog.Errorf("Hosting policy %s not supported by the flavor", serviceConfiguration.HostingPolicy) + klog.Errorf("Hosting policy %v not supported by the flavor", serviceConfiguration.HostingPolicy) http.Error(w, "Hosting policy not supported by the flavor", http.StatusBadRequest) return } @@ -494,7 +494,7 @@ func (g *Gateway) purchaseFlavor(w http.ResponseWriter, r *http.Request) { switch flavorSold.Spec.FlavorType.TypeIdentifier { case nodecorev1alpha1.TypeK8Slice: // Create a new Liqo credentials for the K8Slice flavor based on the ones provided by the provider - liqoCredentials, err = getters.GetLiqoCredentials(context.Background(), g.client) + liqoCredentials, err = getters.GetLiqoCredentials(context.Background(), g.client, g.restConfig) if err != nil { klog.Errorf("Error getting Liqo Credentials: %s", err) http.Error(w, "Error getting Liqo Credentials", http.StatusInternalServerError) @@ -513,7 +513,7 @@ func (g *Gateway) purchaseFlavor(w http.ResponseWriter, r *http.Request) { return } // Override the Liqo credentials with the ones sent by the client - liqoCredentials, err = resourceforge.ForgeLiqoCredentialsFromObj(purchase.LiqoCredentials) + liqoCredentials, err = getters.GetLiqoCredentials(context.Background(), g.client, g.restConfig) if err != nil { klog.Errorf("Error forging the Liqo credentials: %s", err) http.Error(w, "Error forging the Liqo credentials", http.StatusInternalServerError) @@ -531,7 +531,7 @@ func (g *Gateway) purchaseFlavor(w http.ResponseWriter, r *http.Request) { } // Obtaining the Seller Liqo Cluster ID - sellerLiqoCredentials, err := getters.GetLiqoCredentials(context.Background(), g.client) + sellerLiqoCredentials, err := getters.GetLiqoCredentials(context.Background(), g.client, g.restConfig) if err != nil { klog.Errorf("Error getting Liqo Credentials: %s", err) http.Error(w, "Error getting Liqo Credentials", http.StatusInternalServerError) diff --git a/pkg/rear-controller/grpc/doc.go b/pkg/rear-controller/grpc/doc.go deleted file mode 100644 index 8b3f7d20..00000000 --- a/pkg/rear-controller/grpc/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2022-2024 FLUIDOS Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package grpc provides the gRPC server for Liqo Controller Manager. -package grpc diff --git a/pkg/rear-controller/grpc/liqo-resource-manager.go b/pkg/rear-controller/grpc/liqo-resource-manager.go deleted file mode 100644 index 6e47f581..00000000 --- a/pkg/rear-controller/grpc/liqo-resource-manager.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2022-2024 FLUIDOS Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpc - -import ( - context "context" - "fmt" - "log" - "net" - "sync" - - resourcemonitors "github.com/liqotech/liqo/pkg/liqo-controller-manager/resource-request-controller/resource-monitors" - grpc "google.golang.org/grpc" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/fluidos-project/node/pkg/utils/flags" -) - -// Server is the object that contains all the logical data stractures of the REAR gRPC Server. -type Server struct { - Server *grpc.Server - client client.Client - resourcemonitors.ResourceReaderServer - subscribers sync.Map -} - -// NewGrpcServer creates a new gRPC server. -func NewGrpcServer(cl client.Client) *Server { - return &Server{ - Server: grpc.NewServer(), - client: cl, - } -} - -// Start starts the gRPC server. -func (s *Server) Start(ctx context.Context) error { - _ = ctx - // server setup. The stream is not initialized here because it needs a subscriber so - // it will be initialized in the Subscribe method below - - // gRPC Configuration - klog.Info("Configuring gRPC Server") - grpcURL := ":" + flags.GRPCPort - lis, err := net.Listen("tcp", grpcURL) - if err != nil { - klog.Infof("gRPC failed to listen: %v", err) - return fmt.Errorf("gRPC failed to listen: %w", err) - } - - // register this server using the register interface defined in liqo - resourcemonitors.RegisterResourceReaderServer(s.Server, s) - klog.Infof("gRPC Server Listening on %s", grpcURL) - // gRPC Server start listener - if err := s.Server.Serve(lis); err != nil { - return fmt.Errorf("gRPC server failed to serve: %w", err) - } - - return nil -} - -// ReadResources is the method that returns the resources assigned to a specific ClusterID. -func (s *Server) ReadResources(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.PoolResourceList, error) { - _ = ctx - klog.Infof("Reading resources for cluster %s", req.ClusterID) - resources, err := s.GetOfferResourcesByClusterID(req.ClusterID) - if err != nil { - // TODO: maybe should be returned an empty resource list - return nil, err - } - - log.Printf("Retrieved resources for clusterID %s: %v", req.ClusterID, resources) - - resourceList := []*resourcemonitors.ResourceList{{Resources: resources}} - response := resourcemonitors.PoolResourceList{ResourceLists: resourceList} - - return &response, nil -} - -// Subscribe is the method that subscribes a the Liqo controller manager to the gRPC server. 
-func (s *Server) Subscribe(req *resourcemonitors.Empty, srv resourcemonitors.ResourceReader_SubscribeServer) error { - klog.Info("Liqo controller manager subscribed to the gRPC server") - - // Store the stream. Using req as key since each request will have a different req object. - s.subscribers.Store(req, srv) - ctx := srv.Context() - - // This notification is useful since you can edit the resources declared in the deployment and apply it to the cluster when one or more - // foreign clusters are already peered so this broadcast notification will update the resources for those clusters. - err := s.NotifyChange(context.Background(), &resourcemonitors.ClusterIdentity{ClusterID: resourcemonitors.AllClusterIDs}) - if err != nil { - klog.Infof("Error during sending notification to liqo: %s", err) - } - - for { - <-ctx.Done() - s.subscribers.Delete(req) - klog.Infof("Liqo controller manager disconnected") - return nil - } -} - -// NotifyChange is the method that notifies a change to the Liqo controller manager. -func (s *Server) NotifyChange(ctx context.Context, req *resourcemonitors.ClusterIdentity) error { - _ = ctx - klog.Infof("Notifying change to Liqo controller manager for cluster %s", req.ClusterID) - var err error - s.subscribers.Range(func(key, value interface{}) bool { - stream := value.(resourcemonitors.ResourceReader_SubscribeServer) - - klog.Infof("Key: %v, Value: %v", key, value) - - err = stream.Send(req) - if err != nil { - err = fmt.Errorf("error: error during sending a notification %w", err) - } - return true - }) - if err != nil { - klog.Infof("%s", err) - return err - } - klog.Infof("Notification sent to Liqo controller manager for cluster %s", req.ClusterID) - return nil -} - -// RemoveCluster is the method that removes a cluster from the gRPC server. -func (s *Server) RemoveCluster(ctx context.Context, req *resourcemonitors.ClusterIdentity) (*resourcemonitors.Empty, error) { - _ = ctx - klog.Infof("Removing cluster %s", req.ClusterID) - klog.Info("Method RemoveCluster not implemented yet") - // Implement here your logic - return &resourcemonitors.Empty{}, nil -} - -// GetOfferResourcesByClusterID is the method that returns the resources assigned to a specific ClusterID. -func (s *Server) GetOfferResourcesByClusterID(clusterID string) (map[string]*resource.Quantity, error) { - log.Printf("Getting resources for cluster ID: %s", clusterID) - resources, err := getContractResourcesByClusterID(s.client, clusterID) - if err != nil { - return nil, err - } - return resources, nil -} - -// UpdatePeeringOffer is the method that updates the peering offer. 
-func (s *Server) UpdatePeeringOffer(clusterID string) { - _ = s.NotifyChange(context.Background(), &resourcemonitors.ClusterIdentity{ClusterID: clusterID}) -} diff --git a/pkg/rear-manager/allocation_controller.go b/pkg/rear-manager/allocation_controller.go index c67083e2..5a366aff 100644 --- a/pkg/rear-manager/allocation_controller.go +++ b/pkg/rear-manager/allocation_controller.go @@ -19,9 +19,9 @@ import ( "encoding/json" "github.com/ghodss/yaml" - liqodiscovery "github.com/liqotech/liqo/apis/discovery/v1alpha1" - discovery "github.com/liqotech/liqo/pkg/discovery" - fcutils "github.com/liqotech/liqo/pkg/utils/foreignCluster" + "github.com/liqotech/liqo/apis/core/v1beta1" + constsLiqo "github.com/liqotech/liqo/pkg/consts" + fcutils "github.com/liqotech/liqo/pkg/utils/foreigncluster" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -59,11 +60,14 @@ import ( // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=serviceblueprints,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=serviceblueprints/status,verbs=get;update;patch // +kubebuilder:rbac:groups=nodecore.fluidos.eu,resources=allocations/finalizers,verbs=update -// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/status,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters/finalizers,verbs=get;update;patch +// +kubebuilder:rbac:groups=core.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.liqo.io,resources=foreignclusters/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core.liqo.io,resources=foreignclusters/finalizers,verbs=get;update;patch // +kubebuilder:rbac:groups=offloading.liqo.io,resources=namespaceoffloadings,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=offloading.liqo.io,resources=namespaceoffloadings/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=offloading.liqo.io,resources=virtualnodes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ipam.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=authentication.liqo.io,resources=*,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=configmaps,verbs=create;delete;deletecollection;patch;update;get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=create;delete;deletecollection;patch;update;get;list;watch @@ -82,8 +86,9 @@ import ( // AllocationReconciler reconciles a Allocation object. 
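Not part of the patch: the allocation-controller hunks that follow replace the old discovery-based readiness checks (IsIncomingJoined/IsOutgoingJoined) with the per-module helpers from Liqo's core/v1beta1 API, and the same set of conditions is evaluated at several call sites. A small helper bundling them could keep those sites consistent; this is a sketch only, and the exact ForeignCluster type in the v1beta1 package is inferred from the GetForeignClusterByID calls in this diff.

package rearmanager // hypothetical placement alongside the allocation controller

import (
	"github.com/liqotech/liqo/apis/core/v1beta1"
	fcutils "github.com/liqotech/liqo/pkg/utils/foreigncluster"
)

// peeringReady reports whether a ForeignCluster satisfies the readiness
// conditions used in the hunks below: the expected role plus the networking,
// authentication, and offloading modules being enabled and the network
// actually established. expectConsumer is true on the provider side
// (the remote cluster consumes resources) and false on the consumer side.
// NOTE: the *v1beta1.ForeignCluster parameter type is an assumption.
func peeringReady(fc *v1beta1.ForeignCluster, expectConsumer bool) bool {
	roleOK := fcutils.IsProvider(fc.Status.Role)
	if expectConsumer {
		roleOK = fcutils.IsConsumer(fc.Status.Role)
	}
	return roleOK &&
		fcutils.IsNetworkingModuleEnabled(fc) &&
		fcutils.IsNetworkingEstablished(fc) &&
		fcutils.IsAuthenticationModuleEnabled(fc) &&
		fcutils.IsOffloadingModuleEnabled(fc)
}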
type AllocationReconciler struct { client.Client - Manager manager.Manager - Scheme *runtime.Scheme + RestConfig *rest.Config + Manager manager.Manager + Scheme *runtime.Scheme } // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -228,7 +233,10 @@ func (r *AllocationReconciler) handleK8SliceProviderAllocation(ctx context.Conte // If the ForeignCluster is Ready the Allocation can be set to Active // else we need to wait for the ForeignCluster to be Ready klog.Infof("Allocation %s is provisioning", req.NamespacedName) - fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, contract.Spec.Buyer.AdditionalInformation.LiqoID) + + // Retrieve + + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(contract.Spec.Buyer.AdditionalInformation.LiqoID)) // check if not found if err != nil { if apierrors.IsNotFound(err) { @@ -244,10 +252,13 @@ func (r *AllocationReconciler) handleK8SliceProviderAllocation(ctx context.Conte } return ctrl.Result{}, nil } - if fcutils.IsIncomingJoined(fc) && - fcutils.IsNetworkingEstablishedOrExternal(fc) && - fcutils.IsAuthenticated(fc) && - !fcutils.IsUnpeered(fc) { + // Check if the ForeignCluster is ready with Liqo checks + // Network is established, authentication is enabled, offloading is enabled + if fcutils.IsConsumer(fc.Status.Role) && + fcutils.IsNetworkingModuleEnabled(fc) && + fcutils.IsNetworkingEstablished(fc) && + fcutils.IsAuthenticationModuleEnabled(fc) && + fcutils.IsOffloadingModuleEnabled(fc) { klog.Infof("ForeignCluster %s is ready, incoming peering established", contract.Spec.PeeringTargetCredentials.ClusterID) allocation.SetStatus(nodecorev1alpha1.Active, "Incoming peering ready, Allocation is now Active") } else { @@ -321,7 +332,7 @@ func (r *AllocationReconciler) handleK8SliceConsumerAllocation(ctx context.Conte // We need to check if the ForeignCluster is ready // Get the foreign cluster related to the Allocation - fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, contract.Spec.PeeringTargetCredentials.ClusterID) + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(contract.Spec.PeeringTargetCredentials.ClusterID)) if err != nil { if apierrors.IsNotFound(err) { // The ForeignCluster is not found @@ -350,10 +361,11 @@ func (r *AllocationReconciler) handleK8SliceConsumerAllocation(ctx context.Conte }) // Check if the ForeignCluster is ready with Liqo checks - if fcutils.IsOutgoingJoined(fc) && - fcutils.IsAuthenticated(fc) && - fcutils.IsNetworkingEstablishedOrExternal(fc) && - !fcutils.IsUnpeered(fc) { + if fcutils.IsProvider(fc.Status.Role) && + fcutils.IsNetworkingModuleEnabled(fc) && + fcutils.IsNetworkingEstablished(fc) && + fcutils.IsAuthenticationModuleEnabled(fc) && + fcutils.IsOffloadingModuleEnabled(fc) { // The ForeignCluster is ready klog.Infof("ForeignCluster %s is ready, outgoing peering established", contract.Spec.PeeringTargetCredentials.ClusterID) // Change the status of the Allocation to Active @@ -386,16 +398,43 @@ func (r *AllocationReconciler) handleK8SliceConsumerAllocation(ctx context.Conte // Get the Liqo credentials for the peering target cluster, that in this scenario is the provider credentials := contract.Spec.PeeringTargetCredentials // Check if a Liqo peering has been already established - _, err := fcutils.GetForeignClusterByID(ctx, r.Client, credentials.ClusterID) + _, err := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(credentials.ClusterID)) if err != nil { if apierrors.IsNotFound(err) { // Establish peering - 
klog.InfofDepth(1, "Allocation %s is peering with cluster %s", req.NamespacedName, credentials.ClusterName) - _, err := virtualfabricmanager.PeerWithCluster(ctx, r.Client, credentials.ClusterID, - credentials.ClusterName, credentials.Endpoint, credentials.Token) + klog.InfofDepth(1, "Allocation %s is peering with cluster %s", req.NamespacedName, credentials.ClusterID) + // Decode Kubeconfig + kubeconfig, err := virtualfabricmanager.DecodeKubeconfig(credentials.Kubeconfig) + if err != nil { + klog.Errorf("Error when decoding Kubeconfig: %v", err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when decoding Kubeconfig") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + remoteClient, remoteRestConfig, err := virtualfabricmanager.CreateKubeClientFromConfig(kubeconfig, r.Client.Scheme()) + if err != nil { + klog.Errorf("Error when creating remote client: %v", err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when creating remote client") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + _, err = virtualfabricmanager.PeerWithCluster( + ctx, + r.Client, + r.RestConfig, + remoteClient, + remoteRestConfig, + contract, + ) if err != nil { - klog.Errorf("Error when peering with cluster %s: %s", credentials.ClusterName, err) - allocation.SetStatus(nodecorev1alpha1.Error, "Error when peering with cluster "+credentials.ClusterName) + klog.Errorf("Error when peering with cluster %s: %s", credentials.ClusterID, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when peering with cluster "+credentials.ClusterID) if err := r.updateAllocationStatus(ctx, allocation); err != nil { klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) return ctrl.Result{}, err @@ -403,7 +442,7 @@ func (r *AllocationReconciler) handleK8SliceConsumerAllocation(ctx context.Conte return ctrl.Result{}, err } // Peering established - klog.Infof("Allocation %s has started the peering with cluster %s", req.NamespacedName.Name, credentials.ClusterName) + klog.Infof("Allocation %s has started the peering with cluster %s", req.NamespacedName.Name, credentials.ClusterID) // Change the status of the Allocation to Active allocation.SetStatus(nodecorev1alpha1.Active, "Allocation is now Active") @@ -421,7 +460,7 @@ func (r *AllocationReconciler) handleK8SliceConsumerAllocation(ctx context.Conte } } else { // Peering already established - klog.Infof("Allocation %s has already peered with cluster %s", req.NamespacedName.Name, credentials.ClusterName) + klog.Infof("Allocation %s has already peered with cluster %s", req.NamespacedName.Name, credentials.ClusterID) return ctrl.Result{}, nil } @@ -454,7 +493,7 @@ func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte case nodecorev1alpha1.Active: // We need to check if the ForeignCluster is still ready // Get the foreign cluster related to the Allocation - fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, contract.Spec.PeeringTargetCredentials.ClusterID) + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(contract.Spec.Buyer.AdditionalInformation.LiqoID)) if err != nil { if apierrors.IsNotFound(err) { // The ForeignCluster is not found @@ -476,17 +515,17 @@ 
func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte } // Check if the ForeignCluster is ready with Liqo checks - if fcutils.IsOutgoingJoined(fc) && - fcutils.IsAuthenticated(fc) && - fcutils.IsNetworkingEstablishedOrExternal(fc) && - !fcutils.IsUnpeered(fc) { + if fcutils.IsNetworkingModuleEnabled(fc) && + fcutils.IsNetworkingEstablished(fc) && + fcutils.IsAuthenticationModuleEnabled(fc) && + fcutils.IsOffloadingModuleEnabled(fc) { // The ForeignCluster is ready - klog.Infof("ForeignCluster %s is ready, outgoing peering established", contract.Spec.PeeringTargetCredentials.ClusterID) + klog.Infof("ForeignCluster %s is ready, outgoing peering established", contract.Spec.Buyer.AdditionalInformation.LiqoID) // Change the status of the Allocation to Active // allocation.SetStatus(nodecorev1alpha1.Active, "Outgoing peering ready, Allocation is Active") } else { // The ForeignCluster is not ready - klog.Infof("ForeignCluster %s is not ready yet", contract.Spec.PeeringTargetCredentials.ClusterID) + klog.Infof("ForeignCluster %s is not ready yet", contract.Spec.Buyer.AdditionalInformation.LiqoID) // Change the status of the Allocation to Active allocation.SetStatus(nodecorev1alpha1.Active, "Outgoing peering not yet ready, Allocation is Active") @@ -536,6 +575,7 @@ func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte klog.Infof("Allocation %s is provisioning", req.NamespacedName) + // Check if the peering has been established readiness, err := r.checkOutgoingForeignClusterReadiness(ctx, contract.Spec.Buyer.AdditionalInformation.LiqoID, allocation) if err != nil { klog.Errorf("Error when checking ForeignCluster readiness: %v", err) @@ -555,38 +595,6 @@ func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte return ctrl.Result{}, err } - return ctrl.Result{}, nil - case nodecorev1alpha1.Peering: - // Create peering with the consumer - klog.Infof("Allocation %s is peering", req.NamespacedName) - - // Get the Liqo credentials for the peering target cluster, that in this scenario is the provider - credentials := contract.Spec.PeeringTargetCredentials - - // Establish peering - klog.InfofDepth(1, "Allocation %s is peering with cluster %s", req.NamespacedName, credentials.ClusterName) - _, err := virtualfabricmanager.PeerWithCluster(ctx, r.Client, credentials.ClusterID, - credentials.ClusterName, credentials.Endpoint, credentials.Token) - if err != nil { - klog.Errorf("Error when peering with cluster %s: %s", credentials.ClusterName, err) - allocation.SetStatus(nodecorev1alpha1.Error, "Error when peering with cluster "+credentials.ClusterName) - if err := r.updateAllocationStatus(ctx, allocation); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, err - } - - // Peering established - klog.Infof("Allocation %s has started the peering with cluster %s", req.NamespacedName.Name, credentials.ClusterName) - - // Change the status of the Allocation to Active - allocation.SetStatus(nodecorev1alpha1.Provisioning, "Allocation is now provisioning") - if err := r.updateAllocationStatus(ctx, allocation); err != nil { - klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) - return ctrl.Result{}, err - } - return ctrl.Result{}, nil case nodecorev1alpha1.Released: // The Allocation is released, @@ -622,8 +630,8 @@ func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte klog.Infof("Flavor %s 
availability reduced", contract.Spec.Flavor.Name) - // Switch to Peering state - allocation.SetStatus(nodecorev1alpha1.Peering, "Starting peering") + // Switch to Provisioning state + allocation.SetStatus(nodecorev1alpha1.Provisioning, "Resources reserved, switching to Provisioning") if err := r.updateAllocationStatus(ctx, allocation); err != nil { klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) return ctrl.Result{}, err @@ -638,14 +646,14 @@ func (r *AllocationReconciler) handleServiceProviderAllocation(ctx context.Conte func (r *AllocationReconciler) checkOutgoingForeignClusterReadiness(ctx context.Context, clusterID string, allocation *nodecorev1alpha1.Allocation) (ready bool, err error) { // Get the foreign cluster related to the Allocation - fc, er := fcutils.GetForeignClusterByID(ctx, r.Client, clusterID) + fc, er := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(clusterID)) if er != nil { if apierrors.IsNotFound(er) { // The ForeignCluster is not found // We need to roll back to the establish peering phase klog.Infof("ForeignCluster %s not found", clusterID) // Change the status of the Allocation to Reserved - allocation.SetStatus(nodecorev1alpha1.Peering, "ForeignCluster not found, peering not yet started") + allocation.SetStatus(nodecorev1alpha1.Provisioning, "ForeignCluster not found, peering not yet started") } else { // Error when getting the ForeignCluster klog.Errorf("Error when getting ForeignCluster %s: %v", clusterID, er) @@ -657,10 +665,10 @@ func (r *AllocationReconciler) checkOutgoingForeignClusterReadiness(ctx context. } // Check if the ForeignCluster is ready with Liqo checks - if fcutils.IsOutgoingJoined(fc) && - fcutils.IsAuthenticated(fc) && - fcutils.IsNetworkingEstablishedOrExternal(fc) && - !fcutils.IsUnpeered(fc) { + if fcutils.IsProvider(fc.Status.Role) && + fcutils.IsNetworkingModuleEnabled(fc) && + fcutils.IsNetworkingEstablished(fc) && + fcutils.IsAuthenticationModuleEnabled(fc) { // The ForeignCluster is ready klog.Infof("ForeignCluster %s is ready, outgoing peering established", clusterID) } else { @@ -723,11 +731,11 @@ func (r *AllocationReconciler) createConsumerNamespace(ctx context.Context, req klog.Infof("PodOffloadingStrategy %s forged", podOffloadingStrategy) klog.Infof("PeeringTargetCredentials %s forged", contract.Spec.PeeringTargetCredentials) - credentials := contract.Spec.PeeringTargetCredentials + credentials := contract.Spec.Buyer.AdditionalInformation.LiqoID - klog.Infof("Credentials clusterID: %s", credentials.ClusterID) + klog.Infof("Credentials clusterID: %s", credentials) - no, err := virtualfabricmanager.OffloadNamespace(ctx, r.Client, namespaceName, podOffloadingStrategy, credentials.ClusterID) + no, err := virtualfabricmanager.OffloadNamespace(ctx, r.Client, namespaceName, podOffloadingStrategy, credentials) if err != nil { klog.Errorf("Error when offloading namespace %s: %v", namespaceName, err) @@ -739,7 +747,7 @@ func (r *AllocationReconciler) createConsumerNamespace(ctx context.Context, req return "", err } - klog.Infof("Namespace %s offloaded to cluster %s with OffloadingNamespace %s", namespaceName, credentials.ClusterName, no.Name) + klog.Infof("Namespace %s offloaded to cluster %s with OffloadingNamespace %s", namespaceName, credentials, no.Name) return namespaceName, nil } @@ -871,7 +879,7 @@ func (r *AllocationReconciler) handleServiceConsumerAllocation(ctx context.Conte klog.Infof("Allocation %s is provisioning", req.NamespacedName) // The Allocation can look for 
incoming peering associated with the contract - fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, contract.Spec.Seller.AdditionalInformation.LiqoID) + fc, err := fcutils.GetForeignClusterByID(ctx, r.Client, v1beta1.ClusterID(contract.Spec.Seller.AdditionalInformation.LiqoID)) // check if not found if err != nil { if apierrors.IsNotFound(err) { @@ -887,10 +895,11 @@ func (r *AllocationReconciler) handleServiceConsumerAllocation(ctx context.Conte } return ctrl.Result{}, nil } - if fcutils.IsIncomingJoined(fc) && - fcutils.IsNetworkingEstablishedOrExternal(fc) && - fcutils.IsAuthenticated(fc) && - !fcutils.IsUnpeered(fc) { + if fcutils.IsConsumer(fc.Status.Role) && + fcutils.IsNetworkingModuleEnabled(fc) && + fcutils.IsNetworkingEstablished(fc) && + fcutils.IsAuthenticationModuleEnabled(fc) && + fcutils.IsOffloadingModuleEnabled(fc) { klog.Infof("ForeignCluster %s is ready, incoming peering established", contract.Spec.Seller.AdditionalInformation.LiqoID) allocation.SetStatus(nodecorev1alpha1.Active, "Incoming peering ready, Allocation is now Active") } else { @@ -922,6 +931,8 @@ func (r *AllocationReconciler) handleServiceConsumerAllocation(ctx context.Conte return ctrl.Result{}, nil } + // Check if the offloaded namespaces are found + // If not found, the Allocation is still provisioning if len(offloadedNamespaces.Items) == 0 { klog.Infof("No offloaded namespaces found") allocation.SetStatus(nodecorev1alpha1.Provisioning, "No offloaded namespaces found, still provisioning") @@ -1013,11 +1024,75 @@ func (r *AllocationReconciler) handleServiceConsumerAllocation(ctx context.Conte klog.Infof("Allocation %s is released", req.NamespacedName) // We need to check if the ForeignCluster is again ready return ctrl.Result{}, nil + case nodecorev1alpha1.Peering: + // Create peering with the provider + klog.Infof("Allocation %s is peering", req.NamespacedName) + + // Get the Liqo credentials for the peering target cluster, that in this scenario is the provider + credentials := contract.Spec.PeeringTargetCredentials + + // Establish peering + klog.InfofDepth(1, "Allocation %s is peering with cluster %s", req.NamespacedName, credentials.ClusterID) + // Decode the kubeconfig + kubeconfig, err := virtualfabricmanager.DecodeKubeconfig(credentials.Kubeconfig) + if err != nil { + klog.Errorf("Error when decoding kubeconfig: %v", err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when decoding kubeconfig") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, err + } + // Create Kubernetes client for the target cluster + remoteClient, remoteRestConfig, err := virtualfabricmanager.CreateKubeClientFromConfig(kubeconfig, r.Client.Scheme()) + if err != nil { + klog.Errorf("Error when creating Kubernetes client: %v", err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when creating Kubernetes client") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, err + } + + // Perform peering with inverted localCluster as Remote and remoteCluster as Local + // This is to establish a peering in a direction Provider->Consumer, so the provider can consume consumer resources to deploy its service. 
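// Note on the assumed helper signature (illustrative, not taken from this patch):
// judging from the two call sites in this controller, PeerWithCluster now takes
// (ctx, client, restConfig, client, restConfig, contract), where the first
// client/rest.Config pair appears to identify the cluster that acts as the Liqo
// consumer (the one offloading workloads) and the second pair the cluster
// providing the resources. In this service scenario the provider cluster is
// therefore passed first, so it can offload its service onto the consumer,
// matching the inverted direction described in the comment above.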
+ _, err = virtualfabricmanager.PeerWithCluster( + ctx, + remoteClient, + remoteRestConfig, + r.Client, + r.RestConfig, + contract, + ) + if err != nil { + klog.Errorf("Error when peering with cluster %s: %s", credentials.ClusterID, err) + allocation.SetStatus(nodecorev1alpha1.Error, "Error when peering with cluster "+credentials.ClusterID) + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + return ctrl.Result{}, err + } + + // Peering established + klog.Infof("Allocation %s has started the peering with cluster %s", req.NamespacedName.Name, credentials.ClusterID) + + // Change the status of the Allocation to Active + allocation.SetStatus(nodecorev1alpha1.Provisioning, "Allocation is now provisioning") + if err := r.updateAllocationStatus(ctx, allocation); err != nil { + klog.Errorf("Error when updating Solver %s status: %s", req.NamespacedName, err) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + case nodecorev1alpha1.Inactive: klog.Infof("Allocation %s is inactive", req.NamespacedName) - // No particular action is needed, the Allocation can be set to Provisioning - allocation.SetStatus(nodecorev1alpha1.Provisioning, "Resources provisioning") + // No particular action is needed, the Allocation can be set to Peering + allocation.SetStatus(nodecorev1alpha1.Peering, "Allocation is now in Peering") if err := r.updateAllocationStatus(ctx, allocation); err != nil { klog.Errorf("Error when updating Allocation %s status: %v", req.NamespacedName, err) return ctrl.Result{}, err @@ -1206,7 +1281,7 @@ func (r *AllocationReconciler) updateAllocationStatus(ctx context.Context, alloc func (r *AllocationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&nodecorev1alpha1.Allocation{}). - Watches(&liqodiscovery.ForeignCluster{}, handler.EnqueueRequestsFromMapFunc(r.fcToAllocation), builder.WithPredicates(foreignClusterPredicate())). + Watches(&v1beta1.ForeignCluster{}, handler.EnqueueRequestsFromMapFunc(r.fcToAllocation), builder.WithPredicates(foreignClusterPredicate())). Watches(&corev1.Namespace{}, handler.EnqueueRequestsFromMapFunc(r.nsToAllocation), builder.WithPredicates(namespacePredicate())). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(r.secretToAllocation), builder.WithPredicates(secretPredicate())). 
Complete(r) @@ -1215,8 +1290,8 @@ func (r *AllocationReconciler) SetupWithManager(mgr ctrl.Manager) error { func foreignClusterPredicate() predicate.Predicate { return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - return fcutils.IsOutgoingJoined(e.ObjectNew.(*liqodiscovery.ForeignCluster)) || - fcutils.IsIncomingJoined(e.ObjectNew.(*liqodiscovery.ForeignCluster)) + return fcutils.IsConsumer(e.ObjectNew.(*v1beta1.ForeignCluster).Status.Role) || + fcutils.IsProvider(e.ObjectNew.(*v1beta1.ForeignCluster).Status.Role) }, } } @@ -1256,7 +1331,7 @@ func secretPredicate() predicate.Predicate { } func (r *AllocationReconciler) fcToAllocation(_ context.Context, o client.Object) []reconcile.Request { - clusterID := o.GetLabels()[discovery.ClusterIDLabel] + clusterID := o.GetLabels()[constsLiqo.RemoteClusterID] allocations, err := getters.GetAllocationByClusterIDSpec(context.Background(), r.Client, clusterID) if err != nil { klog.Errorf("Error when getting Allocation by clusterID %s: %v", clusterID, err) @@ -1270,12 +1345,12 @@ func (r *AllocationReconciler) fcToAllocation(_ context.Context, o client.Object var filteredAllocations []nodecorev1alpha1.Allocation for i := range allocations.Items { allocation := allocations.Items[i] - if allocation.Status.Status != nodecorev1alpha1.Peering { + if allocation.Status.Status == nodecorev1alpha1.Active || allocation.Status.Status == nodecorev1alpha1.Provisioning { filteredAllocations = append(filteredAllocations, allocation) } } if len(filteredAllocations) == 0 { - klog.Infof("No Allocation found with clusterID %s not in Peering status", clusterID) + klog.Infof("No allocations found in Active or Provisioning status") return nil } var requests []reconcile.Request diff --git a/pkg/rear-manager/allocation_wh.go b/pkg/rear-manager/allocation_wh.go index d0147275..e514efd2 100644 --- a/pkg/rear-manager/allocation_wh.go +++ b/pkg/rear-manager/allocation_wh.go @@ -28,7 +28,7 @@ import ( // Validator is the allocation webhook validator. type Validator struct { client client.Client - decoder *admission.Decoder + decoder admission.Decoder } // NewValidator creates a new allocation webhook validator. 
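The decoder field switches from *admission.Decoder to admission.Decoder, in line with recent controller-runtime releases (v0.17 and later) exposing the decoder as an interface. Below is a minimal sketch of how the validator can be wired under that assumption; the package name, constructor signature, and scheme variable are illustrative and may differ from the actual NewValidator in this repository.

package rearmanager // assumed package name for pkg/rear-manager

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// Validator mirrors the fields shown in the hunk above.
type Validator struct {
	client  client.Client
	decoder admission.Decoder
}

// NewValidator builds the validator with an interface-valued decoder;
// admission.NewDecoder returns admission.Decoder directly (no error).
func NewValidator(cl client.Client, scheme *runtime.Scheme) *Validator {
	return &Validator{client: cl, decoder: admission.NewDecoder(scheme)}
}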
diff --git a/pkg/rear-manager/solver_controller.go b/pkg/rear-manager/solver_controller.go index d16bbfb3..91eeb9b1 100644 --- a/pkg/rear-manager/solver_controller.go +++ b/pkg/rear-manager/solver_controller.go @@ -579,7 +579,7 @@ func (r *SolverReconciler) handlePeering( return ctrl.Result{}, err } // VirtualNode Name is not used in the current implementation - vnName := namings.ForgeVirtualNodeName(contract.Spec.PeeringTargetCredentials.ClusterName) + vnName := namings.ForgeVirtualNodeName(contract.Spec.PeeringTargetCredentials.ClusterID) klog.Infof("Virtual Node Name: %s", vnName) allocation := resourceforge.ForgeAllocation(&contract) diff --git a/pkg/utils/getters/getters.go b/pkg/utils/getters/getters.go index 2dbd8786..da36abfc 100644 --- a/pkg/utils/getters/getters.go +++ b/pkg/utils/getters/getters.go @@ -18,12 +18,12 @@ import ( "context" "fmt" - "github.com/liqotech/liqo/pkg/auth" "github.com/liqotech/liqo/pkg/utils" - foreigncluster "github.com/liqotech/liqo/pkg/utils/foreignCluster" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,6 +32,7 @@ import ( reservationv1alpha1 "github.com/fluidos-project/node/apis/reservation/v1alpha1" "github.com/fluidos-project/node/pkg/utils/consts" "github.com/fluidos-project/node/pkg/utils/flags" + virtualfabricmanager "github.com/fluidos-project/node/pkg/virtual-fabric-manager" ) // GetNodeIdentity retrieves the node identity from the local cluster. @@ -96,33 +97,37 @@ func GetLocalProviders(ctx context.Context, cl client.Client) []string { } // GetLiqoCredentials retrieves the Liqo credentials from the local cluster. -func GetLiqoCredentials(ctx context.Context, cl client.Client) (*nodecorev1alpha1.LiqoCredentials, error) { - localToken, err := auth.GetToken(ctx, cl, consts.LiqoNamespace) +func GetLiqoCredentials(ctx context.Context, cl client.Client, restConfig *rest.Config) (*nodecorev1alpha1.LiqoCredentials, error) { + // Transform the client to a clientSet + kubeClient, err := kubernetes.NewForConfig(restConfig) if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) return nil, err } - clusterIdentity, err := utils.GetClusterIdentityWithControllerClient(ctx, cl, consts.LiqoNamespace) + localClusterID, err := utils.GetClusterID(ctx, kubeClient, consts.LiqoNamespace) if err != nil { + klog.Errorf("Error getting the local cluster ID: %s", err) return nil, err } - authEP, err := foreigncluster.GetHomeAuthURL(ctx, cl, consts.LiqoNamespace) + // Generate Local Kubeconfig for remote cluster + kubeconfig, err := virtualfabricmanager.CreateKubeconfigForPeering(ctx, cl, string(localClusterID)) if err != nil { + klog.Errorf("Error generating the kubeconfig: %s", err) return nil, err } - // If the local cluster has not a cluster name, we print the use the local clusterID to not leave this field empty. - // This can be changed by the user when pasting this value in a remote cluster. 
- if clusterIdentity.ClusterName == "" { - clusterIdentity.ClusterName = clusterIdentity.ClusterID + // Encode the kubeconfig + kubeconfigEncoded, err := virtualfabricmanager.EncodeKubeconfig(kubeconfig) + if err != nil { + klog.Errorf("Error encoding the kubeconfig: %s", err) + return nil, err } return &nodecorev1alpha1.LiqoCredentials{ - ClusterName: clusterIdentity.ClusterName, - ClusterID: clusterIdentity.ClusterID, - Endpoint: authEP, - Token: localToken, + ClusterID: string(localClusterID), + Kubeconfig: kubeconfigEncoded, }, nil } diff --git a/pkg/utils/models/models.go b/pkg/utils/models/models.go index a1089999..869f2668 100644 --- a/pkg/utils/models/models.go +++ b/pkg/utils/models/models.go @@ -86,7 +86,8 @@ type Location struct { // NodeIdentityAdditionalInfo represents additional information about a NodeIdentity. type NodeIdentityAdditionalInfo struct { - LiqoID string `json:"liqoID,omitempty"` + LiqoID string `json:"liqoID,omitempty"` + Kubeconfig string `json:"kubeconfig,omitempty"` } // NodeIdentity represents the owner of a Flavor, with associated ID, IP, and domain name. diff --git a/pkg/utils/models/reservation.go b/pkg/utils/models/reservation.go index 7ef2c1bd..666bf4b2 100644 --- a/pkg/utils/models/reservation.go +++ b/pkg/utils/models/reservation.go @@ -56,8 +56,6 @@ type Contract struct { // LiqoCredentials contains the credentials of a Liqo cluster to establish a peering. type LiqoCredentials struct { - ClusterID string `json:"clusterID"` - ClusterName string `json:"clusterName"` - Token string `json:"token"` - Endpoint string `json:"endpoint"` + ClusterID string `json:"clusterID"` + Kubeconfig string `json:"kubeconfig"` } diff --git a/pkg/utils/parseutil/parseutil.go b/pkg/utils/parseutil/parseutil.go index 61af30c6..565b295f 100644 --- a/pkg/utils/parseutil/parseutil.go +++ b/pkg/utils/parseutil/parseutil.go @@ -771,10 +771,8 @@ func ParseContract(contract *reservationv1alpha1.Contract) *models.Contract { }(), Seller: ParseNodeIdentity(contract.Spec.Seller), PeeringTargetCredentials: models.LiqoCredentials{ - ClusterID: contract.Spec.PeeringTargetCredentials.ClusterID, - ClusterName: contract.Spec.PeeringTargetCredentials.ClusterName, - Token: contract.Spec.PeeringTargetCredentials.Token, - Endpoint: contract.Spec.PeeringTargetCredentials.Endpoint, + ClusterID: contract.Spec.PeeringTargetCredentials.ClusterID, + Kubeconfig: contract.Spec.PeeringTargetCredentials.Kubeconfig, }, ExpirationTime: contract.Spec.ExpirationTime, ExtraInformation: contract.Spec.ExtraInformation, diff --git a/pkg/utils/resourceforge/forge.go b/pkg/utils/resourceforge/forge.go index 5be7fc84..aa2eb0e6 100644 --- a/pkg/utils/resourceforge/forge.go +++ b/pkg/utils/resourceforge/forge.go @@ -24,7 +24,7 @@ import ( "time" "github.com/Masterminds/sprig" - offloadingv1alpha1 "github.com/liqotech/liqo/apis/offloading/v1alpha1" + offloadingv1beta1 "github.com/liqotech/liqo/apis/offloading/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -427,10 +427,8 @@ func ForgeContractObj(contract *reservationv1alpha1.Contract) models.Contract { BuyerClusterID: contract.Spec.BuyerClusterID, Seller: parseutil.ParseNodeIdentity(contract.Spec.Seller), PeeringTargetCredentials: models.LiqoCredentials{ - ClusterID: contract.Spec.PeeringTargetCredentials.ClusterID, - ClusterName: contract.Spec.PeeringTargetCredentials.ClusterName, - Token: contract.Spec.PeeringTargetCredentials.Token, - Endpoint: 
contract.Spec.PeeringTargetCredentials.Endpoint, + ClusterID: contract.Spec.PeeringTargetCredentials.ClusterID, + Kubeconfig: contract.Spec.PeeringTargetCredentials.Kubeconfig, }, Configuration: func() *models.Configuration { if contract.Spec.Configuration != nil { @@ -489,10 +487,8 @@ func ForgeContractFromObj(contract *models.Contract) (*reservationv1alpha1.Contr BuyerClusterID: contract.BuyerClusterID, Seller: *ForgeNodeIdentitiesFromObj(&contract.Seller), PeeringTargetCredentials: nodecorev1alpha1.LiqoCredentials{ - ClusterID: contract.PeeringTargetCredentials.ClusterID, - ClusterName: contract.PeeringTargetCredentials.ClusterName, - Token: contract.PeeringTargetCredentials.Token, - Endpoint: contract.PeeringTargetCredentials.Endpoint, + ClusterID: contract.PeeringTargetCredentials.ClusterID, + Kubeconfig: contract.PeeringTargetCredentials.Kubeconfig, }, TransactionID: contract.TransactionID, Configuration: func() *nodecorev1alpha1.Configuration { @@ -1217,32 +1213,28 @@ func ForgeDefaultServiceConfiguration(serviceFlavor *nodecorev1alpha1.ServiceFla // ForgeLiqoCredentialsObj creates a LiqoCredentials object from a LiqoCredentials CR. func ForgeLiqoCredentialsObj(liqoCredentials *nodecorev1alpha1.LiqoCredentials) (*models.LiqoCredentials, error) { return &models.LiqoCredentials{ - ClusterID: liqoCredentials.ClusterID, - ClusterName: liqoCredentials.ClusterName, - Token: liqoCredentials.Token, - Endpoint: liqoCredentials.Endpoint, + ClusterID: liqoCredentials.ClusterID, + Kubeconfig: liqoCredentials.Kubeconfig, }, nil } // ForgeLiqoCredentialsFromObj creates a LiqoCredentials CR from a LiqoCredentials object. func ForgeLiqoCredentialsFromObj(liqoCredentials *models.LiqoCredentials) (*nodecorev1alpha1.LiqoCredentials, error) { return &nodecorev1alpha1.LiqoCredentials{ - ClusterID: liqoCredentials.ClusterID, - ClusterName: liqoCredentials.ClusterName, - Token: liqoCredentials.Token, - Endpoint: liqoCredentials.Endpoint, + ClusterID: liqoCredentials.ClusterID, + Kubeconfig: liqoCredentials.Kubeconfig, }, nil } // ForgePodOffloadingStrategy creates a PodOffloadingStrategy CR from a nodecorev1alpha1.HostingPolicy. 
-func ForgePodOffloadingStrategy(hostingPolicy *nodecorev1alpha1.HostingPolicy) (offloadingv1alpha1.PodOffloadingStrategyType, error) { +func ForgePodOffloadingStrategy(hostingPolicy *nodecorev1alpha1.HostingPolicy) (offloadingv1beta1.PodOffloadingStrategyType, error) { switch *hostingPolicy { case nodecorev1alpha1.HostingPolicyProvider: - return offloadingv1alpha1.LocalPodOffloadingStrategyType, nil + return offloadingv1beta1.LocalPodOffloadingStrategyType, nil case nodecorev1alpha1.HostingPolicyConsumer: - return offloadingv1alpha1.RemotePodOffloadingStrategyType, nil + return offloadingv1beta1.RemotePodOffloadingStrategyType, nil case nodecorev1alpha1.HostingPolicyShared: - return offloadingv1alpha1.LocalAndRemotePodOffloadingStrategyType, nil + return offloadingv1beta1.LocalAndRemotePodOffloadingStrategyType, nil default: return "", fmt.Errorf("hosting policy not recognized") } diff --git a/pkg/virtual-fabric-manager/services.go b/pkg/virtual-fabric-manager/services.go index 8ea21525..0830d404 100644 --- a/pkg/virtual-fabric-manager/services.go +++ b/pkg/virtual-fabric-manager/services.go @@ -16,21 +16,33 @@ package virtualfabricmanager import ( "context" + "encoding/base64" "fmt" + "time" - discoveryv1alpha1 "github.com/liqotech/liqo/apis/discovery/v1alpha1" - offloadingv1alpha1 "github.com/liqotech/liqo/apis/offloading/v1alpha1" - "github.com/liqotech/liqo/pkg/discovery" + "github.com/liqotech/liqo/apis/authentication/v1beta1" + corev1beta1 "github.com/liqotech/liqo/apis/core/v1beta1" + networkingv1beta1 "github.com/liqotech/liqo/apis/networking/v1beta1" + offloadingv1beta1 "github.com/liqotech/liqo/apis/offloading/v1beta1" + liqoConsts "github.com/liqotech/liqo/pkg/consts" + authenticationForge "github.com/liqotech/liqo/pkg/liqo-controller-manager/authentication/forge" + authenticationUtils "github.com/liqotech/liqo/pkg/liqo-controller-manager/authentication/utils" + networkForgeLiqo "github.com/liqotech/liqo/pkg/liqo-controller-manager/networking/forge" "github.com/liqotech/liqo/pkg/utils" - foreigncluster "github.com/liqotech/liqo/pkg/utils/foreignCluster" + ipamLiqo "github.com/liqotech/liqo/pkg/utils/ipam" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" klog "k8s.io/klog/v2" - pointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + reservation "github.com/fluidos-project/node/apis/reservation/v1alpha1" "github.com/fluidos-project/node/pkg/utils/consts" ) @@ -38,144 +50,1200 @@ import ( //+kubebuilder:rbac:groups=discovery.liqo.io,resources=foreignclusters,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=core,resources=*,verbs=get;list;watch -// PeerWithCluster creates a ForeignCluster resource to peer with a remote cluster. -func PeerWithCluster(ctx context.Context, cl client.Client, clusterID, - clusterName, clusterAuthURL, clusterToken string) (*discoveryv1alpha1.ForeignCluster, error) { - // Retrieve the cluster identity associated with the current cluster. - clusterIdentity, err := utils.GetClusterIdentityWithControllerClient(ctx, cl, consts.LiqoNamespace) +// EncodeKubeconfig encodes a clientcmdapi.Config into a Base64 string. 
+func EncodeKubeconfig(kubeconfig *clientcmdapi.Config) (string, error) { + if kubeconfig == nil { + return "", fmt.Errorf("kubeconfig is nil") + } + + // Convert the Kubeconfig struct to YAML + kubeconfigYAML, err := clientcmd.Write(*kubeconfig) + if err != nil { + return "", fmt.Errorf("failed to serialize kubeconfig: %w", err) + } + + // Encode the YAML to Base64 + encodedKubeconfig := base64.StdEncoding.EncodeToString(kubeconfigYAML) + return encodedKubeconfig, nil +} + +// DecodeKubeconfig decodes a Base64 string into a clientcmdapi.Config. +func DecodeKubeconfig(encodedKubeconfig string) (*clientcmdapi.Config, error) { + if encodedKubeconfig == "" { + return nil, fmt.Errorf("encoded Kubeconfig string is empty") + } + + // Decode Base64 string + kubeconfigYAML, err := base64.StdEncoding.DecodeString(encodedKubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to decode Base64 string: %w", err) + } + + // Deserialize the YAML into a clientcmdapi.Config + kubeconfig, err := clientcmd.Load(kubeconfigYAML) + if err != nil { + return nil, fmt.Errorf("failed to parse kubeconfig YAML: %w", err) + } + + return kubeconfig, nil +} + +// CreateKubeconfigForPeering creates a kubeconfig for peering with a remote cluster. +func CreateKubeconfigForPeering(ctx context.Context, cl client.Client, consumerClusterID string) (*clientcmdapi.Config, error) { + // Create a Service Account + sa, err := createOrGetPeeringServiceAccount(ctx, consumerClusterID, cl) + if err != nil { + klog.Error(err) + return nil, err + } + // Create a ClusterRole + cr, err := createOrGetPeeringClusterRole(ctx, consumerClusterID, cl) + if err != nil { + klog.Error(err) + return nil, err + } + // Create a ClusterRoleBinding + _, err = createOrGetPeeringClusterRoleBinding(ctx, consumerClusterID, cr, sa, cl) + if err != nil { + klog.Error(err) + return nil, err + } + // Create a Secret + secret, err := createOrGetPeeringSecret(ctx, consumerClusterID, sa, cl) if err != nil { + klog.Error(err) return nil, err } - // Check whether cluster IDs are the same, as we cannot peer with ourselves. - if clusterIdentity.ClusterID == clusterID { - return nil, fmt.Errorf("the Cluster ID of the remote cluster is the same of that of the local cluster") + // Sleep for a while to allow the Secret to be populated. + time.Sleep(5 * time.Second) + + // Retrieve the token from the secret just created. + if err := cl.Get(ctx, client.ObjectKey{Name: secret.Name, Namespace: secret.Namespace}, secret); err != nil { + klog.Error(err) + return nil, err } - // Create the secret containing the authentication token. - err = storeInSecret(ctx, cl, clusterID, clusterToken, consts.LiqoNamespace) + token := string(secret.Data[corev1.ServiceAccountTokenKey]) + caCert := string(secret.Data[corev1.ServiceAccountRootCAKey]) + // TODO: Retrieve the server URL from the remote cluster. + serverURL, err := getControlPlaneURL(ctx, cl) if err != nil { + klog.Error(err) return nil, err } - return enforceForeignCluster(ctx, cl, clusterID, clusterName, clusterAuthURL) + + // Create a kubeconfig for peering with the remote cluster. 
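// Illustrative round trip (not part of the patch): the config assembled below is
// what GetLiqoCredentials encodes via EncodeKubeconfig and ships inside
// LiqoCredentials.Kubeconfig; the peer later reverses it roughly as in the
// Allocation controller above:
//
//	cfg, err := virtualfabricmanager.DecodeKubeconfig(credentials.Kubeconfig)
//	if err != nil { /* handle error */ }
//	remoteClient, remoteRestConfig, err := virtualfabricmanager.CreateKubeClientFromConfig(cfg, scheme)
//
// Note that the server URL comes from getControlPlaneURL (a control-plane node
// InternalIP on port 6443), so it must be reachable from the peer cluster, as
// the TODO above hints.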
+ kubeConfig := &clientcmdapi.Config{ + APIVersion: "v1", + Kind: "Config", + Clusters: map[string]*clientcmdapi.Cluster{ + consumerClusterID: { + Server: serverURL, + CertificateAuthorityData: []byte(caCert), + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + consumerClusterID: { + Token: token, + }, + }, + Contexts: map[string]*clientcmdapi.Context{ + consumerClusterID: { + Cluster: consumerClusterID, + AuthInfo: consumerClusterID, + }, + }, + CurrentContext: consumerClusterID, + } + + return kubeConfig, nil } -func enforceForeignCluster(ctx context.Context, cl client.Client, - clusterID, clusterName, clusterAuthURL string) (*discoveryv1alpha1.ForeignCluster, error) { - fc, err := foreigncluster.GetForeignClusterByID(ctx, cl, clusterID) - if client.IgnoreNotFound(err) == nil { - fc = &discoveryv1alpha1.ForeignCluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName, - Labels: map[string]string{discovery.ClusterIDLabel: clusterID}}} - } else if err != nil { - return nil, err +// getControlPlaneURL retrieves the public control plane URL of the cluster. +func getControlPlaneURL(ctx context.Context, cl client.Client) (string, error) { + port := "6443" + + // Get control plane node list + nodeList := &corev1.NodeList{} + if err := cl.List(ctx, nodeList); err != nil { + klog.Error(err) + return "", err } - _, err = controllerutil.CreateOrUpdate(ctx, cl, fc, func() error { - if fc.Spec.PeeringType != discoveryv1alpha1.PeeringTypeUnknown && fc.Spec.PeeringType != discoveryv1alpha1.PeeringTypeOutOfBand { - return fmt.Errorf("a peering of type %s already exists towards remote cluster %q, cannot be changed to %s", - fc.Spec.PeeringType, clusterName, discoveryv1alpha1.PeeringTypeOutOfBand) + // Iterate over nodes to find the control plane node + for i := range nodeList.Items { + node := &nodeList.Items[i] + klog.InfofDepth(1, "Node: %s - Found %d labels", node.Name, len(node.Labels)) + + _, existsControl := node.Labels["node-role.kubernetes.io/control-plane"] + _, existsMaster := node.Labels["node-role.kubernetes.io/master"] + + if existsControl || existsMaster { + // Get the control plane node IP + for _, address := range node.Status.Addresses { + if address.Type == corev1.NodeInternalIP { + return "https://" + address.Address + ":" + port, nil + } + } } + } + + return "", fmt.Errorf("unable to retrieve the control plane URL") +} - fc.Spec.PeeringType = discoveryv1alpha1.PeeringTypeOutOfBand - fc.Spec.ClusterIdentity.ClusterID = clusterID - if fc.Spec.ClusterIdentity.ClusterName == "" { - fc.Spec.ClusterIdentity.ClusterName = clusterName +// createPeeringServiceAccount creates a ServiceAccount to be used for peering with a remote cluster. 
+func createOrGetPeeringServiceAccount(ctx context.Context, consumerClusterID string, cl client.Client) (*corev1.ServiceAccount, error) { + // Get the ServiceAccount if it already exists + // If the ServiceAccount does not exist, create it + sa := &corev1.ServiceAccount{} + err := cl.Get(ctx, client.ObjectKey{Name: "liqo-cluster-" + consumerClusterID, Namespace: consts.LiqoNamespace}, sa) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, err } + klog.InfofDepth(1, "ServiceAccount does not exist, creating a new one") - fc.Spec.ForeignAuthURL = clusterAuthURL - fc.Spec.ForeignProxyURL = "" - fc.Spec.OutgoingPeeringEnabled = discoveryv1alpha1.PeeringEnabledYes - if fc.Spec.IncomingPeeringEnabled == "" { - fc.Spec.IncomingPeeringEnabled = discoveryv1alpha1.PeeringEnabledAuto + sa = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "liqo-cluster-" + consumerClusterID, + Namespace: consts.LiqoNamespace, + }, } - if fc.Spec.InsecureSkipTLSVerify == nil { - //nolint:staticcheck // referring to the Liqo implementation - fc.Spec.InsecureSkipTLSVerify = pointer.BoolPtr(true) + + err = cl.Create(ctx, sa) + if err != nil { + klog.Error(err) + return nil, err + } + } + return sa, nil +} + +// createPeeringClusterRole creates a ClusterRole to be used for peering with a remote cluster. +func createOrGetPeeringClusterRole(ctx context.Context, consumerClusterID string, cl client.Client) (*rbacv1.ClusterRole, error) { + // Get the ClusterRole if it already exists + // If the ClusterRole does not exist, create it + cr := &rbacv1.ClusterRole{} + err := cl.Get(ctx, client.ObjectKey{Name: "liqo-cluster-" + consumerClusterID}, cr) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, err + } + klog.InfofDepth(1, "ClusterRole does not exist, creating a new one") + + cr = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "liqo-cluster-" + consumerClusterID, + }, + // TODO: Define the exact rules for Liqo peering + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"networking.liqo.io"}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"offloading.liqo.io"}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"authentication.liqo.io"}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"core.liqo.io"}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{"ipam.liqo.io"}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"*"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + }, } - return nil - }) - return fc, err + err := cl.Create(ctx, cr) + if err != nil { + klog.Error(err) + return nil, err + } + } + return cr, nil } -func storeInSecret(ctx context.Context, cl client.Client, - clusterID, authToken, liqoNamespace string) error { - secretName := fmt.Sprintf("%v%v", consts.LiqoAuthTokenSecretNamePrefix, clusterID) +// createPeeringClusterRoleBinding creates a ClusterRoleBinding to be used for peering with a remote cluster. 
+func createOrGetPeeringClusterRoleBinding( + ctx context.Context, + consumerClusterID string, + cr *rbacv1.ClusterRole, + sa *corev1.ServiceAccount, + cl client.Client) (*rbacv1.ClusterRoleBinding, error) { + // Get the ClusterRoleBinding if it already exists + // If the ClusterRoleBinding does not exist, create it + crb := &rbacv1.ClusterRoleBinding{} + err := cl.Get(ctx, client.ObjectKey{Name: "liqo-cluster-" + consumerClusterID, Namespace: consts.LiqoNamespace}, crb) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, err + } + klog.InfofDepth(1, "ClusterRoleBinding does not exist, creating a new one") + crb = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "liqo-cluster-" + consumerClusterID, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.Name, + Namespace: sa.Namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: cr.Name, + APIGroup: "rbac.authorization.k8s.io", + }, + } + + err := cl.Create(ctx, crb) + if err != nil { + klog.Error(err) + return nil, err + } + } + return crb, nil +} + +// createOrGetPeeringSecret creates a Secret to be used for peering with a remote cluster. +func createOrGetPeeringSecret(ctx context.Context, consumerClusterID string, sa *corev1.ServiceAccount, cl client.Client) (*corev1.Secret, error) { + // Get the Secret if it already exists + // If the Secret does not exist, create it secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: "liqo-cluster-" + consumerClusterID, Namespace: consts.LiqoNamespace}, secret) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, err + } + klog.InfofDepth(1, "Secret not found, creating a new one") + // Create the Secret + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "liqo-cluster-" + consumerClusterID, + Namespace: consts.LiqoNamespace, + Annotations: map[string]string{ + "kubernetes.io/service-account.name": sa.Name, + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + } - err := cl.Get(ctx, types.NamespacedName{Name: secretName}, secret) - if client.IgnoreNotFound(err) == nil { - return createAuthTokenSecret(ctx, cl, secretName, liqoNamespace, clusterID, authToken) + err := cl.Create(ctx, secret) + if err != nil { + klog.Error(err) + return nil, err + } } + + return secret, nil +} + +func createTenantNamespace(ctx context.Context, cl client.Client, clusterID corev1beta1.ClusterID) (string, error) { + // Create tenant namespace + name := "liqo-tenant-" + string(clusterID) + tenantNamespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + liqoConsts.RemoteClusterID: string(clusterID), + liqoConsts.TenantNamespaceLabel: "true", + }, + }, + } + err := cl.Create(ctx, tenantNamespace) if err != nil { klog.Error(err) - return err + return "", err + } + klog.InfofDepth(1, "Tenant namespace %s created in %s cluster", name, clusterID) + return name, nil +} + +// CreateKubeClientFromConfig creates a Kubernetes client from a clientcmdapi.Config. 
+func CreateKubeClientFromConfig(kubeconfig *clientcmdapi.Config, localScheme *runtime.Scheme) (client.Client, *rest.Config, error) { + if kubeconfig == nil { + return nil, nil, fmt.Errorf("kubeconfig is nil") + } + + // Convert clientcmdapi.Config to a rest.Config + restConfig, err := clientcmd.NewDefaultClientConfig(*kubeconfig, &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + return nil, nil, fmt.Errorf("failed to create REST config: %w", err) + } + + // Create the Kubernetes client using the REST config + k8sClient, err := client.New(restConfig, client.Options{ + Scheme: localScheme, + }) + if err != nil { + return nil, nil, fmt.Errorf("failed to create Kubernetes client: %w", err) } - // the secret already exists, update it - return updateAuthTokenSecret(ctx, cl, secret, clusterID, authToken) + return k8sClient, restConfig, nil } -func updateAuthTokenSecret(ctx context.Context, cl client.Client, - secret *corev1.Secret, clusterID, authToken string) error { - labels := secret.GetLabels() - labels[discovery.ClusterIDLabel] = clusterID - labels[discovery.AuthTokenLabel] = "" - secret.SetLabels(labels) +// EstablishNetwork enables the networking module of Liqo between two clusters. +func EstablishNetwork( + ctx context.Context, + localClient client.Client, + localRestConfig *rest.Config, + remoteClient client.Client, + remoteRestConfig *rest.Config, +) (localConn, remoteConn *networkingv1beta1.Connection, localNsName, remoteNsName string, er error) { + // Retrieve remote liqo cluster id + + klog.InfofDepth(1, "Establishing network...") + + // Transform the client to a clientSet + remoteKubeClient, err := kubernetes.NewForConfig(remoteRestConfig) + if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) + return nil, nil, "", "", err + } + + remoteClusterIdentity, err := utils.GetClusterID(ctx, remoteKubeClient, consts.LiqoNamespace) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + localKubeClient, err := kubernetes.NewForConfig(localRestConfig) + if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) + return nil, nil, "", "", err + } + + // Retrieve local liqo cluster id + localClusterIdentity, err := utils.GetClusterID(ctx, localKubeClient, consts.LiqoNamespace) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfoDepth(1, "Creating tenant namespaces...") + + // Create local tenant namespaces + localNamespaceName, err := createTenantNamespace(ctx, localClient, remoteClusterIdentity) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + // Create remote tenant namespaces + remoteNamespaceName, err := createTenantNamespace(ctx, remoteClient, localClusterIdentity) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfoDepth(1, "Creating configurations...") + + // Create local configuration + localConfiguration, err := createConfiguration( + ctx, + localClient, + remoteNamespaceName, + localClusterIdentity, + ) + + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + // Remote cluster applies Local configuration + err = remoteClient.Create(ctx, localConfiguration) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfofDepth(1, "Local configuration %s created", localConfiguration.Name) + + // Create remote configuration + remoteConfiguration, err := createConfiguration( + ctx, + remoteClient, + localNamespaceName, + remoteClusterIdentity, + ) + + if err != nil { + klog.Error(err) + return 
nil, nil, "", "", err + } + + // Local cluster applies Remote configuration + err = localClient.Create(ctx, remoteConfiguration) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfofDepth(1, "Remote configuration %s created", remoteConfiguration.Name) + + klog.InfoDepth(1, "Creating Gateway Server and Client...") + + gwServer, gatewayServerIP, gatewayServerPort, remoteSecretRef, err := createGatewayServer( + ctx, + localClusterIdentity, + remoteClient, + remoteKubeClient, + remoteNamespaceName, + ) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfofDepth( + 1, + "Gateway Server IP %s and Port %d created. Remote secret ref %s detected.", + gatewayServerIP, + gatewayServerPort, + remoteSecretRef.Name, + ) - if secret.StringData == nil { - secret.StringData = map[string]string{} + gwClient, localSecretRef, err := createGatewayClient( + ctx, + localClient, + localKubeClient, + localNamespaceName, + remoteClusterIdentity, + gatewayServerIP, + gatewayServerPort, + ) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err } - secret.StringData[consts.LiqoTokenKey] = authToken - err := cl.Update(ctx, secret) + klog.InfofDepth(1, "Gatewat Client created. Local secret reference %s found", localSecretRef.Name) + + // Generate public key on Local cluster + localPublicKey, err := generatePublicKey( + ctx, + localClient, + localClusterIdentity, + remoteNamespaceName, + localSecretRef, + ) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + // Create public key on Local cluster + err = remoteClient.Create(ctx, localPublicKey) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfofDepth(1, "Local public key %s created", localPublicKey.Name) + + // Generate public key on Remote cluster + remotePublicKey, err := generatePublicKey( + ctx, + remoteClient, + remoteClusterIdentity, + localNamespaceName, + remoteSecretRef, + ) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + // Create public key on Remote cluster + err = localClient.Create(ctx, remotePublicKey) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + klog.InfofDepth(1, "Remote public key %s created", remotePublicKey.Name) + + klog.InfoDepth(1, "Checking connections both on local and remote clusters...") + + localConnection, remoteConnection, err := checkConnections( + ctx, + localClient, + localClusterIdentity, + remoteClient, + remoteClusterIdentity, + gwClient.Name, + gwServer.Name, + ) + if err != nil { + klog.Error(err) + return nil, nil, "", "", err + } + + return localConnection, remoteConnection, localNamespaceName, remoteNamespaceName, nil +} + +func checkConnections( + ctx context.Context, + localClient client.Client, + localClusterIdentity corev1beta1.ClusterID, + remoteClient client.Client, + remoteClusterIdentity corev1beta1.ClusterID, + gwClientName, + gwServerName string, +) (localConn, remoteConn *networkingv1beta1.Connection, er error) { + // Check connection on local cluster + localConnection := &networkingv1beta1.Connection{} + remoteConnection := &networkingv1beta1.Connection{} + + timeout := time.After(2 * time.Minute) + tick := time.Tick(5 * time.Second) + +outerLoopLocalConnection: + for { + select { + case <-timeout: + return nil, nil, fmt.Errorf("timed out waiting for local connection to be ready") + case <-tick: + // Get the local connection, between all the connections in the cluster + // Choose the one labeled liqo.io/remote-cluster-id to be the 
remote cluster id and owner reference to be the gateway client + localConnections := &networkingv1beta1.ConnectionList{} + err := localClient.List(ctx, localConnections, client.MatchingLabels{liqoConsts.RemoteClusterID: string(remoteClusterIdentity)}) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, nil, err + } + } else { + break outerLoopLocalConnection + } + // Get the connection with the gateway client as owner reference + for i := range localConnections.Items { + conn := &localConnections.Items[i] + if conn.OwnerReferences[0].Name == gwClientName { + localConnection = conn + // Check the connection status + if localConnection.Status.Value == networkingv1beta1.Connected { + break outerLoopLocalConnection + } + klog.InfofDepth(1, "Local connection %s not ready, it is in status %s", localConnection.Name, localConnection.Status.Value) + } + } + } + } + + klog.InfofDepth(1, "Local connection %s found", localConnection.Name) + + // Check connection on remote cluster +outerLoopRemoteConnection: + for { + select { + case <-timeout: + return nil, nil, fmt.Errorf("timed out waiting for remote connection to be ready") + case <-tick: + // Get the remote connection, between all the connections in the cluster + // Choose the one labeled liqo.io/remote-cluster-id to be the remote cluster id and owner reference to be the gateway server + remoteConnections := &networkingv1beta1.ConnectionList{} + err := remoteClient.List(ctx, remoteConnections, client.MatchingLabels{liqoConsts.RemoteClusterID: string(localClusterIdentity)}) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return nil, nil, err + } + } else { + break outerLoopRemoteConnection + } + // Get the connection with the gateway server as owner reference + for i := range remoteConnections.Items { + conn := &remoteConnections.Items[i] + if conn.OwnerReferences[0].Name == gwServerName { + remoteConnection = conn + // Check the connection status + if remoteConnection.Status.Value == networkingv1beta1.Connected { + break outerLoopRemoteConnection + } + klog.InfofDepth(1, "Remote connection %s not ready, it is in status %s", remoteConnection.Name, remoteConnection.Status.Value) + } + } + } + } + + klog.InfofDepth(1, "Remote connection %s found", remoteConnection.Name) + + return localConnection, remoteConnection, nil +} + +func createGatewayClient( + ctx context.Context, + localClient client.Client, + localKubeClient kubernetes.Interface, + localNamespaceName string, + remoteClusterIdentity corev1beta1.ClusterID, + gatewayServerIP []string, + gatewayServerPort int32, +) (gwCl *networkingv1beta1.GatewayClient, localSecRef *corev1.ObjectReference, er error) { + // Create GatewayClient on Local cluster + gwClientName := string(remoteClusterIdentity) + gatewayClient, err := networkForgeLiqo.GatewayClient( + localNamespaceName, + &gwClientName, + &networkForgeLiqo.GwClientOptions{ + KubeClient: localKubeClient, + RemoteClusterID: remoteClusterIdentity, + GatewayType: networkForgeLiqo.DefaultGwClientType, + TemplateName: networkForgeLiqo.DefaultGwClientTemplateName, + TemplateNamespace: consts.LiqoNamespace, + MTU: 1340, + Addresses: gatewayServerIP, + Port: gatewayServerPort, + Protocol: networkForgeLiqo.DefaultProtocol, + }, + ) + if err != nil { + klog.Error(err) + return nil, nil, err + } + + // Create GatewayClient on Local cluster + err = localClient.Create(ctx, gatewayClient) + if err != nil { + klog.Error(err) + return nil, nil, err + } + + klog.InfofDepth(1, "GatewayClient %s 
created", gwClientName) + + klog.InfoDepth(1, "Creating public keys...") + + // Retrieve local localSecret for the local cluster + timeout := time.After(2 * time.Minute) + tick := time.Tick(5 * time.Second) + +outerLoopGwClient: + for { + select { + case <-timeout: + return nil, nil, fmt.Errorf("timed out waiting for GatewayClient Secret Ref to be ready") + case <-tick: + // Retrieve the gateway client + err = localClient.Get(ctx, client.ObjectKey{Name: gwClientName, Namespace: localNamespaceName}, gatewayClient) + if err != nil { + klog.Error(err) + return nil, nil, err + } + if gatewayClient.Status.SecretRef != nil { + break outerLoopGwClient + } + } + } + + localSecretRef := gatewayClient.Status.SecretRef + + return gatewayClient, localSecretRef, nil +} + +func createGatewayServer( + ctx context.Context, + localClusterIdentity corev1beta1.ClusterID, + remoteClient client.Client, + remoteKubeClient kubernetes.Interface, + remoteNamespaceName string, +) ( + gatewayServer *networkingv1beta1.GatewayServer, + gatewayServerIP []string, + gatewayServerPort int32, + remoteSecretRef *corev1.ObjectReference, + err error, +) { + // Create GatewayServer on Remote cluster + gwServerName := string(localClusterIdentity) + gatewayServer, err = networkForgeLiqo.GatewayServer( + remoteNamespaceName, + &gwServerName, + &networkForgeLiqo.GwServerOptions{ + KubeClient: remoteKubeClient, + RemoteClusterID: localClusterIdentity, + GatewayType: networkForgeLiqo.DefaultGwServerType, + TemplateName: networkForgeLiqo.DefaultGwServerTemplateName, + TemplateNamespace: consts.LiqoNamespace, + ServiceType: corev1.ServiceTypeNodePort, + MTU: 1340, + Port: networkForgeLiqo.DefaultGwServerPort, + }, + ) + if err != nil { + klog.Error(err) + return nil, nil, 0, nil, err + } + + err = remoteClient.Create(ctx, gatewayServer) + if err != nil { + klog.Error(err) + return nil, nil, 0, nil, err + } + + klog.InfofDepth(1, "GatewayServer %s created", gwServerName) + + // Wait for the GatewayServer to be ready with a timeout + timeout := time.After(2 * time.Minute) + tick := time.Tick(5 * time.Second) + +outerLoopGwServer: + for { + select { + case <-timeout: + return nil, nil, 0, nil, fmt.Errorf("timed out waiting for GatewayServer to be ready") + case <-tick: + // Retrieve the GatewayServer IP + err = remoteClient.Get(ctx, client.ObjectKey{Name: gwServerName, Namespace: remoteNamespaceName}, gatewayServer) + if err != nil { + klog.Error(err) + return nil, nil, 0, nil, err + } + if gatewayServer.Status.Endpoint != nil && gatewayServer.Status.Endpoint.Addresses != nil && gatewayServer.Status.SecretRef != nil { + break outerLoopGwServer + } + } + } + + // Retrieve the GatewayServer IP + gatewayServerIP = gatewayServer.Status.Endpoint.Addresses + gatewayServerPort = gatewayServer.Status.Endpoint.Port + remoteSecretRef = gatewayServer.Status.SecretRef + + return gatewayServer, gatewayServerIP, gatewayServerPort, remoteSecretRef, nil +} + +// Authentication enables the authentication module of Liqo between two clusters. 
+func Authentication( + ctx context.Context, + localClient client.Client, + localRestConfig *rest.Config, + remoteClient client.Client, + remoteRestConfig *rest.Config, + localNamespaceName, + remoteNamespaceName string) error { + klog.Infof("Authentication...") + + // Transform the client to a clientSet + localKubeClient, err := kubernetes.NewForConfig(localRestConfig) + if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) + return err + } + + remoteKubeClient, err := kubernetes.NewForConfig(remoteRestConfig) + if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) + return err + } + + // Get local cluster id + localClusterIdentity, err := utils.GetClusterID(ctx, localKubeClient, consts.LiqoNamespace) + if err != nil { + klog.Error(err) + return err + } + + // Get remote cluster id + remoteClusterIdentity, err := utils.GetClusterID(ctx, remoteKubeClient, consts.LiqoNamespace) + if err != nil { + klog.Error(err) + return err + } + + // Forge Nonce secret + nonceSecret := authenticationForge.Nonce(remoteNamespaceName) + + err = authenticationForge.MutateNonce(nonceSecret, localClusterIdentity) + if err != nil { + klog.Error(err) + return err + } + + // Create Nonce secret on Remote cluster + err = remoteClient.Create(ctx, nonceSecret) + if err != nil { + klog.Error(err) + return err + } + + klog.InfofDepth(1, "Nonce secret %s created in remote cluster %s", nonceSecret.Name, remoteClusterIdentity) + + // Get signed nonce + timeout := time.After(2 * time.Minute) + tick := time.Tick(5 * time.Second) +outerLoopNonce: + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for signed nonce") + case <-tick: + // Retrieve the nonce secret + err = remoteClient.Get(ctx, client.ObjectKey{Name: nonceSecret.Name, Namespace: remoteNamespaceName}, nonceSecret) + if err != nil { + klog.Error(err) + return err + } + if nonceSecret.Data["nonce"] != nil { + break outerLoopNonce + } + } + } + + nonceData := nonceSecret.Data["nonce"] + + // Ensure signed nonce + klog.InfoDepth(1, "Ensuring signed nonce...") + err = authenticationUtils.EnsureSignedNonceSecret( + ctx, + localClient, + remoteClusterIdentity, + localNamespaceName, + ptr.To(string(nonceData)), + ) + if err != nil { + klog.Error(err) + return err + } + + // Retrieving signed nonce + klog.InfoDepth(1, "Retrieving signed nonce...") + signedNonce := []byte{} + _ = signedNonce +outerLoopSignedNonce: + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for signed nonce") + case <-tick: + // Retrieve the signed nonce secret + signedNonce, err = authenticationUtils.RetrieveSignedNonce(ctx, localClient, remoteClusterIdentity) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return err + } + } + if signedNonce != nil { + break outerLoopSignedNonce + } + } + } + + tenant, err := authenticationUtils.GenerateTenant(ctx, localClient, localClusterIdentity, consts.LiqoNamespace, signedNonce, nil) + if err != nil { + klog.Error(err) + return err + } + + // Create Tenant on Remote cluster + err = remoteClient.Create(ctx, tenant) + if err != nil { + klog.Error(err) + return err + } + + klog.InfofDepth(1, "Tenant %s created in remote cluster %s", tenant.Name, remoteClusterIdentity) + + // Wait for tenant status to be ready +outerLoopTenant: + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for tenant to be ready") + case <-tick: + // Retrieve the tenant + err = remoteClient.Get(ctx, client.ObjectKey{Name: tenant.Name, Namespace: 
tenant.Namespace}, tenant) + if err != nil { + klog.Error(err) + return err + } + if tenant.Status.AuthParams != nil && tenant.Status.TenantNamespace != "" { + break outerLoopTenant + } + } + } + + klog.InfofDepth(1, "Tenant %s ready", tenant.Name) + + klog.Infof("Creating Identity...") + // From the provider cluster generate identity controlplane + identity, err := authenticationUtils.GenerateIdentityControlPlane( + ctx, + remoteClient, + localClusterIdentity, + localNamespaceName, + remoteClusterIdentity, + ) + if err != nil { klog.Error(err) return err } + // Create Identity on Local cluster + err = localClient.Create(ctx, identity) + if err != nil { + klog.Error(err) + return err + } + + klog.InfofDepth(1, "Identity %s created in local cluster %s", identity.Name, localClusterIdentity) + return nil } -func createAuthTokenSecret(ctx context.Context, cl client.Client, - secretName, liqoNamespace, clusterID, authToken string) error { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: liqoNamespace, - Labels: map[string]string{ - discovery.ClusterIDLabel: clusterID, - discovery.AuthTokenLabel: "", - }, - }, - StringData: map[string]string{ - "token": authToken, +// Offloading enables the offloading module of Liqo between two clusters. +func Offloading( + ctx context.Context, + localClient client.Client, + remoteRestConfig *rest.Config, + localNamespaceName string, + contract *reservation.Contract, +) error { + // Forge resourceslice + rs := authenticationForge.ResourceSlice(contract.Name, localNamespaceName) + if rs == nil { + return fmt.Errorf("unable to forge resourceslice") + } + + remoteKubeClient, err := kubernetes.NewForConfig(remoteRestConfig) + if err != nil { + klog.Errorf("Error creating the clientSet: %s", err) + return err + } + + // Get remote cluster id + remoteClusterIdentity, err := utils.GetClusterID(ctx, remoteKubeClient, consts.LiqoNamespace) + if err != nil { + klog.Error(err) + return err + } + + err = authenticationForge.MutateResourceSlice( + rs, + remoteClusterIdentity, + &authenticationForge.ResourceSliceOptions{ + Class: v1beta1.ResourceSliceClassDefault, + Resources: func() map[corev1.ResourceName]string { + resources, err := getContractResourcesByClusterID(contract) + if err != nil { + klog.Error(err) + return nil + } + return resources + }(), }, + true, + ) + if err != nil { + klog.Error(err) + return err } - err := cl.Create(ctx, secret) + // Create resourceslice on Local cluster + err = localClient.Create(ctx, rs) if err != nil { klog.Error(err) return err } + // Wait for resource slice status authentication to be ready and resources accepted + timeout := time.After(2 * time.Minute) + tick := time.Tick(5 * time.Second) + +outerLoopQuotas: + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for quotas to be ready") + case <-tick: + // Retrieve the resourceslice + err = localClient.Get(ctx, client.ObjectKey{Name: rs.Name, Namespace: rs.Namespace}, rs) + if err != nil { + klog.Error(err) + return err + } + if rs.Status.Resources != nil && rs.Status.AuthParams != nil { + break outerLoopQuotas + } + } + } + + klog.InfofDepth(1, "ResourceSlice %s created in local cluster %s", rs.Name, localNamespaceName) + + klog.InfoDepth(1, "Waiting for VirtualNode...") + // Wait for virtualnode created in local cluster +outerLoopVirtualNode: + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for virtual node to be ready") + case <-tick: + // Retrieve the virtual node + virtualNode := 
&offloadingv1beta1.VirtualNode{} + err = localClient.Get(ctx, client.ObjectKey{Name: rs.Name, Namespace: rs.Namespace}, virtualNode) + if err != nil { + if client.IgnoreNotFound(err) != nil { + klog.Error(err) + return err + } + // VirtualNode not created yet, keep waiting + continue + } + break outerLoopVirtualNode + } + } + + klog.InfofDepth(1, "VirtualNode %s created in namespace %s", rs.Name, localNamespaceName) + return nil } +func generatePublicKey( + ctx context.Context, + cl client.Client, + localClusterID corev1beta1.ClusterID, + namespaceName string, + secretRef *corev1.ObjectReference) (*networkingv1beta1.PublicKey, error) { + // Retrieve the secret + secret := &corev1.Secret{} + err := cl.Get(ctx, client.ObjectKey{Name: secretRef.Name, Namespace: secretRef.Namespace}, secret) + if err != nil { + klog.Error(err) + return nil, err + } + + // Retrieve the public key from the field in the secret + stringPublicKey := string(secret.Data["publicKey"]) + if stringPublicKey == "" { + return nil, fmt.Errorf("public key not found") + } + + stringLocalClusterIdentity := string(localClusterID) + + // Generate public key on Local cluster + publicKey, err := networkForgeLiqo.PublicKey( + namespaceName, + &stringLocalClusterIdentity, + localClusterID, + []byte(stringPublicKey), + ) + if err != nil { + klog.Error(err) + return nil, err + } + + return publicKey, nil +} + +func createConfiguration( + ctx context.Context, + cl client.Client, + destinationNamespace string, + localClusterID corev1beta1.ClusterID) (*networkingv1beta1.Configuration, error) { + klog.InfoDepth(1, "Creating configuration...") + // Retrieve local pod CIDR + localPodCIDR, err := ipamLiqo.GetPodCIDR(ctx, cl) + if err != nil { + klog.Error(err) + return nil, err + } + + // Retrieve local external CIDR + localExternalCIDR, err := ipamLiqo.GetExternalCIDR(ctx, cl) + if err != nil { + klog.Error(err) + return nil, err + } + + // Create local configuration + configuration := networkForgeLiqo.Configuration( + string(localClusterID), + destinationNamespace, + localClusterID, + localPodCIDR, + localExternalCIDR, + ) + + if configuration == nil { + return nil, fmt.Errorf("unable to create local configuration") + } + + return configuration, nil +} + +// PeerWithCluster performs the network, authentication, and offloading steps needed to peer with a remote cluster. +func PeerWithCluster( + ctx context.Context, + localClient client.Client, + localRestConfig *rest.Config, + remoteclient client.Client, + remoteRestConfig *rest.Config, + contract *reservation.Contract) (*networkingv1beta1.Connection, error) { + // Establish network with remote cluster + localConnection, _, localNamespaceName, remoteNamespaceName, err := EstablishNetwork( + ctx, + localClient, + localRestConfig, + remoteclient, + remoteRestConfig) + if err != nil { + klog.Error(err) + return nil, err + } + + // Authenticate with remote cluster + err = Authentication(ctx, localClient, localRestConfig, remoteclient, remoteRestConfig, localNamespaceName, remoteNamespaceName) + if err != nil { + klog.Error(err) + return nil, err + } + + err = Offloading(ctx, localClient, remoteRestConfig, localNamespaceName, contract) + if err != nil { + klog.Error(err) + return nil, err + } + + return localConnection, nil +} + // OffloadNamespace creates a NamespaceOffloading inside the specified namespace with given pod offloading strategy and cluster selector. 
-func OffloadNamespace(ctx context.Context, cl client.Client, namespaceName string, strategy offloadingv1alpha1.PodOffloadingStrategyType, - clusterTargetID string) (*offloadingv1alpha1.NamespaceOffloading, error) { +func OffloadNamespace(ctx context.Context, cl client.Client, namespaceName string, strategy offloadingv1beta1.PodOffloadingStrategyType, + clusterTargetID string) (*offloadingv1beta1.NamespaceOffloading, error) { nodeValues := make([]string, 0) nodeValues = append(nodeValues, clusterTargetID) // Create a NamespaceOffloading - namespaceOffloading := &offloadingv1alpha1.NamespaceOffloading{ + namespaceOffloading := &offloadingv1beta1.NamespaceOffloading{ ObjectMeta: metav1.ObjectMeta{ Name: "offloading", Namespace: namespaceName, }, - Spec: offloadingv1alpha1.NamespaceOffloadingSpec{ - NamespaceMappingStrategy: offloadingv1alpha1.EnforceSameNameMappingStrategyType, + Spec: offloadingv1beta1.NamespaceOffloadingSpec{ + NamespaceMappingStrategy: offloadingv1beta1.EnforceSameNameMappingStrategyType, PodOffloadingStrategy: strategy, ClusterSelector: corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ diff --git a/pkg/rear-controller/grpc/service.go b/pkg/virtual-fabric-manager/utils.go similarity index 58% rename from pkg/rear-controller/grpc/service.go rename to pkg/virtual-fabric-manager/utils.go index aea733cd..2d74e2a9 100644 --- a/pkg/rear-controller/grpc/service.go +++ b/pkg/virtual-fabric-manager/utils.go @@ -1,4 +1,4 @@ -// Copyright 2022-2024 FLUIDOS Project +// Copyright 2022-2025 FLUIDOS Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,84 +12,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -package grpc +package virtualfabricmanager import ( - "context" - "fmt" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" nodecorev1alpha1 "github.com/fluidos-project/node/apis/nodecore/v1alpha1" reservationv1alpha1 "github.com/fluidos-project/node/apis/reservation/v1alpha1" ) -func getContractResourcesByClusterID(cl client.Client, clusterID string) (map[string]*resource.Quantity, error) { - var contracts reservationv1alpha1.ContractList - - var contractsBuyer reservationv1alpha1.ContractList - var contractsSeller reservationv1alpha1.ContractList - - // Retrieve contracts where the cluster which is requesting the peering is the buyer - if err := cl.List(context.Background(), &contractsBuyer, client.MatchingFields{"spec.buyer.additionalInformation.liqoID": clusterID}); err != nil { - if client.IgnoreNotFound(err) != nil { - klog.Errorf("Error when listing Contracts: %s", err) - return nil, err - } - } - - // Retrieve contracts where the cluster which is requesting the peering is the seller - if err := cl.List(context.Background(), &contractsSeller, client.MatchingFields{"spec.seller.additionalInformation.liqoID": clusterID}); err != nil { - if client.IgnoreNotFound(err) != nil { - klog.Errorf("Error when listing Contracts: %s", err) - return nil, err - } - } - - contracts.Items = append(contracts.Items, contractsBuyer.Items...) - contracts.Items = append(contracts.Items, contractsSeller.Items...) 
- - if len(contracts.Items) == 0 { - klog.Errorf("No contracts found for cluster %s", clusterID) - return nil, fmt.Errorf("no contracts found for cluster %s", clusterID) - } - - if len(contracts.Items) > 1 { - resources := multipleContractLogic(contracts.Items) - return resources, nil - } - - contract := contracts.Items[0] - +func getContractResourcesByClusterID(contract *reservationv1alpha1.Contract) (map[corev1.ResourceName]string, error) { if contract.Spec.Configuration != nil { - return addResources(make(map[string]*resource.Quantity), contract.Spec.Configuration, &contract.Spec.Flavor), nil + return addResources(make(map[corev1.ResourceName]string), contract.Spec.Configuration, &contract.Spec.Flavor) } - return addResourceByFlavor(make(map[string]*resource.Quantity), &contract.Spec.Flavor), nil + return addResourceByFlavor(make(map[corev1.ResourceName]string), &contract.Spec.Flavor) } -func multipleContractLogic(contracts []reservationv1alpha1.Contract) map[string]*resource.Quantity { - resources := make(map[string]*resource.Quantity) - for i := range contracts { - if contracts[i].Spec.Configuration != nil { - resources = addResources(resources, contracts[i].Spec.Configuration, &contracts[i].Spec.Flavor) - } else { - resources = addResourceByFlavor(resources, &contracts[i].Spec.Flavor) - } - } - return resources -} - -func addResourceByFlavor(resources map[string]*resource.Quantity, flavor *nodecorev1alpha1.Flavor) map[string]*resource.Quantity { - var resourcesToAdd = make(map[string]*resource.Quantity) +func addResourceByFlavor(resources map[corev1.ResourceName]string, flavor *nodecorev1alpha1.Flavor) (map[corev1.ResourceName]string, error) { + klog.InfofDepth(1, "Current resources: %v", len(resources)) + var resourcesToAdd = make(map[corev1.ResourceName]string) // Parse flavor flavorType, flavorData, err := nodecorev1alpha1.ParseFlavorType(flavor) if err != nil { klog.Errorf("Error when parsing flavor: %s", err) - return nil + return nil, err } switch flavorType { case nodecorev1alpha1.TypeK8Slice: @@ -109,38 +58,32 @@ func addResourceByFlavor(resources map[string]*resource.Quantity, flavor *nodeco // TODO (Sensor): Implement Liqo resource management for Sensors default: klog.Errorf("Flavor type %s not supported", flavorType) - return nil + return nil, err } - for key, value := range resourcesToAdd { - if prevRes, ok := resources[key]; !ok { - resources[key] = value - } else { - prevRes.Add(*value) - resources[key] = prevRes - } - } + resources = resourcesToAdd - return resources + return resources, nil } // This function adds the resources of a contract to the existing resourceList. 
-func addResources(resources map[string]*resource.Quantity, +func addResources(resources map[corev1.ResourceName]string, configuration *nodecorev1alpha1.Configuration, - flavor *nodecorev1alpha1.Flavor) map[string]*resource.Quantity { - var resourcesToAdd = make(map[string]*resource.Quantity) + flavor *nodecorev1alpha1.Flavor) (map[corev1.ResourceName]string, error) { + var resourcesToAdd = make(map[corev1.ResourceName]string) + klog.InfofDepth(1, "Current resources: %v", len(resources)) // Parse configuration configurationType, configurationData, err := nodecorev1alpha1.ParseConfiguration(configuration, flavor) if err != nil { klog.Errorf("Error when parsing configuration: %s", err) - return nil + return nil, err } // Parse flavor flavorType, flavorData, err := nodecorev1alpha1.ParseFlavorType(flavor) if err != nil { klog.Errorf("Error when parsing flavor: %s", err) - return nil + return nil, err } switch configurationType { @@ -157,18 +100,18 @@ func addResources(resources map[string]*resource.Quantity, serviceConfiguration, ok := configurationData.(nodecorev1alpha1.ServiceConfiguration) if !ok { klog.Errorf("Error when casting ServiceConfiguration") - return nil + return nil, err } // Check if the flavor type matches the configuration type if flavorType != nodecorev1alpha1.TypeService { klog.Errorf("Flavor type %s does not match configuration type %s", flavorType, configurationType) - return nil + return nil, err } // Force casting of the flavor serviceFlavor, ok := flavorData.(nodecorev1alpha1.ServiceFlavor) if !ok { klog.Errorf("Error when casting ServiceFlavor") - return nil + return nil, err } // Obtain the resources from the configuration resourcesToAdd = mapServiceWithConfigurationToResources(&serviceFlavor, &serviceConfiguration) @@ -177,36 +120,28 @@ func addResources(resources map[string]*resource.Quantity, klog.Errorf("Sensor configuration not supported yet") default: klog.Errorf("Configuration type %s not supported", configurationType) - return nil + return nil, err } - // Add the resources of the flavor to the existing resources - for key, value := range resourcesToAdd { - if prevRes, ok := resources[key]; !ok { - resources[key] = value - } else { - prevRes.Add(*value) - resources[key] = prevRes - } - } + resources = resourcesToAdd - return resources + return resources, nil } -func mapK8SliceConfigurationToResources(k8SliceConfiguration *nodecorev1alpha1.K8SliceConfiguration) map[string]*resource.Quantity { - resources := make(map[string]*resource.Quantity) - resources[corev1.ResourceCPU.String()] = &k8SliceConfiguration.CPU - resources[corev1.ResourceMemory.String()] = &k8SliceConfiguration.Memory - resources[corev1.ResourcePods.String()] = &k8SliceConfiguration.Pods +func mapK8SliceConfigurationToResources(k8SliceConfiguration *nodecorev1alpha1.K8SliceConfiguration) map[corev1.ResourceName]string { + resources := make(map[corev1.ResourceName]string) + resources[corev1.ResourceCPU] = k8SliceConfiguration.CPU.String() + resources[corev1.ResourceMemory] = k8SliceConfiguration.Memory.String() + resources[corev1.ResourcePods] = k8SliceConfiguration.Pods.String() if k8SliceConfiguration.Storage != nil { - resources[corev1.ResourceStorage.String()] = k8SliceConfiguration.Storage - resources[corev1.ResourceEphemeralStorage.String()] = k8SliceConfiguration.Storage + resources[corev1.ResourceStorage] = k8SliceConfiguration.Storage.String() + resources[corev1.ResourceEphemeralStorage] = k8SliceConfiguration.Storage.String() } return resources } func 
mapServiceWithConfigurationToResources(service *nodecorev1alpha1.ServiceFlavor, - serviceConfiguration *nodecorev1alpha1.ServiceConfiguration) map[string]*resource.Quantity { + serviceConfiguration *nodecorev1alpha1.ServiceConfiguration) map[corev1.ResourceName]string { var hostingPolicy nodecorev1alpha1.HostingPolicy // Check the hosting policy of the service @@ -230,18 +165,18 @@ func mapServiceWithConfigurationToResources(service *nodecorev1alpha1.ServiceFla func mapServiceToResourcesWithHostingPolicy( service *nodecorev1alpha1.ServiceFlavor, - hostingPolicy nodecorev1alpha1.HostingPolicy) map[string]*resource.Quantity { - resources := make(map[string]*resource.Quantity) + hostingPolicy nodecorev1alpha1.HostingPolicy) map[corev1.ResourceName]string { + resources := make(map[corev1.ResourceName]string) // Set default resources to minimum values: 500m CPU, 500MB memory, 10 pod - resources[corev1.ResourceCPU.String()] = resource.NewMilliQuantity(800, resource.DecimalSI) - resources[corev1.ResourceMemory.String()] = resource.NewScaledQuantity(800, resource.Mega) - resources[corev1.ResourcePods.String()] = resource.NewQuantity(10, resource.DecimalSI) + resources[corev1.ResourceCPU] = resource.NewMilliQuantity(800, resource.DecimalSI).String() + resources[corev1.ResourceMemory] = resource.NewScaledQuantity(800, resource.Mega).String() + resources[corev1.ResourcePods] = resource.NewQuantity(10, resource.DecimalSI).String() // Print default resources klog.Infof("Default resources for service %s:", service.Name) for key, value := range resources { - klog.Infof("%s: %s", key, value.String()) + klog.Infof("%s: %s", key, value) } switch hostingPolicy { @@ -265,14 +200,14 @@ func mapServiceToResourcesWithHostingPolicy( } } -func mapK8SliceToResources(k8Slice *nodecorev1alpha1.K8Slice) map[string]*resource.Quantity { - resources := make(map[string]*resource.Quantity) - resources[corev1.ResourceCPU.String()] = &k8Slice.Characteristics.CPU - resources[corev1.ResourceMemory.String()] = &k8Slice.Characteristics.Memory - resources[corev1.ResourcePods.String()] = &k8Slice.Characteristics.Pods +func mapK8SliceToResources(k8Slice *nodecorev1alpha1.K8Slice) map[corev1.ResourceName]string { + resources := make(map[corev1.ResourceName]string) + resources[corev1.ResourceCPU] = k8Slice.Characteristics.CPU.String() + resources[corev1.ResourceMemory] = k8Slice.Characteristics.Memory.String() + resources[corev1.ResourcePods] = k8Slice.Characteristics.Pods.String() if k8Slice.Characteristics.Storage != nil { - resources[corev1.ResourceStorage.String()] = k8Slice.Characteristics.Storage - resources[corev1.ResourceEphemeralStorage.String()] = k8Slice.Characteristics.Storage + resources[corev1.ResourceStorage] = k8Slice.Characteristics.Storage.String() + resources[corev1.ResourceEphemeralStorage] = k8Slice.Characteristics.Storage.String() } return resources } diff --git a/tools/scripts/install_liqo.sh b/tools/scripts/install_liqo.sh index 6d334b21..2140cbb6 100644 --- a/tools/scripts/install_liqo.sh +++ b/tools/scripts/install_liqo.sh @@ -1,25 +1,11 @@ #!/usr/bin/bash - -# Function to check if liqoctl is installed -check_and_install_liqoctl() { - if ! command -v liqoctl &> /dev/null; then - echo "liqoctl not found. Installing liqoctl..." - # Example installation command for liqoctl, you may need to update this based on the official installation instructions - curl -sL https://get.liqo.io | bash || { echo "Failed to install liqoctl"; exit 1; } - echo "liqoctl installed successfully." 
- else - echo "liqoctl is already installed." - fi -} - # Check if provider parameter is provided if [ -z "$1" ]; then echo "No provider specified. Please provide a cloud provider (aws, azure, gcp, etc.)." exit 1 fi -check_and_install_liqoctl # Get the provider parameter # Get the provider parameter @@ -31,11 +17,13 @@ CLUSTER_NAME=$2 # Get the Kubeconfig KUBECONFIG_LIQO=$3 -helm repo update +LIQOCTL_PATH=$4 + +# Print Liqo version +$LIQOCTL_PATH version --client # Install Liqo based on the provider -liqoctl install "$PROVIDER" --cluster-name "$CLUSTER_NAME" --kubeconfig "$KUBECONFIG_LIQO" || { echo "Failed to install Liqo for provider: $PROVIDER"; exit 1; } -liqoctl install "$PROVIDER" --cluster-name "$CLUSTER_NAME" --kubeconfig "$KUBECONFIG_LIQO" || { echo "Failed to install Liqo for provider: $PROVIDER"; exit 1; } +$LIQOCTL_PATH install "$PROVIDER" --cluster-id "$CLUSTER_NAME" --kubeconfig "$KUBECONFIG_LIQO" || { echo "Failed to install Liqo for provider: $PROVIDER"; exit 1; } # liqoctl install "$PROVIDER" || { echo "Failed to install Liqo for provider: $PROVIDER"; exit 1; } echo "Liqo installation for provider $PROVIDER completed successfully." diff --git a/tools/scripts/installation.sh b/tools/scripts/installation.sh index ee3fb47a..f6bd3634 100644 --- a/tools/scripts/installation.sh +++ b/tools/scripts/installation.sh @@ -103,6 +103,8 @@ function install_components() { echo "Value: ${clusters[$cluster]}" done + echo "Liqoctl version in installation.sh: $(liqoctl version 2>&1 | grep -oP 'Client version: \K\S+')" + if [ "$local_repositories" == "true" ]; then unset COMPONENT_MAP declare -A COMPONENT_MAP @@ -135,6 +137,8 @@ function install_components() { pids=() fi + echo "Liqoctl version in installation.sh: $(liqoctl version 2>&1 | grep -oP 'Client version: \K\S+')" + # Iterate over the clusters for cluster in "${!clusters[@]}"; do @@ -142,6 +146,10 @@ function install_components() { echo "Cluster is: $cluster" echo "Cluster value is: ${clusters[$cluster]}" + liqoctl_path=$(alias liqoctl | sed -E "s/alias liqoctl='(.*)'/\1/") + echo "Liqoctl version in installation.sh CLUSTER LOOP: $(liqoctl version 2>&1 | grep -oP 'Client version: \K\S+')" + echo "Liqoctl path is: $liqoctl_path" + # Get the kubeconfig file which depends on variable installation_type KUBECONFIG=$(jq -r '.kubeconfig' <<< "${clusters[$cluster]}") @@ -183,7 +191,7 @@ function install_components() { # Install liqo chmod +x "$SCRIPT_DIR"/install_liqo.sh - "$SCRIPT_DIR"/install_liqo.sh "$installation_type" "$cluster" "$KUBECONFIG" || { echo "Failed to install Liqo in cluster $cluster"; exit 1; } + "$SCRIPT_DIR"/install_liqo.sh "$installation_type" "$cluster" "$KUBECONFIG" "$liqoctl_path" || { echo "Failed to install Liqo in cluster $cluster"; exit 1; } chmod -x "$SCRIPT_DIR"/install_liqo.sh # Skipping the installation of the node Helm chart if the cluster is a provider and its installation type is not kind diff --git a/tools/scripts/requirements.sh b/tools/scripts/requirements.sh index 37b7934f..f977274b 100644 --- a/tools/scripts/requirements.sh +++ b/tools/scripts/requirements.sh @@ -158,17 +158,48 @@ function install_liqoctl() { # Check liqoctl function function check_liqoctl() { print_title "Check liqoctl..." - if ! liqoctl version --client; then - echo "Please install liqoctl first." - # Ask the user if they want to install liqoctl - read -r -p "Do you want to install liqoctl? (y/n): " install_liqoctl - if [ "$install_liqoctl" == "y" ]; then - install_liqoctl - else - echo "LIQO is required to continue. Exiting..." 
- exit 1 - fi + check_and_install_liqoctl +} + +# Function to check if liqoctl is installed +check_and_install_liqoctl() { + if ! command -v liqoctl &> /dev/null; then + echo "liqoctl not found. Installing liqoctl..." + # Example installation command for liqoctl, you may need to update this based on the official installation instructions + install_liqo_not_stable_version + echo "liqoctl installed successfully." + else + # Check the version of the client version of liqo + CLIENT_VERSION=$(liqoctl version 2>&1 | grep -oP 'Client version: \K\S+') + if [ -z "$CLIENT_VERSION" ]; then + echo "Failed to retrieve liqoctl client version" + exit 1 + else + echo "liqoctl client version: $CLIENT_VERSION" + # TODO: Update the version check based on the stable version + # Version currently used is an unstable version, rc.3 + if [ "$CLIENT_VERSION" != "v1.0.0-rc.3" ]; then + echo "liqoctl is not installed at the desired version of v1.0.0-rc.3. Installing liqoctl..." + install_liqo_not_stable_version + else + echo "liqoctl is already installed at the version $CLIENT_VERSION." + fi fi + fi +} + +install_liqo_not_stable_version() { + # Delete if exists the temporary liqo folder + rm -rf /tmp/liqo + # Clone Liqo repository to local tmp folder + git clone --depth 1 --branch v1.0.0-rc.3 https://github.com/liqotech/liqo.git /tmp/liqo || { echo "Failed to clone Liqo repository"; exit 1; } + make -C /tmp/liqo ctl || { echo "Failed to install Liqo"; exit 1; } + echo "Liqo compiled successfully in /tmp/liqo." + # Create temporary alias for liqoctl to make it available in the current shell + alias liqoctl=/tmp/liqo/liqoctl + echo "liqoctl alias created to /tmp/liqo/liqoctl for the current shell." + + shopt -s expand_aliases } # Install jq function diff --git a/tools/scripts/setup.sh b/tools/scripts/setup.sh index 2ec6043e..1dd6a6cb 100755 --- a/tools/scripts/setup.sh +++ b/tools/scripts/setup.sh @@ -100,6 +100,8 @@ fi # Check requirements with function check_tools from requirements.sh check_tools +echo Liqoctl version in setup.sh: "$(liqoctl version --client)" + echo "All the tools are installed." # Check if the input is 1, 2 or 3