From 8edf0f2fd98e411a3eb437c09e4d65ee064f48d0 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 22 Jan 2026 12:52:24 +1100 Subject: [PATCH 01/10] fix: address a mismatched log entry in a UT (#416) Minor fixes Signed-off-by: michaelawyu --- pkg/utils/informer/readiness/readiness_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/informer/readiness/readiness_test.go b/pkg/utils/informer/readiness/readiness_test.go index 973324fdc..8d25ac866 100644 --- a/pkg/utils/informer/readiness/readiness_test.go +++ b/pkg/utils/informer/readiness/readiness_test.go @@ -38,7 +38,7 @@ func TestReadinessChecker(t *testing.T) { name: "nil informer", resourceInformer: nil, expectError: true, - errorContains: "resource informer not initialized", + errorContains: "resource informer is nil", }, { name: "no resources registered", From 93002eac0f2a58035ed92a5e043933fb5a3abc2e Mon Sep 17 00:00:00 2001 From: Yetkin Timocin Date: Thu, 29 Jan 2026 13:16:05 -0800 Subject: [PATCH 02/10] fix: default KUBECONFIG to ~/.kube/config in E2E tests (#405) Signed-off-by: Yetkin Timocin --- test/e2e/framework/cluster.go | 13 ++++++++++++- test/e2e/setup_test.go | 4 ---- test/upgrade/after/setup_test.go | 5 ----- test/upgrade/before/setup_test.go | 5 ----- 4 files changed, 12 insertions(+), 15 deletions(-) diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index 39042f7cc..a994fc030 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -18,6 +18,7 @@ package framework import ( "os" + "path/filepath" "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/meta" @@ -33,9 +34,19 @@ import ( ) var ( - kubeconfigPath = os.Getenv("KUBECONFIG") + kubeconfigPath string ) +func init() { + kubeconfigPath = os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + // Default to $HOME/.kube/config like kubectl does. + if home, err := os.UserHomeDir(); err == nil { + kubeconfigPath = filepath.Join(home, ".kube", "config") + } + } +} + // Cluster object defines the required clients based on the kubeconfig of the test cluster. type Cluster struct { Scheme *runtime.Scheme diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index e3c5af1c5..62b5d54ab 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -76,7 +76,6 @@ const ( hubClusterSAName = "fleet-hub-agent" fleetSystemNS = "fleet-system" - kubeConfigPathEnvVarName = "KUBECONFIG" propertyProviderEnvVarName = "PROPERTY_PROVIDER" azurePropertyProviderEnvVarValue = "azure" fleetClusterResourceIDAnnotationKey = "fleet.azure.com/cluster-resource-id" @@ -324,9 +323,6 @@ func beforeSuiteForAllProcesses() { klog.InitFlags(fs) Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) - // Check if the required environment variable, which specifies the path to kubeconfig file, has been set. - Expect(os.Getenv(kubeConfigPathEnvVarName)).NotTo(BeEmpty(), "Required environment variable KUBECONFIG is not set") - resourceSnapshotCreationMinimumIntervalEnv := os.Getenv("RESOURCE_SNAPSHOT_CREATION_MINIMUM_INTERVAL") if resourceSnapshotCreationMinimumIntervalEnv == "" { // If the environment variable is not set, use a default value. 
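With this change the E2E framework no longer requires the KUBECONFIG environment variable to be set explicitly: cluster.go resolves kubeconfigPath from $KUBECONFIG and falls back to $HOME/.kube/config, mirroring kubectl's default, and the setup suites drop their hard check for the variable. For context, a minimal sketch of how the resolved kubeconfigPath is typically consumed follows; it assumes the framework builds its rest.Config through client-go's clientcmd helpers (not shown in this hunk), and the helper names are illustrative only.

package framework

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildRestConfig is a hypothetical helper: it turns the resolved kubeconfig
// path (plus an optional API server URL override) into a rest.Config.
func buildRestConfig(hubURL, kubeconfigPath string) (*rest.Config, error) {
	// BuildConfigFromFlags only falls back to the in-cluster config when both
	// arguments are empty, so passing the resolved path keeps behavior explicit.
	return clientcmd.BuildConfigFromFlags(hubURL, kubeconfigPath)
}

// newClientSet is likewise illustrative: it shows the path from the resolved
// kubeconfig to a typed clientset used by the tests.
func newClientSet(hubURL, kubeconfigPath string) (*kubernetes.Clientset, error) {
	cfg, err := buildRestConfig(hubURL, kubeconfigPath)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(cfg)
}

An alternative with the same fallback semantics is clientcmd.NewDefaultClientConfigLoadingRules(), which already honors $KUBECONFIG and falls back to $HOME/.kube/config; the explicit init() in cluster.go keeps that fallback visible in the test framework itself.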
diff --git a/test/upgrade/after/setup_test.go b/test/upgrade/after/setup_test.go index 5a2c1a3ad..849e910e5 100644 --- a/test/upgrade/after/setup_test.go +++ b/test/upgrade/after/setup_test.go @@ -63,8 +63,6 @@ const ( memberCluster1EastProdSAName = "fleet-member-agent-cluster-1" memberCluster2EastCanarySAName = "fleet-member-agent-cluster-2" memberCluster3WestProdSAName = "fleet-member-agent-cluster-3" - - kubeConfigPathEnvVarName = "KUBECONFIG" ) const ( @@ -183,9 +181,6 @@ func beforeSuiteForAllProcesses() { klog.InitFlags(fs) Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) - // Check if the required environment variable, which specifies the path to kubeconfig file, has been set. - Expect(os.Getenv(kubeConfigPathEnvVarName)).NotTo(BeEmpty(), "Required environment variable KUBECONFIG is not set") - // Initialize the cluster objects and their clients. hubCluster = framework.NewCluster(hubClusterName, "", scheme, nil) Expect(hubCluster).NotTo(BeNil(), "Failed to initialize cluster object") diff --git a/test/upgrade/before/setup_test.go b/test/upgrade/before/setup_test.go index 1974d6542..13eae0a56 100644 --- a/test/upgrade/before/setup_test.go +++ b/test/upgrade/before/setup_test.go @@ -67,8 +67,6 @@ const ( hubClusterSAName = "fleet-hub-agent" fleetSystemNS = "fleet-system" - - kubeConfigPathEnvVarName = "KUBECONFIG" ) const ( @@ -207,9 +205,6 @@ func beforeSuiteForAllProcesses() { klog.InitFlags(fs) Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) - // Check if the required environment variable, which specifies the path to kubeconfig file, has been set. - Expect(os.Getenv(kubeConfigPathEnvVarName)).NotTo(BeEmpty(), "Required environment variable KUBECONFIG is not set") - // Initialize the cluster objects and their clients. hubCluster = framework.NewCluster(hubClusterName, "", scheme, nil) Expect(hubCluster).NotTo(BeNil(), "Failed to initialize cluster object") From 3675ead99d11b7d42b8cb380f0eb3c238b8caeda Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Mon, 2 Feb 2026 11:21:52 +1100 Subject: [PATCH 03/10] fix: use max. 5 second work applier requeue delay for the E2E environment (#392) Minor fixes Signed-off-by: michaelawyu --- charts/member-agent/README.md | 9 +++++++ charts/member-agent/templates/deployment.yaml | 24 +++++++++++++++++++ test/e2e/setup.sh | 6 +++++ 3 files changed, 39 insertions(+) diff --git a/charts/member-agent/README.md b/charts/member-agent/README.md index 4c74d64fb..db1d894c5 100644 --- a/charts/member-agent/README.md +++ b/charts/member-agent/README.md @@ -42,8 +42,17 @@ helm upgrade member-agent member-agent/ --namespace fleet-system | logVerbosity | Log level. 
Uses V logs (klog) | `3` | | propertyProvider | The property provider to use with the member agent; if none is specified, the Fleet member agent will start with no property provider (i.e., the agent will expose no cluster properties, and collect only limited resource usage information) | `` | | region | The region where the member cluster resides | `` | +| workApplierRequeueRateLimiterAttemptsWithFixedDelay | This parameter is a set of values to control how frequent KubeFleet should reconcile (processed) manifests; it specifies then number of attempts to requeue with fixed delay before switching to exponential backoff | `1` | +| workApplierRequeueRateLimiterFixedDelaySeconds | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the fixed delay in seconds for initial requeue attempts | `5` | +| workApplierRequeueRateLimiterExponentialBaseForSlowBackoff | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the exponential base for the slow backoff stage | `1.2` | +| workApplierRequeueRateLimiterInitialSlowBackoffDelaySeconds | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the initial delay in seconds for the slow backoff stage | `2` | +| workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the maximum delay in seconds for the slow backoff stage | `15` | +| workApplierRequeueRateLimiterExponentialBaseForFastBackoff | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the exponential base for the fast backoff stage | `1.5` | +| workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies the maximum delay in seconds for the fast backoff stage | `900` | +| workApplierRequeueRateLimiterSkipToFastBackoffForAvailableOrDiffReportedWorkObjs | This parameter is a set of values to control how frequent KubeFleet should reconcile (process) manifests; it specifies whether to skip the slow backoff stage and start fast backoff immediately for available or diff-reported work objects | `true` | | config.azureCloudConfig | The cloud provider configuration | **required if property provider is set to azure** | + ## Override Azure cloud config **If PropertyProvider feature is set to azure, then a cloud configuration is required.** diff --git a/charts/member-agent/templates/deployment.yaml b/charts/member-agent/templates/deployment.yaml index 39e0dec34..8644b57e6 100644 --- a/charts/member-agent/templates/deployment.yaml +++ b/charts/member-agent/templates/deployment.yaml @@ -36,6 +36,30 @@ spec: - --enable-pprof={{ .Values.enablePprof }} - --pprof-port={{ .Values.pprofPort }} - --hub-pprof-port={{ .Values.hubPprofPort }} + {{- if .Values.workApplierRequeueRateLimiterAttemptsWithFixedDelay }} + - --work-applier-requeue-rate-limiter-attempts-with-fixed-delay={{ .Values.workApplierRequeueRateLimiterAttemptsWithFixedDelay }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterFixedDelaySeconds }} + - --work-applier-requeue-rate-limiter-fixed-delay-seconds={{ .Values.workApplierRequeueRateLimiterFixedDelaySeconds }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterExponentialBaseForSlowBackoff 
}} + - --work-applier-requeue-rate-limiter-exponential-base-for-slow-backoff={{ .Values.workApplierRequeueRateLimiterExponentialBaseForSlowBackoff }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterInitialSlowBackoffDelaySeconds }} + - --work-applier-requeue-rate-limiter-initial-slow-backoff-delay-seconds={{ .Values.workApplierRequeueRateLimiterInitialSlowBackoffDelaySeconds }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds }} + - --work-applier-requeue-rate-limiter-max-slow-backoff-delay-seconds={{ .Values.workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterExponentialBaseForFastBackoff }} + - --work-applier-requeue-rate-limiter-exponential-base-for-fast-backoff={{ .Values.workApplierRequeueRateLimiterExponentialBaseForFastBackoff }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds }} + - --work-applier-requeue-rate-limiter-max-fast-backoff-delay-seconds={{ .Values.workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds }} + {{- end }} + {{- if .Values.workApplierRequeueRateLimiterSkipToFastBackoffForAvailableOrDiffReportedWorkObjs }} + - --work-applier-requeue-rate-limiter-skip-to-fast-backoff-for-available-or-diff-reported-work-objs={{ .Values.workApplierRequeueRateLimiterSkipToFastBackoffForAvailableOrDiffReportedWorkObjs }} + {{- end }} {{- if .Values.propertyProvider }} - --property-provider={{ .Values.propertyProvider }} {{- end }} diff --git a/test/e2e/setup.sh b/test/e2e/setup.sh index 802584007..df505b4c4 100755 --- a/test/e2e/setup.sh +++ b/test/e2e/setup.sh @@ -194,6 +194,8 @@ kind export kubeconfig --name $HUB_CLUSTER HUB_SERVER_URL="https://$(docker inspect $HUB_CLUSTER-control-plane --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'):6443" # Install the member agents and related components +# Note that the work applier in the member agent are set to requeue at max. every 5 seconds instead of using the default +# exponential backoff behavior; this is to accommodate some of the timeout settings in the E2E test specs. 
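The note above explains why the E2E environment caps the work applier's requeue delay at 5 seconds: the helm invocations below override workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds and workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds to 5. To make the effect of these knobs concrete, here is a rough Go sketch of how the rate limiter parameters plausibly combine into a per-attempt delay; it is an approximation inferred from the flag names and chart defaults, not the member agent's actual implementation.

package main

import (
	"fmt"
	"math"
	"time"
)

// requeueDelay approximates a three-stage requeue rate limiter: a few attempts
// with a fixed delay, then a slow exponential backoff, then a fast exponential
// backoff. All names and staging details are assumptions for illustration.
func requeueDelay(attempt, attemptsWithFixedDelay int, fixedDelay time.Duration,
	slowBase float64, initialSlowDelay, maxSlowDelay time.Duration,
	fastBase float64, maxFastDelay time.Duration) time.Duration {
	// Stage 1: the first attempts requeue after a fixed delay.
	if attempt < attemptsWithFixedDelay {
		return fixedDelay
	}
	n := float64(attempt - attemptsWithFixedDelay)
	// Stage 2: slow exponential backoff until it would exceed its cap.
	if slow := time.Duration(float64(initialSlowDelay) * math.Pow(slowBase, n)); slow < maxSlowDelay {
		return slow
	}
	// Stage 3: fast exponential backoff, capped at maxFastDelay.
	if fast := time.Duration(float64(maxSlowDelay) * math.Pow(fastBase, n)); fast < maxFastDelay {
		return fast
	}
	return maxFastDelay
}

func main() {
	// Chart defaults: 1 fixed attempt at 5s, slow backoff base 1.2 starting at 2s
	// (capped at 15s), fast backoff base 1.5 (capped at 900s). The E2E setup
	// overrides both caps to 5s, so every requeue happens within at most 5 seconds.
	for attempt := 0; attempt < 10; attempt++ {
		d := requeueDelay(attempt, 1, 5*time.Second,
			1.2, 2*time.Second, 5*time.Second,
			1.5, 5*time.Second)
		fmt.Printf("attempt %d -> requeue after %v\n", attempt, d)
	}
}

With the production caps (15s slow, 900s fast) left in place, repeatedly failing manifests back off aggressively; the E2E override simply keeps every retry within the timeout settings used by the test specs.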
for (( i=0; i<${MEMBER_CLUSTER_COUNT}; i++ )); do kind export kubeconfig --name "${MEMBER_CLUSTERS[$i]}" @@ -210,6 +212,8 @@ do --set logVerbosity=5 \ --set namespace=fleet-system \ --set enableV1Beta1APIs=true \ + --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ + --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ --set propertyProvider=$PROPERTY_PROVIDER \ --set region=${REGIONS[$i]} \ $( [ "$PROPERTY_PROVIDER" = "azure" ] && echo "-f azure_valid_config.yaml" ) @@ -226,6 +230,8 @@ do --set logVerbosity=5 \ --set namespace=fleet-system \ --set enableV1Beta1APIs=true \ + --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ + --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ --set propertyProvider=$PROPERTY_PROVIDER \ $( [ "$PROPERTY_PROVIDER" = "azure" ] && echo "-f azure_valid_config.yaml" ) fi From 5e7013cf6c986c58a538fb154256d2fd8350bfa0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 1 Feb 2026 16:55:55 -0800 Subject: [PATCH 04/10] chore: bump actions/checkout from 6.0.1 to 6.0.2 (#417) --- .github/workflows/chart.yml | 2 +- .github/workflows/ci.yml | 4 ++-- .github/workflows/code-lint.yml | 4 ++-- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/codespell.yml | 2 +- .github/workflows/markdown-lint.yml | 2 +- .github/workflows/release.yml | 4 ++-- .github/workflows/trivy.yml | 2 +- .github/workflows/upgrade.yml | 6 +++--- 9 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index a2d709555..f3022c580 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -18,7 +18,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6.0.1 + - uses: actions/checkout@v6.0.2 with: submodules: true fetch-depth: 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index df7ab6a51..9b44dbc85 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,7 +41,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - name: Set up Ginkgo CLI run: | @@ -92,7 +92,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - name: Install Ginkgo CLI run: | diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index ac3906219..6b01223e2 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -43,7 +43,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 with: submodules: true @@ -64,7 +64,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - name: golangci-lint run: make lint diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 91deab101..2f47f189e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -38,7 +38,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index b4aa7b8b0..aac7f666b 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -16,7 +16,7 @@ jobs: with: egress-policy: audit - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4.1.7 + - uses: actions/checkout@0c366fd6a839edf440554fa01a7085ccba70ac98 # v4.1.7 - uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # master with: check_filenames: true diff --git a/.github/workflows/markdown-lint.yml b/.github/workflows/markdown-lint.yml index bb2858063..d1ddd51d0 100644 --- a/.github/workflows/markdown-lint.yml +++ b/.github/workflows/markdown-lint.yml @@ -10,7 +10,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6.0.1 + - uses: actions/checkout@v6.0.2 - uses: tcort/github-action-markdown-link-check@v1 with: # this will only show errors in the output diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d6fff39de..5d9503594 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,7 +30,7 @@ jobs: tag: ${{ steps.export.outputs.tag }} steps: - name: Checkout code - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - id: export run: | @@ -64,7 +64,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - name: Login to ghcr.io uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 995d8dd19..642663457 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -44,7 +44,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 - name: Login to ${{ env.REGISTRY }} uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index d33864ae8..4d7a87432 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -44,7 +44,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -127,7 +127,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. @@ -210,7 +210,7 @@ jobs: go-version: ${{ env.GO_VERSION }} - name: Check out code into the Go module directory - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6.0.2 with: # Fetch the history of all branches and tags. # This is needed for the test suite to switch between releases. 
From 80e3bf3531705f84f1cbe0de8a1f7078d07f51a3 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 4 Feb 2026 01:56:30 +1100 Subject: [PATCH 05/10] fix: address an issue in AppliedWork object processing when the agent leaves then re-joins (#421) --- pkg/controllers/workapplier/controller.go | 63 +++--- .../workapplier/controller_test.go | 208 +++++++++++++++++- 2 files changed, 244 insertions(+), 27 deletions(-) diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index 259b0ac9a..bc6a185f7 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -551,23 +551,40 @@ func (r *Reconciler) forgetWorkAndRemoveFinalizer(ctx context.Context, work *fle // ensureAppliedWork makes sure that an associated appliedWork and a finalizer on the work resource exists on the cluster. func (r *Reconciler) ensureAppliedWork(ctx context.Context, work *fleetv1beta1.Work) (*fleetv1beta1.AppliedWork, error) { workRef := klog.KObj(work) - appliedWork := &fleetv1beta1.AppliedWork{} - hasFinalizer := false - if controllerutil.ContainsFinalizer(work, fleetv1beta1.WorkFinalizer) { - hasFinalizer = true - err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, appliedWork) - switch { - case apierrors.IsNotFound(err): - klog.ErrorS(err, "AppliedWork finalizer resource does not exist even with the finalizer, it will be recreated", "appliedWork", workRef.Name) - case err != nil: - klog.ErrorS(err, "Failed to retrieve the appliedWork ", "appliedWork", workRef.Name) - return nil, controller.NewAPIServerError(true, err) - default: - return appliedWork, nil + + // Add a finalizer to the Work object. + if !controllerutil.ContainsFinalizer(work, fleetv1beta1.WorkFinalizer) { + work.Finalizers = append(work.Finalizers, fleetv1beta1.WorkFinalizer) + + if err := r.hubClient.Update(ctx, work, &client.UpdateOptions{}); err != nil { + klog.ErrorS(err, "Failed to add the cleanup finalizer to the work", "work", workRef) + return nil, controller.NewAPIServerError(false, err) } + klog.V(2).InfoS("Added the cleanup finalizer to the Work object", "work", workRef) + } + + // Check if an AppliedWork object already exists for the Work object. + // + // Since we only create an AppliedWork object after adding the finalizer to the Work object, + // usually it is safe for us to assume that if the finalizer is absent, the AppliedWork object should + // not exist. This is not the case with the work applier though, as the controller features a + // Leave method that will strip all Work objects off their finalizers, which is called when the + // member cluster leaves the fleet. If the member cluster chooses to re-join the fleet, the controller + // will see a Work object with no finalizer but with an AppliedWork object. Because of this, here we always + // check for the existence of the AppliedWork object, with or without the finalizer. + appliedWork := &fleetv1beta1.AppliedWork{} + err := r.spokeClient.Get(ctx, types.NamespacedName{Name: work.Name}, appliedWork) + switch { + case err == nil: + // The AppliedWork already exists; no further action is needed. 
+ klog.V(2).InfoS("Found an AppliedWork for the Work object", "work", workRef, "appliedWork", klog.KObj(appliedWork)) + return appliedWork, nil + case !apierrors.IsNotFound(err): + klog.ErrorS(err, "Failed to retrieve the appliedWork object", "appliedWork", workRef.Name) + return nil, controller.NewAPIServerError(true, err) } - // we create the appliedWork before setting the finalizer, so it should always exist unless it's deleted behind our back + // The AppliedWork object does not exist; create one. appliedWork = &fleetv1beta1.AppliedWork{ ObjectMeta: metav1.ObjectMeta{ Name: work.Name, @@ -577,20 +594,14 @@ func (r *Reconciler) ensureAppliedWork(ctx context.Context, work *fleetv1beta1.W WorkNamespace: work.Namespace, }, } - if err := r.spokeClient.Create(ctx, appliedWork); err != nil && !apierrors.IsAlreadyExists(err) { - klog.ErrorS(err, "AppliedWork create failed", "appliedWork", workRef.Name) + if err := r.spokeClient.Create(ctx, appliedWork); err != nil { + // Note: the controller must retry on AppliedWork AlreadyExists errors; otherwise the + // controller will run the reconciliation loop with an AppliedWork that has no UID, + // which might lead to takeover failures in later steps. + klog.ErrorS(err, "Failed to create an AppliedWork object for the Work object", "appliedWork", klog.KObj(appliedWork), "work", workRef) return nil, controller.NewAPIServerError(false, err) } - if !hasFinalizer { - klog.InfoS("Add the finalizer to the work", "work", workRef) - work.Finalizers = append(work.Finalizers, fleetv1beta1.WorkFinalizer) - - if err := r.hubClient.Update(ctx, work, &client.UpdateOptions{}); err != nil { - klog.ErrorS(err, "Failed to add the finalizer to the work", "work", workRef) - return nil, controller.NewAPIServerError(false, err) - } - } - klog.InfoS("Recreated the appliedWork resource", "appliedWork", workRef.Name) + klog.V(2).InfoS("Created an AppliedWork for the Work object", "work", workRef, "appliedWork", klog.KObj(appliedWork)) return appliedWork, nil } diff --git a/pkg/controllers/workapplier/controller_test.go b/pkg/controllers/workapplier/controller_test.go index b10df268f..47e4dc092 100644 --- a/pkg/controllers/workapplier/controller_test.go +++ b/pkg/controllers/workapplier/controller_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package workapplier import ( + "context" "fmt" "log" "os" @@ -32,8 +33,10 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" ) @@ -208,7 +211,8 @@ var ( ) var ( - ignoreFieldTypeMetaInNamespace = cmpopts.IgnoreFields(corev1.Namespace{}, "TypeMeta") + ignoreFieldTypeMetaInNamespace = cmpopts.IgnoreFields(corev1.Namespace{}, "TypeMeta") + ignoreFieldObjectMetaresourceVersion = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion") lessFuncAppliedResourceMeta = func(i, j fleetv1beta1.AppliedResourceMeta) bool { iStr := fmt.Sprintf("%s/%s/%s/%s/%s", i.Group, i.Version, i.Kind, i.Namespace, i.Name) @@ -262,6 +266,14 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func fakeClientScheme(t *testing.T) *runtime.Scheme { + scheme := runtime.NewScheme() + if err := fleetv1beta1.AddToScheme(scheme); err != nil { + t.Fatalf("Failed to add placement v1beta1 scheme: %v", err) + } + return scheme +} + func initializeVariables() { var err error @@ -332,3 +344,197 @@ func TestPrepareManifestProcessingBundles(t *testing.T) { t.Errorf("prepareManifestProcessingBundles() mismatches (-got +want):\n%s", diff) } } + +// TestEnsureAppliedWork tests the ensureAppliedWork method. +func TestEnsureAppliedWork(t *testing.T) { + ctx := context.Background() + + fakeUID := types.UID("foo") + testCases := []struct { + name string + work *fleetv1beta1.Work + appliedWork *fleetv1beta1.AppliedWork + wantWork *fleetv1beta1.Work + wantAppliedWork *fleetv1beta1.AppliedWork + }{ + { + name: "with work cleanup finalizer present, but no corresponding AppliedWork exists", + work: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + wantWork: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + wantAppliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + }, + { + name: "with work cleanup finalizer present, and corresponding AppliedWork exists", + work: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + appliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + // Add the UID field to track if the method returns the existing object. 
+ UID: fakeUID, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + wantWork: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + wantAppliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + UID: fakeUID, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + }, + { + name: "without work cleanup finalizer, but corresponding AppliedWork exists", + work: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + }, + }, + appliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + // Add the UID field to track if the method returns the existing object. + UID: fakeUID, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + wantWork: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + wantAppliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + UID: fakeUID, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + }, + { + name: "without work cleanup finalizer, and no corresponding AppliedWork exists", + work: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + }, + }, + wantWork: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + Finalizers: []string{ + fleetv1beta1.WorkFinalizer, + }, + }, + }, + wantAppliedWork: &fleetv1beta1.AppliedWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + }, + Spec: fleetv1beta1.AppliedWorkSpec{ + WorkName: workName, + WorkNamespace: memberReservedNSName1, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hubClientScheme := fakeClientScheme(t) + fakeHubClient := fake.NewClientBuilder(). + WithScheme(hubClientScheme). + WithObjects(tc.work). + Build() + + memberClientScheme := fakeClientScheme(t) + fakeMemberClientBuilder := fake.NewClientBuilder().WithScheme(memberClientScheme) + if tc.appliedWork != nil { + fakeMemberClientBuilder = fakeMemberClientBuilder.WithObjects(tc.appliedWork) + } + fakeMemberClient := fakeMemberClientBuilder.Build() + + r := &Reconciler{ + hubClient: fakeHubClient, + spokeClient: fakeMemberClient, + } + + gotAppliedWork, err := r.ensureAppliedWork(ctx, tc.work) + if err != nil { + t.Fatalf("ensureAppliedWork() = %v, want no error", err) + } + + // Verify the Work object. + gotWork := &fleetv1beta1.Work{} + if err := fakeHubClient.Get(ctx, types.NamespacedName{Name: tc.work.Name, Namespace: tc.work.Namespace}, gotWork); err != nil { + t.Fatalf("failed to get Work object from fake hub client: %v", err) + } + if diff := cmp.Diff(gotWork, tc.wantWork, ignoreFieldObjectMetaresourceVersion); diff != "" { + t.Errorf("Work objects diff (-got +want):\n%s", diff) + } + + // Verify the AppliedWork object. 
+ if diff := cmp.Diff(gotAppliedWork, tc.wantAppliedWork, ignoreFieldObjectMetaresourceVersion); diff != "" { + t.Errorf("AppliedWork objects diff (-got +want):\n%s", diff) + } + }) + } +} From a2e4bf804cbd328bd4e81d34a91b4102272a3179 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes <145056127+britaniar@users.noreply.github.com> Date: Tue, 3 Feb 2026 09:56:19 -0800 Subject: [PATCH 06/10] feat: resource selection struct and selection related functions (#418) --- .../placement/resource_selector.go | 2 +- pkg/resourcewatcher/change_dector.go | 2 +- pkg/utils/common.go | 73 +- pkg/utils/common_test.go | 219 -- .../controller/resource_selector_resolver.go | 617 +++++ .../resource_selector_resolver_test.go | 2221 +++++++++++++++++ 6 files changed, 2841 insertions(+), 293 deletions(-) create mode 100644 pkg/utils/controller/resource_selector_resolver.go create mode 100644 pkg/utils/controller/resource_selector_resolver_test.go diff --git a/pkg/controllers/placement/resource_selector.go b/pkg/controllers/placement/resource_selector.go index 0eb15cda1..530e4ed6d 100644 --- a/pkg/controllers/placement/resource_selector.go +++ b/pkg/controllers/placement/resource_selector.go @@ -294,7 +294,7 @@ func (r *Reconciler) shouldPropagateObj(namespace, placementName string, obj run return false, nil } - shouldInclude, err := utils.ShouldPropagateObj(r.InformerManager, uObj, r.EnableWorkload) + shouldInclude, err := controller.ShouldPropagateObj(r.InformerManager, uObj, r.EnableWorkload) if err != nil { klog.ErrorS(err, "Cannot determine if we should propagate an object", "namespace", namespace, "placement", placementName, "object", uObjKObj) return false, err diff --git a/pkg/resourcewatcher/change_dector.go b/pkg/resourcewatcher/change_dector.go index 0dcfc41e6..dd0efe74f 100644 --- a/pkg/resourcewatcher/change_dector.go +++ b/pkg/resourcewatcher/change_dector.go @@ -168,7 +168,7 @@ func (d *ChangeDetector) dynamicResourceFilter(obj interface{}) bool { } if unstructuredObj, ok := obj.(*unstructured.Unstructured); ok { - shouldPropagate, err := utils.ShouldPropagateObj(d.InformerManager, unstructuredObj.DeepCopy(), d.EnableWorkload) + shouldPropagate, err := controller.ShouldPropagateObj(d.InformerManager, unstructuredObj.DeepCopy(), d.EnableWorkload) if err != nil || !shouldPropagate { klog.V(5).InfoS("Skip watching resource in namespace", "namespace", cwKey.Namespace, "group", cwKey.Group, "version", cwKey.Version, "kind", cwKey.Kind, "object", cwKey.Name) diff --git a/pkg/utils/common.go b/pkg/utils/common.go index 03b0589df..50fdc74fb 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -27,7 +27,6 @@ import ( appv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" - discoveryv1 "k8s.io/api/discovery/v1" networkingv1 "k8s.io/api/networking/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -35,10 +34,7 @@ import ( storagev1 "k8s.io/api/storage/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/util/retry" @@ -50,8 +46,6 @@ import ( placementv1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1" placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" 
"github.com/kubefleet-dev/kubefleet/pkg/utils/condition" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" - "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" ) const ( @@ -505,71 +499,6 @@ func CheckCRDInstalled(discoveryClient discovery.DiscoveryInterface, gvk schema. return err } -// ShouldPropagateObj decides if one should propagate the object. -// PVCs are only propagated when enableWorkload is false (workloads not allowed on hub). -func ShouldPropagateObj(informerManager informer.Manager, uObj *unstructured.Unstructured, enableWorkload bool) (bool, error) { - // TODO: add more special handling for different resource kind - switch uObj.GroupVersionKind() { - case appv1.SchemeGroupVersion.WithKind(ReplicaSetKind): - // Skip ReplicaSets if they are managed by Deployments (have owner references). - // Standalone ReplicaSets (without owners) can be propagated. - if len(uObj.GetOwnerReferences()) > 0 { - return false, nil - } - case appv1.SchemeGroupVersion.WithKind("ControllerRevision"): - // Skip ControllerRevisions if they are managed by DaemonSets/StatefulSets (have owner references). - // Standalone ControllerRevisions (without owners) can be propagated. - if len(uObj.GetOwnerReferences()) > 0 { - return false, nil - } - case corev1.SchemeGroupVersion.WithKind(ConfigMapKind): - // Skip the built-in custom CA certificate created in the namespace. - if uObj.GetName() == "kube-root-ca.crt" { - return false, nil - } - case corev1.SchemeGroupVersion.WithKind("ServiceAccount"): - // Skip the default service account created in the namespace. - if uObj.GetName() == "default" { - return false, nil - } - case corev1.SchemeGroupVersion.WithKind("Secret"): - // The secret, with type 'kubernetes.io/service-account-token', is created along with `ServiceAccount` should be - // prevented from propagating. - var secret corev1.Secret - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.Object, &secret); err != nil { - return false, controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to convert a secret object %s in namespace %s: %w", uObj.GetName(), uObj.GetNamespace(), err)) - } - if secret.Type == corev1.SecretTypeServiceAccountToken { - return false, nil - } - case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"): - // Skip PersistentVolumeClaims by default to avoid conflicts with the PVCs created by statefulset controller. - // This only happens if the workloads are allowed to run on the hub cluster. 
- if enableWorkload { - return false, nil - } - case corev1.SchemeGroupVersion.WithKind("Endpoints"): - // we assume that all endpoints with the same name of a service is created by the service controller - if _, err := informerManager.Lister(ServiceGVR).ByNamespace(uObj.GetNamespace()).Get(uObj.GetName()); err != nil { - if apierrors.IsNotFound(err) { - // there is no service of the same name as the end point, - // we assume that this endpoint is created by the user - return true, nil - } - return false, controller.NewAPIServerError(true, fmt.Errorf("failed to get the service %s in namespace %s: %w", uObj.GetName(), uObj.GetNamespace(), err)) - } - // we find a service of the same name as the endpoint, we assume it's created by the service - return false, nil - case discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice"): - // all EndpointSlice created by the EndpointSlice controller has a managed by label - if _, exist := uObj.GetLabels()[discoveryv1.LabelManagedBy]; exist { - // do not propagate hub cluster generated endpoint slice - return false, nil - } - } - return true, nil -} - // IsReservedNamespace indicates if an argued namespace is reserved. func IsReservedNamespace(namespace string) bool { return strings.HasPrefix(namespace, fleetPrefix) || strings.HasPrefix(namespace, kubePrefix) @@ -774,7 +703,7 @@ var LessFuncDiffedResourcePlacementsV1 = func(a, b placementv1.DiffedResourcePla return aStr < bStr } -// LessFuncCondition is a less function for sorting conditions based on its types. +// LessFuncConditionByType is a less function for sorting conditions based on its types. var LessFuncConditionByType = func(a, b metav1.Condition) bool { return a.Type < b.Type } diff --git a/pkg/utils/common_test.go b/pkg/utils/common_test.go index d99d11711..483a06a42 100644 --- a/pkg/utils/common_test.go +++ b/pkg/utils/common_test.go @@ -6,7 +6,6 @@ import ( "github.com/google/go-cmp/cmp" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" @@ -1190,221 +1189,3 @@ func TestIsDiffedResourcePlacementEqual(t *testing.T) { }) } } - -func TestShouldPropagateObj(t *testing.T) { - tests := []struct { - name string - obj map[string]interface{} - ownerReferences []metav1.OwnerReference - enableWorkload bool - want bool - }{ - { - name: "standalone replicaset without ownerReferences should propagate", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ReplicaSet", - "metadata": map[string]interface{}{ - "name": "standalone-rs", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: true, - want: true, - }, - { - name: "standalone replicaset without ownerReferences should propagate if workload is disabled", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ReplicaSet", - "metadata": map[string]interface{}{ - "name": "standalone-rs", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: false, - want: true, - }, - { - name: "standalone pod without ownerReferences should propagate", - obj: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "standalone-pod", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: true, - want: true, - }, - { - name: "replicaset with deployment owner should NOT propagate", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ReplicaSet", - "metadata": 
map[string]interface{}{ - "name": "test-deploy-abc123", - "namespace": "default", - }, - }, - ownerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "test-deploy", - UID: "12345", - }, - }, - enableWorkload: true, - want: false, - }, - { - name: "pod owned by replicaset - passes ShouldPropagateObj but filtered by resource config", - obj: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "test-deploy-abc123-xyz", - "namespace": "default", - }, - }, - ownerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "ReplicaSet", - Name: "test-deploy-abc123", - UID: "67890", - }, - }, - enableWorkload: false, - want: true, // ShouldPropagateObj doesn't filter Pods - they're filtered by NewResourceConfig - }, - { - name: "controllerrevision owned by daemonset should NOT propagate", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ControllerRevision", - "metadata": map[string]interface{}{ - "name": "test-ds-7b9848797f", - "namespace": "default", - }, - }, - ownerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "DaemonSet", - Name: "test-ds", - UID: "abcdef", - }, - }, - enableWorkload: false, - want: false, - }, - { - name: "controllerrevision owned by statefulset should NOT propagate", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ControllerRevision", - "metadata": map[string]interface{}{ - "name": "test-ss-7878b4b446", - "namespace": "default", - }, - }, - ownerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "StatefulSet", - Name: "test-ss", - UID: "fedcba", - }, - }, - enableWorkload: false, - want: false, - }, - { - name: "standalone controllerrevision without owner should propagate", - obj: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ControllerRevision", - "metadata": map[string]interface{}{ - "name": "custom-revision", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: false, - want: true, - }, - { - name: "PVC should propagate when workload is disabled", - obj: map[string]interface{}{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": map[string]interface{}{ - "name": "test-pvc", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: false, - want: true, - }, - { - name: "PVC should NOT propagate when workload is enabled", - obj: map[string]interface{}{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": map[string]interface{}{ - "name": "test-pvc", - "namespace": "default", - }, - }, - ownerReferences: nil, - enableWorkload: true, - want: false, - }, - { - name: "PVC with ownerReferences should NOT propagate when workload is enabled", - obj: map[string]interface{}{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": map[string]interface{}{ - "name": "data-statefulset-0", - "namespace": "default", - }, - }, - ownerReferences: []metav1.OwnerReference{ - { - APIVersion: "apps/v1", - Kind: "StatefulSet", - Name: "statefulset", - UID: "sts-uid", - }, - }, - enableWorkload: true, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - uObj := &unstructured.Unstructured{Object: tt.obj} - if tt.ownerReferences != nil { - uObj.SetOwnerReferences(tt.ownerReferences) - } - - got, err := ShouldPropagateObj(nil, uObj, tt.enableWorkload) - if err != nil { - t.Errorf("ShouldPropagateObj() error = %v", err) - return - } - if got 
!= tt.want { - t.Errorf("ShouldPropagateObj() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/utils/controller/resource_selector_resolver.go b/pkg/utils/controller/resource_selector_resolver.go new file mode 100644 index 000000000..c2c37c662 --- /dev/null +++ b/pkg/utils/controller/resource_selector_resolver.go @@ -0,0 +1,617 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "sort" + "strings" + + appv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "k8s.io/kubectl/pkg/util/deployment" + "sigs.k8s.io/controller-runtime/pkg/client" + + placementv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" +) + +var ( + // resourceSortOrder is the order in which resources are sorted when KubeFleet + // organizes the resources in a resource snapshot. + // + // Note (chenyu1): the sort order here does not affect the order in which resources + // are applied on a selected member cluster (the work applier will handle the resources + // in batch with its own grouping logic). KubeFleet sorts resources here solely + // for consistency (deterministic processing) reasons (i.e., if the set of the + // resources remain the same, no new snapshots are generated). + // + // Important (chenyu1): changing the sort order here may induce side effects in + // existing KubeFleet deployments, as a new snapshot might be prepared and rolled out. + // Do not update the sort order unless absolutely necessary. + resourceSortOrder = map[string]int{ + "PriorityClass": 0, + "Namespace": 1, + "NetworkPolicy": 2, + "ResourceQuota": 3, + "LimitRange": 4, + "PodDisruptionBudget": 5, + "ServiceAccount": 6, + "Secret": 7, + "ConfigMap": 8, + "StorageClass": 9, + "PersistentVolume": 10, + "PersistentVolumeClaim": 11, + "CustomResourceDefinition": 12, + "ClusterRole": 13, + "ClusterRoleBinding": 14, + "Role": 15, + "RoleBinding": 16, + "Service": 17, + "DaemonSet": 18, + "Pod": 19, + "ReplicationController": 20, + "ReplicaSet": 21, + "Deployment": 22, + "HorizontalPodAutoscaler": 23, + "StatefulSet": 24, + "Job": 25, + "CronJob": 26, + "IngressClass": 27, + "Ingress": 28, + "APIService": 29, + "MutatingWebhookConfiguration": 30, + "ValidatingWebhookConfiguration": 31, + } +) + +type ResourceSelectorResolver struct { + // SkippedNamespaces contains the namespaces that should be skipped when selecting resources. + SkippedNamespaces map[string]bool + + // ResourceConfig contains the resource configuration. 
+ ResourceConfig *utils.ResourceConfig + + // InformerManager is the informer manager. + InformerManager informer.Manager + + // RestMapper is the rest mapper used to convert between gvk and gvr on known resources. + RestMapper meta.RESTMapper + + // EnableWorkload indicates whether workload resources are allowed to be selected. + EnableWorkload bool +} + +// SelectResourcesForPlacement selects the resources according to the placement resourceSelectors. +// It also generates an array of resource content and resource identifier based on the selected resources. +// It also returns the number of envelope configmaps so the CRP controller can have the right expectation of the number of work objects. +func (rs *ResourceSelectorResolver) SelectResourcesForPlacement(placementObj placementv1beta1.PlacementObj) (int, []placementv1beta1.ResourceContent, []placementv1beta1.ResourceIdentifier, error) { + envelopeObjCount := 0 + selectedObjects, err := rs.gatherSelectedResource(types.NamespacedName{ + Name: placementObj.GetName(), + Namespace: placementObj.GetNamespace(), + }, placementObj.GetPlacementSpec().ResourceSelectors) + if err != nil { + return 0, nil, nil, err + } + + resources := make([]placementv1beta1.ResourceContent, len(selectedObjects)) + resourcesIDs := make([]placementv1beta1.ResourceIdentifier, len(selectedObjects)) + for i, unstructuredObj := range selectedObjects { + rc, err := generateResourceContent(unstructuredObj) + if err != nil { + return 0, nil, nil, err + } + uGVK := unstructuredObj.GetObjectKind().GroupVersionKind().GroupKind() + switch uGVK { + case utils.ClusterResourceEnvelopeGK: + envelopeObjCount++ + case utils.ResourceEnvelopeGK: + envelopeObjCount++ + } + resources[i] = *rc + ri := placementv1beta1.ResourceIdentifier{ + Group: unstructuredObj.GroupVersionKind().Group, + Version: unstructuredObj.GroupVersionKind().Version, + Kind: unstructuredObj.GroupVersionKind().Kind, + Name: unstructuredObj.GetName(), + Namespace: unstructuredObj.GetNamespace(), + } + resourcesIDs[i] = ri + } + return envelopeObjCount, resources, resourcesIDs, nil +} + +// generateResourceContent creates a resource content from the unstructured obj. +func generateResourceContent(object *unstructured.Unstructured) (*placementv1beta1.ResourceContent, error) { + rawContent, err := generateRawContent(object) + if err != nil { + return nil, NewUnexpectedBehaviorError(err) + } + return &placementv1beta1.ResourceContent{ + RawExtension: runtime.RawExtension{Raw: rawContent}, + }, nil +} + +// generateRawContent strips all the unnecessary fields to prepare the objects for dispatch. +func generateRawContent(object *unstructured.Unstructured) ([]byte, error) { + // Make a deep copy of the object as we are modifying it. 
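+	// The input usually comes straight from an informer cache (see
+	// gatherSelectedResource below), so mutating it in place would corrupt the
+	// shared cache that other readers rely on.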
+ object = object.DeepCopy() + // we keep the annotation/label/finalizer/owner references/delete grace period + object.SetResourceVersion("") + object.SetGeneration(0) + object.SetUID("") + object.SetSelfLink("") + object.SetDeletionTimestamp(nil) + object.SetManagedFields(nil) + + annots := object.GetAnnotations() + if annots != nil { + // Remove kubectl last applied annotation if exist + delete(annots, corev1.LastAppliedConfigAnnotation) + // Remove the revision annotation set by deployment + delete(annots, deployment.RevisionAnnotation) + if len(annots) == 0 { + object.SetAnnotations(nil) + } else { + object.SetAnnotations(annots) + } + } + // Remove all the owner references as the UID in the owner reference can't be transferred to + // the member clusters + // TODO: Establish a way to keep the ownership relation through work-api + object.SetOwnerReferences(nil) + unstructured.RemoveNestedField(object.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(object.Object, "status") + + // TODO: see if there are other cases that we may have some extra fields + if object.GetKind() == "Service" && object.GetAPIVersion() == "v1" { + if clusterIP, exist, _ := unstructured.NestedString(object.Object, "spec", "clusterIP"); exist && clusterIP != corev1.ClusterIPNone { + unstructured.RemoveNestedField(object.Object, "spec", "clusterIP") + unstructured.RemoveNestedField(object.Object, "spec", "clusterIPs") + } + // We should remove all node ports that are assigned by hubcluster if any. + unstructured.RemoveNestedField(object.Object, "spec", "healthCheckNodePort") + + vals, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "ports") + if found && err == nil { + if ports, ok := vals.([]interface{}); ok { + for i := range ports { + if each, ok := ports[i].(map[string]interface{}); ok { + delete(each, "nodePort") + } + } + } + } + if err != nil { + return nil, fmt.Errorf("failed to get the ports field in Service object, name =%s: %w", object.GetName(), err) + } + } else if object.GetKind() == "Job" && object.GetAPIVersion() == batchv1.SchemeGroupVersion.String() { + if manualSelector, exist, _ := unstructured.NestedBool(object.Object, "spec", "manualSelector"); !exist || !manualSelector { + // remove the selector field and labels added by the api-server if the job is not created with manual selector + // whose value conflict with the ones created by the member cluster api server + // https://github.com/kubernetes/kubernetes/blob/d4fde1e92a83cb533ae63b3abe9d49f08efb7a2f/pkg/registry/batch/job/strategy.go#L219 + // k8s used to add an old label called "controller-uid" but use a new label called "batch.kubernetes.io/controller-uid" after 1.26 + unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "controller-uid") + unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "batch.kubernetes.io/controller-uid") + unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "creationTimestamp") + unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "controller-uid") + unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "batch.kubernetes.io/controller-uid") + } + } + + rawContent, err := object.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("failed to marshal the unstructured object gvk = %s, name =%s: %w", object.GroupVersionKind(), object.GetName(), err) + } + return rawContent, nil +} + +// gatherSelectedResource gets all 
the resources according to the resource selector. +func (rs *ResourceSelectorResolver) gatherSelectedResource(placementKey types.NamespacedName, selectors []placementv1beta1.ResourceSelectorTerm) ([]*unstructured.Unstructured, error) { + var resources []*unstructured.Unstructured + var resourceMap = make(map[placementv1beta1.ResourceIdentifier]bool) + for _, selector := range selectors { + gvk := schema.GroupVersionKind{ + Group: selector.Group, + Version: selector.Version, + Kind: selector.Kind, + } + + if rs.ResourceConfig.IsResourceDisabled(gvk) { + klog.V(2).InfoS("Skip select resource", "group version kind", gvk.String()) + continue + } + var objs []runtime.Object + var err error + if gvk == utils.NamespaceGVK && placementKey.Namespace == "" && selector.SelectionScope != placementv1beta1.NamespaceOnly { + objs, err = rs.fetchNamespaceResources(selector, placementKey.Name) + } else { + objs, err = rs.fetchResources(selector, placementKey) + } + if err != nil { + return nil, err + } + for _, obj := range objs { + uObj := obj.(*unstructured.Unstructured) + ri := placementv1beta1.ResourceIdentifier{ + Group: obj.GetObjectKind().GroupVersionKind().Group, + Version: obj.GetObjectKind().GroupVersionKind().Version, + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + Name: uObj.GetName(), + Namespace: uObj.GetNamespace(), + } + if _, exist := resourceMap[ri]; exist { + err = fmt.Errorf("found duplicate resource %+v", ri) + klog.ErrorS(err, "User selected one resource more than once", "resource", ri, "placement", placementKey) + return nil, NewUserError(err) + } + resourceMap[ri] = true + resources = append(resources, uObj) + } + } + // sort the resources in strict order so that we will get the stable list of manifest so that + // the generated work object doesn't change between reconcile loops. + sortResources(resources) + + return resources, nil +} + +func sortResources(resources []*unstructured.Unstructured) { + sort.Slice(resources, func(i, j int) bool { + obj1 := resources[i] + obj2 := resources[j] + k1 := obj1.GetObjectKind().GroupVersionKind().Kind + k2 := obj2.GetObjectKind().GroupVersionKind().Kind + + first, aok := resourceSortOrder[k1] + second, bok := resourceSortOrder[k2] + switch { + // if both kinds are unknown. + case !aok && !bok: + return lessByGVK(obj1, obj2, false) + // unknown kind should be last. + case !aok: + return false + case !bok: + return true + // same kind. + case first == second: + return lessByGVK(obj1, obj2, true) + } + // different known kinds, sort based on order index. + return first < second + }) +} + +func lessByGVK(obj1, obj2 *unstructured.Unstructured, ignoreKind bool) bool { + var gvk1, gvk2 string + if ignoreKind { + gvk1 = obj1.GetObjectKind().GroupVersionKind().GroupVersion().String() + gvk2 = obj2.GetObjectKind().GroupVersionKind().GroupVersion().String() + } else { + gvk1 = obj1.GetObjectKind().GroupVersionKind().String() + gvk2 = obj2.GetObjectKind().GroupVersionKind().String() + } + comp := strings.Compare(gvk1, gvk2) + if comp == 0 { + return strings.Compare(fmt.Sprintf("%s/%s", obj1.GetNamespace(), obj1.GetName()), + fmt.Sprintf("%s/%s", obj2.GetNamespace(), obj2.GetName())) < 0 + } + return comp < 0 +} + +// fetchNamespaceResources retrieves all the objects for a ResourceSelectorTerm that is for namespace. 
+func (rs *ResourceSelectorResolver) fetchNamespaceResources(selector placementv1beta1.ResourceSelectorTerm, placementName string) ([]runtime.Object, error) { + klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector) + var resources []runtime.Object + + if len(selector.Name) != 0 { + // just a single namespace + objs, err := rs.fetchAllResourcesInOneNamespace(selector.Name, placementName) + if err != nil { + klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", selector.Name) + return nil, err + } + return objs, err + } + + // go through each namespace + var labelSelector labels.Selector + var err error + if selector.LabelSelector == nil { + labelSelector = labels.Everything() + } else { + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) + } + } + namespaces, err := rs.InformerManager.Lister(utils.NamespaceGVR).List(labelSelector) + if err != nil { + klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placementName) + return nil, NewAPIServerError(true, err) + } + + for _, namespace := range namespaces { + ns, err := meta.Accessor(namespace) + if err != nil { + return nil, NewUnexpectedBehaviorError(fmt.Errorf("cannot get the name of a namespace object: %w", err)) + } + objs, err := rs.fetchAllResourcesInOneNamespace(ns.GetName(), placementName) + if err != nil { + klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", ns.GetName()) + return nil, err + } + resources = append(resources, objs...) + } + return resources, nil +} + +// fetchAllResourcesInOneNamespace retrieves all the objects inside a single namespace which includes the namespace itself. 
+func (rs *ResourceSelectorResolver) fetchAllResourcesInOneNamespace(namespaceName string, placeName string) ([]runtime.Object, error) {
+ var resources []runtime.Object
+
+ if !utils.ShouldPropagateNamespace(namespaceName, rs.SkippedNamespaces) {
+ err := fmt.Errorf("invalid clusterResourcePlacement %s: namespace %s is not allowed to propagate", placeName, namespaceName)
+ return nil, NewUserError(err)
+ }
+
+ klog.V(2).InfoS("start to fetch all the resources inside a namespace", "namespace", namespaceName)
+ // select the namespace object itself
+ obj, err := rs.InformerManager.Lister(utils.NamespaceGVR).Get(namespaceName)
+ if err != nil {
+ klog.ErrorS(err, "cannot get the namespace", "namespace", namespaceName)
+ return nil, NewAPIServerError(true, client.IgnoreNotFound(err))
+ }
+ nameSpaceObj := obj.DeepCopyObject().(*unstructured.Unstructured)
+ if nameSpaceObj.GetDeletionTimestamp() != nil {
+ // skip a to be deleted namespace
+ klog.V(2).InfoS("skip the deleting namespace resources by the selector",
+ "placeName", placeName, "namespace", namespaceName)
+ return resources, nil
+ }
+ resources = append(resources, obj)
+
+ trackedResource := rs.InformerManager.GetNameSpaceScopedResources()
+ for _, gvr := range trackedResource {
+ if !utils.ShouldProcessResource(gvr, rs.RestMapper, rs.ResourceConfig) {
+ continue
+ }
+ if !rs.InformerManager.IsInformerSynced(gvr) {
+ return nil, NewExpectedBehaviorError(fmt.Errorf("informer cache for %+v is not synced yet", gvr))
+ }
+ lister := rs.InformerManager.Lister(gvr)
+ objs, err := lister.ByNamespace(namespaceName).List(labels.Everything())
+ if err != nil {
+ klog.ErrorS(err, "Cannot list all the objects in namespace", "gvr", gvr, "namespace", namespaceName)
+ return nil, NewAPIServerError(true, err)
+ }
+ for _, obj := range objs {
+ shouldInclude, err := rs.ShouldPropagateObj(namespaceName, placeName, obj)
+ if err != nil {
+ return nil, err
+ }
+ if shouldInclude {
+ resources = append(resources, obj)
+ }
+ }
+ }
+
+ return resources, nil
+}
+
+// fetchResources retrieves the objects based on the selector.
+func (rs *ResourceSelectorResolver) fetchResources(selector placementv1beta1.ResourceSelectorTerm, placementKey types.NamespacedName) ([]runtime.Object, error) {
+ klog.V(2).InfoS("Start to fetch resources by the selector", "selector", selector, "placement", placementKey)
+ gk := schema.GroupKind{
+ Group: selector.Group,
+ Kind: selector.Kind,
+ }
+ restMapping, err := rs.RestMapper.RESTMapping(gk, selector.Version)
+ if err != nil {
+ return nil, NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placementKey, err))
+ }
+ gvr := restMapping.Resource
+ gvk := schema.GroupVersionKind{
+ Group: selector.Group,
+ Version: selector.Version,
+ Kind: selector.Kind,
+ }
+
+ isNamespacedResource := !rs.InformerManager.IsClusterScopedResources(gvk)
+ if isNamespacedResource && placementKey.Namespace == "" {
+ // If it's a namespace-scoped resource but placement has no namespace, return error.
+ err := fmt.Errorf("invalid placement %s: cannot select namespace-scoped resource %v in a clusterResourcePlacement", placementKey, gvr)
+ klog.ErrorS(err, "Invalid resource selector", "selector", selector)
+ return nil, NewUserError(err)
+ } else if !isNamespacedResource && placementKey.Namespace != "" {
+ // If it's a cluster-scoped resource but placement has a namespace, return error.
+ err := fmt.Errorf("invalid placement %s: cannot select cluster-scoped resource %v in a resourcePlacement", placementKey, gvr) + klog.ErrorS(err, "Invalid resource selector", "selector", selector) + return nil, NewUserError(err) + } + + if !rs.InformerManager.IsInformerSynced(gvr) { + err := fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource) + klog.ErrorS(err, "Informer cache is not synced", "gvr", gvr, "placement", placementKey) + return nil, NewExpectedBehaviorError(err) + } + + lister := rs.InformerManager.Lister(gvr) + + // TODO: validator should enforce the mutual exclusiveness between the `name` and `labelSelector` fields + if len(selector.Name) != 0 { + var obj runtime.Object + var err error + + if isNamespacedResource { + obj, err = lister.ByNamespace(placementKey.Namespace).Get(selector.Name) + } else { + obj, err = lister.Get(selector.Name) + } + + if err != nil { + klog.ErrorS(err, "Cannot get the resource", "gvr", gvr, "name", selector.Name, "namespace", placementKey.Namespace) + return nil, NewAPIServerError(true, client.IgnoreNotFound(err)) + } + + shouldInclude, err := rs.ShouldPropagateObj(placementKey.Namespace, placementKey.Name, obj) + if err != nil { + return nil, err + } + if shouldInclude { + return []runtime.Object{obj}, nil + } + return []runtime.Object{}, nil + } + + var labelSelector labels.Selector + if selector.LabelSelector == nil { + labelSelector = labels.Everything() + } else { + // TODO: validator should enforce the validity of the labelSelector + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) + } + } + + var selectedObjs []runtime.Object + var objects []runtime.Object + + if isNamespacedResource { + objects, err = lister.ByNamespace(placementKey.Namespace).List(labelSelector) + } else { + objects, err = lister.List(labelSelector) + } + if err != nil { + klog.ErrorS(err, "Cannot list all the objects", "gvr", gvr, "labelSelector", labelSelector, "placement", placementKey) + return nil, NewAPIServerError(true, err) + } + + // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation + for i := 0; i < len(objects); i++ { + shouldInclude, err := rs.ShouldPropagateObj(placementKey.Namespace, placementKey.Name, objects[i]) + if err != nil { + return nil, err + } + if shouldInclude { + selectedObjs = append(selectedObjs, objects[i]) + } + } + + return selectedObjs, nil +} + +func (rs *ResourceSelectorResolver) ShouldPropagateObj(namespace, placementName string, obj runtime.Object) (bool, error) { + uObj := obj.DeepCopyObject().(*unstructured.Unstructured) + uObjKObj := klog.KObj(uObj) + if uObj.GetDeletionTimestamp() != nil { + // skip a to be deleted resource + klog.V(2).InfoS("Skip the deleting resource by the selector", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, nil + } + + shouldInclude, err := ShouldPropagateObj(rs.InformerManager, uObj, rs.EnableWorkload) + if err != nil { + klog.ErrorS(err, "Cannot determine if we should propagate an object", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, err + } + if !shouldInclude { + klog.V(2).InfoS("Skip the resource by the selector which is forbidden", "namespace", namespace, "placement", placementName, "object", uObjKObj) + return false, nil + } + return true, nil +} + +// ShouldPropagateObj decides if one should 
propagate the object. +// PVCs are only propagated when enableWorkload is false (workloads not allowed on hub). +func ShouldPropagateObj(informerManager informer.Manager, uObj *unstructured.Unstructured, enableWorkload bool) (bool, error) { + // TODO: add more special handling for different resource kind + switch uObj.GroupVersionKind() { + case appv1.SchemeGroupVersion.WithKind(utils.ReplicaSetKind): + // Skip ReplicaSets if they are managed by Deployments (have owner references). + // Standalone ReplicaSets (without owners) can be propagated. + if len(uObj.GetOwnerReferences()) > 0 { + return false, nil + } + case appv1.SchemeGroupVersion.WithKind("ControllerRevision"): + // Skip ControllerRevisions if they are managed by DaemonSets/StatefulSets (have owner references). + // Standalone ControllerRevisions (without owners) can be propagated. + if len(uObj.GetOwnerReferences()) > 0 { + return false, nil + } + case corev1.SchemeGroupVersion.WithKind(utils.ConfigMapKind): + // Skip the built-in custom CA certificate created in the namespace. + if uObj.GetName() == "kube-root-ca.crt" { + return false, nil + } + case corev1.SchemeGroupVersion.WithKind("ServiceAccount"): + // Skip the default service account created in the namespace. + if uObj.GetName() == "default" { + return false, nil + } + case corev1.SchemeGroupVersion.WithKind("Secret"): + // The secret, with type 'kubernetes.io/service-account-token', is created along with `ServiceAccount` should be + // prevented from propagating. + var secret corev1.Secret + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uObj.Object, &secret); err != nil { + return false, NewUnexpectedBehaviorError(fmt.Errorf("failed to convert a secret object %s in namespace %s: %w", uObj.GetName(), uObj.GetNamespace(), err)) + } + if secret.Type == corev1.SecretTypeServiceAccountToken { + return false, nil + } + case corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"): + // Skip PersistentVolumeClaims by default to avoid conflicts with the PVCs created by statefulset + // This only happens if the workloads are allowed to run on the hub cluster. + if enableWorkload { + return false, nil + } + case corev1.SchemeGroupVersion.WithKind("Endpoints"): + // we assume that all endpoints with the same name of a service is created by the service controller + if _, err := informerManager.Lister(utils.ServiceGVR).ByNamespace(uObj.GetNamespace()).Get(uObj.GetName()); err != nil { + if apierrors.IsNotFound(err) { + // there is no service of the same name as the end point, + // we assume that this endpoint is created by the user + return true, nil + } + return false, NewAPIServerError(true, fmt.Errorf("failed to get the service %s in namespace %s: %w", uObj.GetName(), uObj.GetNamespace(), err)) + } + // we find a service of the same name as the endpoint, we assume it's created by the service + return false, nil + case discoveryv1.SchemeGroupVersion.WithKind("EndpointSlice"): + // all EndpointSlice created by the EndpointSlice controller has a managed by label + if _, exist := uObj.GetLabels()[discoveryv1.LabelManagedBy]; exist { + // do not propagate hub cluster generated endpoint slice + return false, nil + } + } + return true, nil +} diff --git a/pkg/utils/controller/resource_selector_resolver_test.go b/pkg/utils/controller/resource_selector_resolver_test.go new file mode 100644 index 000000000..d0069412d --- /dev/null +++ b/pkg/utils/controller/resource_selector_resolver_test.go @@ -0,0 +1,2221 @@ +/* +Copyright 2025 The KubeFleet Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "errors" + "math/rand" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/kubectl/pkg/util/deployment" + "k8s.io/utils/ptr" + + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + testinformer "github.com/kubefleet-dev/kubefleet/test/utils/informer" +) + +func makeIPFamilyPolicyTypePointer(policyType corev1.IPFamilyPolicyType) *corev1.IPFamilyPolicyType { + return &policyType +} +func makeServiceInternalTrafficPolicyPointer(policyType corev1.ServiceInternalTrafficPolicyType) *corev1.ServiceInternalTrafficPolicyType { + return &policyType +} + +func TestGenerateResourceContent(t *testing.T) { + tests := map[string]struct { + resource interface{} + wantResource interface{} + }{ + "should generate sanitized resource content for Kind: CustomResourceDefinition": { + resource: apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: "apiextensions.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "object-name", + GenerateName: "object-generateName", + Namespace: "object-namespace", + SelfLink: "object-selflink", + UID: types.UID(utilrand.String(10)), + ResourceVersion: utilrand.String(10), + Generation: int64(utilrand.Int()), + CreationTimestamp: metav1.Time{Time: time.Date(utilrand.IntnRange(0, 999), time.January, 1, 1, 1, 1, 1, time.UTC)}, + DeletionTimestamp: &metav1.Time{Time: time.Date(utilrand.IntnRange(1000, 1999), time.January, 1, 1, 1, 1, 1, time.UTC)}, + DeletionGracePeriodSeconds: ptr.To(int64(9999)), + Labels: map[string]string{ + "label-key": "label-value", + }, + Annotations: map[string]string{ + corev1.LastAppliedConfigAnnotation: "svc-object-annotation-lac-value", + deployment.RevisionAnnotation: "svc-object-revision-annotation-value", + "svc-annotation-key": "svc-object-annotation-key-value", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "svc-ownerRef-api/v1", + Kind: "svc-owner-kind", + Name: "svc-owner-name", + UID: "svc-owner-uid", + }, + }, + Finalizers: []string{"object-finalizer"}, + ManagedFields: []metav1.ManagedFieldsEntry{ + { + Manager: utilrand.String(10), + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: utilrand.String(10), + }, + }, + }, + }, + wantResource: apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + Kind: "CustomResourceDefinition", + APIVersion: "apiextensions.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "object-name", + 
GenerateName: "object-generateName", + Namespace: "object-namespace", + DeletionGracePeriodSeconds: ptr.To(int64(9999)), + Labels: map[string]string{ + "label-key": "label-value", + }, + Annotations: map[string]string{ + "svc-annotation-key": "svc-object-annotation-key-value", + }, + Finalizers: []string{"object-finalizer"}, + }, + }, + }, + "should generate sanitized resource content for Kind: Service": { + resource: corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-name", + Namespace: "svc-namespace", + SelfLink: utilrand.String(10), + DeletionTimestamp: &metav1.Time{Time: time.Date(00002, time.January, 1, 1, 1, 1, 1, time.UTC)}, + ManagedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "svc-manager", + Operation: metav1.ManagedFieldsOperationApply, + APIVersion: "svc-manager-api/v1", + }, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "svc-ownerRef-api/v1", + Kind: "svc-owner-kind", + Name: "svc-owner-name", + UID: "svc-owner-uid", + }, + }, + Annotations: map[string]string{ + corev1.LastAppliedConfigAnnotation: "svc-object-annotation-lac-value", + "svc-annotation-key": "svc-object-annotation-key-value", + }, + ResourceVersion: "svc-object-resourceVersion", + Generation: int64(utilrand.Int()), + CreationTimestamp: metav1.Time{Time: time.Date(00001, time.January, 1, 1, 1, 1, 1, time.UTC)}, + UID: types.UID(utilrand.String(10)), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: utilrand.String(10), + ClusterIPs: []string{}, + HealthCheckNodePort: rand.Int31(), + Selector: map[string]string{"svc-spec-selector-key": "svc-spec-selector-value"}, + Ports: []corev1.ServicePort{ + { + Name: "svc-port", + Protocol: corev1.ProtocolTCP, + AppProtocol: ptr.To("svc.com/my-custom-protocol"), + Port: 9001, + NodePort: rand.Int31(), + }, + }, + Type: corev1.ServiceType("svc-spec-type"), + ExternalIPs: []string{"svc-spec-externalIps-1"}, + SessionAffinity: corev1.ServiceAffinity("svc-spec-sessionAffinity"), + LoadBalancerIP: "192.168.1.3", + LoadBalancerSourceRanges: []string{"192.168.1.1"}, + ExternalName: "svc-spec-externalName", + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyType("svc-spec-externalTrafficPolicy"), + PublishNotReadyAddresses: false, + SessionAffinityConfig: &corev1.SessionAffinityConfig{ClientIP: &corev1.ClientIPConfig{TimeoutSeconds: ptr.To(int32(60))}}, + IPFamilies: []corev1.IPFamily{ + corev1.IPv4Protocol, + corev1.IPv6Protocol, + }, + IPFamilyPolicy: makeIPFamilyPolicyTypePointer(corev1.IPFamilyPolicySingleStack), + AllocateLoadBalancerNodePorts: ptr.To(false), + LoadBalancerClass: ptr.To("svc-spec-loadBalancerClass"), + InternalTrafficPolicy: makeServiceInternalTrafficPolicyPointer(corev1.ServiceInternalTrafficPolicyCluster), + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + { + IP: "192.168.1.1", + Hostname: "loadbalancer-ingress-hostname", + Ports: []corev1.PortStatus{ + { + Port: 9003, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + }, + }, + }, + wantResource: corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-name", + Namespace: "svc-namespace", + Annotations: map[string]string{ + "svc-annotation-key": "svc-object-annotation-key-value", + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"svc-spec-selector-key": "svc-spec-selector-value"}, + Ports: []corev1.ServicePort{ + { + Name: 
"svc-port", + Protocol: corev1.ProtocolTCP, + AppProtocol: ptr.To("svc.com/my-custom-protocol"), + Port: 9001, + }, + }, + Type: corev1.ServiceType("svc-spec-type"), + ExternalIPs: []string{"svc-spec-externalIps-1"}, + SessionAffinity: corev1.ServiceAffinity("svc-spec-sessionAffinity"), + LoadBalancerIP: "192.168.1.3", + LoadBalancerSourceRanges: []string{"192.168.1.1"}, + ExternalName: "svc-spec-externalName", + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyType("svc-spec-externalTrafficPolicy"), + PublishNotReadyAddresses: false, + SessionAffinityConfig: &corev1.SessionAffinityConfig{ClientIP: &corev1.ClientIPConfig{TimeoutSeconds: ptr.To(int32(60))}}, + IPFamilies: []corev1.IPFamily{ + corev1.IPv4Protocol, + corev1.IPv6Protocol, + }, + IPFamilyPolicy: makeIPFamilyPolicyTypePointer(corev1.IPFamilyPolicySingleStack), + AllocateLoadBalancerNodePorts: ptr.To(false), + LoadBalancerClass: ptr.To("svc-spec-loadBalancerClass"), + InternalTrafficPolicy: makeServiceInternalTrafficPolicyPointer(corev1.ServiceInternalTrafficPolicyCluster), + }, + }, + }, + } + + for testName, tt := range tests { + t.Run(testName, func(t *testing.T) { + object, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tt.resource) + if err != nil { + t.Fatalf("ToUnstructured failed: %v", err) + } + got, err := generateResourceContent(&unstructured.Unstructured{Object: object}) + if err != nil { + t.Fatalf("failed to generateResourceContent(): %v", err) + } + wantResourceContent := createResourceContentForTest(t, &tt.wantResource) + if diff := cmp.Diff(wantResourceContent, got); diff != "" { + t.Errorf("generateResourceContent() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func createResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.ResourceContent { + want, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) + if err != nil { + t.Fatalf("ToUnstructured failed: %v", err) + } + delete(want["metadata"].(map[string]interface{}), "creationTimestamp") + delete(want, "status") + + uWant := unstructured.Unstructured{Object: want} + rawWant, err := uWant.MarshalJSON() + if err != nil { + t.Fatalf("MarshalJSON failed: %v", err) + } + return &fleetv1beta1.ResourceContent{ + RawExtension: runtime.RawExtension{ + Raw: rawWant, + }, + } +} + +func TestGatherSelectedResource(t *testing.T) { + // Common test deployment object used across multiple test cases. + testDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "test-ns", + }, + }, + } + testDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test configmap object used across multiple test cases. + testConfigMap := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-configmap", + "namespace": "test-ns", + }, + }, + } + testConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + // Common test endpoints object used across multiple test cases. 
+ testEndpoints := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Endpoints", + "metadata": map[string]interface{}{ + "name": "test-endpoints", + "namespace": "test-ns", + }, + }, + } + testEndpoints.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Endpoints", + }) + + kubeRootCAConfigMap := &unstructured.Unstructured{ // reserved configmap object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "kube-root-ca.crt", + "namespace": "test-ns", + }, + }, + } + kubeRootCAConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) + + // Common test deployment object in deleting state. + testDeletingDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deleting-deployment", + "namespace": "test-ns", + "deletionTimestamp": "2025-01-01T00:00:00Z", + "labels": map[string]interface{}{ + "tier": "api", + "app": "frontend", + }, + }, + }, + } + testDeletingDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=frontend label. + testFrontendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "frontend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "frontend", + "tier": "web", + }, + }, + }, + } + testFrontendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test deployment with app=backend label. + testBackendDeployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "backend-deployment", + "namespace": "test-ns", + "labels": map[string]interface{}{ + "app": "backend", + "tier": "api", + }, + }, + }, + } + testBackendDeployment.SetGroupVersionKind(utils.DeploymentGVK) + + // Common test namespace object (cluster-scoped). + testNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "test-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + testNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + testDeletingNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "deleting-ns", + "labels": map[string]interface{}{ + "environment": "test", + }, + "deletionTimestamp": "2025-01-01T00:00:00Z", + }, + }, + } + testDeletingNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + prodNamespace := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "prod-ns", + "labels": map[string]interface{}{ + "environment": "production", + }, + }, + }, + } + prodNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + // Common test cluster role object (cluster-scoped). 
+ testClusterRole := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role", + }, + }, + } + testClusterRole.SetGroupVersionKind(utils.ClusterRoleGVK) + + // Common test cluster role object #2 (cluster-scoped). + testClusterRole2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role-2", + }, + }, + } + testClusterRole2.SetGroupVersionKind(utils.ClusterRoleGVK) + + kubeSystemNamespace := &unstructured.Unstructured{ // reserved namespace object + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "kube-system", + "labels": map[string]interface{}{ + "environment": "test", + }, + }, + }, + } + kubeSystemNamespace.SetGroupVersionKind(utils.NamespaceGVK) + + tests := []struct { + name string + placementName types.NamespacedName + selectors []fleetv1beta1.ResourceSelectorTerm + resourceConfig *utils.ResourceConfig + informerManager *testinformer.FakeManager + want []*unstructured.Unstructured + wantError error + }{ + { + name: "should handle empty selectors", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{}, + want: nil, + }, + { + name: "should skip disabled resources", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed + want: nil, + }, + { + name: "should skip disabled resources for resource placement", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed + want: nil, + }, + { + name: "should return error for cluster-scoped resource", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-clusterrole", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: ErrUserError, + }, + { + name: "should handle single resource selection successfully", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + 
wantError: nil, + }, + { + name: "should return empty result when informer manager returns not found error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), + }, + }, + } + }(), + wantError: ErrAPIServerError, + }, + { + name: "should return only non-deleting resources when mixed with deleting resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // non-deleting deployment + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deleting-deployment", // deleting deployment + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testDeployment}, + wantError: nil, + }, + { + name: "should handle resource selection successfully by using label selector", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: 
"apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "frontend", + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testFrontendDeployment}, + wantError: nil, + }, + { + name: "should handle label selector with MatchExpressions", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "tier", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"web", "api"}, + }, + }, + }, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, + }, + } + }(), + want: []*unstructured.Unstructured{testBackendDeployment, testFrontendDeployment}, // should return both deployments (order may vary) + wantError: nil, + }, + { + name: "should detect duplicate resources", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", // same deployment selected twice + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + } + }(), + wantError: ErrUserError, + }, + { + name: "should sort resources according to apply order", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "test-configmap", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + } + }(), + // ConfigMap should come first according to apply order. 
+ want: []*unstructured.Unstructured{testConfigMap, testDeployment}, + }, + // tests for cluster-scoped placements + { + name: "should return error for namespace-scoped resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, + }, + want: nil, + wantError: ErrUserError, + }, + { + name: "should sort resources for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + // Empty name means select all ClusterRoles (or use label selector). + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). + // Both ClusterRoles should be included since we're selecting all ClusterRoles with empty name. + want: []*unstructured.Unstructured{testNamespace, testClusterRole, testClusterRole2}, + }, + { + name: "should select resources by name for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + }, + } + }(), + // Namespace should come first according to apply order (namespace comes before ClusterRole). 
+ want: []*unstructured.Unstructured{testNamespace, testClusterRole}, + }, + { + name: "should select namespaces and its children resources by using label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their children resources + want: []*unstructured.Unstructured{testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should skip the resource for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: func() *utils.ResourceConfig { + cfg := utils.NewResourceConfig(false) + cfg.AddGroupVersionKind(utils.DeploymentGVK) + return cfg + }(), + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // should skip the deployment resource since it is not allowed by resource config + want: []*unstructured.Unstructured{testNamespace, testConfigMap}, + }, + { + name: "should select namespaces using nil label selector for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-reserved namespaces with matching labels and their child resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace, testConfigMap, testDeployment}, + }, + { + name: "should select only namespaces for namespace only scope for a namespace", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only the namespace with name "test-ns" and none of its child resources + want: []*unstructured.Unstructured{testNamespace}, + }, + { + name: "should select only namespaces for namespace only scope for namespaces with labels", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + // Should select only non-deleting namespaces with matching labels and none of their child resources + want: []*unstructured.Unstructured{prodNamespace, testNamespace}, + }, + { + name: "should return error if a resourceplacement selects namespaces even for namespace only scope", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceOnly, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: ErrUserError, + }, + { + name: "should return error when selecting a reserved namespace for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "environment": "test", + }, + }, + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, + utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, + } + }(), + wantError: ErrUserError, + }, + { + name: "should return empty result when informer manager returns not found error for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + want: nil, // should return nil when informer returns not found error + }, + { + name: "should return error when informer manager returns non-NotFound error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: ErrUnexpectedBehavior, + }, + { + name: "should return error using label selector when informer manager returns error (getting namespace) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + 
IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: { + Objects: []runtime.Object{}, + Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), + }, + }, + } + }(), + wantError: ErrAPIServerError, + }, + { + name: "should return error when informer manager returns non-NotFound error (getting deployment) for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, + utils.DeploymentGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, + } + }(), + wantError: ErrUnexpectedBehavior, + }, + { + name: "should skip reserved resources for namespaced placement", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "kube-root-ca.crt", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap}}, + }, + } + }(), + want: nil, // should not propagate reserved configmap + }, + { + name: "should skip reserved resources for namespaced placement when selecting all the configMaps", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "ConfigMap", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap, testConfigMap}}, + }, + } + }(), + want: []*unstructured.Unstructured{testConfigMap}, // should not propagate reserved configmap + }, + { + name: "should return error when informer cache is not synced for namespaced placement", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + InformerSynced: ptr.To(false), + } + }(), + wantError: ErrExpectedBehavior, + }, + 
{ + name: "should return error when informer cache is not synced for cluster scoped placement", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + Name: "test-cluster-role", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: false, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole}}, + }, + InformerSynced: ptr.To(false), + } + }(), + wantError: ErrExpectedBehavior, + }, + { + name: "should return error when informer cache is not synced for cluster scoped placement with namespace resources", + placementName: types.NamespacedName{Name: "test-placement"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Namespace", + Name: "test-ns", + SelectionScope: fleetv1beta1.NamespaceWithResources, + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + APIResources: map[schema.GroupVersionKind]bool{ + utils.NamespaceGVK: true, + }, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, + utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, + }, + NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, + InformerSynced: ptr.To(false), + } + }(), + wantError: ErrExpectedBehavior, + }, + { + name: "should return error when shouldPropagateObj returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Endpoints", + Name: "test-endpoints", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + {Group: "", Version: "v1", Resource: "endpoints"}: { + Objects: []runtime.Object{testEndpoints}, + }, + utils.ServiceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: ErrUnexpectedBehavior, + }, + { + name: "should return error by selecting all the endpoints when shouldPropagateObj returns error", + placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, + selectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: "", + Version: "v1", + Kind: "Endpoints", + }, + }, + resourceConfig: utils.NewResourceConfig(false), // default deny list + informerManager: func() *testinformer.FakeManager { + return &testinformer.FakeManager{ + IsClusterScopedResource: true, + Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ + {Group: "", Version: "v1", Resource: "endpoints"}: { + Objects: []runtime.Object{testEndpoints}, + }, + utils.ServiceGVR: { + Objects: []runtime.Object{}, + Err: errors.New("connection timeout"), + }, + }, + } + }(), + wantError: ErrUnexpectedBehavior, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rsr := &ResourceSelectorResolver{ + ResourceConfig: 
tt.resourceConfig, + InformerManager: tt.informerManager, + RestMapper: newFakeRESTMapper(), + } + + got, err := rsr.gatherSelectedResource(tt.placementName, tt.selectors) + if gotErr, wantErr := err != nil, tt.wantError != nil; gotErr != wantErr || !errors.Is(err, tt.wantError) { + t.Fatalf("gatherSelectedResource() = %v, want error %v", err, tt.wantError) + } + if tt.wantError != nil { + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("gatherSelectedResource() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +// fakeRESTMapper is a minimal RESTMapper implementation for testing +type fakeRESTMapper struct { + mappings map[schema.GroupKind]*meta.RESTMapping +} + +// newFakeRESTMapper creates a new fakeRESTMapper with default mappings +func newFakeRESTMapper() *fakeRESTMapper { + return &fakeRESTMapper{ + mappings: map[schema.GroupKind]*meta.RESTMapping{ + {Group: "", Kind: "Namespace"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, + }, + {Group: "apps", Kind: "Deployment"}: { + Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + }, + {Group: "", Kind: "ConfigMap"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, + }, + {Group: "", Kind: "Node"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}, + }, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: { + Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, + }, + {Group: "", Kind: "Endpoints"}: { + Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}, + }, + }, + } +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if mapping, exists := f.mappings[gk]; exists { + return mapping, nil + } + return nil, errors.New("resource not found") +} + +func (f *fakeRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + mapping, err := f.RESTMapping(gk, versions...) 
+ if err != nil { + return nil, err + } + return []*meta.RESTMapping{mapping}, nil +} + +func (f *fakeRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return input, nil +} + +func (f *fakeRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return []schema.GroupVersionResource{input}, nil +} + +func (f *fakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + switch { + case resource.Group == "" && resource.Resource == "namespaces": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil + case resource.Group == "apps" && resource.Resource == "deployments": + return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, nil + case resource.Group == "" && resource.Resource == "configmaps": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, nil + case resource.Group == "" && resource.Resource == "nodes": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, nil + case resource.Group == "" && resource.Resource == "endpoints": + return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"}, nil + } + return schema.GroupVersionKind{}, errors.New("kind not found") +} + +func (f *fakeRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + kind, err := f.KindFor(resource) + if err != nil { + return nil, err + } + return []schema.GroupVersionKind{kind}, nil +} + +func (f *fakeRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return resource, nil +} + +func TestSortResources(t *testing.T) { + // Create the ingressClass object + ingressClass := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "networking/v1", + "kind": "IngressClass", + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + } + + // Create the Ingress object + ingress := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "networking/v1", + "kind": "Ingress", + "metadata": map[string]interface{}{ + "name": "test-ingress", + "namespace": "test", + }, + }, + } + + // Create the NetworkPolicy object + networkPolicy := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "networking/v1", + "kind": "NetworkPolicy", + "metadata": map[string]interface{}{ + "name": "test-networkpolicy", + "namespace": "test", + }, + }, + } + + // Create the first Namespace object + namespace1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "test1", + }, + }, + } + + // Create the second Namespace object + namespace2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "test2", + }, + }, + } + + // Create the LimitRange object + limitRange := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "LimitRange", + "metadata": map[string]interface{}{ + "name": "test-limitrange", + "namespace": "test", + }, + }, + } + + // Create the pod object. + pod := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "test-pod", + "namespace": "test", + }, + }, + } + + // Create the ReplicationController object. 
+ replicationController := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": map[string]interface{}{ + "name": "test-replicationcontroller", + "namespace": "test", + }, + }, + } + + // Create the ResourceQuota object. + resourceQuota := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ResourceQuota", + "metadata": map[string]interface{}{ + "name": "test-resourcequota", + "namespace": "test", + }, + }, + } + + // Create the Service object. + service := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Service", + "metadata": map[string]interface{}{ + "name": "test-service", + "namespace": "test", + }, + }, + } + + // Create the ServiceAccount object. + serviceAccount := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": map[string]interface{}{ + "name": "test-serviceaccount", + "namespace": "test", + }, + }, + } + + // Create the PodDisruptionBudget object. + pdb := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "policy/v1", + "kind": "PodDisruptionBudget", + "metadata": map[string]interface{}{ + "name": "test-pdb", + "namespace": "test", + }, + }, + } + + // Create the Deployment object. + deployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-nginx", + "namespace": "test", + }, + }, + } + + // Create the v1beta1 Deployment object. + v1beta1Deployment := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1beta1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-nginx1", + "namespace": "test", + }, + }, + } + + // Create the DaemonSet object. + daemonSet := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "metadata": map[string]interface{}{ + "name": "test-daemonset", + "namespace": "test", + }, + }, + } + + // Create the ReplicaSet object. + replicaSet := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "metadata": map[string]interface{}{ + "name": "test-replicaset", + "namespace": "test", + }, + }, + } + + // Create the StatefulSet object. + statefulSet := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "StatefulSet", + "metadata": map[string]interface{}{ + "name": "test-statefulset", + "namespace": "test", + }, + }, + } + + // Create the StorageClass object. + storageClass := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "storage.k8s.io/v1", + "kind": "StorageClass", + "metadata": map[string]interface{}{ + "name": "test-storageclass", + }, + }, + } + + // Create the APIService object. + apiService := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apiregistration.k8s.io/v1", + "kind": "APIService", + "metadata": map[string]interface{}{ + "name": "test-apiservice", + }, + }, + } + + // Create the HorizontalPodAutoscaler object. 
+ hpa := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "autoscaling/v1", + "kind": "HorizontalPodAutoscaler", + "metadata": map[string]interface{}{ + "name": "test-hpa", + "namespace": "test", + }, + }, + } + + // Create the PriorityClass object. + priorityClass := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "scheduling.k8s.io/v1", + "kind": "PriorityClass", + "metadata": map[string]interface{}{ + "name": "test-priorityclass", + }, + }, + } + + // Create the ValidatingWebhookConfiguration object. + validatingWebhookConfiguration := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "admissionregistration.k8s.io/v1", + "kind": "ValidatingWebhookConfiguration", + "metadata": map[string]interface{}{ + "name": "test-validatingwebhookconfiguration", + }, + }, + } + + // Create the MutatingWebhookConfiguration object. + mutatingWebhookConfiguration := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "admissionregistration.k8s.io/v1", + "kind": "MutatingWebhookConfiguration", + "metadata": map[string]interface{}{ + "name": "test-mutatingwebhookconfiguration", + }, + }, + } + + // Create the first CustomResourceDefinition object. + crd1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": map[string]interface{}{ + "name": "test-crd1", + }, + }, + } + + // Create the second CustomResourceDefinition object. + crd2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": map[string]interface{}{ + "name": "test-crd2", + }, + }, + } + + // Create the ClusterRole object. + clusterRole := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-clusterrole", + }, + }, + } + + // Create the ClusterRoleBinding object. + clusterRoleBinindg := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": map[string]interface{}{ + "name": "test-clusterrolebinding", + }, + }, + } + + // Create the Role object. + role := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "Role", + "metadata": map[string]interface{}{ + "name": "test-role", + "namespace": "test", + }, + }, + } + + // Create the RoleBinding object. + roleBinding := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "RoleBinding", + "metadata": map[string]interface{}{ + "name": "test-rolebinding", + "namespace": "test", + }, + }, + } + + // Create the Secret object. + secret1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]interface{}{ + "name": "test-secret1", + "namespace": "test", + }, + }, + } + + // Create the Secret object. + secret2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]interface{}{ + "name": "test-secret2", + "namespace": "test", + }, + }, + } + + // Create the ConfigMap object. 
+ configMap := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-configmap", + "namespace": "test", + }, + }, + } + + // Create the CronJob object. + cronJob := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "CronJob", + "metadata": map[string]interface{}{ + "name": "test-cronjob", + "namespace": "test", + }, + }, + } + + // Create the Job object. + job := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": map[string]interface{}{ + "name": "test-job", + "namespace": "test", + }, + }, + } + + // Create the PersistentVolume object. + pv := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "PersistentVolume", + "metadata": map[string]interface{}{ + "name": "test-pv", + }, + }, + } + + // Create the PersistentVolumeClaim object. + pvc := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": map[string]interface{}{ + "name": "test-pvc", + "namespace": "test", + }, + }, + } + + // Create the test resource. + testResource1 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.kubernetes-fleet.io/v1alpha1", + "kind": "TestResource", + "metadata": map[string]interface{}{ + "name": "test-resource1", + "namespace": "test", + }, + }, + } + + // Create the test resource. + testResource2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.kubernetes-fleet.io/v1alpha1", + "kind": "TestResource", + "metadata": map[string]interface{}{ + "name": "test-resource2", + "namespace": "test", + }, + }, + } + + // Create another test resource. + anotherTestResource := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.kubernetes-fleet.io/v1alpha1", + "kind": "AnotherTestResource", + "metadata": map[string]interface{}{ + "name": "another-test-resource", + "namespace": "test", + }, + }, + } + + // Create v1beta1 another test resource. 
+ v1beta1AnotherTestResource := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "test.kubernetes-fleet.io/v1beta1", + "kind": "AnotherTestResource", + "metadata": map[string]interface{}{ + "name": "another-test-resource", + "namespace": "test", + }, + }, + } + + tests := map[string]struct { + resources []*unstructured.Unstructured + want []*unstructured.Unstructured + }{ + "should handle empty resources list": { + resources: []*unstructured.Unstructured{}, + want: []*unstructured.Unstructured{}, + }, + "should handle single resource": { + resources: []*unstructured.Unstructured{deployment}, + want: []*unstructured.Unstructured{deployment}, + }, + "should handle multiple resources of all kinds": { + resources: []*unstructured.Unstructured{ingressClass, clusterRole, clusterRoleBinindg, configMap, cronJob, crd1, daemonSet, deployment, testResource1, ingress, job, limitRange, namespace1, networkPolicy, pv, pvc, pod, pdb, replicaSet, replicationController, resourceQuota, role, roleBinding, secret1, service, serviceAccount, statefulSet, storageClass, apiService, hpa, priorityClass, validatingWebhookConfiguration, mutatingWebhookConfiguration}, + want: []*unstructured.Unstructured{priorityClass, namespace1, networkPolicy, resourceQuota, limitRange, pdb, serviceAccount, secret1, configMap, storageClass, pv, pvc, crd1, clusterRole, clusterRoleBinindg, role, roleBinding, service, daemonSet, pod, replicationController, replicaSet, deployment, hpa, statefulSet, job, cronJob, ingressClass, ingress, apiService, mutatingWebhookConfiguration, validatingWebhookConfiguration, testResource1}, + }, + "should handle multiple known resources, different kinds": { + resources: []*unstructured.Unstructured{crd2, crd1, secret2, namespace2, namespace1, secret1}, + want: []*unstructured.Unstructured{namespace1, namespace2, secret1, secret2, crd1, crd2}, + }, + "should handle multiple known resources, same kinds with different versions": { + resources: []*unstructured.Unstructured{v1beta1Deployment, deployment, limitRange}, + want: []*unstructured.Unstructured{limitRange, deployment, v1beta1Deployment}, + }, + "should handle multiple unknown resources, same kinds": { + resources: []*unstructured.Unstructured{testResource2, testResource1}, + want: []*unstructured.Unstructured{testResource1, testResource2}, + }, + "should handle multiple unknown resources, different kinds": { + resources: []*unstructured.Unstructured{testResource1, anotherTestResource}, + want: []*unstructured.Unstructured{anotherTestResource, testResource1}, + }, + "should handle multiple unknown resources, same kinds with different versions": { + resources: []*unstructured.Unstructured{v1beta1AnotherTestResource, anotherTestResource}, + want: []*unstructured.Unstructured{anotherTestResource, v1beta1AnotherTestResource}, + }, + } + + for testName, tt := range tests { + t.Run(testName, func(t *testing.T) { + // run many times to make sure it's stable + for i := 0; i < 10; i++ { + sortResources(tt.resources) + // Check that the returned resources match the expected resources + diff := cmp.Diff(tt.want, tt.resources) + if diff != "" { + t.Errorf("sortResources() mismatch (-want +got):\n%s", diff) + } + } + }) + } +} + +func TestShouldPropagateObj(t *testing.T) { + tests := []struct { + name string + obj map[string]interface{} + ownerReferences []metav1.OwnerReference + enableWorkload bool + want bool + }{ + { + name: "standalone replicaset without ownerReferences should propagate", + obj: map[string]interface{}{ + 
"apiVersion": "apps/v1", + "kind": "ReplicaSet", + "metadata": map[string]interface{}{ + "name": "standalone-rs", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: true, + want: true, + }, + { + name: "standalone replicaset without ownerReferences should propagate if workload is disabled", + obj: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "metadata": map[string]interface{}{ + "name": "standalone-rs", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: false, + want: true, + }, + { + name: "standalone pod without ownerReferences should propagate", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "standalone-pod", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: true, + want: true, + }, + { + name: "replicaset with deployment owner should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "metadata": map[string]interface{}{ + "name": "test-deploy-abc123", + "namespace": "default", + }, + }, + ownerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deploy", + UID: "12345", + }, + }, + enableWorkload: true, + want: false, + }, + { + name: "pod owned by replicaset - passes ShouldPropagateObj but filtered by resource config", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "test-deploy-abc123-xyz", + "namespace": "default", + }, + }, + ownerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "test-deploy-abc123", + UID: "67890", + }, + }, + enableWorkload: false, + want: true, // ShouldPropagateObj doesn't filter Pods - they're filtered by NewResourceConfig + }, + { + name: "controllerrevision owned by daemonset should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ControllerRevision", + "metadata": map[string]interface{}{ + "name": "test-ds-7b9848797f", + "namespace": "default", + }, + }, + ownerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "DaemonSet", + Name: "test-ds", + UID: "abcdef", + }, + }, + enableWorkload: false, + want: false, + }, + { + name: "controllerrevision owned by statefulset should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ControllerRevision", + "metadata": map[string]interface{}{ + "name": "test-ss-7878b4b446", + "namespace": "default", + }, + }, + ownerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: "test-ss", + UID: "fedcba", + }, + }, + enableWorkload: false, + want: false, + }, + { + name: "standalone controllerrevision without owner should propagate", + obj: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "ControllerRevision", + "metadata": map[string]interface{}{ + "name": "custom-revision", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: false, + want: true, + }, + { + name: "PVC should propagate when workload is disabled", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": map[string]interface{}{ + "name": "test-pvc", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: false, + want: true, + }, + { + name: "PVC should NOT propagate when workload is enabled", + obj: map[string]interface{}{ + 
"apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": map[string]interface{}{ + "name": "test-pvc", + "namespace": "default", + }, + }, + ownerReferences: nil, + enableWorkload: true, + want: false, + }, + { + name: "PVC with ownerReferences should NOT propagate when workload is enabled", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": map[string]interface{}{ + "name": "data-statefulset-0", + "namespace": "default", + }, + }, + ownerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "StatefulSet", + Name: "statefulset", + UID: "sts-uid", + }, + }, + enableWorkload: true, + want: false, + }, + { + name: "Default ServiceAccount in namespace should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": map[string]interface{}{ + "name": "default", + "namespace": "test-ns", + }, + }, + want: false, + }, + { + name: "service-account-token secret should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]interface{}{ + "name": "sa-token", + "namespace": "test", + }, + "type": string(corev1.SecretTypeServiceAccountToken), + }, + want: false, + }, + { + name: "endpointslice with managed-by label should NOT propagate", + obj: map[string]interface{}{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": map[string]interface{}{ + "name": "test-endpointslice", + "labels": map[string]interface{}{ + "endpointslice.kubernetes.io/managed-by": "endpointslice-controller", + }, + }, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + uObj := &unstructured.Unstructured{Object: tt.obj} + if tt.ownerReferences != nil { + uObj.SetOwnerReferences(tt.ownerReferences) + } + + got, err := ShouldPropagateObj(nil, uObj, tt.enableWorkload) + if err != nil { + t.Errorf("ShouldPropagateObj() error = %v", err) + return + } + if got != tt.want { + t.Errorf("ShouldPropagateObj() = %v, want %v", got, tt.want) + } + }) + } +} From 46157d834ae9b07c4ee4cec5eb5a748ac33e1d57 Mon Sep 17 00:00:00 2001 From: Wei Weng Date: Wed, 4 Feb 2026 17:16:41 -0500 Subject: [PATCH 07/10] chore: update go version to 1.24.12 for CVE (#428) bump go to 1.24.13 Signed-off-by: Wei Weng Co-authored-by: Wei Weng --- .github/workflows/ci.yml | 2 +- .github/workflows/code-lint.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/trivy.yml | 2 +- .github/workflows/upgrade.yml | 2 +- .golangci.yml | 2 +- docker/hub-agent.Dockerfile | 2 +- docker/member-agent.Dockerfile | 2 +- docker/refresh-token.Dockerfile | 2 +- go.mod | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b44dbc85..4738975f3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.9' + GO_VERSION: '1.24.12' CERT_MANAGER_VERSION: 'v1.16.2' jobs: diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 6b01223e2..65908017b 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -14,7 +14,7 @@ on: env: # Common versions - GO_VERSION: '1.24.9' + GO_VERSION: '1.24.12' jobs: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5d9503594..1dda21f26 100644 --- a/.github/workflows/release.yml +++ 
b/.github/workflows/release.yml @@ -20,7 +20,7 @@ env: HUB_AGENT_IMAGE_NAME: hub-agent MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.9' + GO_VERSION: '1.24.12' jobs: export-registry: diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 642663457..3cf096830 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -18,7 +18,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.9' + GO_VERSION: '1.24.12' jobs: export-registry: diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index 4d7a87432..318a531aa 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -17,7 +17,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.9' + GO_VERSION: '1.24.12' jobs: detect-noop: diff --git a/.golangci.yml b/.golangci.yml index a1b9bbc3a..d8c09541b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 15m - go: '1.24.9' + go: '1.24.12' linters-settings: stylecheck: diff --git a/docker/hub-agent.Dockerfile b/docker/hub-agent.Dockerfile index 4925c2356..faa7dfc6e 100644 --- a/docker/hub-agent.Dockerfile +++ b/docker/hub-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.9 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder ARG GOOS=linux ARG GOARCH=amd64 diff --git a/docker/member-agent.Dockerfile b/docker/member-agent.Dockerfile index 06cb20fb0..43075a67b 100644 --- a/docker/member-agent.Dockerfile +++ b/docker/member-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the memberagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.9 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder ARG GOOS=linux ARG GOARCH=amd64 diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index 1c876a325..b79d3a389 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,5 +1,5 @@ # Build the refreshtoken binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.9 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder ARG GOOS="linux" ARG GOARCH="amd64" diff --git a/go.mod b/go.mod index 704b41915..543ca7c84 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubefleet-dev/kubefleet -go 1.24.9 +go 1.24.12 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 From 90b140f1aa61d4f8e30a415dd2d779598e424288 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes <145056127+britaniar@users.noreply.github.com> Date: Thu, 5 Feb 2026 11:56:34 -0800 Subject: [PATCH 08/10] feat: update placement controller to use ResourceSelectorResolver (#426) --- cmd/hubagent/workload/setup.go | 13 +- pkg/controllers/placement/controller.go | 22 +- .../placement/resource_selector.go | 532 ----- .../placement/resource_selector_test.go | 1965 ----------------- pkg/controllers/placement/suite_test.go | 13 +- 5 files changed, 20 insertions(+), 2525 deletions(-) delete mode 100644 pkg/controllers/placement/resource_selector.go delete mode 100644 pkg/controllers/placement/resource_selector_test.go diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 56f259b8d..00064faf6 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -157,18 +157,21 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr 
ctrl.Manager, validator.RestMapper = mgr.GetRESTMapper() // webhook needs this to validate GVK of resource selector // Set up a custom controller to reconcile placement objects + resourceSelectorResolver := controller.ResourceSelectorResolver{ + RestMapper: mgr.GetRESTMapper(), + InformerManager: dynamicInformerManager, + ResourceConfig: resourceConfig, + SkippedNamespaces: skippedNamespaces, + EnableWorkload: opts.EnableWorkload, + } pc := &placement.Reconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor(placementControllerName), - RestMapper: mgr.GetRESTMapper(), - InformerManager: dynamicInformerManager, - ResourceConfig: resourceConfig, - SkippedNamespaces: skippedNamespaces, Scheme: mgr.GetScheme(), UncachedReader: mgr.GetAPIReader(), + ResourceSelectorResolver: resourceSelectorResolver, ResourceSnapshotCreationMinimumInterval: opts.ResourceSnapshotCreationMinimumInterval, ResourceChangesCollectionDuration: opts.ResourceChangesCollectionDuration, - EnableWorkload: opts.EnableWorkload, } rateLimiter := options.DefaultControllerRateLimiter(opts.RateLimiterOpts) diff --git a/pkg/controllers/placement/controller.go b/pkg/controllers/placement/controller.go index f366e689d..bc53ee8dd 100644 --- a/pkg/controllers/placement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -42,12 +42,10 @@ import ( fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" hubmetrics "github.com/kubefleet-dev/kubefleet/pkg/metrics/hub" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/queue" - "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/annotations" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" - "github.com/kubefleet-dev/kubefleet/pkg/utils/informer" "github.com/kubefleet-dev/kubefleet/pkg/utils/labels" "github.com/kubefleet-dev/kubefleet/pkg/utils/resource" fleettime "github.com/kubefleet-dev/kubefleet/pkg/utils/time" @@ -64,12 +62,6 @@ const controllerResyncPeriod = 30 * time.Minute // Reconciler reconciles a cluster resource placement object type Reconciler struct { - // the informer contains the cache for all the resources we need. - InformerManager informer.Manager - - // RestMapper is used to convert between gvk and gvr on known resources. - RestMapper meta.RESTMapper - // Client is used to update objects which goes to the api server directly. Client client.Client @@ -78,25 +70,19 @@ type Reconciler struct { // It's only needed by v1beta1 APIs. UncachedReader client.Reader - // ResourceConfig contains all the API resources that we won't select based on allowed or skipped propagating APIs option. - ResourceConfig *utils.ResourceConfig - - // SkippedNamespaces contains the namespaces that we should not propagate. - SkippedNamespaces map[string]bool - Recorder record.EventRecorder Scheme *runtime.Scheme + // ResourceSelectorResolver + ResourceSelectorResolver controller.ResourceSelectorResolver + // ResourceSnapshotCreationMinimumInterval is the minimum interval to create a new resourcesnapshot // to avoid too frequent updates. ResourceSnapshotCreationMinimumInterval time.Duration // ResourceChangesCollectionDuration is the duration for collecting resource changes into one snapshot. ResourceChangesCollectionDuration time.Duration - - // EnableWorkload indicates whether workloads are allowed to run on the hub cluster. 
- EnableWorkload bool } func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ctrl.Result, error) { @@ -198,7 +184,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 } // validate the resource selectors first before creating any snapshot - envelopeObjCount, selectedResources, selectedResourceIDs, err := r.selectResourcesForPlacement(placementObj) + envelopeObjCount, selectedResources, selectedResourceIDs, err := r.ResourceSelectorResolver.SelectResourcesForPlacement(placementObj) if err != nil { klog.ErrorS(err, "Failed to select the resources", "placement", placementKObj) if !errors.Is(err, controller.ErrUserError) { diff --git a/pkg/controllers/placement/resource_selector.go b/pkg/controllers/placement/resource_selector.go deleted file mode 100644 index 530e4ed6d..000000000 --- a/pkg/controllers/placement/resource_selector.go +++ /dev/null @@ -1,532 +0,0 @@ -/* -Copyright 2025 The KubeFleet Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package placement - -import ( - "fmt" - "sort" - "strings" - - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2" - "k8s.io/kubectl/pkg/util/deployment" - "sigs.k8s.io/controller-runtime/pkg/client" - - fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" -) - -var ( - // resourceSortOrder is the order in which resources are sorted when KubeFleet - // organizes the resources in a resource snapshot. - // - // Note (chenyu1): the sort order here does not affect the order in which resources - // are applied on a selected member cluster (the work applier will handle the resources - // in batch with its own grouping logic). KubeFleet sorts resources here solely - // for consistency (deterministic processing) reasons (i.e., if the set of the - // resources remain the same, no new snapshots are generated). - // - // Important (chenyu1): changing the sort order here may induce side effects in - // existing KubeFleet deployments, as a new snapshot might be prepared and rolled out. - // Do not update the sort order unless absolutely necessary. 
- resourceSortOrder = map[string]int{ - "PriorityClass": 0, - "Namespace": 1, - "NetworkPolicy": 2, - "ResourceQuota": 3, - "LimitRange": 4, - "PodDisruptionBudget": 5, - "ServiceAccount": 6, - "Secret": 7, - "ConfigMap": 8, - "StorageClass": 9, - "PersistentVolume": 10, - "PersistentVolumeClaim": 11, - "CustomResourceDefinition": 12, - "ClusterRole": 13, - "ClusterRoleBinding": 14, - "Role": 15, - "RoleBinding": 16, - "Service": 17, - "DaemonSet": 18, - "Pod": 19, - "ReplicationController": 20, - "ReplicaSet": 21, - "Deployment": 22, - "HorizontalPodAutoscaler": 23, - "StatefulSet": 24, - "Job": 25, - "CronJob": 26, - "IngressClass": 27, - "Ingress": 28, - "APIService": 29, - "MutatingWebhookConfiguration": 30, - "ValidatingWebhookConfiguration": 31, - } -) - -// gatherSelectedResource gets all the resources according to the resource selector. -func (r *Reconciler) gatherSelectedResource(placementKey types.NamespacedName, selectors []fleetv1beta1.ResourceSelectorTerm) ([]*unstructured.Unstructured, error) { - var resources []*unstructured.Unstructured - var resourceMap = make(map[fleetv1beta1.ResourceIdentifier]bool) - for _, selector := range selectors { - gvk := schema.GroupVersionKind{ - Group: selector.Group, - Version: selector.Version, - Kind: selector.Kind, - } - - if r.ResourceConfig.IsResourceDisabled(gvk) { - klog.V(2).InfoS("Skip select resource", "group version kind", gvk.String()) - continue - } - var objs []runtime.Object - var err error - if gvk == utils.NamespaceGVK && placementKey.Namespace == "" && selector.SelectionScope != fleetv1beta1.NamespaceOnly { - objs, err = r.fetchNamespaceResources(selector, placementKey.Name) - } else { - objs, err = r.fetchResources(selector, placementKey) - } - if err != nil { - return nil, err - } - for _, obj := range objs { - uObj := obj.(*unstructured.Unstructured) - ri := fleetv1beta1.ResourceIdentifier{ - Group: obj.GetObjectKind().GroupVersionKind().Group, - Version: obj.GetObjectKind().GroupVersionKind().Version, - Kind: obj.GetObjectKind().GroupVersionKind().Kind, - Name: uObj.GetName(), - Namespace: uObj.GetNamespace(), - } - if _, exist := resourceMap[ri]; exist { - err = fmt.Errorf("found duplicate resource %+v", ri) - klog.ErrorS(err, "User selected one resource more than once", "resource", ri, "placement", placementKey) - return nil, controller.NewUserError(err) - } - resourceMap[ri] = true - resources = append(resources, uObj) - } - } - // sort the resources in strict order so that we will get the stable list of manifest so that - // the generated work object doesn't change between reconcile loops. - sortResources(resources) - - return resources, nil -} - -func sortResources(resources []*unstructured.Unstructured) { - sort.Slice(resources, func(i, j int) bool { - obj1 := resources[i] - obj2 := resources[j] - k1 := obj1.GetObjectKind().GroupVersionKind().Kind - k2 := obj2.GetObjectKind().GroupVersionKind().Kind - - first, aok := resourceSortOrder[k1] - second, bok := resourceSortOrder[k2] - switch { - // if both kinds are unknown. - case !aok && !bok: - return lessByGVK(obj1, obj2, false) - // unknown kind should be last. - case !aok: - return false - case !bok: - return true - // same kind. - case first == second: - return lessByGVK(obj1, obj2, true) - } - // different known kinds, sort based on order index. 
- return first < second - }) -} - -func lessByGVK(obj1, obj2 *unstructured.Unstructured, ignoreKind bool) bool { - var gvk1, gvk2 string - if ignoreKind { - gvk1 = obj1.GetObjectKind().GroupVersionKind().GroupVersion().String() - gvk2 = obj2.GetObjectKind().GroupVersionKind().GroupVersion().String() - } else { - gvk1 = obj1.GetObjectKind().GroupVersionKind().String() - gvk2 = obj2.GetObjectKind().GroupVersionKind().String() - } - comp := strings.Compare(gvk1, gvk2) - if comp == 0 { - return strings.Compare(fmt.Sprintf("%s/%s", obj1.GetNamespace(), obj1.GetName()), - fmt.Sprintf("%s/%s", obj2.GetNamespace(), obj2.GetName())) < 0 - } - return comp < 0 -} - -// fetchResources retrieves the objects based on the selector. -func (r *Reconciler) fetchResources(selector fleetv1beta1.ResourceSelectorTerm, placementKey types.NamespacedName) ([]runtime.Object, error) { - klog.V(2).InfoS("Start to fetch resources by the selector", "selector", selector, "placement", placementKey) - gk := schema.GroupKind{ - Group: selector.Group, - Kind: selector.Kind, - } - restMapping, err := r.RestMapper.RESTMapping(gk, selector.Version) - if err != nil { - return nil, controller.NewUserError(fmt.Errorf("invalid placement %s, failed to get GVR of the selector: %w", placementKey, err)) - } - gvr := restMapping.Resource - gvk := schema.GroupVersionKind{ - Group: selector.Group, - Version: selector.Version, - Kind: selector.Kind, - } - - isNamespacedResource := !r.InformerManager.IsClusterScopedResources(gvk) - if isNamespacedResource && placementKey.Namespace == "" { - // If it's a namespace-scoped resource but placement has no namespace, return error. - err := fmt.Errorf("invalid placement %s: cannot select namespace-scoped resource %v in a clusterResourcePlacement", placementKey, gvr) - klog.ErrorS(err, "Invalid resource selector", "selector", selector) - return nil, controller.NewUserError(err) - } else if !isNamespacedResource && placementKey.Namespace != "" { - // If it's a cluster-scoped resource but placement has a namespace, return error. 
- err := fmt.Errorf("invalid placement %s: cannot select cluster-scoped resource %v in a resourcePlacement", placementKey, gvr) - klog.ErrorS(err, "Invalid resource selector", "selector", selector) - return nil, controller.NewUserError(err) - } - - if !r.InformerManager.IsInformerSynced(gvr) { - err := fmt.Errorf("informer cache for %+v is not synced yet", restMapping.Resource) - klog.ErrorS(err, "Informer cache is not synced", "gvr", gvr, "placement", placementKey) - return nil, controller.NewExpectedBehaviorError(err) - } - - lister := r.InformerManager.Lister(gvr) - - // TODO: validator should enforce the mutual exclusiveness between the `name` and `labelSelector` fields - if len(selector.Name) != 0 { - var obj runtime.Object - var err error - - if isNamespacedResource { - obj, err = lister.ByNamespace(placementKey.Namespace).Get(selector.Name) - } else { - obj, err = lister.Get(selector.Name) - } - - if err != nil { - klog.ErrorS(err, "Cannot get the resource", "gvr", gvr, "name", selector.Name, "namespace", placementKey.Namespace) - return nil, controller.NewAPIServerError(true, client.IgnoreNotFound(err)) - } - - shouldInclude, err := r.shouldPropagateObj(placementKey.Namespace, placementKey.Name, obj) - if err != nil { - return nil, err - } - if shouldInclude { - return []runtime.Object{obj}, nil - } - return []runtime.Object{}, nil - } - - var labelSelector labels.Selector - if selector.LabelSelector == nil { - labelSelector = labels.Everything() - } else { - // TODO: validator should enforce the validity of the labelSelector - labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) - if err != nil { - return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) - } - } - - var selectedObjs []runtime.Object - var objects []runtime.Object - - if isNamespacedResource { - objects, err = lister.ByNamespace(placementKey.Namespace).List(labelSelector) - } else { - objects, err = lister.List(labelSelector) - } - if err != nil { - klog.ErrorS(err, "Cannot list all the objects", "gvr", gvr, "labelSelector", labelSelector, "placement", placementKey) - return nil, controller.NewAPIServerError(true, err) - } - - // go ahead and claim all objects by adding a finalizer and insert the placement in its annotation - for i := 0; i < len(objects); i++ { - shouldInclude, err := r.shouldPropagateObj(placementKey.Namespace, placementKey.Name, objects[i]) - if err != nil { - return nil, err - } - if shouldInclude { - selectedObjs = append(selectedObjs, objects[i]) - } - } - - return selectedObjs, nil -} - -func (r *Reconciler) shouldPropagateObj(namespace, placementName string, obj runtime.Object) (bool, error) { - uObj := obj.DeepCopyObject().(*unstructured.Unstructured) - uObjKObj := klog.KObj(uObj) - if uObj.GetDeletionTimestamp() != nil { - // skip a to be deleted resource - klog.V(2).InfoS("Skip the deleting resource by the selector", "namespace", namespace, "placement", placementName, "object", uObjKObj) - return false, nil - } - - shouldInclude, err := controller.ShouldPropagateObj(r.InformerManager, uObj, r.EnableWorkload) - if err != nil { - klog.ErrorS(err, "Cannot determine if we should propagate an object", "namespace", namespace, "placement", placementName, "object", uObjKObj) - return false, err - } - if !shouldInclude { - klog.V(2).InfoS("Skip the resource by the selector which is forbidden", "namespace", namespace, "placement", placementName, "object", uObjKObj) - return false, nil - } - return true, nil -} - 
-// fetchNamespaceResources retrieves all the objects for a ResourceSelectorTerm that is for namespace. -func (r *Reconciler) fetchNamespaceResources(selector fleetv1beta1.ResourceSelectorTerm, placementName string) ([]runtime.Object, error) { - klog.V(2).InfoS("start to fetch the namespace resources by the selector", "selector", selector) - var resources []runtime.Object - - if len(selector.Name) != 0 { - // just a single namespace - objs, err := r.fetchAllResourcesInOneNamespace(selector.Name, placementName) - if err != nil { - klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", selector.Name) - return nil, err - } - return objs, err - } - - // go through each namespace - var labelSelector labels.Selector - var err error - if selector.LabelSelector == nil { - labelSelector = labels.Everything() - } else { - labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) - if err != nil { - return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot convert the label selector to a selector: %w", err)) - } - } - namespaces, err := r.InformerManager.Lister(utils.NamespaceGVR).List(labelSelector) - if err != nil { - klog.ErrorS(err, "Cannot list all the namespaces by the label selector", "labelSelector", labelSelector, "placement", placementName) - return nil, controller.NewAPIServerError(true, err) - } - - for _, namespace := range namespaces { - ns, err := meta.Accessor(namespace) - if err != nil { - return nil, controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot get the name of a namespace object: %w", err)) - } - objs, err := r.fetchAllResourcesInOneNamespace(ns.GetName(), placementName) - if err != nil { - klog.ErrorS(err, "failed to fetch all the selected resource in a namespace", "namespace", ns.GetName()) - return nil, err - } - resources = append(resources, objs...) - } - return resources, nil -} - -// fetchAllResourcesInOneNamespace retrieves all the objects inside a single namespace which includes the namespace itself. 
-func (r *Reconciler) fetchAllResourcesInOneNamespace(namespaceName string, placeName string) ([]runtime.Object, error) { - var resources []runtime.Object - - if !utils.ShouldPropagateNamespace(namespaceName, r.SkippedNamespaces) { - err := fmt.Errorf("invalid clusterRresourcePlacement %s: namespace %s is not allowed to propagate", placeName, namespaceName) - return nil, controller.NewUserError(err) - } - - klog.V(2).InfoS("start to fetch all the resources inside a namespace", "namespace", namespaceName) - // select the namespace object itself - obj, err := r.InformerManager.Lister(utils.NamespaceGVR).Get(namespaceName) - if err != nil { - klog.ErrorS(err, "cannot get the namespace", "namespace", namespaceName) - return nil, controller.NewAPIServerError(true, client.IgnoreNotFound(err)) - } - nameSpaceObj := obj.DeepCopyObject().(*unstructured.Unstructured) - if nameSpaceObj.GetDeletionTimestamp() != nil { - // skip a to be deleted namespace - klog.V(2).InfoS("skip the deleting namespace resources by the selector", - "placeName", placeName, "namespace", namespaceName) - return resources, nil - } - resources = append(resources, obj) - - trackedResource := r.InformerManager.GetNameSpaceScopedResources() - for _, gvr := range trackedResource { - if !utils.ShouldProcessResource(gvr, r.RestMapper, r.ResourceConfig) { - continue - } - if !r.InformerManager.IsInformerSynced(gvr) { - return nil, controller.NewExpectedBehaviorError(fmt.Errorf("informer cache for %+v is not synced yet", gvr)) - } - lister := r.InformerManager.Lister(gvr) - objs, err := lister.ByNamespace(namespaceName).List(labels.Everything()) - if err != nil { - klog.ErrorS(err, "Cannot list all the objects in namespace", "gvr", gvr, "namespace", namespaceName) - return nil, controller.NewAPIServerError(true, err) - } - for _, obj := range objs { - shouldInclude, err := r.shouldPropagateObj(namespaceName, placeName, obj) - if err != nil { - return nil, err - } - if shouldInclude { - resources = append(resources, obj) - } - } - } - - return resources, nil -} - -// generateRawContent strips all the unnecessary fields to prepare the objects for dispatch. -func generateRawContent(object *unstructured.Unstructured) ([]byte, error) { - // Make a deep copy of the object as we are modifying it. - object = object.DeepCopy() - // we keep the annotation/label/finalizer/owner references/delete grace period - object.SetResourceVersion("") - object.SetGeneration(0) - object.SetUID("") - object.SetSelfLink("") - object.SetDeletionTimestamp(nil) - object.SetManagedFields(nil) - - annots := object.GetAnnotations() - if annots != nil { - // Remove kubectl last applied annotation if exist - delete(annots, corev1.LastAppliedConfigAnnotation) - // Remove the revision annotation set by deployment controller. 
- delete(annots, deployment.RevisionAnnotation) - if len(annots) == 0 { - object.SetAnnotations(nil) - } else { - object.SetAnnotations(annots) - } - } - // Remove all the owner references as the UID in the owner reference can't be transferred to - // the member clusters - // TODO: Establish a way to keep the ownership relation through work-api - object.SetOwnerReferences(nil) - unstructured.RemoveNestedField(object.Object, "metadata", "creationTimestamp") - unstructured.RemoveNestedField(object.Object, "status") - - // TODO: see if there are other cases that we may have some extra fields - if object.GetKind() == "Service" && object.GetAPIVersion() == "v1" { - if clusterIP, exist, _ := unstructured.NestedString(object.Object, "spec", "clusterIP"); exist && clusterIP != corev1.ClusterIPNone { - unstructured.RemoveNestedField(object.Object, "spec", "clusterIP") - unstructured.RemoveNestedField(object.Object, "spec", "clusterIPs") - } - // We should remove all node ports that are assigned by hubcluster if any. - unstructured.RemoveNestedField(object.Object, "spec", "healthCheckNodePort") - - vals, found, err := unstructured.NestedFieldNoCopy(object.Object, "spec", "ports") - if found && err == nil { - if ports, ok := vals.([]interface{}); ok { - for i := range ports { - if each, ok := ports[i].(map[string]interface{}); ok { - delete(each, "nodePort") - } - } - } - } - if err != nil { - return nil, fmt.Errorf("failed to get the ports field in Service object, name =%s: %w", object.GetName(), err) - } - } else if object.GetKind() == "Job" && object.GetAPIVersion() == batchv1.SchemeGroupVersion.String() { - if manualSelector, exist, _ := unstructured.NestedBool(object.Object, "spec", "manualSelector"); !exist || !manualSelector { - // remove the selector field and labels added by the api-server if the job is not created with manual selector - // whose value conflict with the ones created by the member cluster api server - // https://github.com/kubernetes/kubernetes/blob/d4fde1e92a83cb533ae63b3abe9d49f08efb7a2f/pkg/registry/batch/job/strategy.go#L219 - // k8s used to add an old label called "controller-uid" but use a new label called "batch.kubernetes.io/controller-uid" after 1.26 - unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "controller-uid") - unstructured.RemoveNestedField(object.Object, "spec", "selector", "matchLabels", "batch.kubernetes.io/controller-uid") - unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "creationTimestamp") - unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "controller-uid") - unstructured.RemoveNestedField(object.Object, "spec", "template", "metadata", "labels", "batch.kubernetes.io/controller-uid") - } - } - - rawContent, err := object.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("failed to marshal the unstructured object gvk = %s, name =%s: %w", object.GroupVersionKind(), object.GetName(), err) - } - return rawContent, nil -} - -// generateResourceContent creates a resource content from the unstructured obj. -func generateResourceContent(object *unstructured.Unstructured) (*fleetv1beta1.ResourceContent, error) { - rawContent, err := generateRawContent(object) - if err != nil { - return nil, controller.NewUnexpectedBehaviorError(err) - } - return &fleetv1beta1.ResourceContent{ - RawExtension: runtime.RawExtension{Raw: rawContent}, - }, nil -} - -// selectResourcesForPlacement selects the resources according to the placement resourceSelectors. 
-// It also generates an array of resource content and resource identifier based on the selected resources. -// It also returns the number of envelope configmaps so the CRP controller can have the right expectation of the number of work objects. -func (r *Reconciler) selectResourcesForPlacement(placementObj fleetv1beta1.PlacementObj) (int, []fleetv1beta1.ResourceContent, []fleetv1beta1.ResourceIdentifier, error) { - envelopeObjCount := 0 - selectedObjects, err := r.gatherSelectedResource(types.NamespacedName{ - Name: placementObj.GetName(), - Namespace: placementObj.GetNamespace(), - }, placementObj.GetPlacementSpec().ResourceSelectors) - if err != nil { - return 0, nil, nil, err - } - - resources := make([]fleetv1beta1.ResourceContent, len(selectedObjects)) - resourcesIDs := make([]fleetv1beta1.ResourceIdentifier, len(selectedObjects)) - for i, unstructuredObj := range selectedObjects { - rc, err := generateResourceContent(unstructuredObj) - if err != nil { - return 0, nil, nil, err - } - uGVK := unstructuredObj.GetObjectKind().GroupVersionKind().GroupKind() - switch uGVK { - case utils.ClusterResourceEnvelopeGK: - envelopeObjCount++ - case utils.ResourceEnvelopeGK: - envelopeObjCount++ - } - resources[i] = *rc - ri := fleetv1beta1.ResourceIdentifier{ - Group: unstructuredObj.GroupVersionKind().Group, - Version: unstructuredObj.GroupVersionKind().Version, - Kind: unstructuredObj.GroupVersionKind().Kind, - Name: unstructuredObj.GetName(), - Namespace: unstructuredObj.GetNamespace(), - } - resourcesIDs[i] = ri - } - return envelopeObjCount, resources, resourcesIDs, nil -} diff --git a/pkg/controllers/placement/resource_selector_test.go b/pkg/controllers/placement/resource_selector_test.go deleted file mode 100644 index 29d62f426..000000000 --- a/pkg/controllers/placement/resource_selector_test.go +++ /dev/null @@ -1,1965 +0,0 @@ -/* -Copyright 2025 The KubeFleet Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package placement - -import ( - "errors" - "math/rand" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/kubectl/pkg/util/deployment" - "k8s.io/utils/ptr" - - fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" - "github.com/kubefleet-dev/kubefleet/pkg/utils" - "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" - testinformer "github.com/kubefleet-dev/kubefleet/test/utils/informer" -) - -func makeIPFamilyPolicyTypePointer(policyType corev1.IPFamilyPolicyType) *corev1.IPFamilyPolicyType { - return &policyType -} -func makeServiceInternalTrafficPolicyPointer(policyType corev1.ServiceInternalTrafficPolicyType) *corev1.ServiceInternalTrafficPolicyType { - return &policyType -} - -func TestGenerateResourceContent(t *testing.T) { - tests := map[string]struct { - resource interface{} - wantResource interface{} - }{ - "should generate sanitized resource content for Kind: CustomResourceDefinition": { - resource: apiextensionsv1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "object-name", - GenerateName: "object-generateName", - Namespace: "object-namespace", - SelfLink: "object-selflink", - UID: types.UID(utilrand.String(10)), - ResourceVersion: utilrand.String(10), - Generation: int64(utilrand.Int()), - CreationTimestamp: metav1.Time{Time: time.Date(utilrand.IntnRange(0, 999), time.January, 1, 1, 1, 1, 1, time.UTC)}, - DeletionTimestamp: &metav1.Time{Time: time.Date(utilrand.IntnRange(1000, 1999), time.January, 1, 1, 1, 1, 1, time.UTC)}, - DeletionGracePeriodSeconds: ptr.To(int64(9999)), - Labels: map[string]string{ - "label-key": "label-value", - }, - Annotations: map[string]string{ - corev1.LastAppliedConfigAnnotation: "svc-object-annotation-lac-value", - deployment.RevisionAnnotation: "svc-object-revision-annotation-value", - "svc-annotation-key": "svc-object-annotation-key-value", - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "svc-ownerRef-api/v1", - Kind: "svc-owner-kind", - Name: "svc-owner-name", - UID: "svc-owner-uid", - }, - }, - Finalizers: []string{"object-finalizer"}, - ManagedFields: []metav1.ManagedFieldsEntry{ - { - Manager: utilrand.String(10), - Operation: metav1.ManagedFieldsOperationApply, - APIVersion: utilrand.String(10), - }, - }, - }, - }, - wantResource: apiextensionsv1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "object-name", - GenerateName: "object-generateName", - Namespace: "object-namespace", - DeletionGracePeriodSeconds: ptr.To(int64(9999)), - Labels: map[string]string{ - "label-key": "label-value", - }, - Annotations: map[string]string{ - "svc-annotation-key": "svc-object-annotation-key-value", - }, - Finalizers: []string{"object-finalizer"}, - }, - }, - }, - "should generate sanitized resource content for Kind: Service": { - resource: corev1.Service{ - TypeMeta: metav1.TypeMeta{ - 
APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "svc-name", - Namespace: "svc-namespace", - SelfLink: utilrand.String(10), - DeletionTimestamp: &metav1.Time{Time: time.Date(00002, time.January, 1, 1, 1, 1, 1, time.UTC)}, - ManagedFields: []metav1.ManagedFieldsEntry{ - { - Manager: "svc-manager", - Operation: metav1.ManagedFieldsOperationApply, - APIVersion: "svc-manager-api/v1", - }, - }, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "svc-ownerRef-api/v1", - Kind: "svc-owner-kind", - Name: "svc-owner-name", - UID: "svc-owner-uid", - }, - }, - Annotations: map[string]string{ - corev1.LastAppliedConfigAnnotation: "svc-object-annotation-lac-value", - "svc-annotation-key": "svc-object-annotation-key-value", - }, - ResourceVersion: "svc-object-resourceVersion", - Generation: int64(utilrand.Int()), - CreationTimestamp: metav1.Time{Time: time.Date(00001, time.January, 1, 1, 1, 1, 1, time.UTC)}, - UID: types.UID(utilrand.String(10)), - }, - Spec: corev1.ServiceSpec{ - ClusterIP: utilrand.String(10), - ClusterIPs: []string{}, - HealthCheckNodePort: rand.Int31(), - Selector: map[string]string{"svc-spec-selector-key": "svc-spec-selector-value"}, - Ports: []corev1.ServicePort{ - { - Name: "svc-port", - Protocol: corev1.ProtocolTCP, - AppProtocol: ptr.To("svc.com/my-custom-protocol"), - Port: 9001, - NodePort: rand.Int31(), - }, - }, - Type: corev1.ServiceType("svc-spec-type"), - ExternalIPs: []string{"svc-spec-externalIps-1"}, - SessionAffinity: corev1.ServiceAffinity("svc-spec-sessionAffinity"), - LoadBalancerIP: "192.168.1.3", - LoadBalancerSourceRanges: []string{"192.168.1.1"}, - ExternalName: "svc-spec-externalName", - ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyType("svc-spec-externalTrafficPolicy"), - PublishNotReadyAddresses: false, - SessionAffinityConfig: &corev1.SessionAffinityConfig{ClientIP: &corev1.ClientIPConfig{TimeoutSeconds: ptr.To(int32(60))}}, - IPFamilies: []corev1.IPFamily{ - corev1.IPv4Protocol, - corev1.IPv6Protocol, - }, - IPFamilyPolicy: makeIPFamilyPolicyTypePointer(corev1.IPFamilyPolicySingleStack), - AllocateLoadBalancerNodePorts: ptr.To(false), - LoadBalancerClass: ptr.To("svc-spec-loadBalancerClass"), - InternalTrafficPolicy: makeServiceInternalTrafficPolicyPointer(corev1.ServiceInternalTrafficPolicyCluster), - }, - Status: corev1.ServiceStatus{ - LoadBalancer: corev1.LoadBalancerStatus{ - Ingress: []corev1.LoadBalancerIngress{ - { - IP: "192.168.1.1", - Hostname: "loadbalancer-ingress-hostname", - Ports: []corev1.PortStatus{ - { - Port: 9003, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, - }, - }, - }, - wantResource: corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "svc-name", - Namespace: "svc-namespace", - Annotations: map[string]string{ - "svc-annotation-key": "svc-object-annotation-key-value", - }, - }, - Spec: corev1.ServiceSpec{ - Selector: map[string]string{"svc-spec-selector-key": "svc-spec-selector-value"}, - Ports: []corev1.ServicePort{ - { - Name: "svc-port", - Protocol: corev1.ProtocolTCP, - AppProtocol: ptr.To("svc.com/my-custom-protocol"), - Port: 9001, - }, - }, - Type: corev1.ServiceType("svc-spec-type"), - ExternalIPs: []string{"svc-spec-externalIps-1"}, - SessionAffinity: corev1.ServiceAffinity("svc-spec-sessionAffinity"), - LoadBalancerIP: "192.168.1.3", - LoadBalancerSourceRanges: []string{"192.168.1.1"}, - ExternalName: "svc-spec-externalName", - ExternalTrafficPolicy: 
corev1.ServiceExternalTrafficPolicyType("svc-spec-externalTrafficPolicy"), - PublishNotReadyAddresses: false, - SessionAffinityConfig: &corev1.SessionAffinityConfig{ClientIP: &corev1.ClientIPConfig{TimeoutSeconds: ptr.To(int32(60))}}, - IPFamilies: []corev1.IPFamily{ - corev1.IPv4Protocol, - corev1.IPv6Protocol, - }, - IPFamilyPolicy: makeIPFamilyPolicyTypePointer(corev1.IPFamilyPolicySingleStack), - AllocateLoadBalancerNodePorts: ptr.To(false), - LoadBalancerClass: ptr.To("svc-spec-loadBalancerClass"), - InternalTrafficPolicy: makeServiceInternalTrafficPolicyPointer(corev1.ServiceInternalTrafficPolicyCluster), - }, - }, - }, - } - - for testName, tt := range tests { - t.Run(testName, func(t *testing.T) { - object, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tt.resource) - if err != nil { - t.Fatalf("ToUnstructured failed: %v", err) - } - got, err := generateResourceContent(&unstructured.Unstructured{Object: object}) - if err != nil { - t.Fatalf("failed to generateResourceContent(): %v", err) - } - wantResourceContent := createResourceContentForTest(t, &tt.wantResource) - if diff := cmp.Diff(wantResourceContent, got); diff != "" { - t.Errorf("generateResourceContent() mismatch (-want, +got):\n%s", diff) - } - }) - } -} - -func createResourceContentForTest(t *testing.T, obj interface{}) *fleetv1beta1.ResourceContent { - want, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) - if err != nil { - t.Fatalf("ToUnstructured failed: %v", err) - } - delete(want["metadata"].(map[string]interface{}), "creationTimestamp") - delete(want, "status") - - uWant := unstructured.Unstructured{Object: want} - rawWant, err := uWant.MarshalJSON() - if err != nil { - t.Fatalf("MarshalJSON failed: %v", err) - } - return &fleetv1beta1.ResourceContent{ - RawExtension: runtime.RawExtension{ - Raw: rawWant, - }, - } -} - -func TestGatherSelectedResource(t *testing.T) { - // Common test deployment object used across multiple test cases. - testDeployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "test-deployment", - "namespace": "test-ns", - }, - }, - } - testDeployment.SetGroupVersionKind(utils.DeploymentGVK) - - // Common test configmap object used across multiple test cases. - testConfigMap := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-configmap", - "namespace": "test-ns", - }, - }, - } - testConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) - - // Common test endpoints object used across multiple test cases. - testEndpoints := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Endpoints", - "metadata": map[string]interface{}{ - "name": "test-endpoints", - "namespace": "test-ns", - }, - }, - } - testEndpoints.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Endpoints", - }) - - kubeRootCAConfigMap := &unstructured.Unstructured{ // reserved configmap object - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "kube-root-ca.crt", - "namespace": "test-ns", - }, - }, - } - kubeRootCAConfigMap.SetGroupVersionKind(utils.ConfigMapGVK) - - // Common test deployment object in deleting state. 
- testDeletingDeployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "test-deleting-deployment", - "namespace": "test-ns", - "deletionTimestamp": "2025-01-01T00:00:00Z", - "labels": map[string]interface{}{ - "tier": "api", - "app": "frontend", - }, - }, - }, - } - testDeletingDeployment.SetGroupVersionKind(utils.DeploymentGVK) - - // Common test deployment with app=frontend label. - testFrontendDeployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "frontend-deployment", - "namespace": "test-ns", - "labels": map[string]interface{}{ - "app": "frontend", - "tier": "web", - }, - }, - }, - } - testFrontendDeployment.SetGroupVersionKind(utils.DeploymentGVK) - - // Common test deployment with app=backend label. - testBackendDeployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "backend-deployment", - "namespace": "test-ns", - "labels": map[string]interface{}{ - "app": "backend", - "tier": "api", - }, - }, - }, - } - testBackendDeployment.SetGroupVersionKind(utils.DeploymentGVK) - - // Common test namespace object (cluster-scoped). - testNamespace := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "test-ns", - "labels": map[string]interface{}{ - "environment": "test", - }, - }, - }, - } - testNamespace.SetGroupVersionKind(utils.NamespaceGVK) - - testDeletingNamespace := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "deleting-ns", - "labels": map[string]interface{}{ - "environment": "test", - }, - "deletionTimestamp": "2025-01-01T00:00:00Z", - }, - }, - } - testDeletingNamespace.SetGroupVersionKind(utils.NamespaceGVK) - - prodNamespace := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "prod-ns", - "labels": map[string]interface{}{ - "environment": "production", - }, - }, - }, - } - prodNamespace.SetGroupVersionKind(utils.NamespaceGVK) - - // Common test cluster role object (cluster-scoped). - testClusterRole := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "ClusterRole", - "metadata": map[string]interface{}{ - "name": "test-cluster-role", - }, - }, - } - testClusterRole.SetGroupVersionKind(utils.ClusterRoleGVK) - - // Common test cluster role object #2 (cluster-scoped). 
- testClusterRole2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "ClusterRole", - "metadata": map[string]interface{}{ - "name": "test-cluster-role-2", - }, - }, - } - testClusterRole2.SetGroupVersionKind(utils.ClusterRoleGVK) - - kubeSystemNamespace := &unstructured.Unstructured{ // reserved namespace object - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "kube-system", - "labels": map[string]interface{}{ - "environment": "test", - }, - }, - }, - } - kubeSystemNamespace.SetGroupVersionKind(utils.NamespaceGVK) - - tests := []struct { - name string - placementName types.NamespacedName - selectors []fleetv1beta1.ResourceSelectorTerm - resourceConfig *utils.ResourceConfig - informerManager *testinformer.FakeManager - want []*unstructured.Unstructured - wantError error - }{ - { - name: "should handle empty selectors", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{}, - want: nil, - }, - { - name: "should skip disabled resources", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed - want: nil, - }, - { - name: "should skip disabled resources for resource placement", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(true), // make this allow list - nothing is allowed - want: nil, - }, - { - name: "should return error for cluster-scoped resource", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-clusterrole", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, - }, - want: nil, - wantError: controller.ErrUserError, - }, - { - name: "should handle single resource selection successfully", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - }, - } - }(), - want: []*unstructured.Unstructured{testDeployment}, - wantError: nil, - }, - { - name: "should return empty result when informer manager returns not found error", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - 
resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: { - Objects: []runtime.Object{}, - Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), - }, - }, - } - }(), - want: nil, // should return nil when informer returns not found error - }, - { - name: "should return error when informer manager returns non-NotFound error", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: { - Objects: []runtime.Object{}, - Err: errors.New("connection timeout"), - }, - }, - } - }(), - wantError: controller.ErrUnexpectedBehavior, - }, - { - name: "should return error using label selector when informer manager returns error", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: { - Objects: []runtime.Object{}, - Err: apierrors.NewNotFound(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test-deployment"), - }, - }, - } - }(), - wantError: controller.ErrAPIServerError, - }, - { - name: "should return only non-deleting resources when mixed with deleting resources", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", // non-deleting deployment - }, - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deleting-deployment", // deleting deployment - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - }, - } - }(), - want: []*unstructured.Unstructured{testDeployment}, - wantError: nil, - }, - { - name: "should handle resource selection successfully by using label selector", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "frontend", - }, - }, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - 
IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, - }, - } - }(), - want: []*unstructured.Unstructured{testFrontendDeployment}, - wantError: nil, - }, - { - name: "should handle label selector with MatchExpressions", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "tier", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"web", "api"}, - }, - }, - }, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testFrontendDeployment, testBackendDeployment, testDeployment, testDeletingDeployment}}, - }, - } - }(), - want: []*unstructured.Unstructured{testBackendDeployment, testFrontendDeployment}, // should return both deployments (order may vary) - wantError: nil, - }, - { - name: "should detect duplicate resources", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", // same deployment selected twice - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - }, - } - }(), - wantError: controller.ErrUserError, - }, - { - name: "should sort resources according to apply order", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "test-configmap", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, - }, - } - }(), - // ConfigMap should come first according to apply order. 
- want: []*unstructured.Unstructured{testConfigMap, testDeployment}, - }, - // tests for cluster-scoped placements - { - name: "should return error for namespace-scoped resource for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{}, - }, - want: nil, - wantError: controller.ErrUserError, - }, - { - name: "should sort resources for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - // Empty name means select all ClusterRoles (or use label selector). - }, - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, - }, - } - }(), - // Namespace should come first according to apply order (namespace comes before ClusterRole). - // Both ClusterRoles should be included since we're selecting all ClusterRoles with empty name. - want: []*unstructured.Unstructured{testNamespace, testClusterRole, testClusterRole2}, - }, - { - name: "should select resources by name for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole, testClusterRole2}}, - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, - }, - } - }(), - // Namespace should come first according to apply order (namespace comes before ClusterRole). 
- want: []*unstructured.Unstructured{testNamespace, testClusterRole}, - }, - { - name: "should select namespaces and its children resources by using label selector for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "environment": "test", - }, - }, - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - // Should select only non-reserved namespaces with matching labels and their children resources - want: []*unstructured.Unstructured{testNamespace, testConfigMap, testDeployment}, - }, - { - name: "should skip the resource for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "environment": "test", - }, - }, - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: func() *utils.ResourceConfig { - cfg := utils.NewResourceConfig(false) - cfg.AddGroupVersionKind(utils.DeploymentGVK) - return cfg - }(), - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - // should skip the deployment resource since it is not allowed by resource config - want: []*unstructured.Unstructured{testNamespace, testConfigMap}, - }, - { - name: "should select namespaces using nil label selector for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - // Should select only non-reserved namespaces with matching labels and their child resources - want: []*unstructured.Unstructured{prodNamespace, testNamespace, testConfigMap, testDeployment}, - }, - { - name: "should select only namespaces for namespace only scope for a namespace", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceOnly, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - // Should select only the namespace with name "test-ns" and none of its child resources - want: []*unstructured.Unstructured{testNamespace}, - }, - { - name: "should select only namespaces for namespace only scope for namespaces with labels", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - SelectionScope: fleetv1beta1.NamespaceOnly, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - // Should select only non-deleting namespaces with matching labels and none of their child resources - want: []*unstructured.Unstructured{prodNamespace, testNamespace}, - }, - { - name: "should return error if a resourceplacement selects namespaces even for namespace only scope", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceOnly, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap, 
kubeRootCAConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - wantError: controller.ErrUserError, - }, - { - name: "should return error when selecting a reserved namespace for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "environment": "test", - }, - }, - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment, testDeletingDeployment}}, - utils.ConfigMapGVR: {Objects: []runtime.Object{testConfigMap}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR, utils.ConfigMapGVR}, - } - }(), - wantError: controller.ErrUserError, - }, - { - name: "should return empty result when informer manager returns not found error for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: { - Objects: []runtime.Object{}, - Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), - }, - }, - } - }(), - want: nil, // should return nil when informer returns not found error - }, - { - name: "should return error when informer manager returns non-NotFound error (getting namespace) for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: { - Objects: []runtime.Object{}, - Err: errors.New("connection timeout"), - }, - }, - } - }(), - wantError: controller.ErrUnexpectedBehavior, - }, - { - name: "should return error using label selector when informer manager returns error (getting namespace) for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return 
&testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: { - Objects: []runtime.Object{}, - Err: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "namespaces"}, "test-ns"), - }, - }, - } - }(), - wantError: controller.ErrAPIServerError, - }, - { - name: "should return error when informer manager returns non-NotFound error (getting deployment) for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace, prodNamespace, testDeletingNamespace, kubeSystemNamespace}}, - utils.DeploymentGVR: { - Objects: []runtime.Object{}, - Err: errors.New("connection timeout"), - }, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, - } - }(), - wantError: controller.ErrUnexpectedBehavior, - }, - { - name: "should skip reserved resources for namespaced placement", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "kube-root-ca.crt", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap}}, - }, - } - }(), - want: nil, // should not propagate reserved configmap - }, - { - name: "should skip reserved resources for namespaced placement when selecting all the configMaps", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "ConfigMap", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.ConfigMapGVR: {Objects: []runtime.Object{kubeRootCAConfigMap, testConfigMap}}, - }, - } - }(), - want: []*unstructured.Unstructured{testConfigMap}, // should not propagate reserved configmap - }, - { - name: "should return error when informer cache is not synced for namespaced placement", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "test-deployment", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - }, - InformerSynced: ptr.To(false), - 
} - }(), - wantError: controller.ErrExpectedBehavior, - }, - { - name: "should return error when informer cache is not synced for cluster scoped placement", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "rbac.authorization.k8s.io", - Version: "v1", - Kind: "ClusterRole", - Name: "test-cluster-role", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: false, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.ClusterRoleGVR: {Objects: []runtime.Object{testClusterRole}}, - }, - InformerSynced: ptr.To(false), - } - }(), - wantError: controller.ErrExpectedBehavior, - }, - { - name: "should return error when informer cache is not synced for cluster scoped placement with namespace resources", - placementName: types.NamespacedName{Name: "test-placement"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Namespace", - Name: "test-ns", - SelectionScope: fleetv1beta1.NamespaceWithResources, - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - APIResources: map[schema.GroupVersionKind]bool{ - utils.NamespaceGVK: true, - }, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - utils.NamespaceGVR: {Objects: []runtime.Object{testNamespace}}, - utils.DeploymentGVR: {Objects: []runtime.Object{testDeployment}}, - }, - NamespaceScopedResources: []schema.GroupVersionResource{utils.DeploymentGVR}, - InformerSynced: ptr.To(false), - } - }(), - wantError: controller.ErrExpectedBehavior, - }, - { - name: "should return error when shouldPropagateObj returns error", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Endpoints", - Name: "test-endpoints", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - {Group: "", Version: "v1", Resource: "endpoints"}: { - Objects: []runtime.Object{testEndpoints}, - }, - utils.ServiceGVR: { - Objects: []runtime.Object{}, - Err: errors.New("connection timeout"), - }, - }, - } - }(), - wantError: controller.ErrUnexpectedBehavior, - }, - { - name: "should return error by selecting all the endpoints when shouldPropagateObj returns error", - placementName: types.NamespacedName{Name: "test-placement", Namespace: "test-ns"}, - selectors: []fleetv1beta1.ResourceSelectorTerm{ - { - Group: "", - Version: "v1", - Kind: "Endpoints", - }, - }, - resourceConfig: utils.NewResourceConfig(false), // default deny list - informerManager: func() *testinformer.FakeManager { - return &testinformer.FakeManager{ - IsClusterScopedResource: true, - Listers: map[schema.GroupVersionResource]*testinformer.FakeLister{ - {Group: "", Version: "v1", Resource: "endpoints"}: { - Objects: []runtime.Object{testEndpoints}, - }, - utils.ServiceGVR: { - Objects: []runtime.Object{}, - Err: errors.New("connection timeout"), - }, - }, - } - }(), - wantError: controller.ErrUnexpectedBehavior, - }, - } - - for _, tt := 
range tests { - t.Run(tt.name, func(t *testing.T) { - r := &Reconciler{ - ResourceConfig: tt.resourceConfig, - InformerManager: tt.informerManager, - RestMapper: newFakeRESTMapper(), - } - - got, err := r.gatherSelectedResource(tt.placementName, tt.selectors) - if gotErr, wantErr := err != nil, tt.wantError != nil; gotErr != wantErr || !errors.Is(err, tt.wantError) { - t.Fatalf("gatherSelectedResource() = %v, want error %v", err, tt.wantError) - } - if tt.wantError != nil { - return - } - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Errorf("gatherSelectedResource() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -// fakeRESTMapper is a minimal RESTMapper implementation for testing -type fakeRESTMapper struct { - mappings map[schema.GroupKind]*meta.RESTMapping -} - -// newFakeRESTMapper creates a new fakeRESTMapper with default mappings -func newFakeRESTMapper() *fakeRESTMapper { - return &fakeRESTMapper{ - mappings: map[schema.GroupKind]*meta.RESTMapping{ - {Group: "", Kind: "Namespace"}: { - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, - }, - {Group: "apps", Kind: "Deployment"}: { - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - }, - {Group: "", Kind: "ConfigMap"}: { - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - }, - {Group: "", Kind: "Node"}: { - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}, - }, - {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: { - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, - }, - {Group: "", Kind: "Endpoints"}: { - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}, - }, - }, - } -} - -func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if mapping, exists := f.mappings[gk]; exists { - return mapping, nil - } - return nil, errors.New("resource not found") -} - -func (f *fakeRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - mapping, err := f.RESTMapping(gk, versions...) 
- if err != nil { - return nil, err - } - return []*meta.RESTMapping{mapping}, nil -} - -func (f *fakeRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - return input, nil -} - -func (f *fakeRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - return []schema.GroupVersionResource{input}, nil -} - -func (f *fakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - switch { - case resource.Group == "" && resource.Resource == "namespaces": - return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}, nil - case resource.Group == "apps" && resource.Resource == "deployments": - return schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, nil - case resource.Group == "" && resource.Resource == "configmaps": - return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, nil - case resource.Group == "" && resource.Resource == "nodes": - return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, nil - case resource.Group == "" && resource.Resource == "endpoints": - return schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"}, nil - } - return schema.GroupVersionKind{}, errors.New("kind not found") -} - -func (f *fakeRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - kind, err := f.KindFor(resource) - if err != nil { - return nil, err - } - return []schema.GroupVersionKind{kind}, nil -} - -func (f *fakeRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - return resource, nil -} - -func TestSortResources(t *testing.T) { - // Create the ingressClass object - ingressClass := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "networking/v1", - "kind": "IngressClass", - "metadata": map[string]interface{}{ - "name": "test", - }, - }, - } - - // Create the Ingress object - ingress := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "networking/v1", - "kind": "Ingress", - "metadata": map[string]interface{}{ - "name": "test-ingress", - "namespace": "test", - }, - }, - } - - // Create the NetworkPolicy object - networkPolicy := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "networking/v1", - "kind": "NetworkPolicy", - "metadata": map[string]interface{}{ - "name": "test-networkpolicy", - "namespace": "test", - }, - }, - } - - // Create the first Namespace object - namespace1 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "test1", - }, - }, - } - - // Create the second Namespace object - namespace2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": map[string]interface{}{ - "name": "test2", - }, - }, - } - - // Create the LimitRange object - limitRange := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "LimitRange", - "metadata": map[string]interface{}{ - "name": "test-limitrange", - "namespace": "test", - }, - }, - } - - // Create the pod object. - pod := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": map[string]interface{}{ - "name": "test-pod", - "namespace": "test", - }, - }, - } - - // Create the ReplicationController object. 
- replicationController := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ReplicationController", - "metadata": map[string]interface{}{ - "name": "test-replicationcontroller", - "namespace": "test", - }, - }, - } - - // Create the ResourceQuota object. - resourceQuota := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ResourceQuota", - "metadata": map[string]interface{}{ - "name": "test-resourcequota", - "namespace": "test", - }, - }, - } - - // Create the Service object. - service := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Service", - "metadata": map[string]interface{}{ - "name": "test-service", - "namespace": "test", - }, - }, - } - - // Create the ServiceAccount object. - serviceAccount := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ServiceAccount", - "metadata": map[string]interface{}{ - "name": "test-serviceaccount", - "namespace": "test", - }, - }, - } - - // Create the PodDisruptionBudget object. - pdb := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "policy/v1", - "kind": "PodDisruptionBudget", - "metadata": map[string]interface{}{ - "name": "test-pdb", - "namespace": "test", - }, - }, - } - - // Create the Deployment object. - deployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "test-nginx", - "namespace": "test", - }, - }, - } - - // Create the v1beta1 Deployment object. - v1beta1Deployment := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1beta1", - "kind": "Deployment", - "metadata": map[string]interface{}{ - "name": "test-nginx1", - "namespace": "test", - }, - }, - } - - // Create the DaemonSet object. - daemonSet := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "DaemonSet", - "metadata": map[string]interface{}{ - "name": "test-daemonset", - "namespace": "test", - }, - }, - } - - // Create the ReplicaSet object. - replicaSet := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "ReplicaSet", - "metadata": map[string]interface{}{ - "name": "test-replicaset", - "namespace": "test", - }, - }, - } - - // Create the StatefulSet object. - statefulSet := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apps/v1", - "kind": "StatefulSet", - "metadata": map[string]interface{}{ - "name": "test-statefulset", - "namespace": "test", - }, - }, - } - - // Create the StorageClass object. - storageClass := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "storage.k8s.io/v1", - "kind": "StorageClass", - "metadata": map[string]interface{}{ - "name": "test-storageclass", - }, - }, - } - - // Create the APIService object. - apiService := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apiregistration.k8s.io/v1", - "kind": "APIService", - "metadata": map[string]interface{}{ - "name": "test-apiservice", - }, - }, - } - - // Create the HorizontalPodAutoscaler object. 
- hpa := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "autoscaling/v1", - "kind": "HorizontalPodAutoscaler", - "metadata": map[string]interface{}{ - "name": "test-hpa", - "namespace": "test", - }, - }, - } - - // Create the PriorityClass object. - priorityClass := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "scheduling.k8s.io/v1", - "kind": "PriorityClass", - "metadata": map[string]interface{}{ - "name": "test-priorityclass", - }, - }, - } - - // Create the ValidatingWebhookConfiguration object. - validatingWebhookConfiguration := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "admissionregistration.k8s.io/v1", - "kind": "ValidatingWebhookConfiguration", - "metadata": map[string]interface{}{ - "name": "test-validatingwebhookconfiguration", - }, - }, - } - - // Create the MutatingWebhookConfiguration object. - mutatingWebhookConfiguration := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "admissionregistration.k8s.io/v1", - "kind": "MutatingWebhookConfiguration", - "metadata": map[string]interface{}{ - "name": "test-mutatingwebhookconfiguration", - }, - }, - } - - // Create the first CustomResourceDefinition object. - crd1 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apiextensions.k8s.io/v1", - "kind": "CustomResourceDefinition", - "metadata": map[string]interface{}{ - "name": "test-crd1", - }, - }, - } - - // Create the second CustomResourceDefinition object. - crd2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "apiextensions.k8s.io/v1", - "kind": "CustomResourceDefinition", - "metadata": map[string]interface{}{ - "name": "test-crd2", - }, - }, - } - - // Create the ClusterRole object. - clusterRole := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "ClusterRole", - "metadata": map[string]interface{}{ - "name": "test-clusterrole", - }, - }, - } - - // Create the ClusterRoleBinding object. - clusterRoleBinindg := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "ClusterRoleBinding", - "metadata": map[string]interface{}{ - "name": "test-clusterrolebinding", - }, - }, - } - - // Create the Role object. - role := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "Role", - "metadata": map[string]interface{}{ - "name": "test-role", - "namespace": "test", - }, - }, - } - - // Create the RoleBinding object. - roleBinding := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "RoleBinding", - "metadata": map[string]interface{}{ - "name": "test-rolebinding", - "namespace": "test", - }, - }, - } - - // Create the Secret object. - secret1 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Secret", - "metadata": map[string]interface{}{ - "name": "test-secret1", - "namespace": "test", - }, - }, - } - - // Create the Secret object. - secret2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Secret", - "metadata": map[string]interface{}{ - "name": "test-secret2", - "namespace": "test", - }, - }, - } - - // Create the ConfigMap object. 
- configMap := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": map[string]interface{}{ - "name": "test-configmap", - "namespace": "test", - }, - }, - } - - // Create the CronJob object. - cronJob := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "batch/v1", - "kind": "CronJob", - "metadata": map[string]interface{}{ - "name": "test-cronjob", - "namespace": "test", - }, - }, - } - - // Create the Job object. - job := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "batch/v1", - "kind": "Job", - "metadata": map[string]interface{}{ - "name": "test-job", - "namespace": "test", - }, - }, - } - - // Create the PersistentVolume object. - pv := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "PersistentVolume", - "metadata": map[string]interface{}{ - "name": "test-pv", - }, - }, - } - - // Create the PersistentVolumeClaim object. - pvc := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": map[string]interface{}{ - "name": "test-pvc", - "namespace": "test", - }, - }, - } - - // Create the test resource. - testResource1 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "test.kubernetes-fleet.io/v1alpha1", - "kind": "TestResource", - "metadata": map[string]interface{}{ - "name": "test-resource1", - "namespace": "test", - }, - }, - } - - // Create the test resource. - testResource2 := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "test.kubernetes-fleet.io/v1alpha1", - "kind": "TestResource", - "metadata": map[string]interface{}{ - "name": "test-resource2", - "namespace": "test", - }, - }, - } - - // Create another test resource. - anotherTestResource := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "test.kubernetes-fleet.io/v1alpha1", - "kind": "AnotherTestResource", - "metadata": map[string]interface{}{ - "name": "another-test-resource", - "namespace": "test", - }, - }, - } - - // Create v1beta1 another test resource. 
- v1beta1AnotherTestResource := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "test.kubernetes-fleet.io/v1beta1", - "kind": "AnotherTestResource", - "metadata": map[string]interface{}{ - "name": "another-test-resource", - "namespace": "test", - }, - }, - } - - tests := map[string]struct { - resources []*unstructured.Unstructured - want []*unstructured.Unstructured - }{ - "should handle empty resources list": { - resources: []*unstructured.Unstructured{}, - want: []*unstructured.Unstructured{}, - }, - "should handle single resource": { - resources: []*unstructured.Unstructured{deployment}, - want: []*unstructured.Unstructured{deployment}, - }, - "should handle multiple resources of all kinds": { - resources: []*unstructured.Unstructured{ingressClass, clusterRole, clusterRoleBinindg, configMap, cronJob, crd1, daemonSet, deployment, testResource1, ingress, job, limitRange, namespace1, networkPolicy, pv, pvc, pod, pdb, replicaSet, replicationController, resourceQuota, role, roleBinding, secret1, service, serviceAccount, statefulSet, storageClass, apiService, hpa, priorityClass, validatingWebhookConfiguration, mutatingWebhookConfiguration}, - want: []*unstructured.Unstructured{priorityClass, namespace1, networkPolicy, resourceQuota, limitRange, pdb, serviceAccount, secret1, configMap, storageClass, pv, pvc, crd1, clusterRole, clusterRoleBinindg, role, roleBinding, service, daemonSet, pod, replicationController, replicaSet, deployment, hpa, statefulSet, job, cronJob, ingressClass, ingress, apiService, mutatingWebhookConfiguration, validatingWebhookConfiguration, testResource1}, - }, - "should handle multiple known resources, different kinds": { - resources: []*unstructured.Unstructured{crd2, crd1, secret2, namespace2, namespace1, secret1}, - want: []*unstructured.Unstructured{namespace1, namespace2, secret1, secret2, crd1, crd2}, - }, - "should handle multiple known resources, same kinds with different versions": { - resources: []*unstructured.Unstructured{v1beta1Deployment, deployment, limitRange}, - want: []*unstructured.Unstructured{limitRange, deployment, v1beta1Deployment}, - }, - "should handle multiple unknown resources, same kinds": { - resources: []*unstructured.Unstructured{testResource2, testResource1}, - want: []*unstructured.Unstructured{testResource1, testResource2}, - }, - "should handle multiple unknown resources, different kinds": { - resources: []*unstructured.Unstructured{testResource1, anotherTestResource}, - want: []*unstructured.Unstructured{anotherTestResource, testResource1}, - }, - "should handle multiple unknown resources, same kinds with different versions": { - resources: []*unstructured.Unstructured{v1beta1AnotherTestResource, anotherTestResource}, - want: []*unstructured.Unstructured{anotherTestResource, v1beta1AnotherTestResource}, - }, - } - - for testName, tt := range tests { - t.Run(testName, func(t *testing.T) { - // run many times to make sure it's stable - for i := 0; i < 10; i++ { - sortResources(tt.resources) - // Check that the returned resources match the expected resources - diff := cmp.Diff(tt.want, tt.resources) - if diff != "" { - t.Errorf("sortResources() mismatch (-want +got):\n%s", diff) - } - } - }) - } -} diff --git a/pkg/controllers/placement/suite_test.go b/pkg/controllers/placement/suite_test.go index 11a003016..641cbc370 100644 --- a/pkg/controllers/placement/suite_test.go +++ b/pkg/controllers/placement/suite_test.go @@ -111,11 +111,7 @@ var _ = BeforeSuite(func() { }) Expect(err).Should(Succeed(), "failed to 
create manager") - reconciler := &Reconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - UncachedReader: mgr.GetAPIReader(), - Recorder: mgr.GetEventRecorderFor(controllerName), + resourceSelectorResolver := controller.ResourceSelectorResolver{ RestMapper: mgr.GetRESTMapper(), InformerManager: informer.NewInformerManager(dynamicClient, 5*time.Minute, ctx.Done()), ResourceConfig: utils.NewResourceConfig(false), @@ -123,6 +119,13 @@ var _ = BeforeSuite(func() { "default": true, }, } + reconciler := &Reconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + UncachedReader: mgr.GetAPIReader(), + Recorder: mgr.GetEventRecorderFor(controllerName), + ResourceSelectorResolver: resourceSelectorResolver, + } opts := options.RateLimitOptions{ RateLimiterBaseDelay: 5 * time.Millisecond, RateLimiterMaxDelay: 60 * time.Second, From 76abb3f535a9c0c22b527efd3a98ee1e70aa77ef Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 5 Feb 2026 12:00:01 -0800 Subject: [PATCH 09/10] fix: clusterProfile controller should handle the MC condition (#427) --- .../clusterprofile/controller.go | 14 ++ .../controller_integration_test.go | 86 ++++++++- .../clusterprofile/controller_test.go | 181 ++++++++++++++++++ .../clusterprofile/suite_test.go | 9 +- 4 files changed, 286 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/clusterinventory/clusterprofile/controller.go b/pkg/controllers/clusterinventory/clusterprofile/controller.go index 1aeea73f7..94517e40d 100644 --- a/pkg/controllers/clusterinventory/clusterprofile/controller.go +++ b/pkg/controllers/clusterinventory/clusterprofile/controller.go @@ -39,6 +39,7 @@ import ( clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) @@ -110,6 +111,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } + // Check if the MemberCluster has joined. + joinedCondition := meta.FindStatusCondition(mc.Status.Conditions, string(clusterv1beta1.ConditionTypeMemberClusterJoined)) + if !condition.IsConditionStatusTrue(joinedCondition, mc.Generation) { + klog.V(2).InfoS("Member cluster has not joined; skip cluster profile reconciliation", "memberCluster", mcRef) + return ctrl.Result{}, nil + } + // Check if the MemberCluster object has the cleanup finalizer; if not, add it. if !controllerutil.ContainsFinalizer(mc, clusterProfileCleanupFinalizer) { mc.Finalizers = append(mc.Finalizers, clusterProfileCleanupFinalizer) @@ -175,6 +183,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // fillInClusterStatus fills in the ClusterProfile status fields from the MemberCluster status. // Currently, it only fills in the Kubernetes version field. 
func (r *Reconciler) fillInClusterStatus(mc *clusterv1beta1.MemberCluster, cp *clusterinventory.ClusterProfile) { + clusterPropertyCondition := meta.FindStatusCondition(mc.Status.Conditions, string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded)) + if !condition.IsConditionStatusTrue(clusterPropertyCondition, mc.Generation) { + klog.V(3).InfoS("Cluster property collection has not succeeded; skip updating the cluster profile status", "memberCluster", klog.KObj(mc), "clusterProfile", klog.KObj(cp)) + return + } + k8sversion, exists := mc.Status.Properties[propertyprovider.K8sVersionProperty] if exists { klog.V(3).InfoS("Get Kubernetes version from member cluster status", "kubernetesVersion", k8sversion.Value, "clusterProfile", klog.KObj(cp)) diff --git a/pkg/controllers/clusterinventory/clusterprofile/controller_integration_test.go b/pkg/controllers/clusterinventory/clusterprofile/controller_integration_test.go index 1f90b415a..49c13c571 100644 --- a/pkg/controllers/clusterinventory/clusterprofile/controller_integration_test.go +++ b/pkg/controllers/clusterinventory/clusterprofile/controller_integration_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" "github.com/kubefleet-dev/kubefleet/pkg/utils" "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" @@ -62,6 +63,22 @@ var _ = Describe("Test ClusterProfile Controller", func() { }) It("Should create a clusterProfile when a member cluster is created", func() { + By("Check the clusterProfile is not created") + Consistently(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) + }, consistentlyDuration, interval).ShouldNot(Succeed(), "clusterProfile is created before member cluster is marked as join") + By("Mark the member cluster as joined") + mc.Status.Conditions = []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeMemberClusterJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + Message: "Member cluster has joined", + LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, + }, + } + Expect(k8sClient.Status().Update(ctx, mc)).Should(Succeed(), "failed to update member cluster status") By("Check the clusterProfile is created") Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) @@ -79,6 +96,7 @@ var _ = Describe("Test ClusterProfile Controller", func() { Reason: "Healthy", Message: "Agent is healthy", LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, }, }, LastReceivedHeartbeat: metav1.Time{Time: time.Now()}, @@ -100,7 +118,19 @@ var _ = Describe("Test ClusterProfile Controller", func() { }, eventuallyTimeout, interval).Should(BeTrue(), "clusterProfile is not created") }) - It("Should recreate a clusterProfile when it is deleted by the user", func() { + It("Should recreate a clusterProfile when it is deleted by the user but properties should not show if MC property collection is not succeeded", func() { + By("Mark the member cluster as joined") + mc.Status.Conditions = []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeMemberClusterJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + Message: "Member cluster has 
joined", + LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, + }, + } + Expect(k8sClient.Status().Update(ctx, mc)).Should(Succeed(), "failed to update member cluster status") By("Check the clusterProfile is created") Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) @@ -111,13 +141,53 @@ var _ = Describe("Test ClusterProfile Controller", func() { Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) }, eventuallyTimeout, interval).Should(Succeed(), "clusterProfile is not created") + By("Check the properties are not created") + Consistently(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile); err != nil { + return false + } + return clusterProfile.Status.AccessProviders == nil || clusterProfile.Status.AccessProviders[0].Cluster.CertificateAuthorityData == nil + }, consistentlyDuration, interval).Should(BeTrue(), "ClusterCertificateAuthority property is created before member cluster is marked as collection succeeded") }) - It("Should update a clusterProfile when it is modified by the user", func() { + It("Should have property filled in clusterProfile created from MemberCluster and reconcile the clusterProfile if changed", func() { + By("Mark the member cluster as joined") + mc.Status.Conditions = []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + Reason: "CollectionSucceeded", + Message: "Cluster property collection succeeded", + LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, + }, + { + Type: string(clusterv1beta1.ConditionTypeMemberClusterJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + Message: "Member cluster has joined", + LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, + }, + } + mc.Status.Properties = map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.ClusterCertificateAuthorityProperty: { + Value: "dummy-ca-data", + ObservationTime: metav1.Time{Time: time.Now()}, + }, + } + Expect(k8sClient.Status().Update(ctx, mc)).Should(Succeed(), "failed to update member cluster status") By("Check the clusterProfile is created") Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) }, eventuallyTimeout, interval).Should(Succeed(), "clusterProfile is not created") + By("Check the properties in clusterProfile") + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile); err != nil { + return false + } + return string(clusterProfile.Status.AccessProviders[0].Cluster.CertificateAuthorityData) == "dummy-ca-data" + }, eventuallyTimeout, interval).Should(BeTrue(), "ClusterCertificateAuthority property is not created") By("Modifying the ClusterProfile") clusterProfile.Spec.DisplayName = "ModifiedMCName" Expect(k8sClient.Update(ctx, &clusterProfile)).Should(Succeed(), "failed to modify clusterProfile") @@ -131,6 +201,18 @@ var _ = Describe("Test ClusterProfile Controller", func() { }) It("Should delete the clusterProfile when the MemberCluster is deleted", func() { + By("Mark the member cluster as joined") + mc.Status.Conditions = 
[]metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeMemberClusterJoined), + Status: metav1.ConditionTrue, + Reason: "Joined", + Message: "Member cluster has joined", + LastTransitionTime: metav1.Time{Time: time.Now()}, + ObservedGeneration: mc.Generation, + }, + } + Expect(k8sClient.Status().Update(ctx, mc)).Should(Succeed(), "failed to update member cluster status") By("Check the clusterProfile is created") Eventually(func() error { return k8sClient.Get(ctx, types.NamespacedName{Namespace: clusterProfileNS, Name: testMCName}, &clusterProfile) diff --git a/pkg/controllers/clusterinventory/clusterprofile/controller_test.go b/pkg/controllers/clusterinventory/clusterprofile/controller_test.go index 0be621ac9..b51ca2a2a 100644 --- a/pkg/controllers/clusterinventory/clusterprofile/controller_test.go +++ b/pkg/controllers/clusterinventory/clusterprofile/controller_test.go @@ -20,13 +20,194 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterinventory "sigs.k8s.io/cluster-inventory-api/apis/v1alpha1" clusterv1beta1 "github.com/kubefleet-dev/kubefleet/apis/cluster/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/propertyprovider" + "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" ) +func TestFillInClusterStatus(t *testing.T) { + reconciler := &Reconciler{} + + tests := []struct { + name string + memberCluster *clusterv1beta1.MemberCluster + clusterProfile *clusterinventory.ClusterProfile + expectVersion bool + expectedK8sVersion string + expectAccessProvider bool + expectedServer string + expectedCAData string + }{ + { + name: "Cluster property collection has not succeeded", + memberCluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Generation: 1, + }, + Status: clusterv1beta1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionFalse, + ObservedGeneration: 1, + }, + }, + }, + }, + clusterProfile: &clusterinventory.ClusterProfile{}, + expectVersion: false, + expectAccessProvider: false, + }, + { + name: "Cluster property collection succeeded but no properties", + memberCluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Generation: 1, + }, + Status: clusterv1beta1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{}, + }, + }, + clusterProfile: &clusterinventory.ClusterProfile{}, + expectVersion: false, + expectAccessProvider: true, + }, + { + name: "Cluster property collection succeeded with k8s version only", + memberCluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Generation: 1, + }, + Status: clusterv1beta1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.K8sVersionProperty: { + Value: "v1.28.0", + }, + }, + }, + }, + clusterProfile: &clusterinventory.ClusterProfile{}, + expectVersion: true, + expectedK8sVersion: "v1.28.0", + expectAccessProvider: 
true, + }, + { + name: "Cluster property collection succeeded with all properties", + memberCluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Generation: 1, + }, + Status: clusterv1beta1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.K8sVersionProperty: { + Value: "v1.29.1", + }, + propertyprovider.ClusterEntryPointProperty: { + Value: "https://api.test-cluster.example.com:6443", + }, + propertyprovider.ClusterCertificateAuthorityProperty: { + Value: "dGVzdC1jYS1kYXRh", + }, + }, + }, + }, + clusterProfile: &clusterinventory.ClusterProfile{}, + expectVersion: true, + expectedK8sVersion: "v1.29.1", + expectAccessProvider: true, + expectedServer: "https://api.test-cluster.example.com:6443", + expectedCAData: "dGVzdC1jYS1kYXRh", + }, + { + name: "Cluster property collection succeeded with partial properties", + memberCluster: &clusterv1beta1.MemberCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Generation: 1, + }, + Status: clusterv1beta1.MemberClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: string(clusterv1beta1.ConditionTypeClusterPropertyCollectionSucceeded), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + Properties: map[clusterv1beta1.PropertyName]clusterv1beta1.PropertyValue{ + propertyprovider.K8sVersionProperty: { + Value: "v1.27.5", + }, + propertyprovider.ClusterEntryPointProperty: { + Value: "https://api.partial-cluster.example.com:6443", + }, + }, + }, + }, + clusterProfile: &clusterinventory.ClusterProfile{}, + expectVersion: true, + expectedK8sVersion: "v1.27.5", + expectAccessProvider: true, + expectedServer: "https://api.partial-cluster.example.com:6443", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reconciler.fillInClusterStatus(tt.memberCluster, tt.clusterProfile) + + expected := clusterinventory.ClusterProfileStatus{} + if tt.expectVersion { + expected.Version.Kubernetes = tt.expectedK8sVersion + } + if tt.expectAccessProvider { + expected.AccessProviders = []clusterinventory.AccessProvider{{ + Name: controller.ClusterManagerName, + }} + if tt.expectedServer != "" { + expected.AccessProviders[0].Cluster.Server = tt.expectedServer + } + if tt.expectedCAData != "" { + expected.AccessProviders[0].Cluster.CertificateAuthorityData = []byte(tt.expectedCAData) + } + } + + if diff := cmp.Diff(expected, tt.clusterProfile.Status); diff != "" { + t.Fatalf("test case `%s` failed diff (-want +got):\n%s", tt.name, diff) + } + }) + } +} + func TestSyncClusterProfileCondition(t *testing.T) { clusterUnhealthyThreshold := 5 * time.Minute reconciler := &Reconciler{ diff --git a/pkg/controllers/clusterinventory/clusterprofile/suite_test.go b/pkg/controllers/clusterinventory/clusterprofile/suite_test.go index f6f869e23..09f61e5cb 100644 --- a/pkg/controllers/clusterinventory/clusterprofile/suite_test.go +++ b/pkg/controllers/clusterinventory/clusterprofile/suite_test.go @@ -25,14 +25,15 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "go.uber.org/zap/zapcore" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" - "k8s.io/klog/v2/textlogger" clusterinventory "sigs.k8s.io/cluster-inventory-api/apis/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -62,6 +63,10 @@ var _ = BeforeSuite(func() { klog.InitFlags(fs) Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) + logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(zapcore.Level(-5))) + klog.SetLogger(logger) + ctrl.SetLogger(logger) + By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("../../../../", "config", "crd", "bases")}, @@ -92,7 +97,7 @@ var _ = BeforeSuite(func() { Metrics: server.Options{ BindAddress: "0", }, - Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), + Logger: logger, }) Expect(err).Should(Succeed()) err = (&Reconciler{ From f14f202202a644991a6c8cd7395d2870821ee636 Mon Sep 17 00:00:00 2001 From: Wei Weng Date: Thu, 5 Feb 2026 20:59:42 +0000 Subject: [PATCH 10/10] update crd installer to 1.24.12 Signed-off-by: Wei Weng --- docker/crd-installer.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/crd-installer.Dockerfile b/docker/crd-installer.Dockerfile index 3055a235f..07d8c6a6c 100644 --- a/docker/crd-installer.Dockerfile +++ b/docker/crd-installer.Dockerfile @@ -1,5 +1,5 @@ # Build the crdinstaller binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.9 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder ARG GOOS=linux ARG GOARCH=amd64