From f0ae939d1e24977433db4f73c33564a9419333a1 Mon Sep 17 00:00:00 2001
From: yemreinci <18687880+yemreinci@users.noreply.github.com>
Date: Wed, 7 Jan 2026 17:20:30 +0000
Subject: [PATCH 1/2] Fix nil pointer dereference in logger (#552)

op is dereferenced for the log line, but it can be nil. Split the if
block into two, since the "concurrency limit reached" log line does not
apply to the case when op is nil.

Example error:

```
[pod/eno-controller-7459646c9-bdctm/eno-controller] {"level":"error","ts":1767623387.6310673,"caller":"runtime/runtime.go:142","msg":"Observed a panic","controller":"schedulingController","reconcileID":"bbaa832f-8868-4f5b-bac8-efe20e8b50ab","panic":"runtime error: invalid memory address or nil pointer dereference","panicGoValue":"\"invalid memory address or nil pointer dereference\"","stacktrace":"goroutine 328 [running]:\nk8s.io/apimachinery/pkg/util/runtime.logPanic({0x1cfe328, 0xc000617b90}, {0x17a8ba0, 0x28ee950})\n\t/go/pkg/mod/k8s.io/apimachinery@v0.34.0/pkg/util/runtime/runtime.go:132 +0xbc\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile.func1()\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:198 +0x10e\npanic({0x17a8ba0?, 0x28ee950?})\n\t/usr/local/go/src/runtime/panic.go:792 +0x132\ngithub.com/Azure/eno/internal/controllers/scheduling.(*controller).Reconcile(0xc0005ab0c0, {0x1cfe328, 0xc000617b90}, {{{0x100?, 0xc000378008?}, {0xc0001cdd80?, 0x0?}}})\n\t/app/internal/controllers/scheduling/controller.go:141 +0xeed\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile(0xc000617ad0?, {0x1cfe328?, 0xc000617b90}, {{{0x0, 0x0?}, {0x0?, 0x0?}}})\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:216 +0x165\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler(0x1d1c3c0, {0x1cfe360, 0xc00047dbd0}, {{{0x0, 0x0}, {0x0, 0x0}}}, 0x0)\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:461 +0x3ad\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem(0x1d1c3c0, {0x1cfe360, 0xc00047dbd0})\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:421 +0x21b\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1()\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:296 +0x85\ncreated by sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1 in goroutine 209\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:292 +0x26b\n","stacktrace":"k8s.io/apimachinery/pkg/util/runtime.logPanic\n\t/go/pkg/mod/k8s.io/apimachinery@v0.34.0/pkg/util/runtime/runtime.go:142\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile.func1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:198\nruntime.gopanic\n\t/usr/local/go/src/runtime/panic.go:792\nruntime.panicmem\n\t/usr/local/go/src/runtime/panic.go:262\nruntime.sigpanic\n\t/usr/local/go/src/runtime/signal_unix.go:925\ngithub.com/Azure/eno/internal/controllers/scheduling.(*controller).Reconcile\n\t/app/internal/controllers/scheduling/controller.go:141\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:216\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:461\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:421\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.0/pkg/internal/controller/controller.go:296"}
```
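For reference, a minimal self-contained sketch of the panic pattern and the fix (the types below are illustrative stand-ins, not the actual eno scheduling types):

```go
package main

import "fmt"

type op struct{ Name string }

// reconcile sketches the fixed control flow: the nil case is handled first
// and silently, because the "concurrency limit reached" message is only
// meaningful when there is a next operation whose name can be logged.
func reconcile(next *op, inFlight, limit int) {
	// Before the fix, the two conditions shared one branch:
	//
	//	if next == nil || inFlight >= limit {
	//		fmt.Println("concurrency limit reached", next.Name) // panics when next == nil
	//		return
	//	}
	if next == nil {
		return
	}
	if inFlight >= limit {
		fmt.Println("concurrency limit reached, deferring synthesis:", next.Name)
		return
	}
	fmt.Println("dispatching:", next.Name)
}

func main() {
	reconcile(nil, 5, 3)               // previously panicked; now a no-op
	reconcile(&op{Name: "comp"}, 5, 3) // defers due to the concurrency limit
}
```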
Co-authored-by: Yunus Inci
---
 internal/controllers/scheduling/controller.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/internal/controllers/scheduling/controller.go b/internal/controllers/scheduling/controller.go
index 85c6cac4..6d31d971 100644
--- a/internal/controllers/scheduling/controller.go
+++ b/internal/controllers/scheduling/controller.go
@@ -137,10 +137,15 @@ func (c *controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 	}
 	freeSynthesisSlots.Set(float64(c.concurrencyLimit - inFlight))
 
-	if op == nil || inFlight >= c.concurrencyLimit {
+	if op == nil {
+		return ctrl.Result{}, nil
+	}
+
+	if inFlight >= c.concurrencyLimit {
 		logger.Info("concurrency limit reached, deferring synthesis", "inFlight", inFlight, "limit", c.concurrencyLimit, "nextCompositionName", op.Composition.Name, "nextCompositionNamespace", op.Composition.Namespace)
 		return ctrl.Result{}, nil
 	}
+
 	if !op.NotBefore.IsZero() { // the next op isn't ready to be dispathced yet
 		if wait := time.Until(op.NotBefore); wait > 0 {
 			logger.Info("synthesis operation not ready, waiting for cooldown", "compositionName", op.Composition.Name, "compositionNamespace", op.Composition.Namespace, "waitDuration", wait, "notBefore", op.NotBefore)

From be21618179e15fcb714f72b5575052e035d65586 Mon Sep 17 00:00:00 2001
From: Ruinan Liu <72321026+ruinan-liu@users.noreply.github.com>
Date: Thu, 15 Jan 2026 11:01:46 -0800
Subject: [PATCH 2/2] [eno] Update Normalization Field Manager Logic (#555)

- Only migrate `f:metadata:annotations`, `f:metadata:labels`, and `f:spec` during the migration.
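For illustration, a minimal self-contained sketch of the prefix gating introduced here, using plain string slices as stand-ins for the real fieldpath.Path type (the -migrating-fields value shown is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// hasPrefix reports whether path starts with prefix, element by element,
// mirroring the element-wise comparison done against fieldpath.Path values.
func hasPrefix(path, prefix []string) bool {
	if len(path) < len(prefix) {
		return false
	}
	for i := range prefix {
		if path[i] != prefix[i] {
			return false
		}
	}
	return true
}

func main() {
	// Parsed form of a hypothetical -migrating-fields=spec,metadata.labels value.
	var prefixes [][]string
	for _, f := range strings.Split("spec,metadata.labels", ",") {
		prefixes = append(prefixes, strings.Split(strings.TrimSpace(f), "."))
	}

	// Field paths a legacy manager might own.
	owned := [][]string{
		{"spec", "replicas"},          // migrated (under "spec")
		{"metadata", "labels", "app"}, // migrated (under "metadata.labels")
		{"metadata", "finalizers"},    // stays with the legacy manager
		{"status", "ready"},           // stays with the legacy manager
	}
	for _, p := range owned {
		migrate := false
		for _, pre := range prefixes {
			if hasPrefix(p, pre) {
				migrate = true
				break
			}
		}
		fmt.Printf("%-25s migrate=%v\n", strings.Join(p, "."), migrate)
	}
}
```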
Tested e2e pipeline passed:
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149448228&view=results
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149448289&view=results
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149448264&view=results
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149448364&view=results
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149448306&view=results
https://msazure.visualstudio.com/CloudNativeCompute/_build/results?buildId=149449489&view=results
https://dev.azure.com/msazure/CloudNativeCompute/_build/results?buildId=149448405&view=results
---
 cmd/eno-reconciler/main.go                            |   9 +
 internal/controllers/reconciliation/controller.go     |   5 +-
 internal/controllers/reconciliation/overrides_test.go |   2 +
 internal/resource/fieldmanager.go                     | 114 +++-
 internal/resource/fieldmanager_test.go                | 492 +++++++++++++++++-
 5 files changed, 606 insertions(+), 16 deletions(-)

diff --git a/cmd/eno-reconciler/main.go b/cmd/eno-reconciler/main.go
index 9ed0e7b9..d1b6abf8 100644
--- a/cmd/eno-reconciler/main.go
+++ b/cmd/eno-reconciler/main.go
@@ -43,6 +43,7 @@ func run() error {
 		namespaceCleanup       bool
 		enoBuildVersion        string
 		migratingFieldManagers string
+		migratingFields        string
 
 		mgrOpts = &manager.Options{
 			Rest: ctrl.GetConfigOrDie(),
@@ -64,6 +65,7 @@ func run() error {
 	flag.BoolVar(&namespaceCleanup, "namespace-cleanup", true, "Clean up orphaned resources caused by namespace force-deletions")
 	flag.BoolVar(&recOpts.FailOpen, "fail-open", false, "Report that resources are reconciled once they've been seen, even if reconciliation failed. Overridden by individual resources with 'eno.azure.io/fail-open: true|false'")
 	flag.StringVar(&migratingFieldManagers, "migrating-field-managers", os.Getenv("MIGRATING_FIELD_MANAGERS"), "Comma-separated list of Kubernetes SSA field manager names to take ownership from during migrations")
+	flag.StringVar(&migratingFields, "migrating-fields", os.Getenv("MIGRATING_FIELDS"), "Comma-separated list of Kubernetes fields (metadata.labels, spec, stringData...) whose ownership should be migrated to eno")
 
 	mgrOpts.Bind(flag.CommandLine)
 	flag.Parse()
@@ -135,6 +137,13 @@ func run() error {
 		}
 	}
 
+	if migratingFields != "" {
+		recOpts.MigratingFields = strings.Split(migratingFields, ",")
+		for i := range recOpts.MigratingFields {
+			recOpts.MigratingFields[i] = strings.TrimSpace(recOpts.MigratingFields[i])
+		}
+	}
+
 	err = reconciliation.New(mgr, recOpts)
 	if err != nil {
 		return fmt.Errorf("constructing reconciliation controller: %w", err)
diff --git a/internal/controllers/reconciliation/controller.go b/internal/controllers/reconciliation/controller.go
index 60adee8d..ffabbb45 100644
--- a/internal/controllers/reconciliation/controller.go
+++ b/internal/controllers/reconciliation/controller.go
@@ -38,6 +38,7 @@ type Options struct {
 	DisableServerSideApply bool
 	FailOpen               bool
 	MigratingFieldManagers []string
+	MigratingFields        []string
 
 	Timeout               time.Duration
 	ReadinessPollInterval time.Duration
@@ -56,6 +57,7 @@ type Controller struct {
 	disableSSA             bool
 	failOpen               bool
 	migratingFieldManagers []string
+	migratingFields        []string
 }
 
 func New(mgr ctrl.Manager, opts Options) error {
@@ -83,6 +85,7 @@ func New(mgr ctrl.Manager, opts Options) error {
 		disableSSA:             opts.DisableServerSideApply,
 		failOpen:               opts.FailOpen,
 		migratingFieldManagers: opts.MigratingFieldManagers,
+		migratingFields:        opts.MigratingFields,
 	}
 
 	return builder.TypedControllerManagedBy[resource.Request](mgr).
@@ -296,7 +299,7 @@ func (c *Controller) reconcileSnapshot(ctx context.Context, comp *apiv1.Composit
 	// subsequent SSA Apply will treat eno as the sole owner and automatically merge the managedFields
 	// entries into a single consolidated entry for eno.
 	if current != nil && len(c.migratingFieldManagers) > 0 {
-		wasModified, err := resource.NormalizeConflictingManagers(ctx, current, c.migratingFieldManagers)
+		wasModified, err := resource.NormalizeConflictingManagers(ctx, current, c.migratingFieldManagers, c.migratingFields)
 		if err != nil {
 			return false, fmt.Errorf("normalize conflicting manager failed: %w", err)
 		}
diff --git a/internal/controllers/reconciliation/overrides_test.go b/internal/controllers/reconciliation/overrides_test.go
index 8f18a546..4bb27ce3 100644
--- a/internal/controllers/reconciliation/overrides_test.go
+++ b/internal/controllers/reconciliation/overrides_test.go
@@ -881,6 +881,7 @@ func TestMigratingFieldManagers(t *testing.T) {
 		ReadinessPollInterval:  time.Hour,
 		DisableServerSideApply: mgr.NoSsaSupport,
 		MigratingFieldManagers: []string{"legacy-tool"},
+		MigratingFields:        []string{"data"},
 	})
 	mgr.Start(t)
 	_, comp := writeGenericComposition(t, upstream)
@@ -997,6 +998,7 @@ func TestMigratingFieldManagersFieldRemoval(t *testing.T) {
 		ReadinessPollInterval:  time.Hour,
 		DisableServerSideApply: mgr.NoSsaSupport,
 		MigratingFieldManagers: []string{"legacy-tool"},
+		MigratingFields:        []string{"data"},
 	})
 	mgr.Start(t)
 	_, comp := writeGenericComposition(t, upstream)
diff --git a/internal/resource/fieldmanager.go b/internal/resource/fieldmanager.go
index ba318094..492a3391 100644
--- a/internal/resource/fieldmanager.go
+++ b/internal/resource/fieldmanager.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"fmt"
 	"slices"
+	"strings"
 
 	"github.com/go-logr/logr"
 	"k8s.io/apimachinery/pkg/api/equality"
@@ -17,6 +18,26 @@ const (
 	enoManager = "eno"
 )
 
+// parseFieldPaths converts a slice of dot-separated field path strings into fieldpath.Path objects.
+// For example: "metadata.labels" -> fieldpath.MakePathOrDie("metadata", "labels")
+func parseFieldPaths(fields []string) []fieldpath.Path {
+	paths := make([]fieldpath.Path, 0, len(fields))
+	for _, field := range fields {
+		if field == "" {
+			continue
+		}
+		// Split on dots to handle nested paths like "metadata.labels"
+		parts := strings.Split(field, ".")
+		// Convert string parts to interface{} for MakePathOrDie
+		pathParts := make([]interface{}, len(parts))
+		for i, part := range parts {
+			pathParts[i] = part
+		}
+		paths = append(paths, fieldpath.MakePathOrDie(pathParts...))
+	}
+	return paths
+}
+
 // MergeEnoManagedFields corrects managed fields drift to ensure Eno can remove fields
 // that are no longer set by the synthesizer, even when another client corrupts the
 // managed fields metadata. Returns corrected managed fields, affected field paths,
@@ -135,7 +156,7 @@ func compareEnoManagedFields(a, b []metav1.ManagedFieldsEntry) bool {
 	return equality.Semantic.DeepEqual(a[ai].FieldsV1, b[ab].FieldsV1)
 }
 
-func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Unstructured, migratingManagers []string) (modified bool, err error) {
+func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Unstructured, migratingManagers []string, migratingFields []string) (modified bool, err error) {
 	managedFields := current.GetManagedFields()
 	logger := logr.FromContextOrDiscard(ctx)
 	logger.Info("NormalizingConflictingManager", "Name", current.GetName(), "Namespace", current.GetNamespace())
@@ -143,11 +164,14 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns
 		return false, nil
 	}
 
+	// Parse the allowed field paths from the configuration
+	allowedPrefixes := parseFieldPaths(migratingFields)
+
 	// Build the unique list of managers to migrate from user-provided migratingManagers
 	uniqueMigratingManagers := buildUniqueManagersList(migratingManagers)
 
 	// Check if normalization is needed
-	hasLegacyManager, enoEntryCount, err := analyzeManagerConflicts(managedFields, uniqueMigratingManagers)
+	hasLegacyManager, enoEntryCount, err := analyzeManagerConflicts(managedFields, uniqueMigratingManagers, allowedPrefixes)
 	if err != nil {
 		return false, err
 	}
@@ -157,7 +181,7 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns
 	}
 
 	// Merge all eno entries first to get the combined fieldset
-	mergedEnoSet, mergedEnoTime := mergeEnoEntries(managedFields)
+	mergedEnoSet, mergedEnoTime := mergeEnoEntries(managedFields, allowedPrefixes)
 
 	// Build new managedFields list, merging legacy managers into eno and excluding original eno entries
 	newManagedFields := make([]metav1.ManagedFieldsEntry, 0, len(managedFields))
@@ -189,12 +213,28 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns
 		logger.Info("NormalizeConflictingManagers found migrating managers", "manager", entry.Manager, "resoruceName", current.GetName(), "resourceNamespace", current.GetNamespace())
 
 		// Check if this is a legacy manager that should be migrated to eno
-		// Merge legacy manager's fields into the eno fieldset instead of creating a separate entry
+		// Separate allowed fields (to migrate) from excluded fields (to keep with legacy manager)
 		if mergedEnoSet == nil {
 			mergedEnoSet = &fieldpath.Set{}
 		}
 		if set := parseFieldsEntry(*entry); set != nil {
-			mergedEnoSet = mergedEnoSet.Union(set)
+			// Filter to only include allowed field paths for migration
+			allowedFields := filterAllowedFieldPaths(set, allowedPrefixes)
+			if !allowedFields.Empty() {
+				mergedEnoSet = mergedEnoSet.Union(allowedFields)
+			}
+
+			// Keep the legacy manager entry if it has excluded fields
+			excludedFields := set.Difference(allowedFields)
+			if !excludedFields.Empty() {
+				// Create a new entry with only the excluded fields
+				js, err := excludedFields.ToJSON()
+				if err == nil {
+					entryCopy := *entry
+					entryCopy.FieldsV1 = &metav1.FieldsV1{Raw: js}
+					newManagedFields = append(newManagedFields, entryCopy)
+				}
+			}
 		}
 		// Update the timestamp to the most recent
 		if mergedEnoTime == nil || (entry.Time != nil && entry.Time.After(mergedEnoTime.Time)) {
@@ -203,13 +243,14 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns
 		modified = true
 	}
 
-	// Add the merged eno entry if we found any eno entries
+	// Add the merged eno entry if we found any eno entries OR migrated any legacy manager fields
 	if mergedEnoSet != nil && !mergedEnoSet.Empty() {
 		mergedEntry, err := createMergedEnoEntry(mergedEnoSet, mergedEnoTime, managedFields)
 		if err != nil {
 			return false, err
 		}
 		newManagedFields = append(newManagedFields, mergedEntry)
+		modified = true // Ensure modified is true when we create/update the eno entry
 	}
 
 	if modified {
@@ -234,9 +275,11 @@ func buildUniqueManagersList(migratingManagers []string) map[string]bool {
 	return unique
 }
 
-// analyzeManagerConflicts checks if there are legacy managers present
-// and counts the number of eno entries
-func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMigratingManagers map[string]bool) (hasLegacyManager bool, enoEntryCount int, err error) {
+// analyzeManagerConflicts checks if there are legacy managers present that own allowed fields
+// and counts the number of eno entries. Only returns hasLegacyManager=true if a legacy manager
+// owns at least one field from the allowed list (spec, data, etc.), preventing infinite loops
+// when legacy managers only own excluded fields (status, finalizers, etc.).
+func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMigratingManagers map[string]bool, allowedPrefixes []fieldpath.Path) (hasLegacyManager bool, enoEntryCount int, err error) {
 	for i := range managedFields {
 		entry := &managedFields[i]
 
@@ -247,7 +290,13 @@ func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMi
 
 		// Check if this is a legacy manager we need to normalize
 		if uniqueMigratingManagers[entry.Manager] {
-			hasLegacyManager = true
+			// Only consider it a legacy manager needing migration if it owns at least one allowed field
+			if set := parseFieldsEntry(*entry); set != nil {
+				allowedFields := filterAllowedFieldPaths(set, allowedPrefixes)
+				if !allowedFields.Empty() {
+					hasLegacyManager = true
+				}
+			}
 		}
 	}
 
@@ -255,8 +304,8 @@ func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMi
 }
 
 // mergeEnoEntries merges all eno Apply entries into a single fieldpath.Set
-// and tracks the most recent timestamp
-func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry) (*fieldpath.Set, *metav1.Time) {
+// and tracks the most recent timestamp. Filters the result to only include allowed field paths.
+func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry, allowedPrefixes []fieldpath.Path) (*fieldpath.Set, *metav1.Time) {
 	var mergedSet *fieldpath.Set
 	var latestTime *metav1.Time
 
@@ -268,7 +317,11 @@ func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry) (*fieldpath.Set,
 			mergedSet = &fieldpath.Set{}
 		}
 		if set := parseFieldsEntry(*entry); set != nil {
-			mergedSet = mergedSet.Union(set)
+			// Filter to only include allowed field paths
+			filteredSet := filterAllowedFieldPaths(set, allowedPrefixes)
+			if !filteredSet.Empty() {
+				mergedSet = mergedSet.Union(filteredSet)
+			}
 		}
 		if latestTime == nil || (entry.Time != nil && entry.Time.After(latestTime.Time)) {
 			latestTime = entry.Time
@@ -309,3 +362,38 @@ func createMergedEnoEntry(mergedSet *fieldpath.Set, timestamp *metav1.Time, mana
 		FieldsV1: &metav1.FieldsV1{Raw: js},
 	}, nil
 }
+
+// filterAllowedFieldPaths filters a fieldpath.Set to only include paths that are safe to migrate.
+// The allowedPrefixes parameter specifies which field paths should be migrated.
+// Common examples: spec.*, data.*, stringData.*, binaryData.*, metadata.labels.*, metadata.annotations.*
+// Excluded paths: metadata.finalizers, metadata.deletionTimestamp, metadata.ownerReferences, status.*, and other metadata fields
+func filterAllowedFieldPaths(set *fieldpath.Set, allowedPrefixes []fieldpath.Path) *fieldpath.Set {
+	if set == nil || set.Empty() {
+		return &fieldpath.Set{}
+	}
+
+	filtered := &fieldpath.Set{}
+	set.Iterate(func(path fieldpath.Path) {
+		for _, prefix := range allowedPrefixes {
+			if hasPrefix(path, prefix) {
+				filtered.Insert(path)
+				break
+			}
+		}
+	})
+
+	return filtered
+}
+
+// hasPrefix checks if a path starts with the given prefix
+func hasPrefix(path, prefix fieldpath.Path) bool {
+	if len(path) < len(prefix) {
+		return false
+	}
+	for i, elem := range prefix {
+		if !path[i].Equals(elem) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/internal/resource/fieldmanager_test.go b/internal/resource/fieldmanager_test.go
index 59f2642f..1411b092 100644
--- a/internal/resource/fieldmanager_test.go
+++ b/internal/resource/fieldmanager_test.go
@@ -612,7 +612,7 @@ func TestNormalizeConflictingManagers(t *testing.T) {
 			obj := &unstructured.Unstructured{}
 			obj.SetManagedFields(tc.managedFields)
 
-			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers)
+			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"})
 
 			require.NoError(t, err)
 			assert.Equal(t, tc.expectModified, modified)
@@ -828,7 +828,7 @@ func TestNormalizeConflictingManagers_FieldMerging(t *testing.T) {
 			obj := &unstructured.Unstructured{}
 			obj.SetManagedFields(tc.managedFields)
 
-			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers)
+			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"})
 
 			require.NoError(t, err)
 			assert.Equal(t, tc.expectModified, modified)
@@ -870,3 +870,491 @@ func TestNormalizeConflictingManagers_FieldMerging(t *testing.T) {
 		})
 	}
 }
+
+func TestNormalizeConflictingManagers_AnnotationGating(t *testing.T) {
+	tests := []struct {
+		name              string
+		annotations       map[string]string
+		managedFields     []metav1.ManagedFieldsEntry
+		migratingManagers []string
+		expectModified    bool
+	}{
+		{
+			name: "migration occurs even with annotation present (no longer gates)",
+			annotations: map[string]string{
+				"eno.azure.com/ownership-migrated": "v1",
+			},
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			migratingManagers: []string{"kubectl"},
+			expectModified:    true,
+		},
+		{
+			name:        "migration occurs when annotation not present",
+			annotations: nil,
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			migratingManagers: []string{"kubectl"},
+			expectModified:    true,
+		},
+		{
+			name: "migration occurs when annotation present with wrong value",
+			annotations: map[string]string{
+				"eno.azure.com/ownership-migrated": "v0",
+			},
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			migratingManagers: []string{"kubectl"},
+			expectModified:    true,
+		},
+		{
+			name: "no-op when no conflicting managers present",
+			annotations: map[string]string{
+				"other-annotation": "value",
+			},
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "eno",
+					Operation:  metav1.ManagedFieldsOperationApply,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
+				},
+			},
+			migratingManagers: []string{"kubectl"},
+			expectModified:    false,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			obj := &unstructured.Unstructured{}
+			obj.SetManagedFields(tc.managedFields)
+			if tc.annotations != nil {
+				obj.SetAnnotations(tc.annotations)
+			}
+
+			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"})
+
+			require.NoError(t, err)
+			assert.Equal(t, tc.expectModified, modified)
+
+			// Verify that annotations are not modified by the function
+			// (the function should not set or remove the migration annotation)
+			originalAnnotations := tc.annotations
+			currentAnnotations := obj.GetAnnotations()
+
+			if originalAnnotations == nil {
+				assert.Nil(t, currentAnnotations, "annotations should remain nil if they were originally nil")
+			} else {
+				assert.Equal(t, originalAnnotations, currentAnnotations, "annotations should not be modified")
+			}
+		})
+	}
+}
+
+func TestNormalizeConflictingManagers_FieldPathFiltering(t *testing.T) {
+	tests := []struct {
+		name                  string
+		managedFields         []metav1.ManagedFieldsEntry
+		migratingManagers     []string
+		expectModified        bool
+		expectEnoFields       []string
+		expectLegacyStillOwns []string
+	}{
+		{
+			name: "only spec, metadata.labels, metadata.annotations migrated",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}, "f:finalizers":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas", ".metadata.labels.app", ".metadata.annotations.note"},
+			expectLegacyStillOwns: []string{".metadata.finalizers"},
+		},
+		{
+			name: "status fields not migrated",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:status":{"f:ready":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas"},
+			expectLegacyStillOwns: []string{".status.ready"},
+		},
+		{
+			name: "deletionTimestamp not migrated",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:deletionTimestamp":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas"},
+			expectLegacyStillOwns: []string{".metadata.deletionTimestamp"},
+		},
+		{
+			name: "multiple eno entries merged with filtering",
filtering", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "eno", + Operation: metav1.ManagedFieldsOperationApply, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:status":{"f:ready":{}}}`)}, + }, + { + Manager: "eno", + Operation: metav1.ManagedFieldsOperationApply, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:paused":{}}, "f:metadata":{"f:finalizers":{}}}`)}, + }, + }, + migratingManagers: []string{}, + expectModified: true, + expectEnoFields: []string{".spec.replicas", ".spec.paused"}, + expectLegacyStillOwns: []string{}, // status and finalizers are filtered out entirely + }, + { + name: "complex nested spec paths migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:template":{"f:spec":{"f:containers":{}}}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.template.spec.containers"}, + }, + { + name: "other metadata fields not migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:ownerReferences":{},"f:resourceVersion":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas"}, + expectLegacyStillOwns: []string{".metadata.ownerReferences", ".metadata.resourceVersion"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetManagedFields(tc.managedFields) + + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}) + + require.NoError(t, err) + assert.Equal(t, tc.expectModified, modified) + + // Find the eno entry + var enoEntry *metav1.ManagedFieldsEntry + for _, entry := range obj.GetManagedFields() { + if entry.Manager == "eno" && entry.Operation == metav1.ManagedFieldsOperationApply { + enoEntry = &entry + break + } + } + + require.NotNil(t, enoEntry, "expected to find an eno managed fields entry") + require.NotNil(t, enoEntry.FieldsV1, "expected eno entry to have FieldsV1") + + // Deserialize the FieldsV1 JSON to verify expected fields exist + var enoFieldsMap map[string]interface{} + err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap) + require.NoError(t, err) + + // Verify all expected fields are present in eno entry + for _, expectedPath := range tc.expectEnoFields { + components := parseFieldPath(expectedPath) + assert.True(t, hasNestedField(enoFieldsMap, components), + "expected eno entry to contain field path: %s (components: %v)", expectedPath, components) + } + + // Verify excluded fields are NOT in eno entry + for _, excludedPath := range tc.expectLegacyStillOwns { + components := parseFieldPath(excludedPath) + assert.False(t, hasNestedField(enoFieldsMap, components), + "expected eno entry to NOT contain field path: %s (components: %v)", excludedPath, components) + } + + // Verify that legacy managers still own the excluded fields (if any) + if len(tc.expectLegacyStillOwns) > 0 && len(tc.migratingManagers) > 0 { + // Find any remaining legacy manager entries (they should still 
+				for _, entry := range obj.GetManagedFields() {
+					for _, migratingMgr := range tc.migratingManagers {
+						if entry.Manager == migratingMgr && entry.FieldsV1 != nil {
+							var legacyFieldsMap map[string]interface{}
+							err = json.Unmarshal(entry.FieldsV1.Raw, &legacyFieldsMap)
+							require.NoError(t, err)
+
+							// Verify the legacy manager still owns at least one excluded field
+							for _, excludedPath := range tc.expectLegacyStillOwns {
+								components := parseFieldPath(excludedPath)
+								if hasNestedField(legacyFieldsMap, components) {
+									// Good - at least one excluded field is still owned by legacy manager
+									return
+								}
+							}
+						}
+					}
+				}
+			}
+		})
+	}
+}
+
+// TestNormalizeConflictingManagers_CustomMigratingFields tests that only the specified
+// migratingFields are migrated, and fields outside that list remain with their original managers.
+func TestNormalizeConflictingManagers_CustomMigratingFields(t *testing.T) {
+	tests := []struct {
+		name                  string
+		managedFields         []metav1.ManagedFieldsEntry
+		migratingManagers     []string
+		migratingFields       []string
+		expectModified        bool
+		expectEnoFields       []string
+		expectLegacyStillOwns []string
+		expectLegacyManager   string
+	}{
+		{
+			name: "only migrate spec when migratingFields contains only spec",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			migratingFields:       []string{"spec"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas"},
+			expectLegacyStillOwns: []string{".data.key1", ".stringData.key2"},
+			expectLegacyManager:   "kubectl",
+		},
+		{
+			name: "only migrate data when migratingFields contains only data",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "helm",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:metadata":{"f:labels":{"f:app":{}}}}`)},
+				},
+			},
+			migratingManagers:     []string{"helm"},
+			migratingFields:       []string{"data"},
+			expectModified:        true,
+			expectEnoFields:       []string{".data.key1"},
+			expectLegacyStillOwns: []string{".spec.replicas", ".metadata.labels.app"},
+			expectLegacyManager:   "helm",
+		},
+		{
+			name: "migrate metadata.labels but not metadata.annotations",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			migratingFields:       []string{"metadata.labels"},
+			expectModified:        true,
+			expectEnoFields:       []string{".metadata.labels.app"},
+			expectLegacyStillOwns: []string{".spec.replicas", ".metadata.annotations.note"},
+			expectLegacyManager:   "kubectl",
+		},
+		{
+			name: "empty migratingFields should not migrate anything",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}}`)},
+				},
+			},
+			migratingManagers: []string{"kubectl"},
+			migratingFields:   []string{},
+			expectModified:    false,
+			expectEnoFields:   []string{},
+			expectLegacyStillOwns: []string{".spec.replicas", ".data.key1"},
+			expectLegacyManager:   "kubectl",
+		},
+		{
+			name: "combine spec and data but exclude stringData and binaryData",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "helm-controller",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}, "f:binaryData":{"f:key3":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"helm-controller"},
+			migratingFields:       []string{"spec", "data"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas", ".data.key1"},
+			expectLegacyStillOwns: []string{".stringData.key2", ".binaryData.key3"},
+			expectLegacyManager:   "helm-controller",
+		},
+		{
+			name: "migrate all default fields",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "kubectl",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}, "f:binaryData":{"f:key3":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}}}`)},
+				},
+			},
+			migratingManagers:     []string{"kubectl"},
+			migratingFields:       []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.replicas", ".data.key1", ".stringData.key2", ".binaryData.key3", ".metadata.labels.app", ".metadata.annotations.note"},
+			expectLegacyStillOwns: []string{},
+			expectLegacyManager:   "",
+		},
+		{
+			name: "migrate nested spec.template but not top-level spec.replicas",
+			managedFields: []metav1.ManagedFieldsEntry{
+				{
+					Manager:    "custom-operator",
+					Operation:  metav1.ManagedFieldsOperationUpdate,
+					FieldsType: "FieldsV1",
+					FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}, "f:template":{"f:spec":{"f:containers":{}}}}, "f:status":{"f:ready":{}}}`)},
+				},
+			},
+			migratingManagers:     []string{"custom-operator"},
+			migratingFields:       []string{"spec.template"},
+			expectModified:        true,
+			expectEnoFields:       []string{".spec.template.spec.containers"},
+			expectLegacyStillOwns: []string{".status.ready"},
+			expectLegacyManager:   "custom-operator",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			obj := &unstructured.Unstructured{}
+			obj.SetManagedFields(tc.managedFields)
+
+			modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, tc.migratingFields)
+
+			require.NoError(t, err)
+			assert.Equal(t, tc.expectModified, modified, "expected modified=%v, got %v", tc.expectModified, modified)
+
+			managedFields := obj.GetManagedFields()
+
+			// Find the eno entry
+			var enoEntry *metav1.ManagedFieldsEntry
+			for _, entry := range managedFields {
+				if entry.Manager == "eno" && entry.Operation == metav1.ManagedFieldsOperationApply {
+					enoEntry = &entry
+					break
+				}
+			}
+
+			if len(tc.expectEnoFields) > 0 {
+				require.NotNil(t, enoEntry, "expected to find an eno managed fields entry")
+				require.NotNil(t, enoEntry.FieldsV1, "expected eno entry to have FieldsV1")
+
+				// Deserialize the FieldsV1 JSON to verify expected fields are present
+				var enoFieldsMap map[string]interface{}
+				err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap)
+				require.NoError(t, err)
+
+				// Verify all expected fields are present in eno
+				for _, expectedPath := range tc.expectEnoFields {
+					components := parseFieldPath(expectedPath)
+					assert.True(t, hasNestedField(enoFieldsMap, components), "expected eno entry to contain field path: %s (components: %v)", expectedPath, components)
+				}
+			} else if tc.expectModified {
+				// If we expect modification but no eno fields, something is wrong
+				t.Errorf("expected modification but no eno fields specified")
+			}
+
+			// Verify that fields outside migratingFields remain with the legacy manager
+			if len(tc.expectLegacyStillOwns) > 0 {
+				var legacyEntry *metav1.ManagedFieldsEntry
+				for _, entry := range managedFields {
+					if entry.Manager == tc.expectLegacyManager {
+						legacyEntry = &entry
+						break
+					}
+				}
+
+				require.NotNil(t, legacyEntry, "expected to find legacy manager %s", tc.expectLegacyManager)
+				require.NotNil(t, legacyEntry.FieldsV1, "expected legacy manager to have FieldsV1")
+
+				var legacyFieldsMap map[string]interface{}
+				err = json.Unmarshal(legacyEntry.FieldsV1.Raw, &legacyFieldsMap)
+				require.NoError(t, err)
+
+				// Verify all expected fields are still owned by legacy manager
+				for _, expectedPath := range tc.expectLegacyStillOwns {
+					components := parseFieldPath(expectedPath)
+					assert.True(t, hasNestedField(legacyFieldsMap, components), "expected legacy manager %s to still own field path: %s (components: %v)", tc.expectLegacyManager, expectedPath, components)
+				}
+
+				// Verify that eno does NOT own the fields that should remain with legacy manager
+				if enoEntry != nil && enoEntry.FieldsV1 != nil {
+					var enoFieldsMap map[string]interface{}
+					err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap)
+					require.NoError(t, err)
+
+					for _, excludedPath := range tc.expectLegacyStillOwns {
+						components := parseFieldPath(excludedPath)
+						assert.False(t, hasNestedField(enoFieldsMap, components), "eno should NOT own field path: %s (components: %v), it should remain with %s", excludedPath, components, tc.expectLegacyManager)
+					}
+				}
+			}
+		})
+	}
+}