diff --git a/cmd/eno-reconciler/main.go b/cmd/eno-reconciler/main.go
index 9ed0e7b9..d1b6abf8 100644
--- a/cmd/eno-reconciler/main.go
+++ b/cmd/eno-reconciler/main.go
@@ -43,6 +43,7 @@ func run() error {
 		namespaceCleanup       bool
 		enoBuildVersion        string
 		migratingFieldManagers string
+		migratingFields        string
 
 		mgrOpts = &manager.Options{
 			Rest: ctrl.GetConfigOrDie(),
@@ -64,6 +65,7 @@ func run() error {
 	flag.BoolVar(&namespaceCleanup, "namespace-cleanup", true, "Clean up orphaned resources caused by namespace force-deletions")
 	flag.BoolVar(&recOpts.FailOpen, "fail-open", false, "Report that resources are reconciled once they've been seen, even if reconciliation failed. Overridden by individual resources with 'eno.azure.io/fail-open: true|false'")
 	flag.StringVar(&migratingFieldManagers, "migrating-field-managers", os.Getenv("MIGRATING_FIELD_MANAGERS"), "Comma-separated list of Kubernetes SSA field manager names to take ownership from during migrations")
+	flag.StringVar(&migratingFields, "migrating-fields", os.Getenv("MIGRATING_FIELDS"), "Comma-separated list of Kubernetes field paths (metadata.labels, spec, stringData, ...) whose ownership should be migrated to eno")
 	mgrOpts.Bind(flag.CommandLine)
 	flag.Parse()
 
@@ -135,6 +137,13 @@ func run() error {
 		}
 	}
 
+	if migratingFields != "" {
+		recOpts.MigratingFields = strings.Split(migratingFields, ",")
+		for i := range recOpts.MigratingFields {
+			recOpts.MigratingFields[i] = strings.TrimSpace(recOpts.MigratingFields[i])
+		}
+	}
+
 	err = reconciliation.New(mgr, recOpts)
 	if err != nil {
 		return fmt.Errorf("constructing reconciliation controller: %w", err)
diff --git a/internal/controllers/reconciliation/controller.go b/internal/controllers/reconciliation/controller.go
index 60adee8d..ffabbb45 100644
--- a/internal/controllers/reconciliation/controller.go
+++ b/internal/controllers/reconciliation/controller.go
@@ -38,6 +38,7 @@ type Options struct {
 	DisableServerSideApply bool
 	FailOpen               bool
 	MigratingFieldManagers []string
+	MigratingFields        []string
 
 	Timeout               time.Duration
 	ReadinessPollInterval time.Duration
@@ -56,6 +57,7 @@ type Controller struct {
 	disableSSA             bool
 	failOpen               bool
 	migratingFieldManagers []string
+	migratingFields        []string
 }
 
 func New(mgr ctrl.Manager, opts Options) error {
@@ -83,6 +85,7 @@ func New(mgr ctrl.Manager, opts Options) error {
 		disableSSA:             opts.DisableServerSideApply,
 		failOpen:               opts.FailOpen,
 		migratingFieldManagers: opts.MigratingFieldManagers,
+		migratingFields:        opts.MigratingFields,
 	}
 
 	return builder.TypedControllerManagedBy[resource.Request](mgr).
@@ -296,7 +299,7 @@ func (c *Controller) reconcileSnapshot(ctx context.Context, comp *apiv1.Composit
 	// subsequent SSA Apply will treat eno as the sole owner and automatically merge the managedFields
 	// entries into a single consolidated entry for eno.
if current != nil && len(c.migratingFieldManagers) > 0 { - wasModified, err := resource.NormalizeConflictingManagers(ctx, current, c.migratingFieldManagers) + wasModified, err := resource.NormalizeConflictingManagers(ctx, current, c.migratingFieldManagers, c.migratingFields) if err != nil { return false, fmt.Errorf("normalize conflicting manager failed: %w", err) } diff --git a/internal/controllers/reconciliation/overrides_test.go b/internal/controllers/reconciliation/overrides_test.go index 8f18a546..4bb27ce3 100644 --- a/internal/controllers/reconciliation/overrides_test.go +++ b/internal/controllers/reconciliation/overrides_test.go @@ -881,6 +881,7 @@ func TestMigratingFieldManagers(t *testing.T) { ReadinessPollInterval: time.Hour, DisableServerSideApply: mgr.NoSsaSupport, MigratingFieldManagers: []string{"legacy-tool"}, + MigratingFields: []string{"data"}, }) mgr.Start(t) _, comp := writeGenericComposition(t, upstream) @@ -997,6 +998,7 @@ func TestMigratingFieldManagersFieldRemoval(t *testing.T) { ReadinessPollInterval: time.Hour, DisableServerSideApply: mgr.NoSsaSupport, MigratingFieldManagers: []string{"legacy-tool"}, + MigratingFields: []string{"data"}, }) mgr.Start(t) _, comp := writeGenericComposition(t, upstream) diff --git a/internal/resource/fieldmanager.go b/internal/resource/fieldmanager.go index ba318094..492a3391 100644 --- a/internal/resource/fieldmanager.go +++ b/internal/resource/fieldmanager.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "slices" + "strings" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/equality" @@ -17,6 +18,26 @@ const ( enoManager = "eno" ) +// parseFieldPaths converts a slice of dot-separated field path strings into fieldpath.Path objects. +// For example: "metadata.labels" -> fieldpath.MakePathOrDie("metadata", "labels") +func parseFieldPaths(fields []string) []fieldpath.Path { + paths := make([]fieldpath.Path, 0, len(fields)) + for _, field := range fields { + if field == "" { + continue + } + // Split on dots to handle nested paths like "metadata.labels" + parts := strings.Split(field, ".") + // Convert string parts to interface{} for MakePathOrDie + pathParts := make([]interface{}, len(parts)) + for i, part := range parts { + pathParts[i] = part + } + paths = append(paths, fieldpath.MakePathOrDie(pathParts...)) + } + return paths +} + // MergeEnoManagedFields corrects managed fields drift to ensure Eno can remove fields // that are no longer set by the synthesizer, even when another client corrupts the // managed fields metadata. 
Returns corrected managed fields, affected field paths, @@ -135,7 +156,7 @@ func compareEnoManagedFields(a, b []metav1.ManagedFieldsEntry) bool { return equality.Semantic.DeepEqual(a[ai].FieldsV1, b[ab].FieldsV1) } -func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Unstructured, migratingManagers []string) (modified bool, err error) { +func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Unstructured, migratingManagers []string, migratingFields []string) (modified bool, err error) { managedFields := current.GetManagedFields() logger := logr.FromContextOrDiscard(ctx) logger.Info("NormalizingConflictingManager", "Name", current.GetName(), "Namespace", current.GetNamespace()) @@ -143,11 +164,14 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns return false, nil } + // Parse the allowed field paths from the configuration + allowedPrefixes := parseFieldPaths(migratingFields) + // Build the unique list of managers to migrate from user-provided migratingManagers uniqueMigratingManagers := buildUniqueManagersList(migratingManagers) // Check if normalization is needed - hasLegacyManager, enoEntryCount, err := analyzeManagerConflicts(managedFields, uniqueMigratingManagers) + hasLegacyManager, enoEntryCount, err := analyzeManagerConflicts(managedFields, uniqueMigratingManagers, allowedPrefixes) if err != nil { return false, err } @@ -157,7 +181,7 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns } // Merge all eno entries first to get the combined fieldset - mergedEnoSet, mergedEnoTime := mergeEnoEntries(managedFields) + mergedEnoSet, mergedEnoTime := mergeEnoEntries(managedFields, allowedPrefixes) // Build new managedFields list, merging legacy managers into eno and excluding original eno entries newManagedFields := make([]metav1.ManagedFieldsEntry, 0, len(managedFields)) @@ -189,12 +213,28 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns logger.Info("NormalizeConflictingManagers found migrating managers", "manager", entry.Manager, "resoruceName", current.GetName(), "resourceNamespace", current.GetNamespace()) // Check if this is a legacy manager that should be migrated to eno - // Merge legacy manager's fields into the eno fieldset instead of creating a separate entry + // Separate allowed fields (to migrate) from excluded fields (to keep with legacy manager) if mergedEnoSet == nil { mergedEnoSet = &fieldpath.Set{} } if set := parseFieldsEntry(*entry); set != nil { - mergedEnoSet = mergedEnoSet.Union(set) + // Filter to only include allowed field paths for migration + allowedFields := filterAllowedFieldPaths(set, allowedPrefixes) + if !allowedFields.Empty() { + mergedEnoSet = mergedEnoSet.Union(allowedFields) + } + + // Keep the legacy manager entry if it has excluded fields + excludedFields := set.Difference(allowedFields) + if !excludedFields.Empty() { + // Create a new entry with only the excluded fields + js, err := excludedFields.ToJSON() + if err == nil { + entryCopy := *entry + entryCopy.FieldsV1 = &metav1.FieldsV1{Raw: js} + newManagedFields = append(newManagedFields, entryCopy) + } + } } // Update the timestamp to the most recent if mergedEnoTime == nil || (entry.Time != nil && entry.Time.After(mergedEnoTime.Time)) { @@ -203,13 +243,14 @@ func NormalizeConflictingManagers(ctx context.Context, current *unstructured.Uns modified = true } - // Add the merged eno entry if we found any eno entries + // Add the merged eno entry if we found any 
eno entries OR migrated any legacy manager fields if mergedEnoSet != nil && !mergedEnoSet.Empty() { mergedEntry, err := createMergedEnoEntry(mergedEnoSet, mergedEnoTime, managedFields) if err != nil { return false, err } newManagedFields = append(newManagedFields, mergedEntry) + modified = true // Ensure modified is true when we create/update the eno entry } if modified { @@ -234,9 +275,11 @@ func buildUniqueManagersList(migratingManagers []string) map[string]bool { return unique } -// analyzeManagerConflicts checks if there are legacy managers present -// and counts the number of eno entries -func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMigratingManagers map[string]bool) (hasLegacyManager bool, enoEntryCount int, err error) { +// analyzeManagerConflicts checks if there are legacy managers present that own allowed fields +// and counts the number of eno entries. Only returns hasLegacyManager=true if a legacy manager +// owns at least one field from the allowed list (spec, data, etc.), preventing infinite loops +// when legacy managers only own excluded fields (status, finalizers, etc.). +func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMigratingManagers map[string]bool, allowedPrefixes []fieldpath.Path) (hasLegacyManager bool, enoEntryCount int, err error) { for i := range managedFields { entry := &managedFields[i] @@ -247,7 +290,13 @@ func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMi // Check if this is a legacy manager we need to normalize if uniqueMigratingManagers[entry.Manager] { - hasLegacyManager = true + // Only consider it a legacy manager needing migration if it owns at least one allowed field + if set := parseFieldsEntry(*entry); set != nil { + allowedFields := filterAllowedFieldPaths(set, allowedPrefixes) + if !allowedFields.Empty() { + hasLegacyManager = true + } + } } } @@ -255,8 +304,8 @@ func analyzeManagerConflicts(managedFields []metav1.ManagedFieldsEntry, uniqueMi } // mergeEnoEntries merges all eno Apply entries into a single fieldpath.Set -// and tracks the most recent timestamp -func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry) (*fieldpath.Set, *metav1.Time) { +// and tracks the most recent timestamp. Filters the result to only include allowed field paths. +func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry, allowedPrefixes []fieldpath.Path) (*fieldpath.Set, *metav1.Time) { var mergedSet *fieldpath.Set var latestTime *metav1.Time @@ -268,7 +317,11 @@ func mergeEnoEntries(managedFields []metav1.ManagedFieldsEntry) (*fieldpath.Set, mergedSet = &fieldpath.Set{} } if set := parseFieldsEntry(*entry); set != nil { - mergedSet = mergedSet.Union(set) + // Filter to only include allowed field paths + filteredSet := filterAllowedFieldPaths(set, allowedPrefixes) + if !filteredSet.Empty() { + mergedSet = mergedSet.Union(filteredSet) + } } if latestTime == nil || (entry.Time != nil && entry.Time.After(latestTime.Time)) { latestTime = entry.Time @@ -309,3 +362,38 @@ func createMergedEnoEntry(mergedSet *fieldpath.Set, timestamp *metav1.Time, mana FieldsV1: &metav1.FieldsV1{Raw: js}, }, nil } + +// filterAllowedFieldPaths filters a fieldpath.Set to only include paths that are safe to migrate. +// The allowedPrefixes parameter specifies which field paths should be migrated. 
+// Common examples: spec.*, data.*, stringData.*, binaryData.*, metadata.labels.*, metadata.annotations.* +// Excluded paths: metadata.finalizers, metadata.deletionTimestamp, metadata.ownerReferences, status.*, and other metadata fields +func filterAllowedFieldPaths(set *fieldpath.Set, allowedPrefixes []fieldpath.Path) *fieldpath.Set { + if set == nil || set.Empty() { + return &fieldpath.Set{} + } + + filtered := &fieldpath.Set{} + set.Iterate(func(path fieldpath.Path) { + for _, prefix := range allowedPrefixes { + if hasPrefix(path, prefix) { + filtered.Insert(path) + break + } + } + }) + + return filtered +} + +// hasPrefix checks if a path starts with the given prefix +func hasPrefix(path, prefix fieldpath.Path) bool { + if len(path) < len(prefix) { + return false + } + for i, elem := range prefix { + if !path[i].Equals(elem) { + return false + } + } + return true +} diff --git a/internal/resource/fieldmanager_test.go b/internal/resource/fieldmanager_test.go index 59f2642f..1411b092 100644 --- a/internal/resource/fieldmanager_test.go +++ b/internal/resource/fieldmanager_test.go @@ -612,7 +612,7 @@ func TestNormalizeConflictingManagers(t *testing.T) { obj := &unstructured.Unstructured{} obj.SetManagedFields(tc.managedFields) - modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers) + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}) require.NoError(t, err) assert.Equal(t, tc.expectModified, modified) @@ -828,7 +828,7 @@ func TestNormalizeConflictingManagers_FieldMerging(t *testing.T) { obj := &unstructured.Unstructured{} obj.SetManagedFields(tc.managedFields) - modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers) + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}) require.NoError(t, err) assert.Equal(t, tc.expectModified, modified) @@ -870,3 +870,491 @@ func TestNormalizeConflictingManagers_FieldMerging(t *testing.T) { }) } } + +func TestNormalizeConflictingManagers_AnnotationGating(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + managedFields []metav1.ManagedFieldsEntry + migratingManagers []string + expectModified bool + }{ + { + name: "migration occurs even with annotation present (no longer gates)", + annotations: map[string]string{ + "eno.azure.com/ownership-migrated": "v1", + }, + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + }, + { + name: "migration occurs when annotation not present", + annotations: nil, + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + }, + { + name: "migration occurs when annotation present with wrong value", + annotations: map[string]string{ + "eno.azure.com/ownership-migrated": "v0", + }, + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + 
Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + }, + { + name: "no-op when no conflicting managers present", + annotations: map[string]string{ + "other-annotation": "value", + }, + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "eno", + Operation: metav1.ManagedFieldsOperationApply, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetManagedFields(tc.managedFields) + if tc.annotations != nil { + obj.SetAnnotations(tc.annotations) + } + + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}) + + require.NoError(t, err) + assert.Equal(t, tc.expectModified, modified) + + // Verify that annotations are not modified by the function + // (the function should not set or remove the migration annotation) + originalAnnotations := tc.annotations + currentAnnotations := obj.GetAnnotations() + + if originalAnnotations == nil { + assert.Nil(t, currentAnnotations, "annotations should remain nil if they were originally nil") + } else { + assert.Equal(t, originalAnnotations, currentAnnotations, "annotations should not be modified") + } + }) + } +} + +func TestNormalizeConflictingManagers_FieldPathFiltering(t *testing.T) { + tests := []struct { + name string + managedFields []metav1.ManagedFieldsEntry + migratingManagers []string + expectModified bool + expectEnoFields []string + expectLegacyStillOwns []string + }{ + { + name: "only spec, metadata.labels, metadata.annotations migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}, "f:finalizers":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas", ".metadata.labels.app", ".metadata.annotations.note"}, + expectLegacyStillOwns: []string{".metadata.finalizers"}, + }, + { + name: "status fields not migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:status":{"f:ready":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas"}, + expectLegacyStillOwns: []string{".status.ready"}, + }, + { + name: "deletionTimestamp not migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:deletionTimestamp":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas"}, + expectLegacyStillOwns: []string{".metadata.deletionTimestamp"}, + }, + { + name: "multiple eno entries merged with 
filtering", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "eno", + Operation: metav1.ManagedFieldsOperationApply, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:status":{"f:ready":{}}}`)}, + }, + { + Manager: "eno", + Operation: metav1.ManagedFieldsOperationApply, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:paused":{}}, "f:metadata":{"f:finalizers":{}}}`)}, + }, + }, + migratingManagers: []string{}, + expectModified: true, + expectEnoFields: []string{".spec.replicas", ".spec.paused"}, + expectLegacyStillOwns: []string{}, // status and finalizers are filtered out entirely + }, + { + name: "complex nested spec paths migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:template":{"f:spec":{"f:containers":{}}}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.template.spec.containers"}, + }, + { + name: "other metadata fields not migrated", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:ownerReferences":{},"f:resourceVersion":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas"}, + expectLegacyStillOwns: []string{".metadata.ownerReferences", ".metadata.resourceVersion"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetManagedFields(tc.managedFields) + + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}) + + require.NoError(t, err) + assert.Equal(t, tc.expectModified, modified) + + // Find the eno entry + var enoEntry *metav1.ManagedFieldsEntry + for _, entry := range obj.GetManagedFields() { + if entry.Manager == "eno" && entry.Operation == metav1.ManagedFieldsOperationApply { + enoEntry = &entry + break + } + } + + require.NotNil(t, enoEntry, "expected to find an eno managed fields entry") + require.NotNil(t, enoEntry.FieldsV1, "expected eno entry to have FieldsV1") + + // Deserialize the FieldsV1 JSON to verify expected fields exist + var enoFieldsMap map[string]interface{} + err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap) + require.NoError(t, err) + + // Verify all expected fields are present in eno entry + for _, expectedPath := range tc.expectEnoFields { + components := parseFieldPath(expectedPath) + assert.True(t, hasNestedField(enoFieldsMap, components), + "expected eno entry to contain field path: %s (components: %v)", expectedPath, components) + } + + // Verify excluded fields are NOT in eno entry + for _, excludedPath := range tc.expectLegacyStillOwns { + components := parseFieldPath(excludedPath) + assert.False(t, hasNestedField(enoFieldsMap, components), + "expected eno entry to NOT contain field path: %s (components: %v)", excludedPath, components) + } + + // Verify that legacy managers still own the excluded fields (if any) + if len(tc.expectLegacyStillOwns) > 0 && len(tc.migratingManagers) > 0 { + // Find any remaining legacy manager entries (they should still 
exist with excluded fields) + for _, entry := range obj.GetManagedFields() { + for _, migratingMgr := range tc.migratingManagers { + if entry.Manager == migratingMgr && entry.FieldsV1 != nil { + var legacyFieldsMap map[string]interface{} + err = json.Unmarshal(entry.FieldsV1.Raw, &legacyFieldsMap) + require.NoError(t, err) + + // Verify the legacy manager still owns at least one excluded field + for _, excludedPath := range tc.expectLegacyStillOwns { + components := parseFieldPath(excludedPath) + if hasNestedField(legacyFieldsMap, components) { + // Good - at least one excluded field is still owned by legacy manager + return + } + } + } + } + } + } + }) + } +} + +// TestNormalizeConflictingManagers_CustomMigratingFields tests that only the specified +// migratingFields are migrated, and fields outside that list remain with their original managers. +func TestNormalizeConflictingManagers_CustomMigratingFields(t *testing.T) { + tests := []struct { + name string + managedFields []metav1.ManagedFieldsEntry + migratingManagers []string + migratingFields []string + expectModified bool + expectEnoFields []string + expectLegacyStillOwns []string + expectLegacyManager string + }{ + { + name: "only migrate spec when migratingFields contains only spec", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + migratingFields: []string{"spec"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas"}, + expectLegacyStillOwns: []string{".data.key1", ".stringData.key2"}, + expectLegacyManager: "kubectl", + }, + { + name: "only migrate data when migratingFields contains only data", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "helm", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:metadata":{"f:labels":{"f:app":{}}}}`)}, + }, + }, + migratingManagers: []string{"helm"}, + migratingFields: []string{"data"}, + expectModified: true, + expectEnoFields: []string{".data.key1"}, + expectLegacyStillOwns: []string{".spec.replicas", ".metadata.labels.app"}, + expectLegacyManager: "helm", + }, + { + name: "migrate metadata.labels but not metadata.annotations", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + migratingFields: []string{"metadata.labels"}, + expectModified: true, + expectEnoFields: []string{".metadata.labels.app"}, + expectLegacyStillOwns: []string{".spec.replicas", ".metadata.annotations.note"}, + expectLegacyManager: "kubectl", + }, + { + name: "empty migratingFields should not migrate anything", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + migratingFields: []string{}, + expectModified: false, + expectEnoFields: []string{}, + 
expectLegacyStillOwns: []string{".spec.replicas", ".data.key1"}, + expectLegacyManager: "kubectl", + }, + { + name: "combine spec and data but exclude stringData and binaryData", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "helm-controller", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}, "f:binaryData":{"f:key3":{}}}`)}, + }, + }, + migratingManagers: []string{"helm-controller"}, + migratingFields: []string{"spec", "data"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas", ".data.key1"}, + expectLegacyStillOwns: []string{".stringData.key2", ".binaryData.key3"}, + expectLegacyManager: "helm-controller", + }, + { + name: "migrate all default fields", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kubectl", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}, "f:data":{"f:key1":{}}, "f:stringData":{"f:key2":{}}, "f:binaryData":{"f:key3":{}}, "f:metadata":{"f:labels":{"f:app":{}}, "f:annotations":{"f:note":{}}}}`)}, + }, + }, + migratingManagers: []string{"kubectl"}, + migratingFields: []string{"spec", "data", "stringData", "binaryData", "metadata.labels", "metadata.annotations"}, + expectModified: true, + expectEnoFields: []string{".spec.replicas", ".data.key1", ".stringData.key2", ".binaryData.key3", ".metadata.labels.app", ".metadata.annotations.note"}, + expectLegacyStillOwns: []string{}, + expectLegacyManager: "", + }, + { + name: "migrate nested spec.template but not top-level spec.replicas", + managedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "custom-operator", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}, "f:template":{"f:spec":{"f:containers":{}}}}, "f:status":{"f:ready":{}}}`)}, + }, + }, + migratingManagers: []string{"custom-operator"}, + migratingFields: []string{"spec.template"}, + expectModified: true, + expectEnoFields: []string{".spec.template.spec.containers"}, + expectLegacyStillOwns: []string{".status.ready"}, + expectLegacyManager: "custom-operator", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetManagedFields(tc.managedFields) + + modified, err := NormalizeConflictingManagers(context.Background(), obj, tc.migratingManagers, tc.migratingFields) + + require.NoError(t, err) + assert.Equal(t, tc.expectModified, modified, "expected modified=%v, got %v", tc.expectModified, modified) + + managedFields := obj.GetManagedFields() + + // Find the eno entry + var enoEntry *metav1.ManagedFieldsEntry + for _, entry := range managedFields { + if entry.Manager == "eno" && entry.Operation == metav1.ManagedFieldsOperationApply { + enoEntry = &entry + break + } + } + + if len(tc.expectEnoFields) > 0 { + require.NotNil(t, enoEntry, "expected to find an eno managed fields entry") + require.NotNil(t, enoEntry.FieldsV1, "expected eno entry to have FieldsV1") + + // Deserialize the FieldsV1 JSON to verify expected fields are present + var enoFieldsMap map[string]interface{} + err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap) + require.NoError(t, err) + + // Verify all expected fields are present in eno + for _, expectedPath := range tc.expectEnoFields { + components := parseFieldPath(expectedPath) + 
assert.True(t, hasNestedField(enoFieldsMap, components), + "expected eno entry to contain field path: %s (components: %v)", expectedPath, components) + } + } else if tc.expectModified { + // If we expect modification but no eno fields, something is wrong + t.Errorf("expected modification but no eno fields specified") + } + + // Verify that fields outside migratingFields remain with the legacy manager + if len(tc.expectLegacyStillOwns) > 0 { + var legacyEntry *metav1.ManagedFieldsEntry + for _, entry := range managedFields { + if entry.Manager == tc.expectLegacyManager { + legacyEntry = &entry + break + } + } + + require.NotNil(t, legacyEntry, "expected to find legacy manager %s", tc.expectLegacyManager) + require.NotNil(t, legacyEntry.FieldsV1, "expected legacy manager to have FieldsV1") + + var legacyFieldsMap map[string]interface{} + err = json.Unmarshal(legacyEntry.FieldsV1.Raw, &legacyFieldsMap) + require.NoError(t, err) + + // Verify all expected fields are still owned by legacy manager + for _, expectedPath := range tc.expectLegacyStillOwns { + components := parseFieldPath(expectedPath) + assert.True(t, hasNestedField(legacyFieldsMap, components), + "expected legacy manager %s to still own field path: %s (components: %v)", tc.expectLegacyManager, expectedPath, components) + } + + // Verify that eno does NOT own the fields that should remain with legacy manager + if enoEntry != nil && enoEntry.FieldsV1 != nil { + var enoFieldsMap map[string]interface{} + err = json.Unmarshal(enoEntry.FieldsV1.Raw, &enoFieldsMap) + require.NoError(t, err) + + for _, excludedPath := range tc.expectLegacyStillOwns { + components := parseFieldPath(excludedPath) + assert.False(t, hasNestedField(enoFieldsMap, components), + "eno should NOT own field path: %s (components: %v), it should remain with %s", excludedPath, components, tc.expectLegacyManager) + } + } + } + }) + } +}
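
For context, a minimal standalone sketch of the prefix rule that the new `parseFieldPaths`, `hasPrefix`, and `filterAllowedFieldPaths` helpers implement: a managed field path migrates to eno only when it falls under one of the comma-separated prefixes passed via `--migrating-fields`. The sketch uses plain string slices rather than `fieldpath.Path`/`fieldpath.Set`, and the `splitPrefixes`/`migrates` helpers and example paths are illustrative only, not code from this PR.

```go
package main

import (
	"fmt"
	"strings"
)

// splitPrefixes mirrors the flag parsing in main.go:
// "spec, metadata.labels" -> [["spec"], ["metadata","labels"]].
// Empty entries are skipped and whitespace is trimmed.
func splitPrefixes(flagValue string) [][]string {
	var prefixes [][]string
	for _, f := range strings.Split(flagValue, ",") {
		f = strings.TrimSpace(f)
		if f == "" {
			continue
		}
		prefixes = append(prefixes, strings.Split(f, "."))
	}
	return prefixes
}

// migrates reports whether a concrete field path (e.g. ["metadata","labels","app"])
// starts with at least one configured prefix, i.e. whether the filtering step
// would move that field into eno's merged managed-fields entry.
func migrates(path []string, prefixes [][]string) bool {
	for _, prefix := range prefixes {
		if len(path) < len(prefix) {
			continue
		}
		match := true
		for i := range prefix {
			if path[i] != prefix[i] {
				match = false
				break
			}
		}
		if match {
			return true
		}
	}
	return false
}

func main() {
	prefixes := splitPrefixes("spec, data, metadata.labels")
	fmt.Println(migrates([]string{"spec", "replicas"}, prefixes))          // true
	fmt.Println(migrates([]string{"metadata", "labels", "app"}, prefixes)) // true
	fmt.Println(migrates([]string{"metadata", "finalizers"}, prefixes))    // false: stays with the legacy manager
	fmt.Println(migrates([]string{"status", "ready"}, prefixes))           // false
}
```

Under these assumptions, running the reconciler with `--migrating-field-managers=kubectl --migrating-fields=spec,data` would move kubectl's `spec` and `data` entries to eno while leaving fields such as `metadata.finalizers` and `status` with kubectl, which matches the expectations in TestNormalizeConflictingManagers_CustomMigratingFields above.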