From 0054ca96efb952cab82aa51570f46b76e102838e Mon Sep 17 00:00:00 2001
From: ldornele
Date: Thu, 30 Apr 2026 00:00:31 -0300
Subject: [PATCH 1/3] HYPERFLEET-999 - refactor: standardize adapter conditions for deletions
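
Gate all three adapters on a shared "is_deleting" precondition and report a
Finalized condition for the deletion lifecycle. The flag is derived from the
status payload with CEL optional syntax, shown here as it appears in adapter1
(adapter3 reads nodepoolStatus instead):

    - name: "is_deleting"
      expression: "clusterStatus.?deleted_time.hasValue()"

The .?deleted_time access yields an empty optional while the field is unset,
so hasValue() turns true only once deletion has been requested. Resources
also gain a delete lifecycle gated on the same flag:

    lifecycle:
      delete:
        propagationPolicy: "Background"
        when:
          expression: "is_deleting"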
"ConfigMapDataAvailable" + : "ConfigMapDataNotYetAvailable") message: expression: | - has(resources.resource0.data.cluster_id) - ? "ConfigMap data available" - : "ConfigMap data not yet available" + is_deleting + ? "ConfigMap data no longer available" + : (resources.?resource0.?data.?cluster_id.hasValue() + ? "ConfigMap data available" + : "ConfigMap data not yet available") # Health: Adapter execution status (runtime) - type: "Health" status: @@ -123,6 +144,23 @@ post: message: expression: | adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" + # Finalized: Deletion lifecycle completion + - type: "Finalized" + status: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "True" : "False") + : "False" + reason: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "CleanupConfirmed" : "CleanupInProgress") + : "ResourceActive" + message: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "All resources deleted successfully" : "Deletion in progress") + : "Resource is active, not scheduled for deletion" # Event generation ID metadata field needs to use expression to avoid interpolation issues observed_generation: expression: "generation" diff --git a/helm/adapter2/adapter-task-config.yaml b/helm/adapter2/adapter-task-config.yaml index 653b15d..67868c1 100644 --- a/helm/adapter2/adapter-task-config.yaml +++ b/helm/adapter2/adapter-task-config.yaml @@ -31,6 +31,8 @@ preconditions: field: "generation" - name: "timestamp" field: "created_time" + - name: "is_deleting" + expression: "clusterStatus.?deleted_time.hasValue()" - name: "readyConditionStatus" expression: | status.conditions.filter(c, c.type == "Ready").size() > 0 @@ -46,9 +48,9 @@ preconditions: value: "False" - name: "validationCheck" - # Valid CEL expression + # Precondition passes if cluster is NOT Ready OR if cluster is being deleted expression: | - readyConditionStatus == "False" + readyConditionStatus == "False" || is_deleting # Resources with valid K8s manifests resources: @@ -208,6 +210,15 @@ resources: discovery: by_name: "{{ .clusterId | lower }}-{{ .adapter.name }}-configmap" + # Deletion lifecycle - ManifestWork deletion cascades to nested resources + lifecycle: + delete: + # Foreground propagation ensures nested resources (namespace, configmap) are deleted + # before the parent ManifestWork is removed, preventing orphaned resources + propagationPolicy: "Foreground" + when: + expression: "is_deleting" + post: payloads: - name: "statusPayload" @@ -288,6 +299,33 @@ post: ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason") : "Adapter execution completed successfully" + # Finalized: Deletion lifecycle completion + - type: "Finalized" + status: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() + && !resources.?namespace0.hasValue() + && !resources.?configmap0.hasValue() + ? "True" : "False") + : "False" + reason: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() + && !resources.?namespace0.hasValue() + && !resources.?configmap0.hasValue() + ? 
"CleanupConfirmed" : "CleanupInProgress") + : "ResourceActive" + message: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() + && !resources.?namespace0.hasValue() + && !resources.?configmap0.hasValue() + ? "ManifestWork and all nested resources deleted successfully" : "Deletion in progress") + : "Resource is active, not scheduled for deletion" + observed_generation: expression: "generation" observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}" diff --git a/helm/adapter3/adapter-task-config.yaml b/helm/adapter3/adapter-task-config.yaml index 40d0548..d3ebe33 100644 --- a/helm/adapter3/adapter-task-config.yaml +++ b/helm/adapter3/adapter-task-config.yaml @@ -34,6 +34,8 @@ preconditions: field: "name" - name: "generation" field: "generation" + - name: "is_deleting" + expression: "nodepoolStatus.?deleted_time.hasValue()" - name: "nodepoolNotReady" expression: | status.conditions.filter(c, c.type == "Ready").size() > 0 @@ -48,9 +50,9 @@ preconditions: )).getSeconds() > 300 - name: "validationCheck" - # Precondition passes if nodepool is NOT Ready OR if nodepool is Ready and stable for >300 seconds since last transition (enables self-healing) + # Precondition passes if nodepool is NOT Ready OR if nodepool is Ready and stable for >300 seconds since last transition (enables self-healing) OR if nodepool is being deleted expression: | - nodepoolNotReady || nodepoolReadyTTL + nodepoolNotReady || nodepoolReadyTTL || is_deleting # Resources with valid K8s manifests resources: @@ -78,6 +80,11 @@ resources: label_selector: hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" + lifecycle: + delete: + propagationPolicy: "Background" + when: + expression: "is_deleting" # Post-processing with valid CEL expressions post: @@ -90,51 +97,68 @@ post: - type: "Applied" status: expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "True" - : "False" + is_deleting + ? "False" + : (resources.?resource0.?metadata.?creationTimestamp.hasValue() ? "True" : "False") reason: expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "ConfigMap manifest applied successfully" - : "ConfigMap is pending to be applied" + is_deleting + ? "ResourceDeleted" + : (resources.?resource0.?metadata.?creationTimestamp.hasValue() ? "ConfigMapApplied" : "ConfigMapPending") message: expression: | - has(resources.resource0.metadata.creationTimestamp) - ? "ConfigMap manifest applied successfully" - : "ConfigMap is pending to be applied" + is_deleting + ? "ConfigMap has been deleted" + : (resources.?resource0.?metadata.?creationTimestamp.hasValue() + ? "ConfigMap has been applied correctly" + : "ConfigMap is pending to be applied") # Available: Check job status conditions - type: "Available" status: expression: | - has(resources.resource0.data.nodepoolId) - ? "True" - : "False" + is_deleting + ? "False" + : (resources.?resource0.?data.?nodepoolId.hasValue() ? "True" : "False") reason: expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" + is_deleting + ? "ResourceDeleted" + : (resources.?resource0.?data.?nodepoolId.hasValue() ? "ConfigMapDataAvailable" : "ConfigMapDataNotYetAvailable") message: expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" + is_deleting + ? 
"ConfigMap data no longer available" + : (resources.?resource0.?data.?nodepoolId.hasValue() + ? "ConfigMap data available" + : "ConfigMap data not yet available") # Health: Adapter execution status (runtime) - type: "Health" status: expression: | - has(resources.resource0.data.nodepoolId) - ? "True" + adapter.?executionStatus.orValue("") == "success" ? "True" : (adapter.?executionStatus.orValue("") == "failed" ? "False" : "Unknown") + reason: + expression: | + adapter.?errorReason.orValue("") != "" ? adapter.?errorReason.orValue("") : "Healthy" + message: + expression: | + adapter.?errorMessage.orValue("") != "" ? adapter.?errorMessage.orValue("") : "All adapter operations completed successfully" + # Finalized: Deletion lifecycle completion + - type: "Finalized" + status: + expression: | + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "True" : "False") : "False" reason: expression: | - has(resources.resource0.data.nodepoolId) - ? "ConfigMap data available" - : "ConfigMap data not yet available" + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "CleanupConfirmed" : "CleanupInProgress") + : "ResourceActive" message: expression: | - toJson(resources.resource0) + is_deleting && adapter.?executionStatus.orValue("") == "success" + ? (!resources.?resource0.hasValue() ? "All resources deleted successfully" : "Deletion in progress") + : "Resource is active, not scheduled for deletion" # Event generation ID metadata field needs to use expression to avoid interpolation issues observed_generation: expression: "generation" From a3b7875cf392234d655a9626294d7bb9028dcb66 Mon Sep 17 00:00:00 2001 From: ldornele Date: Thu, 30 Apr 2026 01:31:41 -0300 Subject: [PATCH 2/3] HYPERFLEET-999 - refactor: apply coderabbitai feedback --- helm/adapter1/adapter-task-config.yaml | 28 +++++++++++++++-------- helm/adapter2/adapter-task-config.yaml | 31 ++++++++++++++++---------- helm/adapter3/adapter-task-config.yaml | 28 +++++++++++++++-------- 3 files changed, 57 insertions(+), 30 deletions(-) diff --git a/helm/adapter1/adapter-task-config.yaml b/helm/adapter1/adapter-task-config.yaml index 0baa3c0..3eff30c 100644 --- a/helm/adapter1/adapter-task-config.yaml +++ b/helm/adapter1/adapter-task-config.yaml @@ -148,19 +148,29 @@ post: - type: "Finalized" status: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "True" : "False") - : "False" + !is_deleting + ? "False" + : (!resources.?resource0.hasValue() + ? "True" + : "False") reason: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "CleanupConfirmed" : "CleanupInProgress") - : "ResourceActive" + !is_deleting + ? "ResourceActive" + : (!resources.?resource0.hasValue() + ? "CleanupConfirmed" + : (adapter.?executionStatus.orValue("") == "failed" + ? "CleanupFailed" + : "CleanupInProgress")) message: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "All resources deleted successfully" : "Deletion in progress") - : "Resource is active, not scheduled for deletion" + !is_deleting + ? "Resource is active, not scheduled for deletion" + : (!resources.?resource0.hasValue() + ? "All resources deleted successfully" + : (adapter.?executionStatus.orValue("") == "failed" + ? 
"Deletion failed during adapter execution" + : "Deletion in progress")) # Event generation ID metadata field needs to use expression to avoid interpolation issues observed_generation: expression: "generation" diff --git a/helm/adapter2/adapter-task-config.yaml b/helm/adapter2/adapter-task-config.yaml index 67868c1..91a10e6 100644 --- a/helm/adapter2/adapter-task-config.yaml +++ b/helm/adapter2/adapter-task-config.yaml @@ -303,28 +303,35 @@ post: - type: "Finalized" status: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() + !is_deleting + ? "False" + : (!resources.?resource0.hasValue() && !resources.?namespace0.hasValue() && !resources.?configmap0.hasValue() - ? "True" : "False") - : "False" + ? "True" + : "False") reason: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() + !is_deleting + ? "ResourceActive" + : (!resources.?resource0.hasValue() && !resources.?namespace0.hasValue() && !resources.?configmap0.hasValue() - ? "CleanupConfirmed" : "CleanupInProgress") - : "ResourceActive" + ? "CleanupConfirmed" + : (adapter.?executionStatus.orValue("") == "failed" + ? "CleanupFailed" + : "CleanupInProgress")) message: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() + !is_deleting + ? "Resource is active, not scheduled for deletion" + : (!resources.?resource0.hasValue() && !resources.?namespace0.hasValue() && !resources.?configmap0.hasValue() - ? "ManifestWork and all nested resources deleted successfully" : "Deletion in progress") - : "Resource is active, not scheduled for deletion" + ? "ManifestWork and all nested resources deleted successfully" + : (adapter.?executionStatus.orValue("") == "failed" + ? "Deletion failed during adapter execution" + : "Deletion in progress")) observed_generation: expression: "generation" diff --git a/helm/adapter3/adapter-task-config.yaml b/helm/adapter3/adapter-task-config.yaml index d3ebe33..fc12c4f 100644 --- a/helm/adapter3/adapter-task-config.yaml +++ b/helm/adapter3/adapter-task-config.yaml @@ -146,19 +146,29 @@ post: - type: "Finalized" status: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "True" : "False") - : "False" + !is_deleting + ? "False" + : (!resources.?resource0.hasValue() + ? "True" + : "False") reason: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "CleanupConfirmed" : "CleanupInProgress") - : "ResourceActive" + !is_deleting + ? "ResourceActive" + : (!resources.?resource0.hasValue() + ? "CleanupConfirmed" + : (adapter.?executionStatus.orValue("") == "failed" + ? "CleanupFailed" + : "CleanupInProgress")) message: expression: | - is_deleting && adapter.?executionStatus.orValue("") == "success" - ? (!resources.?resource0.hasValue() ? "All resources deleted successfully" : "Deletion in progress") - : "Resource is active, not scheduled for deletion" + !is_deleting + ? "Resource is active, not scheduled for deletion" + : (!resources.?resource0.hasValue() + ? "All resources deleted successfully" + : (adapter.?executionStatus.orValue("") == "failed" + ? 
"Deletion failed during adapter execution" + : "Deletion in progress")) # Event generation ID metadata field needs to use expression to avoid interpolation issues observed_generation: expression: "generation" From 981ddd104521aa39cadb7ccdaf91e169033acd54 Mon Sep 17 00:00:00 2001 From: ldornele Date: Thu, 30 Apr 2026 03:14:18 -0300 Subject: [PATCH 3/3] HYPERFLEET-999 - refactor: apply additional coderabbitai feedback --- helm/adapter1/adapter-task-config.yaml | 2 + helm/adapter2/adapter-task-config.yaml | 91 ++++++++++++++++---------- helm/adapter3/adapter-task-config.yaml | 4 ++ 3 files changed, 63 insertions(+), 34 deletions(-) diff --git a/helm/adapter1/adapter-task-config.yaml b/helm/adapter1/adapter-task-config.yaml index 3eff30c..d84cce6 100644 --- a/helm/adapter1/adapter-task-config.yaml +++ b/helm/adapter1/adapter-task-config.yaml @@ -81,6 +81,8 @@ resources: hyperfleet.io/cluster-name: "{{ .clusterName }}" lifecycle: delete: + # Background propagation is appropriate for simple ConfigMap resources + # without nested dependencies - allows faster deletion without waiting propagationPolicy: "Background" when: expression: "is_deleting" diff --git a/helm/adapter2/adapter-task-config.yaml b/helm/adapter2/adapter-task-config.yaml index 91a10e6..62f759d 100644 --- a/helm/adapter2/adapter-task-config.yaml +++ b/helm/adapter2/adapter-task-config.yaml @@ -41,14 +41,9 @@ preconditions: - name: "placementClusterName" expression: "\"cluster1\"" # TBC coming from placement adapter - # Structured conditions with valid operators - conditions: - - field: "readyConditionStatus" - operator: "equals" - value: "False" - - name: "validationCheck" - # Precondition passes if cluster is NOT Ready OR if cluster is being deleted + # Precondition passes if cluster is NOT Ready (readyConditionStatus == "False") OR if cluster is being deleted + # Note: Unlike adapter1/adapter3, this adapter does not implement self-healing TTL checks expression: | readyConditionStatus == "False" || is_deleting @@ -229,47 +224,75 @@ post: - type: "Applied" status: expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status : "False" + !is_deleting + ? (resources.?resource0.hasValue() && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status : "False") + : "False" reason: expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason : "ManifestWorkNotDiscovered" + !is_deleting + ? (resources.?resource0.hasValue() && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason : "ManifestWorkNotDiscovered") + : (!resources.?resource0.hasValue() + ? "ResourceDeleted" + : (adapter.?executionStatus.orValue("") == "failed" + ? 
"DeletionFailed" + : "DeletionInProgress")) message: expression: | - has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message : "ManifestWork not discovered from Maestro or no Applied condition" + !is_deleting + ? (resources.?resource0.hasValue() && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message : "ManifestWork not discovered from Maestro or no Applied condition") + : (!resources.?resource0.hasValue() + ? "ManifestWork deleted" + : (adapter.?executionStatus.orValue("") == "failed" + ? "Deletion failed during adapter execution" + : "ManifestWork deletion in progress")) # Available: Check if nested discovered manifests are available on the spoke cluster # Each nested discovery is enriched with top-level "conditions" from status.resourceStatus.manifests[] - type: "Available" status: expression: | - has(resources.namespace0) && has(resources.namespace0.conditions) - && resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - && has(resources.configmap0) && has(resources.configmap0.conditions) - && resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "True" - : "False" + !is_deleting + ? (resources.?namespace0.hasValue() && has(resources.namespace0.conditions) + && resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + && resources.?configmap0.hasValue() && has(resources.configmap0.conditions) + && resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "True" + : "False") + : "False" reason: expression: | - !(has(resources.namespace0) && has(resources.namespace0.conditions)) - ? "NamespaceNotDiscovered" - : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "NamespaceNotAvailable" - : !(has(resources.configmap0) && has(resources.configmap0.conditions)) - ? "ConfigMapNotDiscovered" - : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "ConfigMapNotAvailable" - : "AllResourcesAvailable" + !is_deleting + ? (!(resources.?namespace0.hasValue() && has(resources.namespace0.conditions)) + ? "NamespaceNotDiscovered" + : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "NamespaceNotAvailable" + : !(resources.?configmap0.hasValue() && has(resources.configmap0.conditions)) + ? "ConfigMapNotDiscovered" + : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "ConfigMapNotAvailable" + : "AllResourcesAvailable") + : (!resources.?namespace0.hasValue() && !resources.?configmap0.hasValue() + ? "ResourceDeleted" + : (adapter.?executionStatus.orValue("") == "failed" + ? "DeletionFailed" + : "DeletionInProgress")) message: expression: | - !(has(resources.namespace0) && has(resources.namespace0.conditions)) - ? 
"Namespace not discovered from ManifestWork" - : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "Namespace not yet available on spoke cluster" - : !(has(resources.configmap0) && has(resources.configmap0.conditions)) - ? "ConfigMap not discovered from ManifestWork" - : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") - ? "ConfigMap not yet available on spoke cluster" - : "All manifests (namespace, configmap) are available on spoke cluster" + !is_deleting + ? (!(resources.?namespace0.hasValue() && has(resources.namespace0.conditions)) + ? "Namespace not discovered from ManifestWork" + : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "Namespace not yet available on spoke cluster" + : !(resources.?configmap0.hasValue() && has(resources.configmap0.conditions)) + ? "ConfigMap not discovered from ManifestWork" + : !resources.configmap0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True") + ? "ConfigMap not yet available on spoke cluster" + : "All manifests (namespace, configmap) are available on spoke cluster") + : (!resources.?namespace0.hasValue() && !resources.?configmap0.hasValue() + ? "Nested resources deleted" + : (adapter.?executionStatus.orValue("") == "failed" + ? "Deletion failed during adapter execution" + : "Nested resource deletion in progress")) # Health: Adapter execution status — surfaces errors from any phase - type: "Health" diff --git a/helm/adapter3/adapter-task-config.yaml b/helm/adapter3/adapter-task-config.yaml index fc12c4f..3950d1f 100644 --- a/helm/adapter3/adapter-task-config.yaml +++ b/helm/adapter3/adapter-task-config.yaml @@ -74,6 +74,8 @@ resources: app.kubernetes.io/version: 1.0.0 hyperfleet.io/nodepool-id: "{{ .nodepoolId }}" hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" + annotations: + hyperfleet.io/generation: "{{ .generation }}" discovery: namespace: "{{ .namespace }}" by_selectors: @@ -82,6 +84,8 @@ resources: hyperfleet.io/nodepool-name: "{{ .nodepoolName }}" lifecycle: delete: + # Background propagation is appropriate for simple ConfigMap resources + # without nested dependencies - allows faster deletion without waiting propagationPolicy: "Background" when: expression: "is_deleting"