diff --git a/README.md b/README.md index 90efac153..875538e41 100644 --- a/README.md +++ b/README.md @@ -94,14 +94,13 @@ spec: ``` 3. Verify the server pod is running in the user defined namespace. -### Using a ConfigMap for run.yaml configuration +### Using inline configuration -A ConfigMap can be used to store run.yaml configuration for each LlamaStackDistribution. -Updates to the ConfigMap will restart the Pod to load the new data. +You can provide custom run.yaml configuration directly in the LlamaStackDistribution spec using the `userConfig.customConfig` field. The operator will automatically create and manage a ConfigMap containing this configuration. -Example to create a run.yaml ConfigMap, and a LlamaStackDistribution that references it: +Example to create a LlamaStackDistribution with a custom run.yaml configuration: ``` -kubectl apply -f config/samples/example-with-configmap.yaml +kubectl apply -f config/samples/example-with-custom-config.yaml ``` ## Developer Guide diff --git a/api/v1alpha1/llamastackdistribution_types.go b/api/v1alpha1/llamastackdistribution_types.go index 9d4f185a6..4305b5d39 100644 --- a/api/v1alpha1/llamastackdistribution_types.go +++ b/api/v1alpha1/llamastackdistribution_types.go @@ -87,36 +87,20 @@ type ServerSpec struct { TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` } +// UserConfigSpec defines the user configuration for the llama-stack server. 
type UserConfigSpec struct { - // ConfigMapName is the name of the ConfigMap containing user configuration - ConfigMapName string `json:"configMapName"` - // ConfigMapNamespace is the namespace of the ConfigMap (defaults to the same namespace as the CR) + // CustomConfig contains arbitrary text data that represents a user-provided run.yaml configuration file + // The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod // +optional - ConfigMapNamespace string `json:"configMapNamespace,omitempty"` + CustomConfig string `json:"customConfig,omitempty"` } // TLSConfig defines the TLS configuration for the llama-stack server type TLSConfig struct { - // CABundle defines the CA bundle configuration for custom certificates + // CABundle contains PEM-encoded CA bundle certificates as inline data + // The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod // +optional - CABundle *CABundleConfig `json:"caBundle,omitempty"` -} - -// CABundleConfig defines the CA bundle configuration for custom certificates -type CABundleConfig struct { - // ConfigMapName is the name of the ConfigMap containing CA bundle certificates - ConfigMapName string `json:"configMapName"` - // ConfigMapNamespace is the namespace of the ConfigMap (defaults to the same namespace as the CR) - // +optional - ConfigMapNamespace string `json:"configMapNamespace,omitempty"` - // ConfigMapKeys specifies multiple keys within the ConfigMap containing CA bundle data - All certificates from these keys will be concatenated into a single CA bundle file - If not specified, defaults to [DefaultCABundleKey] - // +optional - // +kubebuilder:validation:MaxItems=50 - // +kubebuilder:validation:Items:Pattern="^[a-zA-Z0-9]([a-zA-Z0-9\\-_.]*[a-zA-Z0-9])?$" - // +kubebuilder:validation:Items:MaxLength=253 - ConfigMapKeys []string `json:"configMapKeys,omitempty"` + CABundle string `json:"caBundle,omitempty"` } // StorageSpec defines the 
persistent storage configuration @@ -215,10 +199,6 @@ type LlamaStackDistributionStatus struct { //+kubebuilder:printcolumn:name="Server Version",type="string",JSONPath=".status.version.llamaStackServerVersion" //+kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas" //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" -//+kubebuilder:selectablefield:JSONPath=".spec.server.userConfig.configMapName" -//+kubebuilder:selectablefield:JSONPath=".spec.server.userConfig.configMapNamespace" -//+kubebuilder:selectablefield:JSONPath=".spec.server.tlsConfig.caBundle.configMapName" -//+kubebuilder:selectablefield:JSONPath=".spec.server.tlsConfig.caBundle.configMapNamespace" // LlamaStackDistribution is the Schema for the llamastackdistributions API type LlamaStackDistribution struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index b39a399e7..e576879cd 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -26,26 +26,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CABundleConfig) DeepCopyInto(out *CABundleConfig) { - *out = *in - if in.ConfigMapKeys != nil { - in, out := &in.ConfigMapKeys, &out.ConfigMapKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CABundleConfig. -func (in *CABundleConfig) DeepCopy() *CABundleConfig { - if in == nil { - return nil - } - out := new(CABundleConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ContainerSpec) DeepCopyInto(out *ContainerSpec) { *out = *in @@ -306,7 +286,7 @@ func (in *ServerSpec) DeepCopyInto(out *ServerSpec) { if in.TLSConfig != nil { in, out := &in.TLSConfig, &out.TLSConfig *out = new(TLSConfig) - (*in).DeepCopyInto(*out) + **out = **in } } @@ -343,11 +323,6 @@ func (in *StorageSpec) DeepCopy() *StorageSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { *out = *in - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = new(CABundleConfig) - (*in).DeepCopyInto(*out) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. diff --git a/config/crd/bases/llamastack.io_llamastackdistributions.yaml b/config/crd/bases/llamastack.io_llamastackdistributions.yaml index f48d0ccf2..e76c5b17c 100644 --- a/config/crd/bases/llamastack.io_llamastackdistributions.yaml +++ b/config/crd/bases/llamastack.io_llamastackdistributions.yaml @@ -2006,44 +2006,20 @@ spec: server properties: caBundle: - description: CABundle defines the CA bundle configuration - for custom certificates - properties: - configMapKeys: - description: |- - ConfigMapKeys specifies multiple keys within the ConfigMap containing CA bundle data - All certificates from these keys will be concatenated into a single CA bundle file - If not specified, defaults to [DefaultCABundleKey] - items: - type: string - maxItems: 50 - type: array - configMapName: - description: ConfigMapName is the name of the ConfigMap - containing CA bundle certificates - type: string - configMapNamespace: - description: ConfigMapNamespace is the namespace of the - ConfigMap (defaults to the same namespace as the CR) - type: string - required: - - configMapName - type: object + description: |- + CABundle contains PEM-encoded CA bundle certificates as inline data + The operator automatically creates and manages a ConfigMap 
for mounting into the llama-stack pod + type: string type: object userConfig: description: UserConfig defines the user configuration for the llama-stack server properties: - configMapName: - description: ConfigMapName is the name of the ConfigMap containing - user configuration - type: string - configMapNamespace: - description: ConfigMapNamespace is the namespace of the ConfigMap - (defaults to the same namespace as the CR) + customConfig: + description: |- + CustomConfig contains arbitrary text data that represents a user-provided run.yaml configuration file + The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod type: string - required: - - configMapName type: object required: - distribution @@ -2196,11 +2172,6 @@ required: - spec type: object - selectableFields: - - jsonPath: .spec.server.userConfig.configMapName - - jsonPath: .spec.server.userConfig.configMapNamespace - - jsonPath: .spec.server.tlsConfig.caBundle.configMapName - - jsonPath: .spec.server.tlsConfig.caBundle.configMapNamespace served: true storage: true subresources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 777f9e041..0b34cba27 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -8,8 +8,11 @@ rules: - "" resources: - configmaps + - serviceaccounts + - services verbs: - create + - delete - get - list - patch @@ -24,19 +27,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - serviceaccounts - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - apps resources: diff --git a/config/samples/example-with-ca-bundle.yaml b/config/samples/example-with-ca-bundle.yaml index 120427867..f0f3ec2d9 100644 --- a/config/samples/example-with-ca-bundle.yaml +++ b/config/samples/example-with-ca-bundle.yaml @@ -1,31 +1,7 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: llama-stack-config -data: - run.yaml: | - # Llama Stack Configuration - version: '2' - 
image_name: remote-vllm - apis: - - inference - providers: - inference: - - provider_id: vllm - provider_type: "remote::vllm" - config: - url: "https://vllm-server.vllm-dist.svc.cluster.local:8000/v1" - models: - - model_id: "meta-llama/Llama-3.2-1B-Instruct" - provider_id: vllm - model_type: llm - server: - port: 8321 ---- apiVersion: llamastack.io/v1alpha1 kind: LlamaStackDistribution metadata: - name: llamastack-with-config + name: llamastack-with-ca-bundle spec: replicas: 1 server: @@ -41,13 +17,24 @@ spec: - name: VLLM_TLS_VERIFY value: "/etc/ssl/certs/ca-bundle.crt" userConfig: - configMapName: llama-stack-config - # configMapNamespace: "" # Optional - defaults to the same namespace as the CR + customConfig: | + # Llama Stack Configuration + version: '2' + image_name: remote-vllm + apis: + - inference + providers: + inference: + - provider_id: vllm + provider_type: "remote::vllm" + config: + url: "https://vllm-server.vllm-dist.svc.cluster.local:8000/v1" + models: + - model_id: "meta-llama/Llama-3.2-1B-Instruct" + provider_id: vllm + model_type: llm + server: + port: 8321 tlsConfig: - caBundle: - configMapName: custom-ca-bundle - # configMapNamespace: "" # Optional - defaults to the same namespace as the CR - # configMapKeys not specified - defaults to ["ca-bundle.crt"] - # configMapKeys: # Specify multiple keys to concatenate into ca-bundle.crt - # - ca-bundle1.crt - # - ca-bundle2.crt + # caBundle must contain valid PEM formatted data + caBundle: "PLACEHOLDER_CA_BUNDLE" diff --git a/config/samples/example-with-configmap.yaml b/config/samples/example-with-configmap.yaml deleted file mode 100644 index b4600ee32..000000000 --- a/config/samples/example-with-configmap.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: llama-stack-config -data: - run.yaml: | - # Llama Stack Configuration - version: '2' - image_name: ollama - apis: - - inference - providers: - inference: - - provider_id: ollama - provider_type: "remote::ollama" 
- config: - url: "http://ollama-server-service.ollama-dist.svc.cluster.local:11434" - models: - - model_id: "llama3.2:1b" - provider_id: ollama - model_type: llm - server: - port: 8321 ---- -apiVersion: llamastack.io/v1alpha1 -kind: LlamaStackDistribution -metadata: - name: llamastack-with-config -spec: - replicas: 1 - server: - distribution: - name: ollama - containerSpec: - port: 8321 - env: - - name: INFERENCE_MODEL - value: "llama3.2:1b" - - name: OLLAMA_URL - value: "http://ollama-server-service.ollama-dist.svc.cluster.local:11434" - userConfig: - configMapName: llama-stack-config - # configMapNamespace: "" # Optional - defaults to the same namespace as the CR diff --git a/config/samples/example-with-custom-config.yaml b/config/samples/example-with-custom-config.yaml new file mode 100644 index 000000000..d8ff39bf0 --- /dev/null +++ b/config/samples/example-with-custom-config.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: llamastack.io/v1alpha1 +kind: LlamaStackDistribution +metadata: + name: llamastack-with-custom-config +spec: + replicas: 1 + server: + distribution: + name: ollama + containerSpec: + port: 8321 + env: + - name: INFERENCE_MODEL + value: "llama3.2:1b" + - name: OLLAMA_URL + value: "http://ollama-server-service.ollama-dist.svc.cluster.local:11434" + userConfig: + customConfig: | + # Llama Stack run.yaml Configuration + version: '2' + image_name: ollama + apis: + - inference + providers: + inference: + - provider_id: ollama + provider_type: "remote::ollama" + config: + url: "http://ollama-server-service.ollama-dist.svc.cluster.local:11434" + models: + - model_id: "llama3.2:1b" + provider_id: ollama + model_type: llm + server: + port: 8321 diff --git a/controllers/kubebuilder_rbac.go b/controllers/kubebuilder_rbac.go index 0e39dbc47..6b9472cec 100644 --- a/controllers/kubebuilder_rbac.go +++ b/controllers/kubebuilder_rbac.go @@ -22,8 +22,8 @@ package controllers //+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create -// 
ConfigMap permissions - controller reads user configmaps and manages operator config configmaps -//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch +// ConfigMap permissions - controller creates and manages operator-owned configmaps for CA bundles and user config +//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete // NetworkPolicy permissions - controller creates and manages network policies //+kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete diff --git a/controllers/llamastackdistribution_controller.go b/controllers/llamastackdistribution_controller.go index 07b94d8cf..95ed0304d 100644 --- a/controllers/llamastackdistribution_controller.go +++ b/controllers/llamastackdistribution_controller.go @@ -20,12 +20,14 @@ import ( "context" "encoding/json" "encoding/pem" + "errors" "fmt" + "hash/fnv" "io" "net/http" "net/url" "os" - "strings" + "strconv" "time" "github.com/go-logr/logr" @@ -48,10 +50,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/kustomize/kyaml/filesys" ) @@ -60,26 +60,11 @@ const ( manifestsBasePath = "manifests/base" // CA Bundle related constants. - DefaultCABundleKey = "ca-bundle.crt" - CABundleMountPath = "/etc/ssl/certs/ca-bundle.crt" - CABundleTempPath = "/tmp/ca-bundle/ca-bundle.crt" - CABundleVolumeName = "ca-bundle" - CABundleSourceDir = "/tmp/ca-source" - CABundleInitName = "ca-bundle-init" - CABundleSourceVolName = "ca-bundle-source" - CABundleTempDir = "/tmp/ca-bundle" - - // ODH/RHOAI well-known ConfigMap for trusted CA bundles. 
- odhTrustedCABundleConfigMap = "odh-trusted-ca-bundle" + DefaultCABundleKey = "ca-bundle.crt" + CABundleMountPath = "/etc/ssl/certs/ca-bundle.crt" ) // LlamaStackDistributionReconciler reconciles a LlamaStack object. -// -// ConfigMap Watching Feature: -// This reconciler watches for changes to ConfigMaps referenced by LlamaStackDistribution CRs. -// When a ConfigMap's data changes, it automatically triggers reconciliation of the referencing -// LlamaStackDistribution, which recalculates a content-based hash and updates the deployment's -// pod template annotations. This causes Kubernetes to restart the pods with the updated configuration. type LlamaStackDistributionReconciler struct { client.Client Scheme *runtime.Scheme @@ -90,62 +75,10 @@ type LlamaStackDistributionReconciler struct { httpClient *http.Client } -// hasUserConfigMap checks if the instance has a valid UserConfig with ConfigMapName. +// hasCABundle checks if the instance has a valid TLSConfig with CABundle data. // Returns true if configured, false otherwise. -func (r *LlamaStackDistributionReconciler) hasUserConfigMap(instance *llamav1alpha1.LlamaStackDistribution) bool { - return instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.ConfigMapName != "" -} - -// getUserConfigMapNamespace returns the resolved ConfigMap namespace. -// If ConfigMapNamespace is specified, it returns that; otherwise, it returns the instance's namespace. -func (r *LlamaStackDistributionReconciler) getUserConfigMapNamespace(instance *llamav1alpha1.LlamaStackDistribution) string { - if instance.Spec.Server.UserConfig.ConfigMapNamespace != "" { - return instance.Spec.Server.UserConfig.ConfigMapNamespace - } - return instance.Namespace -} - -// hasCABundleConfigMap checks if the instance has a valid TLSConfig with CABundle ConfigMapName. -// Returns true if configured, false otherwise. 
-func (r *LlamaStackDistributionReconciler) hasCABundleConfigMap(instance *llamav1alpha1.LlamaStackDistribution) bool { - return instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != nil && instance.Spec.Server.TLSConfig.CABundle.ConfigMapName != "" -} - -// getCABundleConfigMapNamespace returns the resolved CA bundle ConfigMap namespace. -// If ConfigMapNamespace is specified, it returns that; otherwise, it returns the instance's namespace. -func (r *LlamaStackDistributionReconciler) getCABundleConfigMapNamespace(instance *llamav1alpha1.LlamaStackDistribution) string { - if instance.Spec.Server.TLSConfig.CABundle.ConfigMapNamespace != "" { - return instance.Spec.Server.TLSConfig.CABundle.ConfigMapNamespace - } - return instance.Namespace -} - -// hasValidUserConfig is a standalone helper function to check if a LlamaStackDistribution has valid UserConfig. -// This is used by functions that don't have access to the reconciler receiver. -func hasValidUserConfig(llsd *llamav1alpha1.LlamaStackDistribution) bool { - return llsd.Spec.Server.UserConfig != nil && llsd.Spec.Server.UserConfig.ConfigMapName != "" -} - -// getUserConfigMapNamespaceStandalone returns the resolved ConfigMap namespace without needing a receiver. -func getUserConfigMapNamespaceStandalone(llsd *llamav1alpha1.LlamaStackDistribution) string { - if llsd.Spec.Server.UserConfig.ConfigMapNamespace != "" { - return llsd.Spec.Server.UserConfig.ConfigMapNamespace - } - return llsd.Namespace -} - -// hasValidCABundleConfig is a standalone helper function to check if a LlamaStackDistribution has valid CA bundle config. -// This is used by functions that don't have access to the reconciler receiver. 
-func hasValidCABundleConfig(llsd *llamav1alpha1.LlamaStackDistribution) bool { - return llsd.Spec.Server.TLSConfig != nil && llsd.Spec.Server.TLSConfig.CABundle != nil && llsd.Spec.Server.TLSConfig.CABundle.ConfigMapName != "" -} - -// getCABundleConfigMapNamespaceStandalone returns the resolved CA bundle ConfigMap namespace without needing a receiver. -func getCABundleConfigMapNamespaceStandalone(llsd *llamav1alpha1.LlamaStackDistribution) string { - if llsd.Spec.Server.TLSConfig.CABundle.ConfigMapNamespace != "" { - return llsd.Spec.Server.TLSConfig.CABundle.ConfigMapNamespace - } - return llsd.Namespace +func (r *LlamaStackDistributionReconciler) hasCABundle(instance *llamav1alpha1.LlamaStackDistribution) bool { + return instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != "" } // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -292,17 +225,20 @@ func (r *LlamaStackDistributionReconciler) reconcileResources(ctx context.Contex } func (r *LlamaStackDistributionReconciler) reconcileConfigMaps(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error { - // Reconcile the ConfigMap if specified by the user - if r.hasUserConfigMap(instance) { - if err := r.reconcileUserConfigMap(ctx, instance); err != nil { - return fmt.Errorf("failed to reconcile user ConfigMap: %w", err) - } - } + configMapName := instance.Name + "-config" + + // Check if we need the combined ConfigMap + needsConfigMap := (instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.CustomConfig != "") || r.hasCABundle(instance) - // Reconcile the CA bundle ConfigMap if specified - if r.hasCABundleConfigMap(instance) { - if err := r.reconcileCABundleConfigMap(ctx, instance); err != nil { - return fmt.Errorf("failed to reconcile CA bundle ConfigMap: %w", err) + if needsConfigMap { + // Reconcile the combined ConfigMap if either user config or CA bundle is specified + if err := r.reconcileCombinedConfigMap(ctx, 
instance); err != nil { + return fmt.Errorf("failed to reconcile combined ConfigMap: %w", err) + } + } else { + // Clean up the ConfigMap if it exists but is no longer needed + if err := r.cleanupConfigMapIfExists(ctx, configMapName, instance.Namespace); err != nil { + return fmt.Errorf("failed to cleanup ConfigMap: %w", err) } } @@ -326,11 +262,6 @@ func (r *LlamaStackDistributionReconciler) reconcileStorage(ctx context.Context, // SetupWithManager sets up the controller with the Manager. func (r *LlamaStackDistributionReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { - // Create a field indexer for ConfigMap references to improve performance - if err := r.createConfigMapFieldIndexer(ctx, mgr); err != nil { - return err - } - return ctrl.NewControllerManagedBy(mgr). For(&llamav1alpha1.LlamaStackDistribution{}, builder.WithPredicates(predicate.Funcs{ UpdateFunc: r.llamaStackUpdatePredicate(mgr), @@ -339,84 +270,9 @@ func (r *LlamaStackDistributionReconciler) SetupWithManager(ctx context.Context, Owns(&corev1.Service{}). Owns(&networkingv1.NetworkPolicy{}). Owns(&corev1.PersistentVolumeClaim{}). - Watches( - &corev1.ConfigMap{}, - handler.EnqueueRequestsFromMapFunc(r.findLlamaStackDistributionsForConfigMap), - builder.WithPredicates(predicate.Funcs{ - UpdateFunc: r.configMapUpdatePredicate, - CreateFunc: r.configMapCreatePredicate, - DeleteFunc: r.configMapDeletePredicate, - }), - ). Complete(r) } -// createConfigMapFieldIndexer creates a field indexer for ConfigMap references. -// On older Kubernetes versions that don't support custom field labels for custom resources, -// this will fail gracefully and the operator will fall back to manual searching. 
-func (r *LlamaStackDistributionReconciler) createConfigMapFieldIndexer(ctx context.Context, mgr ctrl.Manager) error { - // Create index for user config ConfigMaps - if err := mgr.GetFieldIndexer().IndexField( - ctx, - &llamav1alpha1.LlamaStackDistribution{}, - "spec.server.userConfig.configMapName", - r.configMapIndexFunc, - ); err != nil { - // Log warning but don't fail startup - older Kubernetes versions may not support this - mgr.GetLogger().V(1).Info("Field indexer for ConfigMap references not supported, will use manual search fallback", - "error", err.Error()) - return nil - } - - // Create index for CA bundle ConfigMaps - if err := mgr.GetFieldIndexer().IndexField( - ctx, - &llamav1alpha1.LlamaStackDistribution{}, - "spec.server.tlsConfig.caBundle.configMapName", - r.caBundleConfigMapIndexFunc, - ); err != nil { - // Log warning but don't fail startup - older Kubernetes versions may not support this - mgr.GetLogger().Info("Field indexer for CA bundle ConfigMap references not supported, will use manual search fallback", - "error", err.Error()) - return nil - } - - mgr.GetLogger().V(1).Info("Successfully created field indexer for ConfigMap references - will use efficient lookups") - return nil -} - -// configMapIndexFunc is the indexer function for ConfigMap references. -func (r *LlamaStackDistributionReconciler) configMapIndexFunc(rawObj client.Object) []string { - llsd, ok := rawObj.(*llamav1alpha1.LlamaStackDistribution) - if !ok { - return nil - } - if !hasValidUserConfig(llsd) { - return nil - } - - // Create index key as "namespace/name" format - configMapNamespace := getUserConfigMapNamespaceStandalone(llsd) - indexKey := fmt.Sprintf("%s/%s", configMapNamespace, llsd.Spec.Server.UserConfig.ConfigMapName) - return []string{indexKey} -} - -// caBundleConfigMapIndexFunc is the indexer function for CA bundle ConfigMap references. 
-func (r *LlamaStackDistributionReconciler) caBundleConfigMapIndexFunc(rawObj client.Object) []string { - llsd, ok := rawObj.(*llamav1alpha1.LlamaStackDistribution) - if !ok { - return nil - } - if !hasValidCABundleConfig(llsd) { - return nil - } - - // Create index key as "namespace/name" format - configMapNamespace := getCABundleConfigMapNamespaceStandalone(llsd) - indexKey := fmt.Sprintf("%s/%s", configMapNamespace, llsd.Spec.Server.TLSConfig.CABundle.ConfigMapName) - return []string{indexKey} -} - // llamaStackUpdatePredicate returns a predicate function for LlamaStackDistribution updates. func (r *LlamaStackDistributionReconciler) llamaStackUpdatePredicate(mgr ctrl.Manager) func(event.UpdateEvent) bool { return func(e event.UpdateEvent) bool { @@ -449,298 +305,6 @@ func (r *LlamaStackDistributionReconciler) llamaStackUpdatePredicate(mgr ctrl.Ma } } -// configMapUpdatePredicate handles ConfigMap update events. -func (r *LlamaStackDistributionReconciler) configMapUpdatePredicate(e event.UpdateEvent) bool { - oldConfigMap, oldOk := e.ObjectOld.(*corev1.ConfigMap) - newConfigMap, newOk := e.ObjectNew.(*corev1.ConfigMap) - - if !oldOk || !newOk { - return false - } - - // Only proceed if this ConfigMap is referenced by any LlamaStackDistribution - if !r.isConfigMapReferenced(newConfigMap) { - return false - } - - // Only trigger if Data or BinaryData has changed - dataChanged := !cmp.Equal(oldConfigMap.Data, newConfigMap.Data) - binaryDataChanged := !cmp.Equal(oldConfigMap.BinaryData, newConfigMap.BinaryData) - - // Log ConfigMap changes for debugging (only for referenced ConfigMaps) - if dataChanged || binaryDataChanged { - r.logConfigMapDiff(oldConfigMap, newConfigMap, dataChanged, binaryDataChanged) - } - - return dataChanged || binaryDataChanged -} - -// logConfigMapDiff logs the differences between old and new ConfigMaps. 
-func (r *LlamaStackDistributionReconciler) logConfigMapDiff(oldConfigMap, newConfigMap *corev1.ConfigMap, dataChanged, binaryDataChanged bool) { - logger := log.FromContext(context.Background()).WithValues( - "configMapName", newConfigMap.Name, - "configMapNamespace", newConfigMap.Namespace) - - logger.Info("Referenced ConfigMap change detected") - - if dataChanged { - if dataDiff := cmp.Diff(oldConfigMap.Data, newConfigMap.Data); dataDiff != "" { - logger.Info("ConfigMap Data changed") - fmt.Printf("ConfigMap %s/%s Data diff:\n%s\n", newConfigMap.Namespace, newConfigMap.Name, dataDiff) - } - } - - if binaryDataChanged { - if binaryDataDiff := cmp.Diff(oldConfigMap.BinaryData, newConfigMap.BinaryData); binaryDataDiff != "" { - logger.Info("ConfigMap BinaryData changed") - fmt.Printf("ConfigMap %s/%s BinaryData diff:\n%s\n", newConfigMap.Namespace, newConfigMap.Name, binaryDataDiff) - } - } -} - -// configMapCreatePredicate handles ConfigMap create events. -func (r *LlamaStackDistributionReconciler) configMapCreatePredicate(e event.CreateEvent) bool { - configMap, ok := e.Object.(*corev1.ConfigMap) - if !ok { - return false - } - - isReferenced := r.isConfigMapReferenced(configMap) - // Log create events for referenced ConfigMaps - if isReferenced { - log.FromContext(context.Background()).Info("ConfigMap create event detected for referenced ConfigMap", - "configMapName", configMap.Name, - "configMapNamespace", configMap.Namespace) - } - - return isReferenced -} - -// configMapDeletePredicate handles ConfigMap delete events. 
-func (r *LlamaStackDistributionReconciler) configMapDeletePredicate(e event.DeleteEvent) bool { - configMap, ok := e.Object.(*corev1.ConfigMap) - if !ok { - return false - } - - isReferenced := r.isConfigMapReferenced(configMap) - // Log delete events for referenced ConfigMaps - this is critical for deployment health - if isReferenced { - log.FromContext(context.Background()).Error(nil, - "CRITICAL: ConfigMap delete event detected for referenced ConfigMap - this will break dependent deployments", - "configMapName", configMap.Name, - "configMapNamespace", configMap.Namespace) - } - - return isReferenced -} - -// isConfigMapReferenced checks if a ConfigMap is referenced by any LlamaStackDistribution. -func (r *LlamaStackDistributionReconciler) isConfigMapReferenced(configMap client.Object) bool { - logger := log.FromContext(context.Background()).WithValues( - "configMapName", configMap.GetName(), - "configMapNamespace", configMap.GetNamespace()) - - // Use field indexer for efficient lookup - create the same index key format - indexKey := fmt.Sprintf("%s/%s", configMap.GetNamespace(), configMap.GetName()) - - // Check for user config ConfigMap references - userConfigLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err := r.List(context.Background(), &userConfigLlamaStacks, client.MatchingFields{"spec.server.userConfig.configMapName": indexKey}) - if err != nil { - // Field indexer failed (likely due to older Kubernetes version not supporting custom field labels) - // Fall back to a manual check instead of assuming all ConfigMaps are referenced - logger.V(1).Info("Field indexer not supported, falling back to manual ConfigMap reference check", "error", err.Error()) - return r.manuallyCheckConfigMapReference(configMap) - } - - found := len(userConfigLlamaStacks.Items) > 0 - - // Check for CA bundle ConfigMap references if not found in user config - if !found { - caBundleLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err := 
r.List(context.Background(), &caBundleLlamaStacks, client.MatchingFields{"spec.server.tlsConfig.caBundle.configMapName": indexKey}) - if err != nil { - // Field indexer failed for CA bundle, fall back to manual check - logger.Info("CA bundle field indexer not supported, falling back to manual ConfigMap reference check", "error", err.Error()) - return r.manuallyCheckConfigMapReference(configMap) - } - found = len(caBundleLlamaStacks.Items) > 0 - } - - if !found { - // Fallback: manually check all LlamaStackDistributions - manuallyFound := r.manuallyCheckConfigMapReference(configMap) - if manuallyFound { - return true - } - } - - return found -} - -// manuallyCheckConfigMapReference manually checks if any LlamaStackDistribution references the given ConfigMap. -func (r *LlamaStackDistributionReconciler) manuallyCheckConfigMapReference(configMap client.Object) bool { - logger := log.FromContext(context.Background()).WithValues( - "configMapName", configMap.GetName(), - "configMapNamespace", configMap.GetNamespace()) - - allLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err := r.List(context.Background(), &allLlamaStacks) - if err != nil { - logger.Error(err, "CRITICAL: Failed to list all LlamaStackDistributions for manual ConfigMap reference check - assuming ConfigMap is referenced") - return true // Return true to trigger reconciliation when we can't determine reference status - } - - targetNamespace := configMap.GetNamespace() - targetName := configMap.GetName() - - for _, ls := range allLlamaStacks.Items { - // Check user config ConfigMap references - if hasValidUserConfig(&ls) { - configMapNamespace := getUserConfigMapNamespaceStandalone(&ls) - - if configMapNamespace == targetNamespace && ls.Spec.Server.UserConfig.ConfigMapName == targetName { - // found a LlamaStackDistribution that references the ConfigMap - return true - } - } - - // Check CA bundle ConfigMap references - if hasValidCABundleConfig(&ls) { - configMapNamespace := 
getCABundleConfigMapNamespaceStandalone(&ls) - - if configMapNamespace == targetNamespace && ls.Spec.Server.TLSConfig.CABundle.ConfigMapName == targetName { - // found a LlamaStackDistribution that references the CA bundle ConfigMap - return true - } - } - } - - // no LlamaStackDistribution found that references the ConfigMap - return false -} - -// findLlamaStackDistributionsForConfigMap maps ConfigMap changes to LlamaStackDistribution reconcile requests. -func (r *LlamaStackDistributionReconciler) findLlamaStackDistributionsForConfigMap(ctx context.Context, configMap client.Object) []reconcile.Request { - // Try field indexer lookup first - attachedLlamaStacks, found := r.tryFieldIndexerLookup(ctx, configMap) - if !found { - // Fallback to manual search if field indexer returns no results - attachedLlamaStacks = r.performManualSearch(ctx, configMap) - } - - // Convert to reconcile requests - requests := r.convertToReconcileRequests(attachedLlamaStacks) - - return requests -} - -// tryFieldIndexerLookup attempts to find LlamaStackDistributions using the field indexer. 
-func (r *LlamaStackDistributionReconciler) tryFieldIndexerLookup(ctx context.Context, configMap client.Object) (llamav1alpha1.LlamaStackDistributionList, bool) { - logger := log.FromContext(ctx).WithValues( - "configMapName", configMap.GetName(), - "configMapNamespace", configMap.GetNamespace()) - - indexKey := fmt.Sprintf("%s/%s", configMap.GetNamespace(), configMap.GetName()) - - // Check for user config ConfigMap references - userConfigLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err := r.List(ctx, &userConfigLlamaStacks, client.MatchingFields{"spec.server.userConfig.configMapName": indexKey}) - if err != nil { - logger.V(1).Info("Field indexer not supported, will fall back to a manual search for ConfigMap event processing", - "indexKey", indexKey, "error", err.Error()) - return userConfigLlamaStacks, false - } - - // Check for CA bundle ConfigMap references - caBundleLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err = r.List(ctx, &caBundleLlamaStacks, client.MatchingFields{"spec.server.tlsConfig.caBundle.configMapName": indexKey}) - if err != nil { - logger.Info("CA bundle field indexer not supported, will fall back to a manual search for ConfigMap event processing", - "indexKey", indexKey, "error", err.Error()) - return userConfigLlamaStacks, len(userConfigLlamaStacks.Items) > 0 - } - - // Combine results from both searches - combinedLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - combinedLlamaStacks.Items = append(combinedLlamaStacks.Items, userConfigLlamaStacks.Items...) - combinedLlamaStacks.Items = append(combinedLlamaStacks.Items, caBundleLlamaStacks.Items...) - - return combinedLlamaStacks, len(combinedLlamaStacks.Items) > 0 -} - -// performManualSearch performs a manual search and filtering when field indexer returns no results. 
-func (r *LlamaStackDistributionReconciler) performManualSearch(ctx context.Context, configMap client.Object) llamav1alpha1.LlamaStackDistributionList { - logger := log.FromContext(ctx).WithValues( - "configMapName", configMap.GetName(), - "configMapNamespace", configMap.GetNamespace()) - - allLlamaStacks := llamav1alpha1.LlamaStackDistributionList{} - err := r.List(ctx, &allLlamaStacks) - if err != nil { - logger.Error(err, "CRITICAL: Failed to list all LlamaStackDistributions for manual ConfigMap reference search") - return allLlamaStacks - } - - // Filter for ConfigMap references - filteredItems := r.filterLlamaStacksForConfigMap(allLlamaStacks.Items, configMap) - allLlamaStacks.Items = filteredItems - - return allLlamaStacks -} - -// filterLlamaStacksForConfigMap filters LlamaStackDistributions that reference the given ConfigMap. -func (r *LlamaStackDistributionReconciler) filterLlamaStacksForConfigMap(llamaStacks []llamav1alpha1.LlamaStackDistribution, - configMap client.Object) []llamav1alpha1.LlamaStackDistribution { - var filteredItems []llamav1alpha1.LlamaStackDistribution - targetNamespace := configMap.GetNamespace() - targetName := configMap.GetName() - - for _, ls := range llamaStacks { - if r.doesLlamaStackReferenceConfigMap(ls, targetNamespace, targetName) { - filteredItems = append(filteredItems, ls) - } - } - - return filteredItems -} - -// doesLlamaStackReferenceConfigMap checks if a LlamaStackDistribution references the specified ConfigMap. 
-func (r *LlamaStackDistributionReconciler) doesLlamaStackReferenceConfigMap(ls llamav1alpha1.LlamaStackDistribution, targetNamespace, targetName string) bool { - // Check user config ConfigMap references - if hasValidUserConfig(&ls) { - configMapNamespace := getUserConfigMapNamespaceStandalone(&ls) - if configMapNamespace == targetNamespace && ls.Spec.Server.UserConfig.ConfigMapName == targetName { - return true - } - } - - // Check CA bundle ConfigMap references - if hasValidCABundleConfig(&ls) { - configMapNamespace := getCABundleConfigMapNamespaceStandalone(&ls) - if configMapNamespace == targetNamespace && ls.Spec.Server.TLSConfig.CABundle.ConfigMapName == targetName { - return true - } - } - - return false -} - -// convertToReconcileRequests converts LlamaStackDistribution items to reconcile requests. -func (r *LlamaStackDistributionReconciler) convertToReconcileRequests(attachedLlamaStacks llamav1alpha1.LlamaStackDistributionList) []reconcile.Request { - requests := make([]reconcile.Request, 0, len(attachedLlamaStacks.Items)) - for _, llamaStack := range attachedLlamaStacks.Items { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: llamaStack.Name, - Namespace: llamaStack.Namespace, - }, - }) - } - return requests -} - // reconcileDeployment manages the Deployment for the LlamaStack server. 
func (r *LlamaStackDistributionReconciler) reconcileDeployment(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error { logger := log.FromContext(ctx) @@ -757,41 +321,16 @@ func (r *LlamaStackDistributionReconciler) reconcileDeployment(ctx context.Conte } // Build container spec - container := buildContainerSpec(ctx, r, instance, resolvedImage) + container := buildContainerSpec(instance, resolvedImage) // Configure storage - podSpec := configurePodStorage(ctx, r, instance, container) + podSpec := configurePodStorage(instance, container) // Set the service acc // Prepare annotations for the pod template - podAnnotations := make(map[string]string) - - // Add ConfigMap hash to trigger restarts when the ConfigMap changes - if r.hasUserConfigMap(instance) { - configMapHash, err := r.getConfigMapHash(ctx, instance) - if err != nil { - return fmt.Errorf("failed to get ConfigMap hash for pod restart annotation: %w", err) - } - if configMapHash != "" { - podAnnotations["configmap.hash/user-config"] = configMapHash - logger.V(1).Info("Added ConfigMap hash annotation to trigger pod restart", - "configMapName", instance.Spec.Server.UserConfig.ConfigMapName, - "hash", configMapHash) - } - } - - // Add CA bundle ConfigMap hash to trigger restarts when the CA bundle changes - if r.hasCABundleConfigMap(instance) { - caBundleHash, err := r.getCABundleConfigMapHash(ctx, instance) - if err != nil { - return fmt.Errorf("failed to get CA bundle ConfigMap hash for pod restart annotation: %w", err) - } - if caBundleHash != "" { - podAnnotations["configmap.hash/ca-bundle"] = caBundleHash - logger.V(1).Info("Added CA bundle ConfigMap hash annotation to trigger pod restart", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "hash", caBundleHash) - } + podAnnotations, err := r.BuildPodAnnotations(ctx, instance) + if err != nil { + return err } // Create deployment object @@ -824,6 +363,107 @@ func (r *LlamaStackDistributionReconciler) 
reconcileDeployment(ctx context.Conte return deploy.ApplyDeployment(ctx, r.Client, r.Scheme, instance, deployment, logger) } +// BuildPodAnnotations creates annotations for the pod template to trigger restarts when configurations change. +func (r *LlamaStackDistributionReconciler) BuildPodAnnotations(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) (map[string]string, error) { + logger := log.FromContext(ctx) + podAnnotations := make(map[string]string) + + // Only calculate checksum if there are configuration fields that could trigger pod restarts + if r.hasConfigurationData(instance) { + // Calculate a checksum of the configuration content to trigger pod restarts when configs change + configChecksum, err := r.CalculateConfigurationChecksum(ctx, instance) + if err != nil { + logger.Error(err, "Failed to calculate configuration checksum") + return nil, fmt.Errorf("failed to calculate configuration checksum: %w", err) + } + + // Add the configuration checksum as an annotation + // When this checksum changes, Kubernetes will restart the pods + if configChecksum != "" { + podAnnotations["llamastack.io/config-checksum"] = configChecksum + logger.V(1).Info("Added configuration checksum annotation", "checksum", configChecksum) + } + } else { + logger.V(1).Info("No configuration data present, skipping checksum calculation") + } + + return podAnnotations, nil +} + +// hasConfigurationData checks if the instance has any configuration data that should trigger +// pod restarts when changed. This includes userConfig and tlsConfig. 
+func (r *LlamaStackDistributionReconciler) hasConfigurationData(instance *llamav1alpha1.LlamaStackDistribution) bool { + // Check for explicit user configuration + if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.CustomConfig != "" { + return true + } + + // Check for explicit TLS configuration + if instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != "" { + return true + } + + return false +} + +// CalculateConfigurationChecksum computes a simple non-cryptographic checksum of the configuration content +// that should trigger pod restarts when changed. This includes userConfig and tlsConfig. +// Uses FNV-1a hash which is fast, deterministic, and not cryptographic (FIPS-compliant). +func (r *LlamaStackDistributionReconciler) CalculateConfigurationChecksum(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) (string, error) { + logger := log.FromContext(ctx) + + // Create a structure to hold all configuration data that should trigger restarts + configData := r.buildConfigurationData(instance) + + // Marshal the configuration data to JSON for consistent checksumming + configJSON, err := json.Marshal(configData) + if err != nil { + return "", fmt.Errorf("failed to marshal configuration data: %w", err) + } + + // Calculate FNV-1a hash (non-cryptographic, fast, deterministic) + checksumString := r.calculateFNVHash(configJSON) + + r.logConfigurationChecksum(logger, checksumString, configData) + + return checksumString, nil +} + +// buildConfigurationData creates the configuration data structure for checksumming. 
+func (r *LlamaStackDistributionReconciler) buildConfigurationData(instance *llamav1alpha1.LlamaStackDistribution) struct { + UserConfig *llamav1alpha1.UserConfigSpec `json:"userConfig,omitempty"` + TLSConfig *llamav1alpha1.TLSConfig `json:"tlsConfig,omitempty"` +} { + configData := struct { + UserConfig *llamav1alpha1.UserConfigSpec `json:"userConfig,omitempty"` + TLSConfig *llamav1alpha1.TLSConfig `json:"tlsConfig,omitempty"` + }{ + UserConfig: instance.Spec.Server.UserConfig, + TLSConfig: instance.Spec.Server.TLSConfig, + } + + return configData +} + +// calculateFNVHash computes the FNV-1a hash of the given data. +func (r *LlamaStackDistributionReconciler) calculateFNVHash(data []byte) string { + hasher := fnv.New64a() + hasher.Write(data) + checksum := hasher.Sum64() + return strconv.FormatUint(checksum, 16) +} + +// logConfigurationChecksum logs the calculated checksum and configuration details. +func (r *LlamaStackDistributionReconciler) logConfigurationChecksum(logger logr.Logger, checksumString string, configData struct { + UserConfig *llamav1alpha1.UserConfigSpec `json:"userConfig,omitempty"` + TLSConfig *llamav1alpha1.TLSConfig `json:"tlsConfig,omitempty"` +}) { + logger.V(1).Info("Calculated configuration checksum", + "checksum", checksumString, + "hasUserConfig", configData.UserConfig != nil && configData.UserConfig.CustomConfig != "", + "hasTLSConfig", configData.TLSConfig != nil && configData.TLSConfig.CABundle != "") +} + // getServerURL returns the URL for the LlamaStack server. func (r *LlamaStackDistributionReconciler) getServerURL(instance *llamav1alpha1.LlamaStackDistribution, path string) *url.URL { serviceName := deploy.GetServiceName(instance) @@ -1124,45 +764,6 @@ func (r *LlamaStackDistributionReconciler) reconcileNetworkPolicy(ctx context.Co return deploy.ApplyNetworkPolicy(ctx, r.Client, r.Scheme, instance, networkPolicy, logger) } -// reconcileUserConfigMap validates that the referenced ConfigMap exists. 
-func (r *LlamaStackDistributionReconciler) reconcileUserConfigMap(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error { - logger := log.FromContext(ctx) - - if !r.hasUserConfigMap(instance) { - logger.V(1).Info("No user ConfigMap specified, skipping") - return nil - } - - // Determine the ConfigMap namespace - default to the same namespace as the LlamaStackDistribution. - configMapNamespace := r.getUserConfigMapNamespace(instance) - - logger.V(1).Info("Validating referenced ConfigMap exists", - "configMapName", instance.Spec.Server.UserConfig.ConfigMapName, - "configMapNamespace", configMapNamespace) - - // Check if the ConfigMap exists - configMap := &corev1.ConfigMap{} - err := r.Get(ctx, types.NamespacedName{ - Name: instance.Spec.Server.UserConfig.ConfigMapName, - Namespace: configMapNamespace, - }, configMap) - if err != nil { - if k8serrors.IsNotFound(err) { - logger.Error(err, "Referenced ConfigMap not found", - "configMapName", instance.Spec.Server.UserConfig.ConfigMapName, - "configMapNamespace", configMapNamespace) - return fmt.Errorf("failed to find referenced ConfigMap %s/%s", configMapNamespace, instance.Spec.Server.UserConfig.ConfigMapName) - } - return fmt.Errorf("failed to fetch ConfigMap %s/%s: %w", configMapNamespace, instance.Spec.Server.UserConfig.ConfigMapName, err) - } - - logger.V(1).Info("User ConfigMap found and validated", - "configMap", configMap.Name, - "namespace", configMap.Namespace, - "dataKeys", len(configMap.Data)) - return nil -} - // isValidPEM validates that the given data contains valid PEM formatted content. func isValidPEM(data []byte) bool { // Basic PEM validation using pem.Decode. @@ -1170,188 +771,180 @@ func isValidPEM(data []byte) bool { return block != nil } -// reconcileCABundleConfigMap validates that the referenced CA bundle ConfigMap exists. 
-func (r *LlamaStackDistributionReconciler) reconcileCABundleConfigMap(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error { +// reconcileCombinedConfigMap creates or updates an operator-managed ConfigMap containing both user configuration and CA bundle data. +func (r *LlamaStackDistributionReconciler) reconcileCombinedConfigMap(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) error { logger := log.FromContext(ctx) + configMapName := instance.Name + "-config" - if !r.hasCABundleConfigMap(instance) { - logger.V(1).Info("No CA bundle ConfigMap specified, skipping") - return nil + // Prepare the ConfigMap data + configMapData, err := r.buildCombinedConfigMapData(instance, logger) + if err != nil { + return err } - // Determine the ConfigMap namespace - default to the same namespace as the LlamaStackDistribution. - configMapNamespace := r.getCABundleConfigMapNamespace(instance) + // Skip if no data to store + if len(configMapData) == 0 { + return nil + } - logger.V(1).Info("Validating referenced CA bundle ConfigMap exists", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "configMapNamespace", configMapNamespace) + // Create the ConfigMap with the combined data + configMap := r.createConfigMapObject(configMapName, instance, configMapData) - // Check if the ConfigMap exists - configMap := &corev1.ConfigMap{} - err := r.Get(ctx, types.NamespacedName{ - Name: instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - Namespace: configMapNamespace, - }, configMap) - if err != nil { - if k8serrors.IsNotFound(err) { - logger.Error(err, "Referenced CA bundle ConfigMap not found", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "configMapNamespace", configMapNamespace) - return fmt.Errorf("failed to find referenced CA bundle ConfigMap %s/%s", configMapNamespace, instance.Spec.Server.TLSConfig.CABundle.ConfigMapName) - } - return fmt.Errorf("failed to fetch CA bundle ConfigMap %s/%s: 
%w", configMapNamespace, instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, err) + // Set controller reference so the ConfigMap is owned by this LlamaStackDistribution + if err := ctrl.SetControllerReference(instance, configMap, r.Scheme); err != nil { + return fmt.Errorf("failed to set controller reference for combined ConfigMap: %w", err) } - // Validate that the specified keys exist in the ConfigMap - var keysToValidate []string - if len(instance.Spec.Server.TLSConfig.CABundle.ConfigMapKeys) > 0 { - keysToValidate = instance.Spec.Server.TLSConfig.CABundle.ConfigMapKeys - } else { - // Default to DefaultCABundleKey when no keys are specified - keysToValidate = []string{DefaultCABundleKey} - } + // Handle ConfigMap creation or update + return r.createOrUpdateConfigMap(ctx, configMapName, instance, configMap, configMapData, logger) +} - for _, key := range keysToValidate { - if _, exists := configMap.Data[key]; !exists { - logger.Error(err, "CA bundle key not found in ConfigMap", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "configMapNamespace", configMapNamespace, - "key", key) - return fmt.Errorf("failed to find CA bundle key '%s' in ConfigMap %s/%s", key, configMapNamespace, instance.Spec.Server.TLSConfig.CABundle.ConfigMapName) - } +// buildCombinedConfigMapData prepares the data for the operator-managed ConfigMap. 
+func (r *LlamaStackDistributionReconciler) buildCombinedConfigMapData(instance *llamav1alpha1.LlamaStackDistribution, logger logr.Logger) (map[string]string, error) { + configMapData := make(map[string]string) - // Validate that the key contains valid PEM data - pemData, exists := configMap.Data[key] - if !exists { - // This should not happen since we checked above, but just to be safe - return fmt.Errorf("failed to find CA bundle key '%s' in ConfigMap %s/%s", key, configMapNamespace, instance.Spec.Server.TLSConfig.CABundle.ConfigMapName) - } + // Add user config if specified + if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.CustomConfig != "" { + configMapData["run.yaml"] = instance.Spec.Server.UserConfig.CustomConfig + } + // Add CA bundle if specified + if r.hasCABundle(instance) { + pemData := instance.Spec.Server.TLSConfig.CABundle if !isValidPEM([]byte(pemData)) { - logger.Error(nil, "CA bundle key contains invalid PEM data", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "configMapNamespace", configMapNamespace, - "key", key) - return fmt.Errorf("failed to validate CA bundle key '%s' in ConfigMap %s/%s: contains invalid PEM data", - key, - configMapNamespace, - instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - ) + logger.Error(nil, "CA bundle contains invalid PEM data") + return nil, errors.New("CA bundle contains invalid PEM data") } - - logger.V(1).Info("CA bundle key contains valid PEM data", - "configMapName", instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - "configMapNamespace", configMapNamespace, - "key", key) + configMapData[DefaultCABundleKey] = pemData } - logger.V(1).Info("CA bundle ConfigMap found and validated", - "configMap", configMap.Name, - "namespace", configMap.Namespace, - "keys", keysToValidate, - "dataKeys", len(configMap.Data)) - return nil + return configMapData, nil } -// getConfigMapHash calculates a hash of the ConfigMap data to detect changes. 
-func (r *LlamaStackDistributionReconciler) getConfigMapHash(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) (string, error) { - if !r.hasUserConfigMap(instance) { - return "", nil - } - - configMapNamespace := r.getUserConfigMapNamespace(instance) +// cleanupConfigMapIfExists deletes the ConfigMap if it exists but is no longer needed. +func (r *LlamaStackDistributionReconciler) cleanupConfigMapIfExists(ctx context.Context, configMapName, namespace string) error { + logger := log.FromContext(ctx) + // Check if the ConfigMap exists configMap := &corev1.ConfigMap{} - err := r.Get(ctx, types.NamespacedName{ - Name: instance.Spec.Server.UserConfig.ConfigMapName, - Namespace: configMapNamespace, - }, configMap) + err := r.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: namespace}, configMap) if err != nil { - return "", err + if client.IgnoreNotFound(err) == nil { + // ConfigMap doesn't exist, nothing to clean up + return nil + } + return fmt.Errorf("failed to get ConfigMap %s: %w", configMapName, err) } - // Create a content-based hash that will change when the ConfigMap data changes - return fmt.Sprintf("%s-%s", configMap.ResourceVersion, configMap.Name), nil + // Delete the ConfigMap + logger.Info("Cleaning up ConfigMap as it's no longer needed", "configMapName", configMapName) + if err := r.Delete(ctx, configMap); err != nil { + return fmt.Errorf("failed to delete ConfigMap %s: %w", configMapName, err) + } + + return nil } -// getCABundleConfigMapHash calculates a hash of the CA bundle ConfigMap data to detect changes. -func (r *LlamaStackDistributionReconciler) getCABundleConfigMapHash(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) (string, error) { - if !r.hasCABundleConfigMap(instance) { - return "", nil +// createConfigMapObject creates a new ConfigMap object with the given data. 
+func (r *LlamaStackDistributionReconciler) createConfigMapObject( + configMapName string, + instance *llamav1alpha1.LlamaStackDistribution, + configMapData map[string]string, +) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: instance.Namespace, + }, + Data: configMapData, } +} - configMapNamespace := r.getCABundleConfigMapNamespace(instance) - - configMap := &corev1.ConfigMap{} - err := r.Get(ctx, types.NamespacedName{ - Name: instance.Spec.Server.TLSConfig.CABundle.ConfigMapName, - Namespace: configMapNamespace, - }, configMap) +// createOrUpdateConfigMap handles the creation or update of the ConfigMap. +func (r *LlamaStackDistributionReconciler) createOrUpdateConfigMap( + ctx context.Context, + configMapName string, + instance *llamav1alpha1.LlamaStackDistribution, + configMap *corev1.ConfigMap, + configMapData map[string]string, + logger logr.Logger, +) error { + // Check if ConfigMap already exists + existing := &corev1.ConfigMap{} + err := r.Get(ctx, client.ObjectKey{Name: configMapName, Namespace: instance.Namespace}, existing) if err != nil { - return "", err + return r.handleConfigMapCreation(ctx, err, configMap, configMapName, logger) } - // Create a content-based hash that will change when the ConfigMap data changes - // Include information about which keys are being used - var keyInfo string - if len(instance.Spec.Server.TLSConfig.CABundle.ConfigMapKeys) > 0 { - keyInfo = fmt.Sprintf("-%s", strings.Join(instance.Spec.Server.TLSConfig.CABundle.ConfigMapKeys, ",")) - } else { - // Default to DefaultCABundleKey when no keys are specified - keyInfo = fmt.Sprintf("-%s", DefaultCABundleKey) + // ConfigMap exists, update it if the data has changed + return r.handleConfigMapUpdate(ctx, existing, configMapData, configMapName, logger) +} + +// handleConfigMapCreation handles the creation of a new ConfigMap. 
+func (r *LlamaStackDistributionReconciler) handleConfigMapCreation(ctx context.Context, err error, configMap *corev1.ConfigMap, configMapName string, logger logr.Logger) error { + if !k8serrors.IsNotFound(err) { + return fmt.Errorf("failed to get existing combined ConfigMap: %w", err) } - return fmt.Sprintf("%s-%s%s", configMap.ResourceVersion, configMap.Name, keyInfo), nil + // ConfigMap doesn't exist, create it + logger.Info("Creating combined ConfigMap", "configMapName", configMapName) + if err := r.Create(ctx, configMap); err != nil { + return fmt.Errorf("failed to create combined ConfigMap: %w", err) + } + return nil } -// detectODHTrustedCABundle checks if the well-known ODH trusted CA bundle ConfigMap -// exists in the same namespace as the LlamaStackDistribution and returns its available keys. -// Returns the ConfigMap and a list of data keys if found, or nil and empty slice if not found. -func (r *LlamaStackDistributionReconciler) detectODHTrustedCABundle(ctx context.Context, instance *llamav1alpha1.LlamaStackDistribution) (*corev1.ConfigMap, []string, error) { - logger := log.FromContext(ctx) +// handleConfigMapUpdate handles the update of an existing ConfigMap. 
+func (r *LlamaStackDistributionReconciler) handleConfigMapUpdate( + ctx context.Context, + existing *corev1.ConfigMap, + configMapData map[string]string, + configMapName string, + logger logr.Logger, +) error { + dataChanged := r.hasConfigMapDataChanged(existing, configMapData) - configMap := &corev1.ConfigMap{} - err := r.Get(ctx, types.NamespacedName{ - Name: odhTrustedCABundleConfigMap, - Namespace: instance.Namespace, - }, configMap) - - if err != nil { - if k8serrors.IsNotFound(err) { - logger.V(1).Info("ODH trusted CA bundle ConfigMap not found, skipping auto-detection", - "configMapName", odhTrustedCABundleConfigMap, - "namespace", instance.Namespace) - return nil, nil, nil + if dataChanged { + logger.Info("Updating combined ConfigMap", "configMapName", configMapName) + existing.Data = configMapData + if err := r.Update(ctx, existing); err != nil { + return fmt.Errorf("failed to update combined ConfigMap: %w", err) } - return nil, nil, fmt.Errorf("failed to check for ODH trusted CA bundle ConfigMap %s/%s: %w", - instance.Namespace, odhTrustedCABundleConfigMap, err) } - // Extract available data keys and validate they contain valid PEM data - keys := make([]string, 0, len(configMap.Data)) + r.logConfigMapReconciliation(logger, configMapName, configMapData) + return nil +} - for key, value := range configMap.Data { - // Only include keys that contain valid PEM data - if isValidPEM([]byte(value)) { - keys = append(keys, key) - logger.V(1).Info("Auto-detected CA bundle key contains valid PEM data", - "configMapName", odhTrustedCABundleConfigMap, - "namespace", instance.Namespace, - "key", key) - } else { - logger.V(1).Info("Auto-detected CA bundle key contains invalid PEM data, skipping", - "configMapName", odhTrustedCABundleConfigMap, - "namespace", instance.Namespace, - "key", key) +// hasConfigMapDataChanged checks if the ConfigMap data has changed. 
+func (r *LlamaStackDistributionReconciler) hasConfigMapDataChanged(existing *corev1.ConfigMap, configMapData map[string]string) bool { + // Check if any values have changed + for key, value := range configMapData { + if existing.Data[key] != value { + return true } } - logger.V(1).Info("ODH trusted CA bundle ConfigMap detected", - "configMapName", odhTrustedCABundleConfigMap, - "namespace", instance.Namespace, - "availableKeys", keys) + // Check if any keys were removed + for key := range existing.Data { + if _, exists := configMapData[key]; !exists { + return true + } + } + + return false +} + +// logConfigMapReconciliation logs the successful reconciliation of the ConfigMap. +func (r *LlamaStackDistributionReconciler) logConfigMapReconciliation(logger logr.Logger, configMapName string, configMapData map[string]string) { + keys := make([]string, 0, len(configMapData)) + for key := range configMapData { + keys = append(keys, key) + } - return configMap, keys, nil + logger.V(1).Info("Combined ConfigMap reconciled successfully", + "configMapName", configMapName, + "keys", keys) } // createDefaultConfigMap creates a ConfigMap with default feature flag values. 
diff --git a/controllers/llamastackdistribution_controller_test.go b/controllers/llamastackdistribution_controller_test.go index 8cfb61077..08133f8dd 100644 --- a/controllers/llamastackdistribution_controller_test.go +++ b/controllers/llamastackdistribution_controller_test.go @@ -7,10 +7,9 @@ import ( "net/http" "strings" "testing" - "time" llamav1alpha1 "github.com/llamastack/llama-stack-k8s-operator/api/v1alpha1" - controllers "github.com/llamastack/llama-stack-k8s-operator/controllers" + "github.com/llamastack/llama-stack-k8s-operator/controllers" "github.com/llamastack/llama-stack-k8s-operator/pkg/cluster" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" @@ -18,7 +17,6 @@ import ( networkingv1 "k8s.io/api/networking/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/scheme" @@ -147,20 +145,13 @@ func TestStorageConfiguration(t *testing.T) { } } -func TestConfigMapWatchingFunctionality(t *testing.T) { +func TestCustomConfigFunctionality(t *testing.T) { ctrl.SetLogger(zap.New(zap.UseDevMode(true))) // Create a test namespace - namespace := createTestNamespace(t, "test-configmap-watch") + namespace := createTestNamespace(t, "test-custom-config") - // Create a ConfigMap - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-config", - Namespace: namespace.Name, - }, - Data: map[string]string{ - "run.yaml": `version: '2' + customConfig := `version: '2' image_name: ollama apis: - inference @@ -175,84 +166,76 @@ models: provider_id: ollama model_type: llm server: - port: 8321`, - }, - } - require.NoError(t, k8sClient.Create(t.Context(), configMap)) + port: 8321` - // Create a LlamaStackDistribution that references the ConfigMap + // Create a LlamaStackDistribution with inline CustomConfig instance := 
NewDistributionBuilder(). - WithName("test-configmap-reference"). + WithName("test-custom-config"). WithNamespace(namespace.Name). - WithUserConfig(configMap.Name). + WithUserConfig(customConfig). Build() require.NoError(t, k8sClient.Create(t.Context(), instance)) - // Reconcile to create initial deployment + // Reconcile to create initial deployment and ConfigMap ReconcileDistribution(t, instance, false) - // Get the initial deployment and check for ConfigMap hash annotation + // Verify that the combined ConfigMap was created with the CustomConfig data + configMap := &corev1.ConfigMap{} + configMapKey := types.NamespacedName{Name: instance.Name + "-config", Namespace: instance.Namespace} + waitForResourceWithKey(t, k8sClient, configMapKey, configMap) + + require.Equal(t, customConfig, configMap.Data["run.yaml"], "Operator-managed ConfigMap should contain the CustomConfig data") + + // Verify that the deployment mounts the ConfigMap deployment := &appsv1.Deployment{} deploymentKey := types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace} waitForResourceWithKey(t, k8sClient, deploymentKey, deployment) - // Verify the ConfigMap hash annotation exists - initialAnnotations := deployment.Spec.Template.Annotations - require.Contains(t, initialAnnotations, "configmap.hash/user-config", "ConfigMap hash annotation should be present") - initialHash := initialAnnotations["configmap.hash/user-config"] - require.NotEmpty(t, initialHash, "ConfigMap hash should not be empty") + // Check that the deployment has the combined-config volume + combinedConfigVolumeFound := false + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Name == controllers.CombinedConfigVolumeName { + combinedConfigVolumeFound = true + require.NotNil(t, volume.ConfigMap, "combined-config volume should be a ConfigMap volume") + require.Equal(t, configMap.Name, volume.ConfigMap.Name, "combined-config volume should reference the created ConfigMap") + break + } + } + 
require.True(t, combinedConfigVolumeFound, "deployment should have combined-config volume") + + // Check that the container has the combined-config volume mount for run.yaml + require.NotEmpty(t, deployment.Spec.Template.Spec.Containers, "deployment should have containers") + container := deployment.Spec.Template.Spec.Containers[0] + + userConfigMountFound := false + for _, mount := range container.VolumeMounts { + if mount.Name == controllers.CombinedConfigVolumeName && mount.SubPath == "run.yaml" { + userConfigMountFound = true + require.Equal(t, "/etc/llama-stack/run.yaml", mount.MountPath, "run.yaml should be mounted at /etc/llama-stack/run.yaml") + require.True(t, mount.ReadOnly, "run.yaml mount should be read-only") + break + } + } + require.True(t, userConfigMountFound, "container should have combined-config volume mount for run.yaml") - // Update the ConfigMap data - require.NoError(t, k8sClient.Get(t.Context(), - types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}, configMap)) + // Verify that the container command is configured to use the custom config with the startup script + require.Equal(t, []string{"/bin/sh", "-c", controllers.StartupScript}, container.Command, "container command should be set for custom config") + require.Empty(t, container.Args, "container args should be empty when using startup script") - configMap.Data["run.yaml"] = `version: '2' -image_name: ollama -apis: -- inference -providers: - inference: - - provider_id: ollama - provider_type: "remote::ollama" - config: - url: "http://ollama-server:11434" -models: - - model_id: "llama3.2:3b" - provider_id: ollama - model_type: llm -server: - port: 8321` - require.NoError(t, k8sClient.Update(t.Context(), configMap)) + // Test updating the CustomConfig + updatedConfig := customConfig + "\n# Updated configuration" - // Wait a moment for the watch to trigger - time.Sleep(2 * time.Second) + // Refresh the instance to avoid conflicts + require.NoError(t, 
k8sClient.Get(t.Context(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, instance)) + instance.Spec.Server.UserConfig.CustomConfig = updatedConfig + require.NoError(t, k8sClient.Update(t.Context(), instance)) - // Trigger reconciliation (in real scenarios this would be triggered by the watch) + // Reconcile again to update the ConfigMap ReconcileDistribution(t, instance, false) - // Verify the deployment was updated with a new hash - waitForResourceWithKeyAndCondition( - t, k8sClient, deploymentKey, deployment, func() bool { - newHash := deployment.Spec.Template.Annotations["configmap.hash/user-config"] - return newHash != initialHash && newHash != "" - }, "ConfigMap hash should be updated after ConfigMap data change") - - t.Logf("ConfigMap hash changed from %s to %s", initialHash, deployment.Spec.Template.Annotations["configmap.hash/user-config"]) - - // Test that unrelated ConfigMaps don't trigger reconciliation - unrelatedConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "unrelated-config", - Namespace: namespace.Name, - }, - Data: map[string]string{ - "some-key": "some-value", - }, - } - require.NoError(t, k8sClient.Create(t.Context(), unrelatedConfigMap)) - - // Note: In test environment, field indexer might not be set up properly, - // so we skip the isConfigMapReferenced checks which rely on field indexing + // Verify the ConfigMap was updated + require.NoError(t, k8sClient.Get(t.Context(), configMapKey, configMap)) + require.Equal(t, updatedConfig, configMap.Data["run.yaml"], "ConfigMap should be updated with new CustomConfig data") } func TestReconcile(t *testing.T) { diff --git a/controllers/resource_helper.go b/controllers/resource_helper.go index b1e8bfddb..02aa8511d 100644 --- a/controllers/resource_helper.go +++ b/controllers/resource_helper.go @@ -17,7 +17,6 @@ limitations under the License. 
package controllers import ( - "context" "errors" "fmt" "regexp" @@ -27,18 +26,17 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/log" ) -// Constants for validation limits. const ( + // Constants for validation limits. // maxConfigMapKeyLength defines the maximum allowed length for ConfigMap keys // based on Kubernetes DNS subdomain name limits. maxConfigMapKeyLength = 253 -) - -// Probes configuration. -const ( + // Constants for volume and ConfigMap names. + // CombinedConfigVolumeName is the name used for the combined configuration volume. + CombinedConfigVolumeName = "combined-config" + // Probes configuration. startupProbeInitialDelaySeconds = 15 // Time to wait before the first probe startupProbeTimeoutSeconds = 30 // When the probe times out startupProbeFailureThreshold = 3 // Pod is marked Unhealthy after 3 consecutive failures @@ -49,8 +47,8 @@ const ( // Kubernetes ConfigMap keys must be valid DNS subdomain names or data keys. var validConfigMapKeyRegex = regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-_.]*[a-zA-Z0-9])?$`) -// startupScript is the script that will be used to start the server. -var startupScript = ` +// StartupScript is the script that will be used to start the server. +var StartupScript = ` set -e if python -c " @@ -122,7 +120,7 @@ func getStartupProbe(instance *llamav1alpha1.LlamaStackDistribution) *corev1.Pro } // buildContainerSpec creates the container specification. 
-func buildContainerSpec(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, image string) corev1.Container { +func buildContainerSpec(instance *llamav1alpha1.LlamaStackDistribution, image string) corev1.Container { container := corev1.Container{ Name: getContainerName(instance), Image: image, @@ -133,8 +131,8 @@ func buildContainerSpec(ctx context.Context, r *LlamaStackDistributionReconciler } // Configure environment variables and mounts - configureContainerEnvironment(ctx, r, instance, &container) - configureContainerMounts(ctx, r, instance, &container) + configureContainerEnvironment(instance, &container) + configureContainerMounts(instance, &container) configureContainerCommands(instance, &container) return container @@ -157,7 +155,7 @@ func getContainerPort(instance *llamav1alpha1.LlamaStackDistribution) int32 { } // configureContainerEnvironment sets up environment variables for the container. -func configureContainerEnvironment(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { +func configureContainerEnvironment(instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { mountPath := getMountPath(instance) // Add HF_HOME variable to our mount path so that downloaded models and datasets are stored @@ -170,21 +168,12 @@ func configureContainerEnvironment(ctx context.Context, r *LlamaStackDistributio }) // Add CA bundle environment variable if TLS config is specified - if instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != nil { + if instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != "" { // Set SSL_CERT_FILE to point to the specific CA bundle file container.Env = append(container.Env, corev1.EnvVar{ Name: "SSL_CERT_FILE", Value: CABundleMountPath, }) - } else if r != nil { - // Check for auto-detected ODH trusted CA bundle - if _, keys, 
err := r.detectODHTrustedCABundle(ctx, instance); err == nil && len(keys) > 0 { - // Set SSL_CERT_FILE to point to the auto-detected consolidated CA bundle - container.Env = append(container.Env, corev1.EnvVar{ - Name: "SSL_CERT_FILE", - Value: CABundleMountPath, - }) - } } // Finally, add the user provided env vars @@ -192,26 +181,26 @@ func configureContainerEnvironment(ctx context.Context, r *LlamaStackDistributio } // configureContainerMounts sets up volume mounts for the container. -func configureContainerMounts(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { +func configureContainerMounts(instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { // Add volume mount for storage addStorageVolumeMount(instance, container) // Add ConfigMap volume mount if user config is specified addUserConfigVolumeMount(instance, container) - // Add CA bundle volume mount if TLS config is specified or auto-detected - addCABundleVolumeMount(ctx, r, instance, container) + // Add CA bundle volume mount if TLS config is specified + addCABundleVolumeMount(instance, container) } // configureContainerCommands sets up container commands and args. func configureContainerCommands(instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { // Override the container entrypoint to use the custom config file if user config is specified - if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.ConfigMapName != "" { + if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.CustomConfig != "" { // Override the container entrypoint to use the custom config file instead of the default // template. The script will determine the llama-stack version and use the appropriate module // path to start the server. 
- container.Command = []string{"/bin/sh", "-c", startupScript} + container.Command = []string{"/bin/sh", "-c", StartupScript} container.Args = []string{} } @@ -244,138 +233,47 @@ func addStorageVolumeMount(instance *llamav1alpha1.LlamaStackDistribution, conta // addUserConfigVolumeMount adds the user config volume mount to the container if specified. func addUserConfigVolumeMount(instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { - if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.ConfigMapName != "" { + if instance.Spec.Server.UserConfig != nil && instance.Spec.Server.UserConfig.CustomConfig != "" { container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "user-config", - MountPath: "/etc/llama-stack/", + Name: CombinedConfigVolumeName, + MountPath: "/etc/llama-stack/run.yaml", + SubPath: "run.yaml", ReadOnly: true, }) } } // addCABundleVolumeMount adds the CA bundle volume mount to the container if TLS config is specified. -// For multiple keys: the init container writes DefaultCABundleKey to the root of the emptyDir volume, -// and the main container mounts it with SubPath to CABundleMountPath. -// For single key: the main container directly mounts the ConfigMap key. -// Also handles auto-detected ODH trusted CA bundle ConfigMaps. -func addCABundleVolumeMount(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { - if instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != nil { +// Mounts the combined ConfigMap created from inline PEM data. 
+func addCABundleVolumeMount(instance *llamav1alpha1.LlamaStackDistribution, container *corev1.Container) { + if instance.Spec.Server.TLSConfig != nil && instance.Spec.Server.TLSConfig.CABundle != "" { container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: CABundleVolumeName, + Name: CombinedConfigVolumeName, MountPath: CABundleMountPath, SubPath: DefaultCABundleKey, ReadOnly: true, }) - } else if r != nil { - // Check for auto-detected ODH trusted CA bundle - if _, keys, err := r.detectODHTrustedCABundle(ctx, instance); err == nil && len(keys) > 0 { - // Mount the auto-detected consolidated CA bundle - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: CABundleVolumeName, - MountPath: CABundleMountPath, - SubPath: DefaultCABundleKey, - ReadOnly: true, - }) - } } } -// createCABundleVolume creates the appropriate volume configuration for CA bundles. -// For single key: uses direct ConfigMap volume. -// For multiple keys: uses emptyDir volume with InitContainer to concatenate keys. -func createCABundleVolume(caBundleConfig *llamav1alpha1.CABundleConfig) corev1.Volume { - // For multiple keys, we'll use an emptyDir that gets populated by an InitContainer - if len(caBundleConfig.ConfigMapKeys) > 0 { - return corev1.Volume{ - Name: CABundleVolumeName, - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - } - } - - // For single key (legacy behavior), use direct ConfigMap volume +// createCombinedConfigVolume creates the volume configuration for the operator-managed ConfigMap. +// Used when either user config or CA bundle (or both) are specified. 
+func createCombinedConfigVolume(instance *llamav1alpha1.LlamaStackDistribution) corev1.Volume { + configMapName := instance.Name + "-config" return corev1.Volume{ - Name: CABundleVolumeName, + Name: CombinedConfigVolumeName, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ - Name: caBundleConfig.ConfigMapName, + Name: configMapName, }, }, }, } } -// createCABundleInitContainer creates an InitContainer that concatenates multiple CA bundle keys -// from a ConfigMap into a single file in the shared ca-bundle volume. -func createCABundleInitContainer(caBundleConfig *llamav1alpha1.CABundleConfig) (corev1.Container, error) { - // Validate ConfigMap keys for security - if err := validateConfigMapKeys(caBundleConfig.ConfigMapKeys); err != nil { - return corev1.Container{}, fmt.Errorf("failed to validate ConfigMap keys: %w", err) - } - - // Build the file list as a shell array embedded in the script - // This ensures the arguments are properly passed to the script - var fileListBuilder strings.Builder - for i, key := range caBundleConfig.ConfigMapKeys { - if i > 0 { - fileListBuilder.WriteString(" ") - } - // Quote each key to handle any special characters safely - fileListBuilder.WriteString(fmt.Sprintf("%q", key)) - } - fileList := fileListBuilder.String() - - // Use a secure script approach that embeds the file list directly - // This eliminates the issue with arguments not being passed to sh -c - script := fmt.Sprintf(`#!/bin/sh -set -e -output_file="%s" -source_dir="%s" - -# Clear the output file -> "$output_file" - -# Process each validated key file (keys are pre-validated) -for key in %s; do - file_path="$source_dir/$key" - if [ -f "$file_path" ]; then - cat "$file_path" >> "$output_file" - echo >> "$output_file" # Add newline between certificates - else - echo "Warning: Certificate file $file_path not found" >&2 - fi -done`, CABundleTempPath, CABundleSourceDir, fileList) - - return 
corev1.Container{ - Name: CABundleInitName, - Image: "registry.access.redhat.com/ubi9/ubi-minimal:latest", - Command: []string{"/bin/sh", "-c", script}, - // No Args needed since we embed the file list in the script - VolumeMounts: []corev1.VolumeMount{ - { - Name: CABundleSourceVolName, - MountPath: CABundleSourceDir, - ReadOnly: true, - }, - { - Name: CABundleVolumeName, - MountPath: CABundleTempDir, - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: &[]bool{false}[0], - RunAsNonRoot: &[]bool{false}[0], - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - }, - }, nil -} - // configurePodStorage configures the pod storage and returns the complete pod spec. -func configurePodStorage(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, container corev1.Container) corev1.PodSpec { +func configurePodStorage(instance *llamav1alpha1.LlamaStackDistribution, container corev1.Container) corev1.PodSpec { podSpec := corev1.PodSpec{ Containers: []corev1.Container{container}, } @@ -383,8 +281,8 @@ func configurePodStorage(ctx context.Context, r *LlamaStackDistributionReconcile // Configure storage volumes and init containers configureStorage(instance, &podSpec) - // Configure TLS CA bundle (with auto-detection support) - configureTLSCABundle(ctx, r, instance, &podSpec) + // Configure TLS CA bundle + configureTLSCABundle(instance, &podSpec) // Configure user config configureUserConfig(instance, &podSpec) @@ -465,130 +363,47 @@ func configureEmptyDirStorage(podSpec *corev1.PodSpec) { } // configureTLSCABundle handles TLS CA bundle configuration. -// For multiple keys: adds a ca-bundle-init init container that concatenates all keys into a single file -// in a shared emptyDir volume, which the main container then mounts via SubPath. -// For single key: uses a direct ConfigMap volume mount. 
-// If no explicit CA bundle is configured, it checks for the well-known ODH trusted CA bundle ConfigMap. -func configureTLSCABundle(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { +// Adds the combined ConfigMap volume if CA bundle is explicitly configured. +func configureTLSCABundle(instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { tlsConfig := instance.Spec.Server.TLSConfig - // Handle explicit CA bundle configuration first - if tlsConfig != nil && tlsConfig.CABundle != nil { - addExplicitCABundle(ctx, tlsConfig.CABundle, podSpec) - return - } - - // If no explicit CA bundle is configured, check for ODH trusted CA bundle auto-detection - if r != nil { - addAutoDetectedCABundle(ctx, r, instance, podSpec) + // Handle explicit CA bundle configuration + if tlsConfig != nil && tlsConfig.CABundle != "" { + addExplicitCABundle(instance, podSpec) } } // addExplicitCABundle handles explicitly configured CA bundles. 
-func addExplicitCABundle(ctx context.Context, caBundleConfig *llamav1alpha1.CABundleConfig, podSpec *corev1.PodSpec) { - // Add CA bundle InitContainer if multiple keys are specified - if len(caBundleConfig.ConfigMapKeys) > 0 { - caBundleInitContainer, err := createCABundleInitContainer(caBundleConfig) - if err != nil { - log.FromContext(ctx).Error(err, "Failed to create CA bundle init container") - return - } - podSpec.InitContainers = append(podSpec.InitContainers, caBundleInitContainer) - } - - // Add CA bundle ConfigMap volume - volume := createCABundleVolume(caBundleConfig) - podSpec.Volumes = append(podSpec.Volumes, volume) - - // Add source ConfigMap volume for multiple keys scenario - if len(caBundleConfig.ConfigMapKeys) > 0 { - sourceVolume := corev1.Volume{ - Name: CABundleSourceVolName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: caBundleConfig.ConfigMapName, - }, - }, - }, - } - podSpec.Volumes = append(podSpec.Volumes, sourceVolume) +func addExplicitCABundle(instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { + // Add combined ConfigMap volume if not already added + if !hasVolumeWithName(podSpec, CombinedConfigVolumeName) { + volume := createCombinedConfigVolume(instance) + podSpec.Volumes = append(podSpec.Volumes, volume) } } -// addAutoDetectedCABundle handles auto-detection of ODH trusted CA bundle ConfigMap. 
-func addAutoDetectedCABundle(ctx context.Context, r *LlamaStackDistributionReconciler, instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { - if r == nil { - return - } - - configMap, keys, err := r.detectODHTrustedCABundle(ctx, instance) - if err != nil { - // Log error but don't fail the reconciliation - log.FromContext(ctx).Error(err, "Failed to detect ODH trusted CA bundle ConfigMap") - return - } - - if configMap == nil || len(keys) == 0 { - // No ODH trusted CA bundle found or no keys available +// configureUserConfig handles user configuration setup. +func configureUserConfig(instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { + userConfig := instance.Spec.Server.UserConfig + if userConfig == nil || userConfig.CustomConfig == "" { return } - // Create a virtual CA bundle config for auto-detected ConfigMap - autoCaBundleConfig := &llamav1alpha1.CABundleConfig{ - ConfigMapName: configMap.Name, - ConfigMapKeys: keys, // Use all available keys - } - - // Use the same logic as explicit configuration - caBundleInitContainer, err := createCABundleInitContainer(autoCaBundleConfig) - if err != nil { - // Log error and skip auto-detected CA bundle configuration - log.FromContext(ctx).Error(err, "Failed to create CA bundle init container for auto-detected ConfigMap") - return + // Add combined ConfigMap volume if not already added + if !hasVolumeWithName(podSpec, CombinedConfigVolumeName) { + volume := createCombinedConfigVolume(instance) + podSpec.Volumes = append(podSpec.Volumes, volume) } - podSpec.InitContainers = append(podSpec.InitContainers, caBundleInitContainer) - - // Add CA bundle emptyDir volume for auto-detected ConfigMap - volume := createCABundleVolume(autoCaBundleConfig) - podSpec.Volumes = append(podSpec.Volumes, volume) - - // Add source ConfigMap volume for auto-detected ConfigMap - sourceVolume := corev1.Volume{ - Name: CABundleSourceVolName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: 
&corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: configMap.Name, - }, - }, - }, - } - podSpec.Volumes = append(podSpec.Volumes, sourceVolume) - - log.FromContext(ctx).Info("Auto-configured ODH trusted CA bundle", - "configMapName", configMap.Name, - "keys", keys) } -// configureUserConfig handles user configuration setup. -func configureUserConfig(instance *llamav1alpha1.LlamaStackDistribution, podSpec *corev1.PodSpec) { - userConfig := instance.Spec.Server.UserConfig - if userConfig == nil || userConfig.ConfigMapName == "" { - return +// hasVolumeWithName checks if a pod spec already has a volume with the given name. +func hasVolumeWithName(podSpec *corev1.PodSpec, volumeName string) bool { + for _, volume := range podSpec.Volumes { + if volume.Name == volumeName { + return true + } } - - // Add ConfigMap volume if user config is specified - podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ - Name: "user-config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: userConfig.ConfigMapName, - }, - }, - }, - }) + return false } // configurePodOverrides applies pod-level overrides from the LlamaStackDistribution spec. 
diff --git a/controllers/resource_helper_test.go b/controllers/resource_helper_test.go index f1ea8dd98..d7e4309ab 100644 --- a/controllers/resource_helper_test.go +++ b/controllers/resource_helper_test.go @@ -148,7 +148,7 @@ func TestBuildContainerSpec(t *testing.T) { }, ContainerSpec: llamav1alpha1.ContainerSpec{}, UserConfig: &llamav1alpha1.UserConfigSpec{ - ConfigMapName: "test-config", + CustomConfig: "test-config-data", }, }, }, @@ -160,7 +160,7 @@ func TestBuildContainerSpec(t *testing.T) { ImagePullPolicy: corev1.PullAlways, Ports: []corev1.ContainerPort{{ContainerPort: llamav1alpha1.DefaultServerPort}}, StartupProbe: newDefaultStartupProbe(llamav1alpha1.DefaultServerPort), - Command: []string{"/bin/sh", "-c", startupScript}, + Command: []string{"/bin/sh", "-c", StartupScript}, Args: []string{}, Env: []corev1.EnvVar{ {Name: "HF_HOME", Value: llamav1alpha1.DefaultMountPath}, @@ -171,8 +171,9 @@ func TestBuildContainerSpec(t *testing.T) { MountPath: llamav1alpha1.DefaultMountPath, }, { - Name: "user-config", - MountPath: "/etc/llama-stack/", + Name: CombinedConfigVolumeName, + MountPath: "/etc/llama-stack/run.yaml", + SubPath: "run.yaml", ReadOnly: true, }, }, @@ -182,7 +183,7 @@ func TestBuildContainerSpec(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result := buildContainerSpec(t.Context(), nil, tc.instance, tc.image) + result := buildContainerSpec(tc.instance, tc.image) assert.Equal(t, tc.expectedResult.Name, result.Name) assert.Equal(t, tc.expectedResult.Image, result.Image) assert.Equal(t, tc.expectedResult.Ports, result.Ports) @@ -274,7 +275,7 @@ func TestConfigurePodStorage(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result := configurePodStorage(t.Context(), nil, tc.instance, tc.container) + result := configurePodStorage(tc.instance, tc.container) // Verify container was added. 
assert.Len(t, result.Containers, 1) diff --git a/controllers/testing_support_test.go b/controllers/testing_support_test.go index c83d73c0a..f8db7cbbc 100644 --- a/controllers/testing_support_test.go +++ b/controllers/testing_support_test.go @@ -105,9 +105,9 @@ func (b *DistributionBuilder) WithServiceAccountName(serviceAccountName string) return b } -func (b *DistributionBuilder) WithUserConfig(configMapName string) *DistributionBuilder { +func (b *DistributionBuilder) WithUserConfig(customConfig string) *DistributionBuilder { b.instance.Spec.Server.UserConfig = &llamav1alpha1.UserConfigSpec{ - ConfigMapName: configMapName, + CustomConfig: customConfig, } return b } diff --git a/docs/additional/ca-bundle-configuration.md b/docs/additional/ca-bundle-configuration.md index 0fc841f07..ef371ef22 100644 --- a/docs/additional/ca-bundle-configuration.md +++ b/docs/additional/ca-bundle-configuration.md @@ -7,31 +7,18 @@ This document explains how to configure custom CA bundles for LlamaStackDistribu The CA bundle configuration allows you to: - Use self-signed certificates for external LLM API connections - Trust custom Certificate Authorities (CAs) for secure communication -- Mount CA certificates from ConfigMaps into the LlamaStack server pods +- Provide CA certificates inline in the LlamaStackDistribution spec ## How It Works When you configure a CA bundle: -1. **ConfigMap Storage**: CA certificates are stored in a Kubernetes ConfigMap -2. **Volume Mounting**: The certificates are mounted at `/etc/ssl/certs/` in the container -3. **Environment Variable**: The `SSL_CERT_FILE` environment variable is set to point to the CA bundle -4. **Automatic Restarts**: Pods restart automatically when the CA bundle ConfigMap changes +1. **Inline Storage**: CA certificates are provided directly in the LlamaStackDistribution spec as PEM-encoded data +2. **Automatic ConfigMap Creation**: The operator automatically creates a ConfigMap containing the CA bundle data +3. 
**Volume Mounting**: The certificates are mounted at `/etc/ssl/certs/ca-bundle.crt` in the container +4. **Automatic Restarts**: Pods restart automatically when the CA bundle data changes -### Single Key vs Multiple Keys - -**Single Key (configMapKey):** -- Direct ConfigMap volume mount -- Certificate file mounted directly from the ConfigMap key -- Minimal resource overhead - -**Multiple Keys (configMapKeys):** -- Uses an InitContainer to concatenate multiple keys -- All certificates from specified keys are combined into a single file -- Slightly higher resource overhead due to InitContainer, but maintains standard SSL behavior -- The final consolidated file is always named `ca-bundle.crt` regardless of source key names - -## Configuration Options +## Configuration ### Basic CA Bundle Configuration @@ -43,15 +30,22 @@ metadata: spec: server: distribution: - name: hf-serverless + name: remote-vllm tlsConfig: - caBundle: - configMapName: my-ca-bundle - # configMapNamespace: default # Optional - defaults to CR namespace - # configMapKey: ca-bundle.crt # Optional - defaults to "ca-bundle.crt" + caBundle: | + -----BEGIN CERTIFICATE----- + MIIDXTCCAkWgAwIBAgIJAKoK/heBjcOuMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV + BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX + aWRnaXRzIFB0eSBMdGQwHhcNMTMwODI3MjM1NDA3WhcNMjMwODI1MjM1NDA3WjBF + MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 + ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB + CgKCAQEAwuqTiuGqAXTAM4PLnL6jrOMiTUps8gmI8DnJTtIQN9XgHk7ckY6+8X9s + -----END CERTIFICATE----- ``` -### Multiple CA Bundle Keys Configuration (RHOAI Pattern) +### Multiple CA Certificates + +You can include multiple CA certificates in a single CA bundle: ```yaml apiVersion: llamastack.io/v1alpha1 @@ -61,154 +55,79 @@ metadata: spec: server: distribution: - name: hf-serverless + name: remote-vllm tlsConfig: - caBundle: - configMapName: odh-trusted-ca-bundle - # configMapNamespace: default # Optional - defaults 
to CR namespace - configMapKeys: # Multiple keys from same ConfigMap - - ca-bundle.crt # CNO-injected cluster CAs - - odh-ca-bundle.crt # User-specified custom CAs + caBundle: | + -----BEGIN CERTIFICATE----- + # First CA certificate + MIIDXTCCAkWgAwIBAgIJAKoK/heBjcOuMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV + # ... certificate data ... + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + # Second CA certificate + MIIDYTCCAkmgAwIBAgIJALfggjqwGI5jMA0GCSqGSIb3DQEBBQUAMEYxCzAJBgNV + # ... certificate data ... + -----END CERTIFICATE----- ``` -### Configuration Fields - -- `configMapName` (required): Name of the ConfigMap containing CA certificates -- `configMapNamespace` (optional): Namespace of the ConfigMap. Defaults to the same namespace as the LlamaStackDistribution -- `configMapKeys` (optional): Array of keys within the ConfigMap containing CA bundle data. All certificates from these keys will be concatenated into a single CA bundle file. If not specified, defaults to `["ca-bundle.crt"]` - ## Examples -### Example 1: Basic CA Bundle +### Example 1: VLLM with Custom CA ```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-ca-bundle -data: - ca-bundle.crt: | - -----BEGIN CERTIFICATE----- - # ... your CA certificate data here ... - -----END CERTIFICATE----- ---- apiVersion: llamastack.io/v1alpha1 kind: LlamaStackDistribution metadata: - name: secure-llama-stack + name: secure-vllm-stack spec: + replicas: 1 server: distribution: - name: hf-serverless - tlsConfig: - caBundle: - configMapName: my-ca-bundle -``` - -### Example 2: Custom Key Name - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: my-ca-bundle -data: - custom-ca.pem: | - -----BEGIN CERTIFICATE----- - # ... your CA certificate data here ... 
- -----END CERTIFICATE----- ---- -apiVersion: llamastack.io/v1alpha1 -kind: LlamaStackDistribution -metadata: - name: secure-llama-stack -spec: - server: - distribution: - name: hf-serverless - tlsConfig: - caBundle: - configMapName: my-ca-bundle - configMapKey: custom-ca.pem -``` - -### Example 3: Cross-Namespace CA Bundle - -```yaml -apiVersion: llamastack.io/v1alpha1 -kind: LlamaStackDistribution -metadata: - name: secure-llama-stack - namespace: my-namespace -spec: - server: - distribution: - name: hf-serverless + name: remote-vllm + containerSpec: + port: 8321 + env: + - name: INFERENCE_MODEL + value: "meta-llama/Llama-3.2-1B-Instruct" + - name: VLLM_URL + value: "https://vllm-server.vllm-dist.svc.cluster.local:8000/v1" + - name: VLLM_TLS_VERIFY + value: "/etc/ssl/certs/ca-bundle.crt" tlsConfig: - caBundle: - configMapName: global-ca-bundle - configMapNamespace: kube-system + caBundle: | + -----BEGIN CERTIFICATE----- + # Your VLLM server's CA certificate + MIIDXTCCAkWgAwIBAgIJAKoK/heBjcOuMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV + # ... certificate data ... + -----END CERTIFICATE----- ``` -### Example 4: RHOAI Pattern with Multiple CA Sources +### Example 2: Ollama with Custom CA ```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: odh-trusted-ca-bundle - labels: - config.openshift.io/inject-trusted-cabundle: "true" -data: - ca-bundle.crt: | - # Populated by Cluster Network Operator (CNO) - -----BEGIN CERTIFICATE----- - # ... cluster-wide CA certificates ... - -----END CERTIFICATE----- - odh-ca-bundle.crt: | - # User-specified custom CAs from DSCInitialization - -----BEGIN CERTIFICATE----- - # ... custom CA certificate 1 ... - -----END CERTIFICATE----- - -----BEGIN CERTIFICATE----- - # ... custom CA certificate 2 ... 
- -----END CERTIFICATE----- ---- apiVersion: llamastack.io/v1alpha1 kind: LlamaStackDistribution metadata: - name: rhoai-llama-stack + name: secure-ollama-stack spec: + replicas: 1 server: distribution: - name: hf-serverless + name: ollama + containerSpec: + port: 8321 + env: + - name: INFERENCE_MODEL + value: "llama3.2:1b" + - name: OLLAMA_URL + value: "https://ollama-server.ollama-dist.svc.cluster.local:11434" tlsConfig: - caBundle: - configMapName: odh-trusted-ca-bundle - configMapKeys: - - ca-bundle.crt # Cluster CAs - - odh-ca-bundle.crt # Custom CAs -``` - -## Creating CA Bundle ConfigMaps - -### From Certificate Files - -```bash -# Create a ConfigMap from a certificate file -kubectl create configmap my-ca-bundle --from-file=ca-bundle.crt=/path/to/your/ca.crt - -# Or create from multiple certificate files -kubectl create configmap my-ca-bundle \ - --from-file=ca-bundle.crt=/path/to/your/ca1.crt \ - --from-file=additional-ca.crt=/path/to/your/ca2.crt -``` - -### From Certificate Content - -```bash -# Create a ConfigMap with certificate content -kubectl create configmap my-ca-bundle --from-literal=ca-bundle.crt="$(cat /path/to/your/ca.crt)" + caBundle: | + -----BEGIN CERTIFICATE----- + # Your Ollama server's CA certificate + MIIDXTCCAkWgAwIBAgIJAKoK/heBjcOuMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV + # ... certificate data ... + -----END CERTIFICATE----- ``` ## Use Cases @@ -221,19 +140,19 @@ When using private cloud LLM providers with self-signed certificates: spec: server: distribution: - name: hf-serverless + name: remote-vllm containerSpec: env: - - name: HF_API_KEY - valueFrom: - secretKeyRef: - name: hf-api-key - key: token - userConfig: - configMapName: llama-stack-config + - name: VLLM_URL + value: "https://private-llm-api.company.com/v1" + - name: VLLM_TLS_VERIFY + value: "/etc/ssl/certs/ca-bundle.crt" tlsConfig: - caBundle: - configMapName: private-cloud-ca-bundle + caBundle: | + -----BEGIN CERTIFICATE----- + # Private cloud provider's CA certificate + # ... 
certificate data ... + -----END CERTIFICATE----- ``` ### 2. Internal Enterprise APIs @@ -244,11 +163,17 @@ For enterprise environments with internal CAs: spec: server: distribution: - name: hf-endpoint + name: remote-vllm tlsConfig: - caBundle: - configMapName: enterprise-ca-bundle - configMapNamespace: security-system + caBundle: | + -----BEGIN CERTIFICATE----- + # Enterprise root CA certificate + # ... certificate data ... + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + # Enterprise intermediate CA certificate + # ... certificate data ... + -----END CERTIFICATE----- ``` ### 3. Development/Testing @@ -261,52 +186,62 @@ spec: distribution: name: ollama tlsConfig: - caBundle: - configMapName: dev-ca-bundle - configMapKey: development-ca.pem + caBundle: | + -----BEGIN CERTIFICATE----- + # Development self-signed CA certificate + # ... certificate data ... + -----END CERTIFICATE----- +``` + +## Obtaining CA Certificates + +### From a Server + +```bash +# Get the CA certificate from a server +openssl s_client -showcerts -connect your-server.com:443 /dev/null | \ + openssl x509 -outform PEM > ca-certificate.pem +``` + +### From a Certificate File + +```bash +# Extract CA certificate from a certificate bundle +openssl x509 -in certificate-bundle.pem -out ca-certificate.pem +``` + +### From Kubernetes Secrets + +```bash +# Extract CA certificate from a Kubernetes secret +kubectl get secret your-tls-secret -o jsonpath='{.data.ca\.crt}' | base64 -d > ca-certificate.pem ``` ## Troubleshooting ### Common Issues -1. **Certificate Not Found**: Ensure the ConfigMap exists and contains the specified key -2. **Permission Denied**: Check that the operator has permissions to read the ConfigMap -3. **Invalid Certificate**: Verify the certificate format is correct (PEM format) -4. **Pod Not Restarting**: ConfigMap changes trigger automatic pod restarts via annotations +1. 
**Invalid Certificate Format**: Ensure the certificate is in PEM format with proper BEGIN/END blocks +2. **Certificate Validation**: Verify the certificate is valid and not expired +3. **Pod Not Restarting**: The operator automatically restarts pods when CA bundle data changes ### Common Error Messages and Solutions -#### "CA bundle key not found in ConfigMap" -- **Cause**: The specified key doesn't exist in the ConfigMap data -- **Solution**: Check the key name in your LlamaStackDistribution spec, default is "ca-bundle.crt" -- **Example**: Verify `kubectl get configmap my-ca-bundle -o yaml` shows your expected key - -#### "Invalid CA bundle format" -- **Cause**: The certificate data is not in valid PEM format or contains invalid certificates +#### "CA bundle contains invalid PEM data" +- **Cause**: The certificate data is not in valid PEM format - **Solution**: Ensure certificates are properly formatted with BEGIN/END CERTIFICATE blocks - **Example**: Valid format starts with `-----BEGIN CERTIFICATE-----` -#### "Referenced CA bundle ConfigMap not found" -- **Cause**: The ConfigMap specified in tlsConfig.caBundle.configMapName doesn't exist -- **Solution**: Create the ConfigMap first, then apply the LlamaStackDistribution -- **Example**: `kubectl create configmap my-ca-bundle --from-file=ca-bundle.crt=my-ca.crt` - -#### "No valid certificates found in CA bundle" -- **Cause**: The ConfigMap contains data but no parseable certificates -- **Solution**: Verify certificate content and format -- **Example**: Use `openssl x509 -text -noout -in your-cert.crt` to validate certificates - #### "Failed to parse certificate" - **Cause**: Certificate data is corrupted or not a valid X.509 certificate - **Solution**: Regenerate the certificate or verify the source -- **Example**: Check if the certificate was properly base64 encoded +- **Example**: Use `openssl x509 -text -noout -in your-cert.pem` to validate certificates ### Debugging ```bash -# Check if ConfigMap exists -kubectl 
get configmap my-ca-bundle -o yaml +# Check the automatically created ConfigMap +kubectl get configmap -config -o yaml # Check pod environment variables kubectl describe pod @@ -314,42 +249,29 @@ kubectl describe pod # Check mounted certificates kubectl exec -- ls -la /etc/ssl/certs/ -# Check SSL_CERT_FILE environment variable -kubectl exec -- env | grep SSL_CERT_FILE - # Validate certificate format locally -openssl x509 -text -noout -in ca-bundle.crt +openssl x509 -text -noout -in ca-certificate.pem # Check certificate expiration -openssl x509 -enddate -noout -in ca-bundle.crt +openssl x509 -enddate -noout -in ca-certificate.pem # Test certificate chain -openssl verify -CAfile ca-bundle.crt server.crt +openssl verify -CAfile ca-certificate.pem server.crt ``` ### Validation Checklist Before deploying a LlamaStackDistribution with CA bundle: -- [ ] ConfigMap exists in the correct namespace -- [ ] ConfigMap contains the specified key (default: "ca-bundle.crt") - [ ] Certificate data is in PEM format - [ ] Certificate data contains valid X.509 certificates -- [ ] Operator has read permissions on the ConfigMap - [ ] Certificate is not expired - [ ] Certificate contains the expected CA for your external service +- [ ] Certificate data is properly indented in the YAML (use `|` for multiline strings) ## Security Considerations -1. **ConfigMap Security**: ConfigMaps are stored in plain text in etcd. Consider using appropriate RBAC policies -2. **Certificate Rotation**: Update ConfigMaps when certificates expire or are rotated -3. **Namespace Isolation**: Use appropriate namespaces to isolate CA bundles -4. **Audit Trail**: Monitor ConfigMap changes in production environments -5. 
**Principle of Least Privilege**: Only grant necessary permissions to access CA bundle ConfigMaps - -## Limitations - -- Only supports PEM format certificates -- ConfigMap size limits apply (1MB by default) -- Certificate validation is handled by the underlying Python SSL libraries -- Cross-namespace ConfigMap access requires appropriate RBAC permissions +1. **Sensitive Data**: CA certificates are stored in Kubernetes ConfigMaps created by the operator +2. **RBAC**: Ensure appropriate RBAC policies are in place for accessing the LlamaStackDistribution resources +3. **Certificate Rotation**: Update the CA bundle in the LlamaStackDistribution spec when certificates are rotated +4. **Validation**: The operator validates PEM format but doesn't verify certificate validity or expiration diff --git a/docs/api-overview.md b/docs/api-overview.md index d92bd4863..0250fbd7b 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -11,19 +11,6 @@ Package v1alpha1 contains API Schema definitions for the v1alpha1 API group - [LlamaStackDistribution](#llamastackdistribution) - [LlamaStackDistributionList](#llamastackdistributionlist) -#### CABundleConfig - -CABundleConfig defines the CA bundle configuration for custom certificates - -_Appears in:_ -- [TLSConfig](#tlsconfig) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `configMapName` _string_ | ConfigMapName is the name of the ConfigMap containing CA bundle certificates | | | -| `configMapNamespace` _string_ | ConfigMapNamespace is the namespace of the ConfigMap (defaults to the same namespace as the CR) | | | -| `configMapKeys` _string array_ | ConfigMapKeys specifies multiple keys within the ConfigMap containing CA bundle data
All certificates from these keys will be concatenated into a single CA bundle file
If not specified, defaults to [DefaultCABundleKey] | | MaxItems: 50
| - #### ContainerSpec ContainerSpec defines the llama-stack server container configuration. @@ -217,17 +204,18 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `caBundle` _[CABundleConfig](#cabundleconfig)_ | CABundle defines the CA bundle configuration for custom certificates | | | +| `caBundle` _string_ | CABundle contains PEM-encoded CA bundle certificates as inline data
The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod | | | #### UserConfigSpec +UserConfigSpec defines the user configuration for the llama-stack server. + _Appears in:_ - [ServerSpec](#serverspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `configMapName` _string_ | ConfigMapName is the name of the ConfigMap containing user configuration | | | -| `configMapNamespace` _string_ | ConfigMapNamespace is the namespace of the ConfigMap (defaults to the same namespace as the CR) | | | +| `customConfig` _string_ | CustomConfig contains arbitrary text data that represents a user-provided run.yaml configuration file
The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod | | | #### VersionInfo diff --git a/pkg/deploy/deploy.go b/pkg/deploy/deploy.go index 141c9c6d5..51b815444 100644 --- a/pkg/deploy/deploy.go +++ b/pkg/deploy/deploy.go @@ -31,17 +31,17 @@ func ApplyDeployment(ctx context.Context, cli client.Client, scheme *runtime.Sch } // For updates, preserve the existing selector since it's immutable - // and use server-side apply for other fields + // and update the deployment spec if !reflect.DeepEqual(found.Spec, deployment.Spec) { logger.Info("Updating Deployment", "deployment", deployment.Name) // Preserve the existing selector to avoid immutable field error during upgrades deployment.Spec.Selector = found.Spec.Selector - // Use server-side apply to merge changes properly - // Ensure the deployment has proper TypeMeta for server-side apply - deployment.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment")) - return cli.Patch(ctx, deployment, client.Apply, client.ForceOwnership, client.FieldOwner("llama-stack-operator")) + // Copy the existing deployment and update its spec + // This ensures proper field removal when fields are no longer present + found.Spec = deployment.Spec + return cli.Update(ctx, found) } return nil } diff --git a/release/operator.yaml b/release/operator.yaml index be1efc850..c9473f621 100644 --- a/release/operator.yaml +++ b/release/operator.yaml @@ -2015,44 +2015,20 @@ spec: server properties: caBundle: - description: CABundle defines the CA bundle configuration - for custom certificates - properties: - configMapKeys: - description: |- - ConfigMapKeys specifies multiple keys within the ConfigMap containing CA bundle data - All certificates from these keys will be concatenated into a single CA bundle file - If not specified, defaults to [DefaultCABundleKey] - items: - type: string - maxItems: 50 - type: array - configMapName: - description: ConfigMapName is the name of the ConfigMap - 
containing CA bundle certificates - type: string - configMapNamespace: - description: ConfigMapNamespace is the namespace of the - ConfigMap (defaults to the same namespace as the CR) - type: string - required: - - configMapName - type: object + description: |- + CABundle contains PEM-encoded CA bundle certificates as inline data + The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod + type: string type: object userConfig: description: UserConfig defines the user configuration for the llama-stack server properties: - configMapName: - description: ConfigMapName is the name of the ConfigMap containing - user configuration - type: string - configMapNamespace: - description: ConfigMapNamespace is the namespace of the ConfigMap - (defaults to the same namespace as the CR) + customConfig: + description: |- + CustomConfig contains arbitrary text data that represents a user-provided run.yaml configuration file + The operator automatically creates and manages a ConfigMap for mounting into the llama-stack pod type: string - required: - - configMapName type: object required: - distribution @@ -2205,11 +2181,6 @@ spec: required: - spec type: object - selectableFields: - - jsonPath: .spec.server.userConfig.configMapName - - jsonPath: .spec.server.userConfig.configMapNamespace - - jsonPath: .spec.server.tlsConfig.caBundle.configMapName - - jsonPath: .spec.server.tlsConfig.caBundle.configMapNamespace served: true storage: true subresources: @@ -2325,8 +2296,11 @@ rules: - apiGroups: - "" resources: - configmaps + - serviceaccounts + - services verbs: - create + - delete - get - list - patch @@ -2341,19 +2315,6 @@ rules: - get - list - watch -- apiGroups: - - "" - resources: - - serviceaccounts - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - apps resources: diff --git a/tests/e2e/tls_test.go b/tests/e2e/tls_test.go index 8dd19549e..4956b2e78 100644 --- a/tests/e2e/tls_test.go +++ 
b/tests/e2e/tls_test.go @@ -19,11 +19,9 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/yaml" ) const ( @@ -67,14 +65,6 @@ func testCreateNamespace(t *testing.T) { if err != nil && !k8serrors.IsAlreadyExists(err) { require.NoError(t, err) } - - // Create CA bundle configmap in test namespace - err = createCABundleConfigMap(t, llsTestNS) - require.NoError(t, err) - - // Verify the CA bundle ConfigMap was created correctly - err = verifyCABundleConfigMap(t, llsTestNS) - require.NoError(t, err) } func testLlamaStackWithCABundle(t *testing.T) { @@ -84,14 +74,6 @@ func testLlamaStackWithCABundle(t *testing.T) { err := deployLlamaStackWithCABundle(t) require.NoError(t, err) - // The YAML file creates a placeholder ConfigMap, so we need to update it with the actual CA bundle - err = updateCABundleConfigMap(t, llsTestNS) - require.NoError(t, err) - - // Verify the CA bundle ConfigMap has the correct content after update - err = verifyCABundleConfigMap(t, llsTestNS) - require.NoError(t, err) - // Verify the LlamaStack distribution is configured with TLS err = verifyLlamaStackTLSConfig(t, llsTestNS, "llamastack-with-config") require.NoError(t, err) @@ -160,98 +142,6 @@ func generateCertificates(t *testing.T) { t.Log("Certificates generated successfully") } -func createCABundleConfigMap(t *testing.T, targetNS string) error { - t.Helper() - - // Get the project root path - projectRoot, err := filepath.Abs("../..") - if err != nil { - return fmt.Errorf("failed to get project root: %w", err) - } - - // Read CA bundle - caBundle, err := os.ReadFile(filepath.Join(projectRoot, "config", "samples", "vllm-ca-certs", controllers.DefaultCABundleKey)) - if err != nil { - return fmt.Errorf("failed to read CA bundle: 
%w", err) - } - - caBundleConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "custom-ca-bundle", - Namespace: targetNS, - }, - Data: map[string]string{ - controllers.DefaultCABundleKey: string(caBundle), - }, - } - - // Try to create, if it exists, update it - err = TestEnv.Client.Create(TestEnv.Ctx, caBundleConfigMap) - if err != nil { - if k8serrors.IsAlreadyExists(err) { - // ConfigMap exists, update it - existingConfigMap := &corev1.ConfigMap{} - err = TestEnv.Client.Get(TestEnv.Ctx, client.ObjectKey{ - Namespace: targetNS, - Name: "custom-ca-bundle", - }, existingConfigMap) - if err != nil { - return fmt.Errorf("failed to get existing ConfigMap: %w", err) - } - - existingConfigMap.Data[controllers.DefaultCABundleKey] = string(caBundle) - err = TestEnv.Client.Update(TestEnv.Ctx, existingConfigMap) - if err != nil { - return fmt.Errorf("failed to update existing ConfigMap: %w", err) - } - } else { - return fmt.Errorf("failed to create CA bundle configmap: %w", err) - } - } else { - t.Logf("Created CA bundle ConfigMap with %d bytes", len(caBundle)) - } - - return nil -} - -func verifyCABundleConfigMap(t *testing.T, targetNS string) error { - t.Helper() - - // Get the ConfigMap - configMap := &corev1.ConfigMap{} - err := TestEnv.Client.Get(TestEnv.Ctx, client.ObjectKey{ - Namespace: targetNS, - Name: "custom-ca-bundle", - }, configMap) - if err != nil { - return fmt.Errorf("failed to get CA bundle ConfigMap: %w", err) - } - - // Verify the CA bundle content exists - caBundle, exists := configMap.Data[controllers.DefaultCABundleKey] - if !exists { - return fmt.Errorf("failed to find %s CA bundle key in ConfigMap", controllers.DefaultCABundleKey) - } - - if len(caBundle) == 0 { - return fmt.Errorf("failed to find any keys in CA bundle ConfigMap %s", controllers.DefaultCABundleKey) - } - - // Check if CA bundle appears to be a placeholder - if len(caBundle) < 100 || !strings.Contains(caBundle, "BEGIN CERTIFICATE") { - t.Logf("WARNING: CA bundle 
appears to be a placeholder or invalid") - t.Logf("CA bundle content: %s", caBundle) - - // Try to update the ConfigMap with the actual CA bundle from the file - err := updateCABundleConfigMap(t, targetNS) - if err != nil { - t.Logf("Failed to update CA bundle ConfigMap: %v", err) - } - } - - return nil -} - func verifyLlamaStackTLSConfig(t *testing.T, namespace, name string) error { t.Helper() @@ -270,80 +160,86 @@ func verifyLlamaStackTLSConfig(t *testing.T, namespace, name string) error { return errors.New("LlamaStack distribution does not have TLS config") } - if distribution.Spec.Server.TLSConfig.CABundle == nil { + if distribution.Spec.Server.TLSConfig.CABundle == "" { return errors.New("LlamaStack distribution TLS config does not have CA bundle") } return nil } -func updateCABundleConfigMap(t *testing.T, targetNS string) error { - t.Helper() - - // Get the project root path - projectRoot, err := filepath.Abs("../..") - if err != nil { - return fmt.Errorf("failed to get project root: %w", err) - } - - // Read the actual CA bundle from the file - actualCABundle, err := os.ReadFile(filepath.Join(projectRoot, "config", "samples", "vllm-ca-certs", controllers.DefaultCABundleKey)) - if err != nil { - return fmt.Errorf("failed to read CA bundle file: %w", err) - } - - // Get the existing ConfigMap - configMap := &corev1.ConfigMap{} - err = TestEnv.Client.Get(TestEnv.Ctx, client.ObjectKey{ - Namespace: targetNS, - Name: "custom-ca-bundle", - }, configMap) - if err != nil { - return fmt.Errorf("failed to get ConfigMap: %w", err) - } - - // Update the ConfigMap with the actual CA bundle - configMap.Data[controllers.DefaultCABundleKey] = string(actualCABundle) - - err = TestEnv.Client.Update(TestEnv.Ctx, configMap) - if err != nil { - return fmt.Errorf("failed to update ConfigMap: %w", err) - } - - return nil -} - func deployLlamaStackWithCABundle(t *testing.T) error { t.Helper() - // Read LlamaStack TLS test configuration + // Read the generated CA certificate 
projectRoot, err := filepath.Abs("../..") if err != nil { return fmt.Errorf("failed to get project root: %w", err) } - llamaStackConfigPath := filepath.Join(projectRoot, "config", "samples", "example-with-ca-bundle.yaml") - llamaStackConfigData, err := os.ReadFile(llamaStackConfigPath) + caCertPath := filepath.Join(projectRoot, "ca.crt") + caCertData, err := os.ReadFile(caCertPath) if err != nil { - return fmt.Errorf("failed to read LlamaStack config: %w", err) + return fmt.Errorf("failed to read CA certificate: %w", err) } - // Apply LlamaStack configuration - objects, err := parseKubernetesYAML(llamaStackConfigData) - if err != nil { - return fmt.Errorf("failed to parse LlamaStack config: %w", err) + // Create the LlamaStackDistribution directly instead of using YAML template + llsd := &v1alpha1.LlamaStackDistribution{ + ObjectMeta: metav1.ObjectMeta{ + Name: "llamastack-with-config", + Namespace: llsTestNS, + }, + Spec: v1alpha1.LlamaStackDistributionSpec{ + Replicas: 1, + Server: v1alpha1.ServerSpec{ + Distribution: v1alpha1.DistributionType{ + Name: "remote-vllm", + }, + ContainerSpec: v1alpha1.ContainerSpec{ + Port: 8321, + Env: []corev1.EnvVar{ + { + Name: "INFERENCE_MODEL", + Value: "meta-llama/Llama-3.2-1B-Instruct", + }, + { + Name: "VLLM_URL", + Value: "https://vllm-server.vllm-dist.svc.cluster.local:8000/v1", + }, + { + Name: "VLLM_TLS_VERIFY", + Value: "/etc/ssl/certs/ca-bundle.crt", + }, + }, + }, + UserConfig: &v1alpha1.UserConfigSpec{ + CustomConfig: `# Llama Stack Configuration +version: '2' +image_name: remote-vllm +apis: +- inference +providers: + inference: + - provider_id: vllm + provider_type: "remote::vllm" + config: + url: "https://vllm-server.vllm-dist.svc.cluster.local:8000/v1" +models: + - model_id: "meta-llama/Llama-3.2-1B-Instruct" + provider_id: vllm + model_type: llm +server: + port: 8321`, + }, + TLSConfig: &v1alpha1.TLSConfig{ + CABundle: string(caCertData), + }, + }, + }, } - for _, obj := range objects { - // Set the namespace for 
namespaced resources - if obj.GetNamespace() == "" { - obj.SetNamespace(llsTestNS) - } - - err = TestEnv.Client.Create(TestEnv.Ctx, obj) - if err != nil && !k8serrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create LlamaStack resource: %w", err) - } + err = TestEnv.Client.Create(TestEnv.Ctx, llsd) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to create LlamaStack resource: %w", err) } return nil @@ -377,7 +273,7 @@ func verifyCertificateMounts(t *testing.T, namespace, name string) error { func hasCABundleVolume(volumes []corev1.Volume) bool { for _, volume := range volumes { - if volume.ConfigMap != nil && volume.ConfigMap.Name == "custom-ca-bundle" { + if volume.ConfigMap != nil && volume.Name == controllers.CombinedConfigVolumeName { return true } } @@ -396,6 +292,7 @@ func hasCABundleMount(containers []corev1.Container) bool { func hasCABundleMountInContainer(mounts []corev1.VolumeMount) bool { for _, mount := range mounts { if mount.MountPath == controllers.CABundleMountPath || + mount.Name == controllers.CombinedConfigVolumeName || strings.Contains(mount.MountPath, "ca-bundle") { return true } @@ -442,57 +339,6 @@ func verifyEnvironmentVariables(t *testing.T, namespace, name string) error { return nil } -func parseKubernetesYAML(data []byte) ([]client.Object, error) { - // Split YAML documents - docs := yamlSplit(data) - - // Pre-allocate slice with expected capacity - objects := make([]client.Object, 0, len(docs)) - - for _, doc := range docs { - if len(doc) == 0 { - continue - } - - obj := &unstructured.Unstructured{} - err := yaml.Unmarshal(doc, obj) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) - } - - if obj.GetKind() == "" { - continue - } - - objects = append(objects, obj) - } - - return objects, nil -} - -func yamlSplit(data []byte) [][]byte { - var docs [][]byte - var currentDoc []byte - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if 
strings.TrimSpace(line) == "---" { - if len(currentDoc) > 0 { - docs = append(docs, currentDoc) - currentDoc = nil - } - } else { - currentDoc = append(currentDoc, []byte(line+"\n")...) - } - } - - if len(currentDoc) > 0 { - docs = append(docs, currentDoc) - } - - return docs -} - func waitForDeploymentCreation(t *testing.T, namespace, name string, timeout time.Duration) error { t.Helper()