From 60d6d1a825fa653943d4b81c9f11e8f7a3968d76 Mon Sep 17 00:00:00 2001 From: Pablo Rodriguez Nava Date: Thu, 30 Oct 2025 18:16:18 +0100 Subject: [PATCH] MCO-1957: OSImageStream gathering from ReleaseImage --- cmd/machine-config-operator/bootstrap.go | 42 +- go.mod | 17 +- go.sum | 25 +- pkg/controller/build/imagepruner/errors.go | 51 +- .../build/imagepruner/imageinspect.go | 99 +-- .../build/imagepruner/imagepruner.go | 154 +---- pkg/controller/common/images.go | 4 - pkg/controller/image/errors.go | 50 ++ pkg/controller/image/image.go | 107 +++ pkg/controller/image/streams.go | 26 + pkg/controller/image/sys_context.go | 171 +++++ pkg/controller/osimagestream/cli_osimages.go | 29 + .../osimagestream/configmap_osimages.go | 57 ++ .../osimagestream/imagestream_osimages.go | 180 +++++ .../osimagestream/imagestream_provider.go | 123 ++++ pkg/controller/osimagestream/osimagestream.go | 157 +++++ pkg/operator/bootstrap.go | 23 +- pkg/operator/operator.go | 1 + pkg/operator/sync.go | 17 + .../google/gnostic-models/compiler/context.go | 2 +- .../gnostic-models/compiler/extensions.go | 2 +- .../google/gnostic-models/compiler/helpers.go | 2 +- .../google/gnostic-models/compiler/reader.go | 2 +- .../gnostic-models/jsonschema/models.go | 2 +- .../gnostic-models/jsonschema/reader.go | 2 +- .../gnostic-models/jsonschema/writer.go | 2 +- .../gnostic-models/openapiv2/OpenAPIv2.go | 80 +-- .../gnostic-models/openapiv2/document.go | 2 +- .../gnostic-models/openapiv3/OpenAPIv3.go | 24 +- .../gnostic-models/openapiv3/document.go | 2 +- .../modern-go/reflect2/safe_type.go | 22 +- vendor/github.com/openshift/api/AGENTS.md | 15 + vendor/github.com/openshift/api/Makefile | 44 +- .../api/config/v1/types_cluster_version.go | 16 +- .../api/config/v1/types_infrastructure.go | 39 ++ ...1_clusterversions-CustomNoUpgrade.crd.yaml | 16 +- ...erator_01_clusterversions-Default.crd.yaml | 16 +- ...usterversions-DevPreviewNoUpgrade.crd.yaml | 16 +- ...sterversions-TechPreviewNoUpgrade.crd.yaml | 16 
+- ...1_infrastructures-CustomNoUpgrade.crd.yaml | 30 + ...frastructures-DevPreviewNoUpgrade.crd.yaml | 30 + ...rastructures-TechPreviewNoUpgrade.crd.yaml | 30 + ...hift-controller-manager_01_builds.crd.yaml | 44 +- ..._generated.featuregated-crd-manifests.yaml | 2 + .../v1/zz_generated.swagger_doc_generated.go | 6 +- ...clustermonitoring-CustomNoUpgrade.crd.yaml | 10 +- ...termonitoring-DevPreviewNoUpgrade.crd.yaml | 10 +- ...ermonitoring-TechPreviewNoUpgrade.crd.yaml | 10 +- vendor/github.com/openshift/api/features.md | 10 +- .../openshift/api/features/features.go | 25 +- .../api/machineconfiguration/v1/types.go | 16 + ...controllerconfigs-CustomNoUpgrade.crd.yaml | 30 + ...rollerconfigs-DevPreviewNoUpgrade.crd.yaml | 30 + ...ollerconfigs-TechPreviewNoUpgrade.crd.yaml | 30 + ...achineconfigpools-CustomNoUpgrade.crd.yaml | 634 ++++++++++++++++++ ...ig_01_machineconfigpools-Default.crd.yaml} | 1 + ...neconfigpools-DevPreviewNoUpgrade.crd.yaml | 634 ++++++++++++++++++ ...econfigpools-TechPreviewNoUpgrade.crd.yaml | 634 ++++++++++++++++++ ..._generated.featuregated-crd-manifests.yaml | 3 + .../v1/zz_generated.swagger_doc_generated.go | 1 + .../machineconfiguration/v1alpha1/register.go | 2 + .../v1alpha1/types_osimagestream.go | 124 ++++ ...01_osimagestreams-CustomNoUpgrade.crd.yaml | 153 +++++ ...simagestreams-DevPreviewNoUpgrade.crd.yaml | 153 +++++ ...imagestreams-TechPreviewNoUpgrade.crd.yaml | 153 +++++ .../v1alpha1/zz_generated.deepcopy.go | 122 ++++ ..._generated.featuregated-crd-manifests.yaml | 24 + .../zz_generated.swagger_doc_generated.go | 48 ++ .../api/operator/v1/types_ingress.go | 17 + .../0000_50_console_01_consoles.crd.yaml | 32 +- ..._50_ingress_00_ingresscontrollers.crd.yaml | 25 + .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../openshift/api/route/v1/generated.proto | 4 +- .../openshift/api/route/v1/types.go | 4 +- .../v1/zz_generated.swagger_doc_generated.go | 2 +- .../k8s.io/code-generator/generate-groups.sh | 0 
.../generate-internal-groups.sh | 0 vendor/k8s.io/gengo/v2/codetags/extractor.go | 85 +++ vendor/k8s.io/gengo/v2/codetags/parser.go | 407 +++++++++++ vendor/k8s.io/gengo/v2/codetags/scanner.go | 228 +++++++ vendor/k8s.io/gengo/v2/codetags/types.go | 169 +++++ vendor/k8s.io/gengo/v2/comments.go | 234 ++----- .../k8s.io/kube-openapi/pkg/common/common.go | 4 +- .../kube-openapi/pkg/schemaconv/openapi.go | 2 +- .../pkg/schemaconv/proto_models.go | 2 +- .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 2 +- .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/util/proto/document_v3.go | 2 +- .../pkg/validation/strfmt/format.go | 24 + .../strfmt/kubernetes-extensions.go | 143 ++++ vendor/modules.txt | 27 +- .../structured-merge-diff/v6/LICENSE | 201 ++++++ .../structured-merge-diff/v6/schema/doc.go | 28 + .../v6/schema/elements.go | 375 +++++++++++ .../structured-merge-diff/v6/schema/equals.go | 202 ++++++ .../v6/schema/schemaschema.go | 165 +++++ 96 files changed, 6366 insertions(+), 676 deletions(-) create mode 100644 pkg/controller/image/errors.go create mode 100644 pkg/controller/image/image.go create mode 100644 pkg/controller/image/streams.go create mode 100644 pkg/controller/image/sys_context.go create mode 100644 pkg/controller/osimagestream/cli_osimages.go create mode 100644 pkg/controller/osimagestream/configmap_osimages.go create mode 100644 pkg/controller/osimagestream/imagestream_osimages.go create mode 100644 pkg/controller/osimagestream/imagestream_provider.go create mode 100644 pkg/controller/osimagestream/osimagestream.go create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/{0000_80_machine-config_01_machineconfigpools.crd.yaml => 0000_80_machine-config_01_machineconfigpools-Default.crd.yaml} (99%) create mode 100644 
vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-CustomNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-TechPreviewNoUpgrade.crd.yaml mode change 100644 => 100755 vendor/k8s.io/code-generator/generate-groups.sh mode change 100644 => 100755 vendor/k8s.io/code-generator/generate-internal-groups.sh create mode 100644 vendor/k8s.io/gengo/v2/codetags/extractor.go create mode 100644 vendor/k8s.io/gengo/v2/codetags/parser.go create mode 100644 vendor/k8s.io/gengo/v2/codetags/scanner.go create mode 100644 vendor/k8s.io/gengo/v2/codetags/types.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go diff --git a/cmd/machine-config-operator/bootstrap.go b/cmd/machine-config-operator/bootstrap.go index 2c75f685aa..2d42f56bee 
100644 --- a/cmd/machine-config-operator/bootstrap.go +++ b/cmd/machine-config-operator/bootstrap.go @@ -5,6 +5,7 @@ import ( "fmt" "os" + "github.com/openshift/machine-config-operator/pkg/controller/osimagestream" "github.com/spf13/cobra" "k8s.io/klog/v2" @@ -82,6 +83,7 @@ func init() { bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.haproxyImage, "haproxy-image", "", "Image for haproxy.") bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.baremetalRuntimeCfgImage, "baremetal-runtimecfg-image", "", "Image for baremetal-runtimecfg.") bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.oauthProxyImage, "oauth-proxy-image", "", "Image for origin oauth proxy.") + // TODO: @pablintino I'm not sure who's passing the baseos options bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.baseOSContainerImage, "baseos-image", "", "ostree-bootable container image reference") bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.baseOSExtensionsContainerImage, "baseos-extensions-image", "", "Image with extensions") bootstrapCmd.PersistentFlags().StringVar(&bootstrapOpts.kubeRbacProxyImage, "kube-rbac-proxy-image", "", "Image for origin kube-rbac proxy.") @@ -118,20 +120,14 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { // To help debugging, immediately log version klog.Infof("Version: %+v (%s)", version.Raw, version.Hash) - baseOSContainerImageTag := "rhel-coreos" - if version.IsFCOS() { - baseOSContainerImageTag = "fedora-coreos" - } else if version.IsSCOS() { - baseOSContainerImageTag = "stream-coreos" - } - + var imgstream *imagev1.ImageStream if bootstrapOpts.imageReferences != "" { imageRefData, err := os.ReadFile(bootstrapOpts.imageReferences) if err != nil { klog.Fatalf("failed to read %s: %v", bootstrapOpts.imageReferences, err) } - imgstream := resourceread.ReadImageStreamV1OrDie(imageRefData) + imgstream = resourceread.ReadImageStreamV1OrDie(imageRefData) bootstrapOpts.mcoImage = findImageOrDie(imgstream, "machine-config-operator") 
bootstrapOpts.keepalivedImage = findImageOrDie(imgstream, "keepalived-ipfailover") @@ -142,26 +138,25 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { bootstrapOpts.kubeRbacProxyImage = findImageOrDie(imgstream, "kube-rbac-proxy") bootstrapOpts.infraImage = findImageOrDie(imgstream, "pod") bootstrapOpts.haproxyImage = findImageOrDie(imgstream, "haproxy-router") - bootstrapOpts.baseOSContainerImage, err = findImage(imgstream, baseOSContainerImageTag) - if err != nil { + + // TODO: @pablintino. I've not identified a usage of the bootstrap that doesn't pass the image-references option + streamName := osimagestream.GetDefaultStreamName() + if _, err = findImage(imgstream, streamName); err != nil { klog.Warningf("Base OS container not found: %s", err) } - bootstrapOpts.baseOSExtensionsContainerImage, err = findImage(imgstream, fmt.Sprintf("%s-extensions", baseOSContainerImageTag)) - if err != nil { + if _, err = findImage(imgstream, fmt.Sprintf("%s-extensions", streamName)); err != nil { klog.Warningf("Base OS extensions container not found: %s", err) } } imgs := ctrlcommon.Images{ RenderConfigImages: ctrlcommon.RenderConfigImages{ - MachineConfigOperator: bootstrapOpts.mcoImage, - KeepalivedBootstrap: bootstrapOpts.keepalivedImage, - CorednsBootstrap: bootstrapOpts.corednsImage, - BaremetalRuntimeCfgBootstrap: bootstrapOpts.baremetalRuntimeCfgImage, - OauthProxy: bootstrapOpts.oauthProxyImage, - KubeRbacProxy: bootstrapOpts.kubeRbacProxyImage, - BaseOSContainerImage: bootstrapOpts.baseOSContainerImage, - BaseOSExtensionsContainerImage: bootstrapOpts.baseOSExtensionsContainerImage, + MachineConfigOperator: bootstrapOpts.mcoImage, + KeepalivedBootstrap: bootstrapOpts.keepalivedImage, + CorednsBootstrap: bootstrapOpts.corednsImage, + BaremetalRuntimeCfgBootstrap: bootstrapOpts.baremetalRuntimeCfgImage, + OauthProxy: bootstrapOpts.oauthProxyImage, + KubeRbacProxy: bootstrapOpts.kubeRbacProxyImage, }, ControllerConfigImages: ctrlcommon.ControllerConfigImages{ 
InfraImage: bootstrapOpts.infraImage, @@ -172,6 +167,11 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { }, } + // todo: @pablintino Add the CLI image URLs as another source fot the OS Stream parser + var cliOSImageStreamParser *osimagestream.CliOSImageStreamParser + if bootstrapOpts.baseOSContainerImage != "" && bootstrapOpts.baseOSExtensionsContainerImage != "" { + cliOSImageStreamParser = osimagestream.NewCliOSImageStreamParser(bootstrapOpts.baseOSContainerImage, bootstrapOpts.baseOSExtensionsContainerImage) + } if err := operator.RenderBootstrap( bootstrapOpts.additionalTrustBundleFile, bootstrapOpts.proxyConfigFile, @@ -185,6 +185,8 @@ func runBootstrapCmd(_ *cobra.Command, _ []string) { &imgs, bootstrapOpts.destinationDir, bootstrapOpts.releaseImage, + imgstream, + cliOSImageStreamParser, ); err != nil { klog.Fatalf("error rendering bootstrap manifests: %v", err) } diff --git a/go.mod b/go.mod index 5cc713ce39..95cae0f822 100644 --- a/go.mod +++ b/go.mod @@ -50,9 +50,9 @@ require ( golang.org/x/net v0.43.0 golang.org/x/time v0.11.0 google.golang.org/grpc v1.71.0 - k8s.io/api v0.33.3 + k8s.io/api v0.34.1 k8s.io/apiextensions-apiserver v0.33.2 - k8s.io/apimachinery v0.33.3 + k8s.io/apimachinery v0.34.1 k8s.io/client-go v0.33.2 k8s.io/code-generator v0.33.2 k8s.io/component-base v0.33.2 @@ -129,7 +129,7 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/cadvisor v0.52.1 // indirect github.com/google/cel-go v0.23.2 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect @@ -208,7 +208,7 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - go.yaml.in/yaml/v3 v3.0.3 // 
indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/tools/go/expect v0.1.1-deprecated // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect @@ -223,7 +223,7 @@ require ( k8s.io/cri-client v0.0.0 // indirect k8s.io/csi-translation-lib v0.0.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect + k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect k8s.io/kms v0.33.2 // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/mount-utils v0.0.0 // indirect @@ -232,6 +232,7 @@ require ( sigs.k8s.io/kustomize/api v0.19.0 // indirect sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) require ( @@ -335,7 +336,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/moricho/tparallel v0.3.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -410,7 +411,7 @@ require ( k8s.io/apiserver v0.33.2 k8s.io/klog/v2 v2.130.1 k8s.io/kube-aggregator v0.33.2 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect @@ -454,3 +455,5 @@ replace ( k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin 
v0.0.0-20250716113245-b94367cabf3e k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250716113245-b94367cabf3e ) + +replace github.com/openshift/api => github.com/pablintino/api v0.0.0-20251030115546-80b1b5867a30 diff --git a/go.sum b/go.sum index 5fb66ff53c..df380d6a87 100644 --- a/go.sum +++ b/go.sum @@ -360,8 +360,8 @@ github.com/google/cadvisor v0.52.1 h1:sC8SZ6jio9ds+P2dk51bgbeYeufxo55n0X3tmrpA9a github.com/google/cadvisor v0.52.1/go.mod h1:OAhPcx1nOm5YwMh/JhpUOMKyv1YKLRtS9KgzWPndHmA= github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -545,8 +545,9 @@ github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFL github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= @@ -595,8 +596,6 @@ github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplU github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250722101414-8083129ab8f9 h1:4ZeSM80DVCb5WWB3Q/fyCI9jYXAl9bfrGnFvFONqzN4= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250722101414-8083129ab8f9/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20251013165757-fe48e8fd548b h1:X18aj8dcvmGC9T7xiHHz3B9YRT4b5KiX/snG27cj9mc= -github.com/openshift/api v0.0.0-20251013165757-fe48e8fd548b/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM= github.com/openshift/client-go v0.0.0-20250911202206-1bc0cb0da03b h1:VQpSjWE8jmsPj+EXB+XABTLmDgg9xtT8/fudB/31/aI= github.com/openshift/client-go v0.0.0-20250911202206-1bc0cb0da03b/go.mod h1:w7sV33ASK/HcuEb0Ll9qvChZdJwNwqo8GocVAnd7fVY= github.com/openshift/kubernetes v1.30.1-0.20250716113245-b94367cabf3e h1:M5BrUTglTltZjcRz5ouJBqSw0a60p760Bl520ndOGS0= @@ -660,6 +659,8 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= 
+github.com/pablintino/api v0.0.0-20251030115546-80b1b5867a30 h1:rPPG1IpYgTEpslrXXdjILXR+ZT8vekpSlBMQfu6v2HU= +github.com/pablintino/api v0.0.0-20251030115546-80b1b5867a30/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -934,8 +935,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= -go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA= go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1153,14 +1154,14 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= -k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog= -k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod 
h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q= +k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.33.2 h1:GFwNXX4CZGQCg9DPOaJi1/+iKidCtB9/OIAGdzRo8FI= k8s.io/kms v0.33.2/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= @@ -1186,6 +1187,8 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod 
h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/pkg/controller/build/imagepruner/errors.go b/pkg/controller/build/imagepruner/errors.go index f88d452ae9..73c1ab2e4c 100644 --- a/pkg/controller/build/imagepruner/errors.go +++ b/pkg/controller/build/imagepruner/errors.go @@ -2,13 +2,13 @@ package imagepruner import ( "errors" - "fmt" "net/http" "strings" "github.com/containers/image/v5/docker" "github.com/docker/distribution/registry/api/errcode" errcodev2 "github.com/docker/distribution/registry/api/v2" + "github.com/openshift/machine-config-operator/pkg/controller/image" ) // IsTolerableDeleteErr determines if the returned error message during image deletion can be @@ -27,7 +27,7 @@ func IsTolerableDeleteErr(err error) bool { // Any errors related to the actual image registry query are wrapped in an // ErrImage instance. This allows us to easily identify intolerable errors // such as not being able to write the authfile or certs, etc. - var errImage *ErrImage + var errImage *image.ErrImage if !errors.As(err, &errImage) { return false } @@ -142,50 +142,3 @@ func isTolerableUnexpectedHTTPStatusError(err error) bool { return false } - -// ErrImage holds and wraps an error related to a specific image. -type ErrImage struct { - msg string - img string - err error -} - -// newErrImageWithMessage constructs a new ErrImage instance with a custom message, -// image pullspec, and wrapped error. -func newErrImageWithMessage(msg, img string, err error) error { - return &ErrImage{msg: msg, img: img, err: err} -} - -// newErrImage constructs a new ErrImage instance with an image pullspec and -// wrapped error, without a custom message. -func newErrImage(img string, err error) error { - return &ErrImage{img: img, err: err} -} - -// Image returns the image pullspec that caused the error. 
-func (e *ErrImage) Image() string { - return e.img -} - -// Error implements the error interface, providing a formatted error string -// including the message (if present), image (if present), and the wrapped error's string. -func (e *ErrImage) Error() string { - if e.msg != "" && e.img != "" { - // If both the message and image are not empty, include both. - return fmt.Sprintf("%s: image %q: %s", e.msg, e.img, e.err.Error()) - } - - if e.msg == "" && e.img != "" { - // If the message is empty and the image is not, only include the image. - return fmt.Sprintf("image %q: %s", e.img, e.err.Error()) - } - - // If neither the message nor the image is populated, just return the error - // string as-is. - return e.err.Error() -} - -// Unwrap implements the Unwrap interface, allowing the nested error to be surfaced. -func (e *ErrImage) Unwrap() error { - return e.err -} diff --git a/pkg/controller/build/imagepruner/imageinspect.go b/pkg/controller/build/imagepruner/imageinspect.go index 1f479befd5..93eeb76af7 100644 --- a/pkg/controller/build/imagepruner/imageinspect.go +++ b/pkg/controller/build/imagepruner/imageinspect.go @@ -2,15 +2,11 @@ package imagepruner import ( "context" - "fmt" - "strings" "github.com/containers/common/pkg/retry" - "github.com/containers/image/v5/docker" - "github.com/containers/image/v5/image" - "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" + "github.com/openshift/machine-config-operator/pkg/controller/image" ) const ( @@ -49,31 +45,13 @@ func NewImageInspectorDeleter() ImageInspectorDeleter { } // ImageInspect uses the provided system context to inspect the provided image pullspec. 
-func (i *imageInspectorImpl) ImageInspect(ctx context.Context, sysCtx *types.SystemContext, image string) (*types.ImageInspectInfo, *digest.Digest, error) { - return imageInspect(ctx, sysCtx, image) +func (i *imageInspectorImpl) ImageInspect(ctx context.Context, sysCtx *types.SystemContext, imageName string) (*types.ImageInspectInfo, *digest.Digest, error) { + return image.ImageInspect(ctx, sysCtx, imageName, &retry.RetryOptions{MaxRetry: cmdRetriesCount}) } // DeleteImage uses the provided system context to delete the provided image pullspec. -func (i *imageInspectorImpl) DeleteImage(ctx context.Context, sysCtx *types.SystemContext, image string) error { - return deleteImage(ctx, sysCtx, image) -} - -// parseImageName parses the image name string into an ImageReference, -// handling various prefix formats like "docker://" and ensuring a standard format. -func parseImageName(imgName string) (types.ImageReference, error) { - if strings.Contains(imgName, "//") && !strings.HasPrefix(imgName, "docker://") { - return nil, fmt.Errorf("unknown transport for pullspec %s", imgName) - } - - if strings.HasPrefix(imgName, "docker://") { - imgName = strings.ReplaceAll(imgName, "docker://", "//") - } - - if !strings.HasPrefix(imgName, "//") { - imgName = "//" + imgName - } - - return docker.Transport.ParseReference(imgName) +func (i *imageInspectorImpl) DeleteImage(ctx context.Context, sysCtx *types.SystemContext, imageName string) error { + return deleteImage(ctx, sysCtx, imageName) } // deleteImage attempts to delete the specified image with retries, @@ -85,7 +63,7 @@ func deleteImage(ctx context.Context, sysCtx *types.SystemContext, imageName str MaxRetry: cmdRetriesCount, } - ref, err := parseImageName(imageName) + ref, err := image.ParseImageName(imageName) if err != nil { return err } @@ -93,70 +71,7 @@ func deleteImage(ctx context.Context, sysCtx *types.SystemContext, imageName str if err := retry.IfNecessary(ctx, func() error { return ref.DeleteImage(ctx, sysCtx) }, 
&retryOpts); err != nil { - return newErrImage(imageName, err) + return image.NewErrImage(imageName, err) } return nil } - -// imageInspect inspects the specified image, retrieving its ImageInspectInfo and digest. -// This function has been inspired by upstream skopeo inspect. -// It includes retry logic for image source creation and manifest retrieval. -// TODO(jkyros): Revisit direct skopeo inspect usage, but direct library calls are beneficial for error context. -// -//nolint:unparam -func imageInspect(ctx context.Context, sysCtx *types.SystemContext, imageName string) (*types.ImageInspectInfo, *digest.Digest, error) { - var ( - src types.ImageSource - imgInspect *types.ImageInspectInfo - err error - ) - - retryOpts := retry.RetryOptions{ - MaxRetry: cmdRetriesCount, - } - - ref, err := parseImageName(imageName) - if err != nil { - return nil, nil, fmt.Errorf("error parsing image name %q: %w", imageName, err) - } - - // retry.IfNecessary takes into account whether the error is "retryable" - // so we don't keep looping on errors that will never resolve - if err := retry.RetryIfNecessary(ctx, func() error { - src, err = ref.NewImageSource(ctx, sysCtx) - return err - }, &retryOpts); err != nil { - return nil, nil, newErrImage(imageName, fmt.Errorf("error getting image source: %w", err)) - } - - var rawManifest []byte - unparsedInstance := image.UnparsedInstance(src, nil) - if err := retry.IfNecessary(ctx, func() error { - rawManifest, _, err = unparsedInstance.Manifest(ctx) - return err - }, &retryOpts); err != nil { - return nil, nil, fmt.Errorf("error retrieving manifest for image: %w", err) - } - - // get the digest here because it's not part of the image inspection - digest, err := manifest.Digest(rawManifest) - if err != nil { - return nil, nil, fmt.Errorf("error retrieving image digest: %q: %w", imageName, err) - } - - defer src.Close() - - img, err := image.FromUnparsedImage(ctx, sysCtx, unparsedInstance) - if err != nil { - return nil, nil, 
newErrImage(imageName, fmt.Errorf("error parsing manifest for image: %w", err)) - } - - if err := retry.RetryIfNecessary(ctx, func() error { - imgInspect, err = img.Inspect(ctx) - return err - }, &retryOpts); err != nil { - return nil, nil, newErrImage(imageName, err) - } - - return imgInspect, &digest, nil -} diff --git a/pkg/controller/build/imagepruner/imagepruner.go b/pkg/controller/build/imagepruner/imagepruner.go index 28c3089971..d3d64be616 100644 --- a/pkg/controller/build/imagepruner/imagepruner.go +++ b/pkg/controller/build/imagepruner/imagepruner.go @@ -3,9 +3,6 @@ package imagepruner import ( "context" "fmt" - "os" - "path/filepath" - "strings" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" @@ -13,8 +10,7 @@ import ( "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - "github.com/openshift/machine-config-operator/pkg/controller/template" - "github.com/openshift/machine-config-operator/pkg/secrets" + "github.com/openshift/machine-config-operator/pkg/controller/image" ) // ImagePruner defines the interface for inspecting and deleting container images. @@ -42,13 +38,14 @@ func NewImagePruner() ImagePruner { // InspectImage inspects the given image using the provided secret. It also accepts a // ControllerConfig so that certificates may be placed on the filesystem for authentication. 
func (i *imagePrunerImpl) InspectImage(ctx context.Context, pullspec string, secret *corev1.Secret, cc *mcfgv1.ControllerConfig) (*types.ImageInspectInfo, *digest.Digest, error) { - sysCtx, err := i.prepareSystemContext(secret, cc) + sysCfgProvider := image.NewSysContextControllerConfigProvider(secret, cc) + sysCtx, err := sysCfgProvider.BuildSystemContext() if err != nil { return nil, nil, fmt.Errorf("could not prepare for image inspection: %w", err) } defer func() { - if err := i.cleanup(sysCtx); err != nil { + if err := sysCfgProvider.Cleanup(sysCtx); err != nil { klog.Warningf("Unable to clean up after inspection of %s: %s", pullspec, err) } }() @@ -64,13 +61,14 @@ func (i *imagePrunerImpl) InspectImage(ctx context.Context, pullspec string, sec // DeleteImage deletes the given image using the provided secret. It also accepts a // ControllerConfig so that certificates may be placed on the filesystem for authentication. func (i *imagePrunerImpl) DeleteImage(ctx context.Context, pullspec string, secret *corev1.Secret, cc *mcfgv1.ControllerConfig) error { - sysCtx, err := i.prepareSystemContext(secret, cc) + sysCfgProvider := image.NewSysContextControllerConfigProvider(secret, cc) + sysCtx, err := sysCfgProvider.BuildSystemContext() if err != nil { return fmt.Errorf("could not prepare for image deletion: %w", err) } defer func() { - if err := i.cleanup(sysCtx); err != nil { + if err := sysCfgProvider.Cleanup(sysCtx); err != nil { klog.Warningf("Unable to clean up after deletion of %s: %s", pullspec, err) } }() @@ -81,141 +79,3 @@ func (i *imagePrunerImpl) DeleteImage(ctx context.Context, pullspec string, secr return nil } - -// prepareSystemContext prepares to perform the requested operation by first creating the -// certificate directory and then writing the authfile to the appropriate path. 
-func (i *imagePrunerImpl) prepareSystemContext(secret *corev1.Secret, cc *mcfgv1.ControllerConfig) (*types.SystemContext, error) { - // Make a deep copy of the ControllerConfig because the write process mutates - // the ControllerConfig in-place. - certsDir, err := i.prepareCerts(cc.DeepCopy()) - if err != nil { - return nil, fmt.Errorf("could not prepare certs: %w", err) - } - - authfilePath, err := i.prepareAuthfile(secret) - if err != nil { - return nil, fmt.Errorf("could not get authfile path for secret %s: %w", secret.Name, err) - } - - return &types.SystemContext{ - AuthFilePath: authfilePath, - DockerPerHostCertDirPath: certsDir, - }, nil -} - -// cleanup cleans up after an operation by removing the temporary certificates directory -// and the temporary authfile. -func (i *imagePrunerImpl) cleanup(sysCtx *types.SystemContext) error { - if err := os.RemoveAll(sysCtx.DockerPerHostCertDirPath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("could not clean up certs directory %s: %w", sysCtx.DockerPerHostCertDirPath, err) - } - - if err := os.RemoveAll(sysCtx.AuthFilePath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("could not clean up authfile directory %s: %w", sysCtx.AuthFilePath, err) - } - - return nil -} - -// prepareCerts prepares the certificates by first creating a temporary directory for them -// and then writing the certs from the ControllerConfig to that directory. -func (i *imagePrunerImpl) prepareCerts(cc *mcfgv1.ControllerConfig) (string, error) { - certsDir, err := os.MkdirTemp("", "imagepruner-certs-dir") - if err != nil { - return "", fmt.Errorf("could not create temp dir: %w", err) - } - - if err := i.writeCerts(certsDir, cc); err != nil { - return "", fmt.Errorf("could not write certs: %w", err) - } - - return certsDir, nil -} - -// writeCerts extracts the certificates from the ControllerConfig and writes them -// to the appropriate directory, which defaults to /etc/docker/certs.d. 
-func (i *imagePrunerImpl) writeCerts(certsDir string, cc *mcfgv1.ControllerConfig) error { - template.UpdateControllerConfigCerts(cc) - - for _, irb := range cc.Spec.ImageRegistryBundleData { - if err := i.writeCertFromImageRegistryBundle(certsDir, irb); err != nil { - return fmt.Errorf("could not write image registry bundle from ImageRegistryBundleData: %w", err) - } - } - - for _, irb := range cc.Spec.ImageRegistryBundleUserData { - if err := i.writeCertFromImageRegistryBundle(certsDir, irb); err != nil { - return fmt.Errorf("could not write image registry bundle from ImageRegistryBundleUserData: %w", err) - } - } - - return nil -} - -// writeCertFromImageRegistryBundle writes a certificate from an image registry bundle -// to the specified certificates directory, creating necessary subdirectories. -func (i *imagePrunerImpl) writeCertFromImageRegistryBundle(certsDir string, irb mcfgv1.ImageRegistryBundle) error { - caFile := strings.ReplaceAll(irb.File, "..", ":") - - certDir := filepath.Join(certsDir, caFile) - - if err := os.MkdirAll(certDir, 0o755); err != nil { - return fmt.Errorf("could not create cert dir %q: %w", certDir, err) - } - - certFile := filepath.Join(certDir, "ca.crt") - - if err := os.WriteFile(certFile, irb.Data, 0o644); err != nil { - return fmt.Errorf("could not write cert file %q: %w", certFile, err) - } - - return nil -} - -// prepareAuthfile creates a temporary directory and writes the Docker secret -// (authfile) into a file named "authfile.json" within that directory. -// It returns the path to the created authfile. 
-func (i *imagePrunerImpl) prepareAuthfile(secret *corev1.Secret) (string, error) { - authfileDir, err := os.MkdirTemp("", "imagepruner-authfile") - if err != nil { - return "", fmt.Errorf("could not create temp dir for authfile: %w", err) - } - - authfilePath := filepath.Join(authfileDir, "authfile.json") - - is, err := secrets.NewImageRegistrySecret(secret) - if err != nil { - return "", fmt.Errorf("could not create an ImageRegistrySecret for '%s/%s': %w", secret.Namespace, secret.Name, err) - } - - secretBytes, err := is.JSONBytes(corev1.SecretTypeDockerConfigJson) - if err != nil { - return "", fmt.Errorf("could not normalize secret '%s/%s' to %s: %w", secret.Namespace, secret.Name, corev1.SecretTypeDockerConfigJson, err) - } - - if err := os.WriteFile(authfilePath, secretBytes, 0o644); err != nil { - return "", fmt.Errorf("could not write temp authfile %q for secret %q: %w", authfilePath, secret.Name, err) - } - - return authfilePath, nil -} - -// writeAuthfile ensures that the image registry secret is in the dockerconfigjson format -// and writes it to the specified path. 
-func (i *imagePrunerImpl) writeAuthfile(secret *corev1.Secret, authfilePath string) error { - is, err := secrets.NewImageRegistrySecret(secret) - if err != nil { - return fmt.Errorf("could not create an ImageRegistrySecret for '%s/%s': %w", secret.Namespace, secret.Name, err) - } - - secretBytes, err := is.JSONBytes(corev1.SecretTypeDockerConfigJson) - if err != nil { - return fmt.Errorf("could not normalize secret '%s/%s' to %s: %w", secret.Namespace, secret.Name, corev1.SecretTypeDockerConfigJson, err) - } - - if err := os.WriteFile(authfilePath, secretBytes, 0o644); err != nil { - return fmt.Errorf("could not write temp authfile %q for secret %q: %w", authfilePath, secret.Name, err) - } - - return nil -} diff --git a/pkg/controller/common/images.go b/pkg/controller/common/images.go index abd24c580a..2cbcca9c9f 100644 --- a/pkg/controller/common/images.go +++ b/pkg/controller/common/images.go @@ -28,10 +28,6 @@ type Images struct { // RenderConfigImages are image names used to render templates under ./manifests/ type RenderConfigImages struct { MachineConfigOperator string `json:"machineConfigOperator"` - // The new format image - BaseOSContainerImage string `json:"baseOSContainerImage"` - // The matching extensions container for the new format image - BaseOSExtensionsContainerImage string `json:"baseOSExtensionsContainerImage"` // These have to be named differently from the ones in ControllerConfigImages // or we get errors about ambiguous selectors because both structs are // combined in the Images struct. diff --git a/pkg/controller/image/errors.go b/pkg/controller/image/errors.go new file mode 100644 index 0000000000..a7b71998ab --- /dev/null +++ b/pkg/controller/image/errors.go @@ -0,0 +1,50 @@ +package image + +import "fmt" + +// ErrImage holds and wraps an error related to a specific image. 
+type ErrImage struct { + msg string + img string + err error +} + +// newErrImageWithMessage constructs a new ErrImage instance with a custom message, +// image pullspec, and wrapped error. +func NewErrImageWithMessage(msg, img string, err error) error { + return &ErrImage{msg: msg, img: img, err: err} +} + +// newErrImage constructs a new ErrImage instance with an image pullspec and +// wrapped error, without a custom message. +func NewErrImage(img string, err error) error { + return &ErrImage{img: img, err: err} +} + +// Image returns the image pullspec that caused the error. +func (e *ErrImage) Image() string { + return e.img +} + +// Error implements the error interface, providing a formatted error string +// including the message (if present), image (if present), and the wrapped error's string. +func (e *ErrImage) Error() string { + if e.msg != "" && e.img != "" { + // If both the message and image are not empty, include both. + return fmt.Sprintf("%s: image %q: %s", e.msg, e.img, e.err.Error()) + } + + if e.msg == "" && e.img != "" { + // If the message is empty and the image is not, only include the image. + return fmt.Sprintf("image %q: %s", e.img, e.err.Error()) + } + + // If neither the message nor the image is populated, just return the error + // string as-is. + return e.err.Error() +} + +// Unwrap implements the Unwrap interface, allowing the nested error to be surfaced. 
+func (e *ErrImage) Unwrap() error { + return e.err +} diff --git a/pkg/controller/image/image.go b/pkg/controller/image/image.go new file mode 100644 index 0000000000..21b1ca45d2 --- /dev/null +++ b/pkg/controller/image/image.go @@ -0,0 +1,107 @@ +package image + +import ( + "context" + "fmt" + "strings" + + "github.com/containers/common/pkg/retry" + "github.com/containers/image/v5/docker" + image_v5 "github.com/containers/image/v5/image" + "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/types" + "github.com/opencontainers/go-digest" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + imagesScheme = runtime.NewScheme() + imagesCodecs = serializer.NewCodecFactory(imagesScheme) +) + +func GetImageSource(ctx context.Context, sysCtx *types.SystemContext, imageName string, retryOpts *retry.RetryOptions) (types.ImageSource, error) { + ref, err := ParseImageName(imageName) + if err != nil { + return nil, fmt.Errorf("error parsing image name %q: %w", imageName, err) + } + + var src types.ImageSource + // retry.IfNecessary takes into account whether the error is "retryable" + // so we don't keep looping on errors that will never resolve + if err := retry.RetryIfNecessary(ctx, func() error { + src, err = ref.NewImageSource(ctx, sysCtx) + return err + }, retryOpts); err != nil { + return nil, NewErrImage(imageName, fmt.Errorf("error getting image source: %w", err)) + } + return src, nil +} + +// ParseImageName parses the image name string into an ImageReference, +// handling various prefix formats like "docker://" and ensuring a standard format. 
+func ParseImageName(imgName string) (types.ImageReference, error) { + if strings.Contains(imgName, "//") && !strings.HasPrefix(imgName, "docker://") { + return nil, fmt.Errorf("unknown transport for pullspec %s", imgName) + } + + if strings.HasPrefix(imgName, "docker://") { + imgName = strings.ReplaceAll(imgName, "docker://", "//") + } + + if !strings.HasPrefix(imgName, "//") { + imgName = "//" + imgName + } + + return docker.Transport.ParseReference(imgName) +} + +// imageInspect inspects the specified image, retrieving its ImageInspectInfo and digest. +// This function has been inspired by upstream skopeo inspect. +// It includes retry logic for image source creation and manifest retrieval. +// TODO(jkyros): Revisit direct skopeo inspect usage, but direct library calls are beneficial for error context. +// +//nolint:unparam +func ImageInspect(ctx context.Context, sysCtx *types.SystemContext, imageName string, retryOpts *retry.RetryOptions) (*types.ImageInspectInfo, *digest.Digest, error) { + var ( + imgInspect *types.ImageInspectInfo + err error + ) + + src, err := GetImageSource(ctx, sysCtx, imageName, retryOpts) + if err != nil { + return nil, nil, err + } + defer src.Close() + + var rawManifest []byte + unparsedInstance := image_v5.UnparsedInstance(src, nil) + if err := retry.IfNecessary(ctx, func() error { + rawManifest, _, err = unparsedInstance.Manifest(ctx) + return err + }, retryOpts); err != nil { + return nil, nil, fmt.Errorf("error retrieving manifest for image: %w", err) + } + + // get the digest here because it's not part of the image inspection + digest, err := manifest.Digest(rawManifest) + if err != nil { + return nil, nil, fmt.Errorf("error retrieving image digest: %q: %w", imageName, err) + } + + defer src.Close() + + img, err := image_v5.FromUnparsedImage(ctx, sysCtx, unparsedInstance) + if err != nil { + return nil, nil, NewErrImage(imageName, fmt.Errorf("error parsing manifest for image: %w", err)) + } + + if err := 
retry.RetryIfNecessary(ctx, func() error { + imgInspect, err = img.Inspect(ctx) + return err + }, retryOpts); err != nil { + return nil, nil, NewErrImage(imageName, err) + } + + return imgInspect, &digest, nil +} diff --git a/pkg/controller/image/streams.go b/pkg/controller/image/streams.go new file mode 100644 index 0000000000..c2b31e16e5 --- /dev/null +++ b/pkg/controller/image/streams.go @@ -0,0 +1,26 @@ +package image + +import ( + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func ReadImageStreamV1O(objBytes []byte) (*imagev1.ImageStream, error) { + requiredObj, err := runtime.Decode(imagesCodecs.UniversalDecoder(imagev1.SchemeGroupVersion), objBytes) + if err != nil { + return nil, err + } + stream, ok := requiredObj.(*imagev1.ImageStream) + if !ok { + return nil, fmt.Errorf("expected ImageStream, got %#v", requiredObj) + } + return stream, nil +} + +func init() { + if err := imagev1.AddToScheme(imagesScheme); err != nil { + panic(err) + } +} diff --git a/pkg/controller/image/sys_context.go b/pkg/controller/image/sys_context.go new file mode 100644 index 0000000000..a9e5bbc9b3 --- /dev/null +++ b/pkg/controller/image/sys_context.go @@ -0,0 +1,171 @@ +package image + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containers/image/v5/types" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/pkg/controller/template" + "github.com/openshift/machine-config-operator/pkg/secrets" + corev1 "k8s.io/api/core/v1" +) + +type SysContextProvider interface { + BuildSystemContext() (*types.SystemContext, error) +} + +type SysContextControllerConfigProvider struct { + sysCtx *types.SystemContext + secret *corev1.Secret + cc *mcfgv1.ControllerConfig +} + +func NewSysContextControllerConfigProvider(secret *corev1.Secret, cc *mcfgv1.ControllerConfig) *SysContextControllerConfigProvider { + return &SysContextControllerConfigProvider{ + secret: secret, 
+ cc: cc, + } +} + +// prepareSystemContext prepares to perform the requested operation by first creating the +// certificate directory and then writing the authfile to the appropriate path. +func (i *SysContextControllerConfigProvider) BuildSystemContext() (*types.SystemContext, error) { + // Make a deep copy of the ControllerConfig because the write process mutates + // the ControllerConfig in-place. + certsDir, err := i.prepareCerts() + if err != nil { + return nil, fmt.Errorf("could not prepare certs: %w", err) + } + + authfilePath, err := i.prepareAuthfile() + if err != nil { + return nil, fmt.Errorf("could not get authfile path for secret %s: %w", i.secret.Name, err) + } + + i.sysCtx = &types.SystemContext{ + AuthFilePath: authfilePath, + DockerPerHostCertDirPath: certsDir, + } + return i.sysCtx, nil +} + +// cleanup cleans up after an operation by removing the temporary certificates directory +// and the temporary authfile. +func (i *SysContextControllerConfigProvider) Cleanup(sysCtx *types.SystemContext) error { + if err := os.RemoveAll(sysCtx.DockerPerHostCertDirPath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("could not clean up certs directory %s: %w", sysCtx.DockerPerHostCertDirPath, err) + } + + if err := os.RemoveAll(sysCtx.AuthFilePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("could not clean up authfile directory %s: %w", sysCtx.AuthFilePath, err) + } + + return nil +} + +// prepareCerts prepares the certificates by first creating a temporary directory for them +// and then writing the certs from the ControllerConfig to that directory. 
+func (i *SysContextControllerConfigProvider) prepareCerts() (string, error) { + certsDir, err := os.MkdirTemp("", "imagepruner-certs-dir") + if err != nil { + return "", fmt.Errorf("could not create temp dir: %w", err) + } + + if err := i.writeCerts(certsDir); err != nil { + return "", fmt.Errorf("could not write certs: %w", err) + } + + return certsDir, nil +} + +// writeCerts extracts the certificates from the ControllerConfig and writes them +// to the appropriate directory, which defaults to /etc/docker/certs.d. +func (i *SysContextControllerConfigProvider) writeCerts(certsDir string) error { + cc := i.cc.DeepCopy() + template.UpdateControllerConfigCerts(cc) + + for _, irb := range cc.Spec.ImageRegistryBundleData { + if err := i.writeCertFromImageRegistryBundle(certsDir, irb); err != nil { + return fmt.Errorf("could not write image registry bundle from ImageRegistryBundleData: %w", err) + } + } + + for _, irb := range cc.Spec.ImageRegistryBundleUserData { + if err := i.writeCertFromImageRegistryBundle(certsDir, irb); err != nil { + return fmt.Errorf("could not write image registry bundle from ImageRegistryBundleUserData: %w", err) + } + } + + return nil +} + +// writeCertFromImageRegistryBundle writes a certificate from an image registry bundle +// to the specified certificates directory, creating necessary subdirectories. 
+func (i *SysContextControllerConfigProvider) writeCertFromImageRegistryBundle(certsDir string, irb mcfgv1.ImageRegistryBundle) error { + caFile := strings.ReplaceAll(irb.File, "..", ":") + + certDir := filepath.Join(certsDir, caFile) + + if err := os.MkdirAll(certDir, 0o755); err != nil { + return fmt.Errorf("could not create cert dir %q: %w", certDir, err) + } + + certFile := filepath.Join(certDir, "ca.crt") + + if err := os.WriteFile(certFile, irb.Data, 0o644); err != nil { + return fmt.Errorf("could not write cert file %q: %w", certFile, err) + } + + return nil +} + +// prepareAuthfile creates a temporary directory and writes the Docker secret +// (authfile) into a file named "authfile.json" within that directory. +// It returns the path to the created authfile. +func (i *SysContextControllerConfigProvider) prepareAuthfile() (string, error) { + authfileDir, err := os.MkdirTemp("", "imagepruner-authfile") + if err != nil { + return "", fmt.Errorf("could not create temp dir for authfile: %w", err) + } + + authfilePath := filepath.Join(authfileDir, "authfile.json") + + is, err := secrets.NewImageRegistrySecret(i.secret) + if err != nil { + return "", fmt.Errorf("could not create an ImageRegistrySecret for '%s/%s': %w", i.secret.Namespace, i.secret.Name, err) + } + + secretBytes, err := is.JSONBytes(corev1.SecretTypeDockerConfigJson) + if err != nil { + return "", fmt.Errorf("could not normalize secret '%s/%s' to %s: %w", i.secret.Namespace, i.secret.Name, corev1.SecretTypeDockerConfigJson, err) + } + + if err := os.WriteFile(authfilePath, secretBytes, 0o644); err != nil { + return "", fmt.Errorf("could not write temp authfile %q for secret %q: %w", authfilePath, i.secret.Name, err) + } + + return authfilePath, nil +} + +// writeAuthfile ensures that the image registry secret is in the dockerconfigjson format +// and writes it to the specified path. 
+func (i *SysContextControllerConfigProvider) writeAuthfile(secret *corev1.Secret, authfilePath string) error { + is, err := secrets.NewImageRegistrySecret(secret) + if err != nil { + return fmt.Errorf("could not create an ImageRegistrySecret for '%s/%s': %w", secret.Namespace, secret.Name, err) + } + + secretBytes, err := is.JSONBytes(corev1.SecretTypeDockerConfigJson) + if err != nil { + return fmt.Errorf("could not normalize secret '%s/%s' to %s: %w", secret.Namespace, secret.Name, corev1.SecretTypeDockerConfigJson, err) + } + + if err := os.WriteFile(authfilePath, secretBytes, 0o644); err != nil { + return fmt.Errorf("could not write temp authfile %q for secret %q: %w", authfilePath, secret.Name, err) + } + + return nil +} diff --git a/pkg/controller/osimagestream/cli_osimages.go b/pkg/controller/osimagestream/cli_osimages.go new file mode 100644 index 0000000000..211e2601ea --- /dev/null +++ b/pkg/controller/osimagestream/cli_osimages.go @@ -0,0 +1,29 @@ +package osimagestream + +import ( + "context" + + "github.com/openshift/api/machineconfiguration/v1alpha1" +) + +type CliOSImageStreamParser struct { + osImageUrl string + osExtensionsImageUrl string +} + +func NewCliOSImageStreamParser(osImageUrl string, osExtensionsImageUrl string) *CliOSImageStreamParser { + return &CliOSImageStreamParser{ + osImageUrl: osImageUrl, + osExtensionsImageUrl: osExtensionsImageUrl, + } +} + +func (c *CliOSImageStreamParser) FetchStreams(_ context.Context) ([]*v1alpha1.OSImageStreamURLSet, error) { + return []*v1alpha1.OSImageStreamURLSet{ + { + Name: GetDefaultStreamName(), + OSExtensionsImageUrl: c.osExtensionsImageUrl, + OSImageUrl: c.osImageUrl, + }, + }, nil +} diff --git a/pkg/controller/osimagestream/configmap_osimages.go b/pkg/controller/osimagestream/configmap_osimages.go new file mode 100644 index 0000000000..a6dc697906 --- /dev/null +++ b/pkg/controller/osimagestream/configmap_osimages.go @@ -0,0 +1,57 @@ +package osimagestream + +import ( + "context" + "fmt" + + 
"github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/controller/common" + corev1 "k8s.io/api/core/v1" + corelisterv1 "k8s.io/client-go/listers/core/v1" +) + +type ConfigMapSource interface { + GetOSImagesConfigMap(ctx context.Context) (*corev1.ConfigMap, error) +} + +type OSImageURLConfigMapEtcdSource struct { + cmLister corelisterv1.ConfigMapLister +} + +func NewOSImageURLConfigMapEtcdSource(cmLister corelisterv1.ConfigMapLister) *OSImageURLConfigMapEtcdSource { + return &OSImageURLConfigMapEtcdSource{cmLister: cmLister} +} + +func (s *OSImageURLConfigMapEtcdSource) GetOSImagesConfigMap(ctx context.Context) (*corev1.ConfigMap, error) { + cm, err := s.cmLister.ConfigMaps(common.MCONamespace).Get(common.MachineConfigOSImageURLConfigMapName) + if err != nil { + return nil, fmt.Errorf("could not get ConfigMap %s: %w", common.MachineConfigOSImageURLConfigMapName, err) + } + return cm, nil +} + +type ConfigMapParser struct { + source ConfigMapSource +} + +func NewConfigMapParser(source ConfigMapSource) *ConfigMapParser { + return &ConfigMapParser{source: source} +} + +func (e *ConfigMapParser) FetchStreams(ctx context.Context) ([]*v1alpha1.OSImageStreamURLSet, error) { + configMap, err := e.source.GetOSImagesConfigMap(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch OS Image URLs ConfigMap: %w", err) + } + osUrls, err := common.ParseOSImageURLConfigMap(configMap) + if err != nil { + return nil, fmt.Errorf("failed to parse OS Image URLs ConfigMap: %w", err) + } + return []*v1alpha1.OSImageStreamURLSet{ + { + Name: GetDefaultStreamName(), + OSExtensionsImageUrl: osUrls.BaseOSExtensionsContainerImage, + OSImageUrl: osUrls.BaseOSContainerImage, + }, + }, nil +} diff --git a/pkg/controller/osimagestream/imagestream_osimages.go b/pkg/controller/osimagestream/imagestream_osimages.go new file mode 100644 index 0000000000..4600bbcc15 --- /dev/null +++ b/pkg/controller/osimagestream/imagestream_osimages.go @@ 
-0,0 +1,180 @@ +package osimagestream + +import ( + "context" + "maps" + "regexp" + "slices" + "strings" + + "github.com/containers/image/v5/types" + imagev1 "github.com/openshift/api/image/v1" + "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/controller/build/imagepruner" +) + +type OSImageUrlType string + +const ( + coreOSLabelStream = "io.coreos.oscontainerimage.osstream" + coreOSExtensionsLabelStream = "io.coreos.osextensionscontainerimage.osstream" + OSImageUrlTypeOS = OSImageUrlType("OS") + OSImageUrlTypeExtensions = OSImageUrlType("Extensions") +) + +var ( + imageTagRegxpr = regexp.MustCompile(`^(rhel[\w.+-]*|stream)-coreos[\w.+-]*(-extensions[\w.+-]*)?$`) +) + +type OSContainerLabelMetadata struct { + ImageUrl string + ImageType OSImageUrlType + Stream string +} + +func NewOSContainerLabelMetadataFromLabels(image string, labels map[string]string) *OSContainerLabelMetadata { + if osContainerStream, ok := labels[coreOSLabelStream]; ok && osContainerStream != "" { + return &OSContainerLabelMetadata{ + ImageUrl: image, + ImageType: OSImageUrlTypeOS, + Stream: osContainerStream, + } + } + if osExtensionsContainerStream, ok := labels[coreOSExtensionsLabelStream]; ok && osExtensionsContainerStream != "" { + return &OSContainerLabelMetadata{ + ImageUrl: image, + ImageType: OSImageUrlTypeExtensions, + Stream: osExtensionsContainerStream, + } + } + return nil +} + +func FetchImagesLabels(ctx context.Context, sys *types.SystemContext, images []string) []OSContainerLabelMetadata { + imageLabels := make([]OSContainerLabelMetadata, 0) + if len(images) == 0 { + return imageLabels + } + + inspector := imagepruner.NewImageInspectorDeleter() + + type labelsInspectionResult struct { + imageLabels map[string]string + url string + err error + } + results := make(chan labelsInspectionResult, len(images)) + rateLimiterChannel := make(chan struct{}, 5) + for _, image := range images { + go func(img string) { + select { + 
case rateLimiterChannel <- struct{}{}: + defer func() { <-rateLimiterChannel }() + + info, _, err := inspector.ImageInspect(ctx, sys, img) + if err != nil { + results <- labelsInspectionResult{err: err} + return + } + results <- labelsInspectionResult{imageLabels: info.Labels, url: img} + case <-ctx.Done(): + results <- labelsInspectionResult{err: ctx.Err()} + } + }(image) + } + + for range images { + res := <-results + if res.err != nil { + // Best effort, do not return! Just discard the stream + // TODO Log + continue + } + containerLabelMetadata := NewOSContainerLabelMetadataFromLabels(res.url, res.imageLabels) + if containerLabelMetadata != nil { + // TODO Debug log the nil case + imageLabels = append(imageLabels, *containerLabelMetadata) + } + } + return imageLabels +} + +func FilterImageStreamImages(is *imagev1.ImageStream) []string { + imagesToParse := make([]string, 0) + for _, tag := range is.Spec.Tags { + if tag.From == nil || tag.From.Kind != "DockerImage" { + continue + } + if tag.Annotations != nil { + if source, ok := tag.Annotations["io.openshift.build.source-location"]; ok && strings.Contains(source, "github.com/openshift/os") { + imagesToParse = append(imagesToParse, tag.From.Name) + continue + } + } + if imageTagRegxpr.MatchString(tag.Name) { + imagesToParse = append(imagesToParse, tag.From.Name) + } + } + return imagesToParse +} + +func GroupOSContainerLabelMetadataToStream(labelMetadatas []OSContainerLabelMetadata) []*v1alpha1.OSImageStreamURLSet { + streamMaps := make(map[string]*v1alpha1.OSImageStreamURLSet, 0) + for _, labelMetadata := range labelMetadatas { + streamUrlSet, exists := streamMaps[labelMetadata.Stream] + if !exists { + streamMaps[labelMetadata.Stream] = NewOSImageStreamURLSetFromLabelMetadata(&labelMetadata) + continue + } + + // The stream already exists. 
Maybe it does not have both URLs yet + if labelMetadata.ImageType == OSImageUrlTypeOS { + if streamUrlSet.OSImageUrl != "" && streamUrlSet.OSImageUrl != labelMetadata.ImageUrl { + // Looks like we have a conflict. Log it and override // todo + } + streamUrlSet.OSImageUrl = labelMetadata.ImageUrl + } else { + if streamUrlSet.OSExtensionsImageUrl != "" && streamUrlSet.OSExtensionsImageUrl != labelMetadata.ImageUrl { + // Looks like we have a conflict. Log it and override // todo + } + streamUrlSet.OSExtensionsImageUrl = labelMetadata.ImageUrl + } + } + return slices.Collect(maps.Values(streamMaps)) +} + +func NewOSImageStreamURLSetFromLabelMetadata(metadata *OSContainerLabelMetadata) *v1alpha1.OSImageStreamURLSet { + urlSet := &v1alpha1.OSImageStreamURLSet{ + Name: metadata.Stream, + } + if metadata.ImageType == OSImageUrlTypeOS { + urlSet.OSImageUrl = metadata.ImageUrl + } else { + urlSet.OSExtensionsImageUrl = metadata.ImageUrl + } + return urlSet +} + +type OSImageStreamParser struct { + imageStreamProvider ImageStreamProvider + sysCtx *types.SystemContext +} + +func NewOSImageStreamParser(sysCtx *types.SystemContext, imageStreamProvider ImageStreamProvider) *OSImageStreamParser { + return &OSImageStreamParser{sysCtx: sysCtx, imageStreamProvider: imageStreamProvider} +} + +func (r *OSImageStreamParser) FetchStreams(ctx context.Context) ([]*v1alpha1.OSImageStreamURLSet, error) { + imageStream, err := r.imageStreamProvider.ReadImageStream(ctx) + if err != nil { + return nil, err + } + + // Filter out the tags to get only the ones we consider + // related to OS/Extensions + osImagesDigests := FilterImageStreamImages(imageStream) + + // Get the labels of each OS image + osContainerMetadatas := FetchImagesLabels(ctx, r.sysCtx, osImagesDigests) + return GroupOSContainerLabelMetadataToStream(osContainerMetadatas), nil +} diff --git a/pkg/controller/osimagestream/imagestream_provider.go b/pkg/controller/osimagestream/imagestream_provider.go new file mode 100644 index 
0000000000..5fab811867 --- /dev/null +++ b/pkg/controller/osimagestream/imagestream_provider.go @@ -0,0 +1,123 @@ +package osimagestream + +import ( + "archive/tar" + "compress/gzip" + "context" + "fmt" + "io" + "slices" + "strings" + + "github.com/containers/common/pkg/retry" + "github.com/containers/image/v5/pkg/blobinfocache/none" + "github.com/containers/image/v5/types" + imagev1 "github.com/openshift/api/image/v1" + "github.com/openshift/machine-config-operator/pkg/controller/image" +) + +const releaseImageStreamLocation = "/release-manifests/image-references" + +type ImageStreamProvider interface { + ReadImageStream(ctx context.Context) (*imagev1.ImageStream, error) +} + +type ImageStreamProviderResource struct { + imageStream *imagev1.ImageStream +} + +func NewImageStreamProviderResource(imageStream *imagev1.ImageStream) *ImageStreamProviderResource { + return &ImageStreamProviderResource{imageStream: imageStream} +} + +func (i *ImageStreamProviderResource) ReadImageStream(_ context.Context) (*imagev1.ImageStream, error) { + return i.imageStream, nil +} + +type ImageStreamProviderNetwork struct { + sysCtx *types.SystemContext + imageName string +} + +func NewImageStreamProviderNetwork(sysCtx *types.SystemContext, imageName string) *ImageStreamProviderNetwork { + return &ImageStreamProviderNetwork{sysCtx: sysCtx, imageName: imageName} +} + +func (i *ImageStreamProviderNetwork) ReadImageStream(ctx context.Context) (*imagev1.ImageStream, error) { + imageStreamBytes, err := fetchImageReferences(ctx, i.sysCtx, i.imageName) + if err != nil { + return nil, err + } + + imageStream, err := image.ReadImageStreamV1O(imageStreamBytes) + if err != nil { + return nil, fmt.Errorf("error reading image stream from %s image: %v", i.imageName, err) + } + return imageStream, nil +} + +func fetchImageReferences(ctx context.Context, sysCtx *types.SystemContext, imageName string) ([]byte, error) { + ref, err := image.ParseImageName(imageName) + if err != nil { + return nil, err + 
} + img, err := ref.NewImage(ctx, sysCtx) + if err != nil { + return nil, err + } + defer img.Close() + + src, err := image.GetImageSource(ctx, sysCtx, imageName, &retry.Options{MaxRetry: 2}) + if err != nil { + return nil, err + } + defer src.Close() + + layerInfos := img.LayerInfos() + + // Start searching from the back + slices.Reverse(layerInfos) + for _, info := range layerInfos { + content, err := searchLayerForFile(ctx, src, info, releaseImageStreamLocation) + if err != nil { + return nil, err + } + if content != nil { + return content, nil + } + } + return nil, fmt.Errorf("%s file not found in %s image", releaseImageStreamLocation, imageName) +} + +func searchLayerForFile(ctx context.Context, imgSrc types.ImageSource, blobInfo types.BlobInfo, targetFile string) ([]byte, error) { + layerStream, _, err := imgSrc.GetBlob(ctx, blobInfo, none.NoCache) + if err != nil { + return nil, err + } + defer layerStream.Close() + + gzr, err := gzip.NewReader(layerStream) + if err != nil { + return nil, err + } + defer gzr.Close() + + // Search tar + tr := tar.NewReader(gzr) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + + // Normalize and match + if header.Name == targetFile || header.Name == "."+targetFile || "./"+header.Name == targetFile || strings.TrimLeft(targetFile, "/") == header.Name { + content, err := io.ReadAll(tr) + return content, err + } + } + return nil, nil +} diff --git a/pkg/controller/osimagestream/osimagestream.go b/pkg/controller/osimagestream/osimagestream.go new file mode 100644 index 0000000000..549679b5b6 --- /dev/null +++ b/pkg/controller/osimagestream/osimagestream.go @@ -0,0 +1,157 @@ +package osimagestream + +import ( + "context" + "fmt" + "maps" + "slices" + + imagev1 "github.com/openshift/api/image/v1" + v1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/api/machineconfiguration/v1alpha1" + 
"github.com/openshift/machine-config-operator/pkg/controller/image" + "github.com/openshift/machine-config-operator/pkg/version" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corelisterv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/klog/v2" +) + +type StreamSource interface { + FetchStreams(ctx context.Context) ([]*v1alpha1.OSImageStreamURLSet, error) +} + +func BuildOsImageStreamBootstrap( + ctx context.Context, + secret *corev1.Secret, + temporalCC *v1.ControllerConfig, + imageStream *imagev1.ImageStream, + cliParser *CliOSImageStreamParser, +) (*v1alpha1.OSImageStream, error) { + var sources []StreamSource + if cliParser != nil { + sources = append(sources, cliParser) + } + if imageStream != nil { + sysCfgProvider := image.NewSysContextControllerConfigProvider(secret, temporalCC) + sysCtx, err := sysCfgProvider.BuildSystemContext() + if err != nil { + return nil, fmt.Errorf("could not prepare for image inspection: %w", err) + } + + defer func() { + if err := sysCfgProvider.Cleanup(sysCtx); err != nil { + klog.Warningf("Unable to clean resources after OSImageStream inspection: %s", err) + } + }() + sources = append(sources, NewOSImageStreamParser(sysCtx, NewImageStreamProviderResource(imageStream))) + } + return BuildOSImageStreamFromSources(ctx, sources) +} + +func BuildOsImageStreamRuntime( + ctx context.Context, + secret *corev1.Secret, + controllerConfig *v1.ControllerConfig, + cmLister corelisterv1.ConfigMapLister, + releaseImage string, +) (*v1alpha1.OSImageStream, error) { + + sysCfgProvider := image.NewSysContextControllerConfigProvider(secret, controllerConfig) + sysCtx, err := sysCfgProvider.BuildSystemContext() + if err != nil { + return nil, fmt.Errorf("could not prepare for image inspection: %w", err) + } + + defer func() { + if err := sysCfgProvider.Cleanup(sysCtx); err != nil { + klog.Warningf("Unable to clean resources after OSImageStream inspection: %s", err) + } + }() + + // TODO @pablintino + // To avoid 
breaking changes we firstly load the streams from the configmap + // After that's done we fetch the ImageStream of the current version (likely this code is code at the first + // boot of the controller after an MCO update) and get the available images from that ImageStream + return BuildOSImageStreamFromSources(ctx, + []StreamSource{ + NewConfigMapParser(NewOSImageURLConfigMapEtcdSource(cmLister)), + NewOSImageStreamParser(sysCtx, NewImageStreamProviderNetwork(sysCtx, releaseImage)), + }) +} + +func BuildOSImageStreamFromSources(ctx context.Context, sources []StreamSource) (*v1alpha1.OSImageStream, error) { + streams := collect(ctx, sources) + if len(streams) == 0 { + return nil, fmt.Errorf("could not find any OS stream") + } + + defaultStream := getDefaultStream(streams) + if defaultStream == nil { + return nil, fmt.Errorf("could not find default stream %s in the list of OSImageStreams", GetDefaultStreamName()) + } + return &v1alpha1.OSImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: &v1alpha1.OSImageStreamSpec{}, + Status: &v1alpha1.OSImageStreamStatus{ + DefaultStream: defaultStream.Name, + AvailableStreams: streams, + }, + }, nil +} + +func getDefaultStream(streams []v1alpha1.OSImageStreamURLSet) *v1alpha1.OSImageStreamURLSet { + defaultStream := GetDefaultStreamName() + for _, stream := range streams { + if stream.Name == defaultStream { + return &stream + } + } + return nil +} + +func GetDefaultOSImageStream(osImageStream *v1alpha1.OSImageStream) v1alpha1.OSImageStreamURLSet { + // OSImageStream warranties that the default stream is always available + if osImageStream == nil || osImageStream.Status == nil { + return v1alpha1.OSImageStreamURLSet{} + } + defaultStream := getDefaultStream(osImageStream.Status.AvailableStreams) + if defaultStream == nil { + return v1alpha1.OSImageStreamURLSet{} + } + return *defaultStream +} + +func collect(ctx context.Context, sources []StreamSource) []v1alpha1.OSImageStreamURLSet { + result := 
make(map[string]v1alpha1.OSImageStreamURLSet) + for _, source := range sources { + streams, err := source.FetchStreams(ctx) + if err != nil { + // Do not return: Soft failure, best effort + // todo: log + } + + for _, stream := range streams { + _, exists := result[stream.Name] + if exists { + // Conflict: TODO DEBUG log and override + // This is expected. For example, the cli args + // may define the base stream, but an imagestream is + // passed and it overrides the values + } + result[stream.Name] = *stream + } + } + return slices.Collect(maps.Values(result)) +} + +func GetDefaultStreamName() string { + if version.IsFCOS() { + return "fedora-coreos" + } else if version.IsSCOS() { + return "stream-coreos" + } + return "rhel-coreos" +} diff --git a/pkg/operator/bootstrap.go b/pkg/operator/bootstrap.go index 5ec86ce5b7..26f149184d 100644 --- a/pkg/operator/bootstrap.go +++ b/pkg/operator/bootstrap.go @@ -1,11 +1,16 @@ package operator import ( + "context" "fmt" "os" "path/filepath" "strings" + imagev1 "github.com/openshift/api/image/v1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/machine-config-operator/pkg/controller/osimagestream" "k8s.io/klog/v2" configv1 "github.com/openshift/api/config/v1" @@ -34,6 +39,8 @@ func RenderBootstrap( mcsCAFile, kubeAPIServerServingCA, pullSecretFile string, imgs *ctrlcommon.Images, destinationDir, releaseImage string, + imageStream *imagev1.ImageStream, + cliOSImageStream *osimagestream.CliOSImageStreamParser, ) error { filesData := map[string][]byte{} files := []string{ @@ -137,10 +144,22 @@ func RenderBootstrap( spec.CloudProviderCAData = data } + // TODO @pablintino Example of how Streams are integrated + // at bootstrap time + pullSecretSecret := resourceread.ReadSecretV1OrDie(filesData[pullSecretFile]) + osimageStream, err := osimagestream.BuildOsImageStreamBootstrap( + context.Background(), + pullSecretSecret, + 
&mcfgv1.ControllerConfig{Spec: *spec}, imageStream, cliOSImageStream) + if err != nil { + return err + } + spec.RootCAData = bundle spec.PullSecret = nil - spec.BaseOSContainerImage = imgs.BaseOSContainerImage - spec.BaseOSExtensionsContainerImage = imgs.BaseOSExtensionsContainerImage + defaultStream := osimagestream.GetDefaultOSImageStream(osimageStream) + spec.BaseOSContainerImage = defaultStream.OSImageUrl + spec.BaseOSExtensionsContainerImage = defaultStream.OSExtensionsImageUrl spec.ReleaseImage = releaseImage spec.Images = map[string]string{ templatectrl.MachineConfigOperatorKey: imgs.MachineConfigOperator, diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 5b5e31f7a0..9027215c81 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -505,6 +505,7 @@ func (optr *Operator) sync(key string) error { syncFuncs := []syncFunc{ // "RenderConfig" must always run first as it sets the renderConfig in the operator // for the sync funcs below + {"OSImageStream", optr.syncOSImageStreams}, {"RenderConfig", optr.syncRenderConfig}, {"MachineConfiguration", optr.syncMachineConfiguration}, {"MachineConfigNode", optr.syncMachineConfigNodes}, diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 1cf5309f55..a480ef55ce 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -288,6 +288,19 @@ func (optr *Operator) syncCloudConfig(spec *mcfgv1.ControllerConfigSpec, infra * return nil } +func (optr *Operator) syncOSImageStreams(_ *renderConfig, _ *configv1.ClusterOperator) error { + // TODO @pablintino Perform here a check to see if the OSImageStream resource exist. + // If it doesn't exist or it's version annotation indicates that it was generated by + // a different version just create it. + // Important: This process can be places into a sync.Once and performed only once + // during the lifetime of the container as the content won't change unless the CVO updates us. 
+ // if renderedMC.Annotations[ctrlcommon.GeneratedByControllerVersionAnnotationKey] != version.Hash { + // klog.V(4).Infof("rendered MC commit hash %s mismatch with operator release commit hash %s", renderedMC.Annotations[ctrlcommon.GeneratedByControllerVersionAnnotationKey], version.Hash) + // return nil + // } + return nil +} + //nolint:gocyclo func (optr *Operator) syncRenderConfig(_ *renderConfig, _ *configv1.ClusterOperator) error { if optr.inClusterBringup { @@ -557,6 +570,10 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig, _ *configv1.ClusterOpera // sync up os image url // TODO: this should probably be part of the imgs + // TODO: @pablintino Replace this logic with a call to get the default stream + // IMPORTANT: For backguard compatibility purposes, the ControllerConfig fields + // will still be populated but we will fill them with the default stream. + // IMPORTANT: Pools should never use these values and should always look the OSImageStream resource oscontainer, osextensionscontainer, err := optr.getOsImageURLs(optr.namespace) if err != nil { return err diff --git a/vendor/github.com/google/gnostic-models/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go index 1bfe961219..26b31e51e3 100644 --- a/vendor/github.com/google/gnostic-models/compiler/context.go +++ b/vendor/github.com/google/gnostic-models/compiler/context.go @@ -15,7 +15,7 @@ package compiler import ( - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // Context contains state of the compiler as it traverses a document. 
diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go index 16ae66faa3..efa07f2a90 100644 --- a/vendor/github.com/google/gnostic-models/compiler/extensions.go +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -20,9 +20,9 @@ import ( "os/exec" "strings" + yaml "go.yaml.in/yaml/v3" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - yaml "gopkg.in/yaml.v3" extensions "github.com/google/gnostic-models/extensions" ) diff --git a/vendor/github.com/google/gnostic-models/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go index 975d65e8f8..a83261eb6c 100644 --- a/vendor/github.com/google/gnostic-models/compiler/helpers.go +++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -20,7 +20,7 @@ import ( "sort" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/jsonschema" ) diff --git a/vendor/github.com/google/gnostic-models/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go index be0e8b40c8..da409d6b36 100644 --- a/vendor/github.com/google/gnostic-models/compiler/reader.go +++ b/vendor/github.com/google/gnostic-models/compiler/reader.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) var verboseReader = false diff --git a/vendor/github.com/google/gnostic-models/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go index 4781bdc5f5..a42b8e0035 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/models.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/models.go @@ -16,7 +16,7 @@ // of JSON Schemas. package jsonschema -import "gopkg.in/yaml.v3" +import "go.yaml.in/yaml/v3" // The Schema struct models a JSON Schema and, because schemas are // defined hierarchically, contains many references to itself. 
diff --git a/vendor/github.com/google/gnostic-models/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go index b8583d4660..4f1fe0c08c 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/reader.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/reader.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "strconv" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // This is a global map of all known Schemas. diff --git a/vendor/github.com/google/gnostic-models/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go index 340dc5f933..19f5ddeae2 100644 --- a/vendor/github.com/google/gnostic-models/jsonschema/writer.go +++ b/vendor/github.com/google/gnostic-models/jsonschema/writer.go @@ -17,7 +17,7 @@ package jsonschema import ( "fmt" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) const indentation = " " diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go index d71fe6d545..de337d80c8 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := 
fmt.Sprintf("contains an invalid NonBodyParameter") + message := "contains an invalid NonBodyParameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid Parameter") + message := "contains an invalid Parameter" err := compiler.NewError(context, message) errors = []error{err} } @@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParametersItem") + message := "contains an invalid ParametersItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseValue") + message := "contains an invalid ResponseValue" err := compiler.NewError(context, message) errors = []error{err} } @@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaItem") + message := "contains an invalid SchemaItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains 
an invalid SecurityDefinitionsItem") + message := "contains an invalid SecurityDefinitionsItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node { // always include this required field. info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = 
append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) } - if m.Deprecated != false { + if m.Deprecated { info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) } @@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() 
*yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.Required != false { + if m.Required { info.Content = append(info.Content, 
compiler.NewScalarNodeForString("required")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) } @@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) } - if m.AllowEmptyValue != false { + if m.AllowEmptyValue { info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) } @@ -8340,7 +8340,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) } - if m.ExclusiveMaximum != false { + if m.ExclusiveMaximum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) } @@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) } - if m.ExclusiveMinimum != false { + if m.ExclusiveMinimum { info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) } @@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) } - if m.UniqueItems != false { + if m.UniqueItems { info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) } @@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node { info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) } - if m.ReadOnly != false { + if m.ReadOnly { info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) } @@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node { info.Content = append(info.Content, 
compiler.NewScalarNodeForString("prefix")) info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) } - if m.Attribute != false { + if m.Attribute { info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) } - if m.Wrapped != false { + if m.Wrapped { info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) } diff --git a/vendor/github.com/google/gnostic-models/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go index e96ac0d6da..89469a13ed 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -15,7 +15,7 @@ package openapi_v2 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index 4b1131ce1c..662772dd95 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) @@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + message := "contains an invalid AdditionalPropertiesItem" err := compiler.NewError(context, message) errors = []error{err} } @@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress // since the oneof matched one of its 
possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid AnyOrExpression") + message := "contains an invalid AnyOrExpression" err := compiler.NewError(context, message) errors = []error{err} } @@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid CallbackOrReference") + message := "contains an invalid CallbackOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ExampleOrReference") + message := "contains an invalid ExampleOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid HeaderOrReference") + message := "contains an invalid HeaderOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid LinkOrReference") + message := "contains an invalid LinkOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3090,7 +3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet // since the oneof 
matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ParameterOrReference") + message := "contains an invalid ParameterOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid RequestBodyOrReference") + message := "contains an invalid RequestBodyOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid ResponseOrReference") + message := "contains an invalid ResponseOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SchemaOrReference") + message := "contains an invalid SchemaOrReference" err := compiler.NewError(context, message) errors = []error{err} } @@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { - message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference") + message := "contains an invalid SecuritySchemeOrReference" err := compiler.NewError(context, message) errors = []error{err} } diff --git 
a/vendor/github.com/google/gnostic-models/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go index 1cee467735..499ff883c5 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -15,7 +15,7 @@ package openapi_v3 import ( - "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" "github.com/google/gnostic-models/compiler" ) diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go index ee4e7bb6ed..5646309e09 100644 --- a/vendor/github.com/modern-go/reflect2/safe_type.go +++ b/vendor/github.com/modern-go/reflect2/safe_type.go @@ -6,10 +6,12 @@ import ( ) type safeType struct { - reflect.Type - cfg *frozenConfig + Type reflect.Type + cfg *frozenConfig } +var _ Type = &safeType{} + func (type2 *safeType) New() interface{} { return reflect.New(type2.Type).Interface() } @@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer { panic("does not support unsafe operation") } +func (type2 *safeType) Kind() reflect.Kind { + return type2.Type.Kind() +} + +func (type2 *safeType) Len() int { + return type2.Type.Len() +} + +func (type2 *safeType) NumField() int { + return type2.Type.NumField() +} + +func (type2 *safeType) String() string { + return type2.Type.String() +} + func (type2 *safeType) Elem() Type { return type2.cfg.Type2(type2.Type.Elem()) } diff --git a/vendor/github.com/openshift/api/AGENTS.md b/vendor/github.com/openshift/api/AGENTS.md index a009bbb2de..0e39032433 100644 --- a/vendor/github.com/openshift/api/AGENTS.md +++ b/vendor/github.com/openshift/api/AGENTS.md @@ -32,6 +32,21 @@ make clean # Clean build artifacts make update # Alias for update-codegen-crds ``` +#### Targeted Code Generation +When working on a specific API group/version, you can regenerate only the affected CRDs instead of all CRDs: + +```bash +# Regenerate CRDs for a specific API group/version +make 
update-codegen-crds API_GROUP_VERSIONS=operator.openshift.io/v1alpha1 +make update-codegen-crds API_GROUP_VERSIONS=config.openshift.io/v1 +make update-codegen-crds API_GROUP_VERSIONS=route.openshift.io/v1 + +# Multiple API groups can be specified with comma separation +make update-codegen-crds API_GROUP_VERSIONS=operator.openshift.io/v1alpha1,config.openshift.io/v1 +``` + +This is more efficient than running `make update` (which regenerates all CRDs) when you're only working on specific API groups. + ### Testing ```bash make test-unit # Run unit tests diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index fd4268a789..c069d80401 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -1,7 +1,7 @@ all: build .PHONY: all -update: update-codegen-crds +update: update-non-codegen update-codegen RUNTIME ?= podman RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 @@ -17,10 +17,8 @@ test-unit: # # BEGIN: Update codegen-crds. Defaults to generating updates for all API packages. # To run a subset of packages: -# - Filter by group with make update-codegen-crds- -# E.g. make update-codegen-crds-machine -# - Set API_GROUP_VERSIONS to a space separated list of /. -# E.g. API_GROUP_VERSIONS="apps/v1 build/v1" make update-codegen-crds. +# - Set API_GROUP_VERSIONS to a space separated list of fully qualified /. +# E.g. API_GROUP_VERSIONS="apps.openshift.io/v1 build.openshift.io/v1" make update-codegen-crds. # FeatureSet generation is controlled at the group level by the # .codegen.yaml file. # @@ -57,23 +55,28 @@ verify-lint-fix: make lint-fix 2>/dev/null || true git diff --exit-code -.PHONY: verify-scripts -verify-scripts: - bash -x hack/verify-deepcopy.sh - bash -x hack/verify-openapi.sh +# Verify codegen runs all verifiers in the order they are defined in the root.go file. 
+# This includes all generators defined in update-codegen, but also the crd-schema-checker and crdify verifiers. +.PHONY: verify-codegen +verify-codegen: + EXTRA_ARGS=--verify hack/update-codegen.sh + +.PHONY: verify-non-codegen +verify-non-codegen: bash -x hack/verify-protobuf.sh - bash -x hack/verify-swagger-docs.sh hack/verify-crds.sh bash -x hack/verify-types.sh - bash -x hack/verify-compatibility.sh bash -x hack/verify-integration-tests.sh bash -x hack/verify-group-versions.sh bash -x hack/verify-prerelease-lifecycle-gen.sh hack/verify-payload-crds.sh hack/verify-payload-featuregates.sh +.PHONY: verify-scripts +verify-scripts: verify-non-codegen verify-codegen + .PHONY: verify -verify: verify-scripts lint verify-crd-schema verify-crdify verify-codegen-crds +verify: verify-scripts lint .PHONY: verify-codegen-crds verify-codegen-crds: @@ -99,8 +102,8 @@ verify-%: ################################################################################################ # # BEGIN: Update scripts. Defaults to generating updates for all API packages. -# Set API_GROUP_VERSIONS to a space separated list of / to limit -# the scope of the updates. Eg API_GROUP_VERSIONS="apps/v1 build/v1" make update-scripts. +# Set API_GROUP_VERSIONS to a space separated list of fully qualified / to limit +# the scope of the updates. Eg API_GROUP_VERSIONS="apps.openshift.io/v1 build.openshift.io/v1" make update-scripts. # Note: Protobuf generation is handled separately, see hack/lib/init.sh. # ################################################################################################ @@ -108,6 +111,19 @@ verify-%: .PHONY: update-scripts update-scripts: update-compatibility update-openapi update-deepcopy update-protobuf update-swagger-docs tests-vendor update-prerelease-lifecycle-gen update-payload-featuregates +# Update codegen runs all generators in the order they are defined in the root.go file. 
+# The per group generators are:[compatibility, deepcopy, swagger-docs, empty-partial-schema, schema-patch, crd-manifest-merge] +# The multi group generators are:[openapi] +.PHONY: update-codegen +update-codegen: + hack/update-codegen.sh + +# Update non-codegen runs all generators that are not part of the codegen utility, or +# are part of it, but are not run by default when invoking codegen without a specific generator. +# E.g. the payload feature gates which is not part of the generator style, but is still a subcommand. +.PHONY: update-non-codegen +update-non-codegen: update-protobuf tests-vendor update-prerelease-lifecycle-gen update-payload-crds update-payload-featuregates + .PHONY: update-compatibility update-compatibility: hack/update-compatibility.sh diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index cfac9689e4..7929f4b625 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -72,8 +72,10 @@ type ClusterVersionSpec struct { // // If an upgrade fails the operator will halt and report status // about the failing component. Setting the desired update value back to - // the previous version will cause a rollback to be attempted. Not all - // rollbacks will succeed. + // the previous version will cause a rollback to be attempted if the + // previous version is within the current minor version. Not all + // rollbacks will succeed, and some may unrecoverably break the + // cluster. // // +optional DesiredUpdate *Update `json:"desiredUpdate,omitempty"` @@ -718,9 +720,13 @@ type Update struct { Image string `json:"image"` // force allows an administrator to update to an image that has failed - // verification or upgradeable checks. 
This option should only - // be used when the authenticity of the provided image has been verified out - // of band because the provided image will run with full administrative access + // verification or upgradeable checks that are designed to keep your + // cluster safe. Only use this if: + // * you are testing unsigned release images in short-lived test clusters or + // * you are working around a known bug in the cluster-version + // operator and you have verified the authenticity of the provided + // image yourself. + // The provided image will run with full administrative access // to the cluster. Do not use this flag with images that comes from unknown // or potentially malicious sources. // diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index effafde644..005702e993 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -491,6 +491,21 @@ type AWSServiceEndpoint struct { URL string `json:"url"` } +// IPFamilyType represents the IP protocol family that cloud platform resources should use. +// +kubebuilder:validation:Enum=IPv4;DualStackIPv6Primary;DualStackIPv4Primary +type IPFamilyType string + +const ( + // IPv4 indicates that cloud platform resources should use IPv4 addressing only. + IPv4 IPFamilyType = "IPv4" + + // DualStackIPv6Primary indicates that cloud platform resources should use dual-stack networking with IPv6 as primary. + DualStackIPv6Primary IPFamilyType = "DualStackIPv6Primary" + + // DualStackIPv4Primary indicates that cloud platform resources should use dual-stack networking with IPv4 as primary. + DualStackIPv4Primary IPFamilyType = "DualStackIPv4Primary" +) + // AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. // This only includes fields that can be modified in the cluster. 
type AWSPlatformSpec struct { @@ -536,6 +551,18 @@ type AWSPlatformStatus struct { // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for AWS + // network resources. This controls whether AWS resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. + // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AWSDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AWSResourceTag is a tag to apply to AWS resources created for the cluster. @@ -607,6 +634,18 @@ type AzurePlatformStatus struct { // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall // +optional CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` + + // ipFamily specifies the IP protocol family that should be used for Azure + // network resources. This controls whether Azure resources are created with + // IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + // protocol family. + // + // +default="IPv4" + // +kubebuilder:default="IPv4" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="ipFamily is immutable once set" + // +openshift:enable:FeatureGate=AzureDualStackInstall + // +optional + IPFamily IPFamilyType `json:"ipFamily,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. 
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml index fe8e41c086..368e8f636d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml @@ -151,8 +151,10 @@ spec: If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to - the previous version will cause a rollback to be attempted. Not all - rollbacks will succeed. + the previous version will cause a rollback to be attempted if the + previous version is within the current minor version. Not all + rollbacks will succeed, and some may unrecoverably break the + cluster. properties: architecture: description: |- @@ -171,9 +173,13 @@ spec: force: description: |- force allows an administrator to update to an image that has failed - verification or upgradeable checks. This option should only - be used when the authenticity of the provided image has been verified out - of band because the provided image will run with full administrative access + verification or upgradeable checks that are designed to keep your + cluster safe. Only use this if: + * you are testing unsigned release images in short-lived test clusters or + * you are working around a known bug in the cluster-version + operator and you have verified the authenticity of the provided + image yourself. + The provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. 
type: boolean diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml index 1b2662e080..3ad92d23ce 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml @@ -151,8 +151,10 @@ spec: If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to - the previous version will cause a rollback to be attempted. Not all - rollbacks will succeed. + the previous version will cause a rollback to be attempted if the + previous version is within the current minor version. Not all + rollbacks will succeed, and some may unrecoverably break the + cluster. properties: architecture: description: |- @@ -171,9 +173,13 @@ spec: force: description: |- force allows an administrator to update to an image that has failed - verification or upgradeable checks. This option should only - be used when the authenticity of the provided image has been verified out - of band because the provided image will run with full administrative access + verification or upgradeable checks that are designed to keep your + cluster safe. Only use this if: + * you are testing unsigned release images in short-lived test clusters or + * you are working around a known bug in the cluster-version + operator and you have verified the authenticity of the provided + image yourself. + The provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. 
type: boolean diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml index 3d0a05471b..1928bb8c51 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml @@ -151,8 +151,10 @@ spec: If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to - the previous version will cause a rollback to be attempted. Not all - rollbacks will succeed. + the previous version will cause a rollback to be attempted if the + previous version is within the current minor version. Not all + rollbacks will succeed, and some may unrecoverably break the + cluster. properties: architecture: description: |- @@ -171,9 +173,13 @@ spec: force: description: |- force allows an administrator to update to an image that has failed - verification or upgradeable checks. This option should only - be used when the authenticity of the provided image has been verified out - of band because the provided image will run with full administrative access + verification or upgradeable checks that are designed to keep your + cluster safe. Only use this if: + * you are testing unsigned release images in short-lived test clusters or + * you are working around a known bug in the cluster-version + operator and you have verified the authenticity of the provided + image yourself. + The provided image will run with full administrative access to the cluster. 
Do not use this flag with images that comes from unknown or potentially malicious sources. type: boolean diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml index 1e0f08de8c..82ae3f4451 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml @@ -151,8 +151,10 @@ spec: If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to - the previous version will cause a rollback to be attempted. Not all - rollbacks will succeed. + the previous version will cause a rollback to be attempted if the + previous version is within the current minor version. Not all + rollbacks will succeed, and some may unrecoverably break the + cluster. properties: architecture: description: |- @@ -171,9 +173,13 @@ spec: force: description: |- force allows an administrator to update to an image that has failed - verification or upgradeable checks. This option should only - be used when the authenticity of the provided image has been verified out - of band because the provided image will run with full administrative access + verification or upgradeable checks that are designed to keep your + cluster safe. Only use this if: + * you are testing unsigned release images in short-lived test clusters or + * you are working around a known bug in the cluster-version + operator and you have verified the authenticity of the provided + image yourself. 
+ The provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources. type: boolean diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 9f01a6aebd..a6bbab4369 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -1353,6 +1353,21 @@ spec: ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. @@ -1556,6 +1571,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. 
+ enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml index 44185f514e..206ffb86c1 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -1353,6 +1353,21 @@ spec: ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. @@ -1556,6 +1571,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. 
This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index 27e1ce7b41..c14a981101 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -1353,6 +1353,21 @@ spec: ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. 
@@ -1556,6 +1571,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml index 21eee52c73..646978b805 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml @@ -139,8 +139,9 @@ spec: in a Container. properties: name: - description: Name of the environment variable. Must be a - C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -198,6 +199,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -338,7 +376,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index d8d6b502ee..03b091ead5 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -362,7 +362,9 @@ infrastructures.config.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNSInstall + - AWSDualStackInstall - AzureClusterHostedDNSInstall + - AzureDualStackInstall - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNSInstall diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 31aab4dfe8..0d8587e1dc 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -777,7 +777,7 @@ func (ClusterVersionList) SwaggerDoc() map[string]string { var map_ClusterVersionSpec = map[string]string{ "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", - "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). 
The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. 
image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted if the previous version is within the current minor version. Not all rollbacks will succeed, and some may unrecoverably break the cluster.", "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", "channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.", "capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.", @@ -878,7 +878,7 @@ var map_Update = map[string]string{ "architecture": "architecture is an optional field that indicates the desired value of the cluster architecture. 
In this context cluster architecture means either a single architecture or a multi architecture. architecture can only be set to Multi thereby only allowing updates from single to multi architecture. If architecture is set, image cannot be set and version must be set. Valid values are 'Multi' and empty.", "version": "version is a semantic version identifying the update version. version is required if architecture is specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", "image": "image is a container image location that contains the update. image should be used when the desired version does not exist in availableUpdates or history. When image is set, architecture cannot be specified. If both version and image are set, the version extracted from the referenced image must match the specified version.", - "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks that are designed to keep your cluster safe. Only use this if: * you are testing unsigned release images in short-lived test clusters or * you are working around a known bug in the cluster-version\n operator and you have verified the authenticity of the provided\n image yourself.\nThe provided image will run with full administrative access to the cluster. 
Do not use this flag with images that comes from unknown or potentially malicious sources.", } func (Update) SwaggerDoc() map[string]string { @@ -1410,6 +1410,7 @@ var map_AWSPlatformStatus = map[string]string{ "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for AWS network resources. This controls whether AWS resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AWSPlatformStatus) SwaggerDoc() map[string]string { @@ -1481,6 +1482,7 @@ var map_AzurePlatformStatus = map[string]string{ "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack.", "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. 
OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", + "ipFamily": "ipFamily specifies the IP protocol family that should be used for Azure network resources. This controls whether Azure resources are created with IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary protocol family.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml index cdc019885b..306d545527 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml @@ -663,15 +663,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
- If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -842,13 +840,11 @@ spec: description: |- currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). type: string modifyVolumeStatus: description: |- ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). 
properties: status: description: "status is the status of the ControllerModifyVolume diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml index f79bea45d5..c647ac400c 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml @@ -663,15 +663,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -842,13 +840,11 @@ spec: description: |- currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). type: string modifyVolumeStatus: description: |- ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). properties: status: description: "status is the status of the ControllerModifyVolume diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml index 44c1184e1a..97dbda237d 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml @@ -663,15 +663,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -842,13 +840,11 @@ spec: description: |- currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). type: string modifyVolumeStatus: description: |- ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. - This is a beta field and requires enabling VolumeAttributesClass feature (off by default). 
properties: status: description: "status is the status of the ControllerModifyVolume diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 54c9fd3dea..e688528256 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -12,10 +12,10 @@ | NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | | NewOLMOwnSingleNamespace| | | | Enabled | | Enabled | | NewOLMPreflightPermissionChecks| | | | Enabled | | Enabled | -| NewOLMWebhookProviderOpenshiftServiceCA| | | | Enabled | | Enabled | | NoRegistryClusterOperations| | | | Enabled | | Enabled | | VSphereMixedNodeEnv| | | Enabled | Enabled | | | | NewOLM| | Enabled | | Enabled | | Enabled | +| NewOLMWebhookProviderOpenshiftServiceCA| | Enabled | | Enabled | | Enabled | | AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | AWSClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | | AWSDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | @@ -52,17 +52,16 @@ | IrreconcilableMachineConfig| | | Enabled | Enabled | Enabled | Enabled | | KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | | MachineAPIMigration| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesAzure| | | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesCPMS| | | Enabled | Enabled | Enabled | Enabled | -| ManagedBootImagesvSphere| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | | MultiDiskSetup| | | Enabled | Enabled | Enabled | Enabled | +| MutableCSINodeAllocatableCount| | | Enabled | Enabled | Enabled | Enabled | | MutatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled | | NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | +| OSStreams| | | Enabled | 
Enabled | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | Enabled | Enabled | -| PreconfiguredUDNAddresses| | | Enabled | Enabled | Enabled | Enabled | | SELinuxMount| | | Enabled | Enabled | Enabled | Enabled | | SignatureStores| | | Enabled | Enabled | Enabled | Enabled | | SigstoreImageVerificationPKI| | | Enabled | Enabled | Enabled | Enabled | @@ -88,12 +87,15 @@ | MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAzure| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesvSphere| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PreconfiguredUDNAddresses| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 5d3836a436..0c88e7be75 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -342,7 +342,7 @@ var ( contactPerson("rsaini"). 
productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1496"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesAzure = newFeatureGate("ManagedBootImagesAzure"). @@ -350,7 +350,7 @@ var ( contactPerson("djoshy"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1761"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImagesCPMS = newFeatureGate("ManagedBootImagesCPMS"). @@ -509,8 +509,8 @@ var ( reportProblemsToJiraComponent("olm"). contactPerson("pegoncal"). productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1799"). - enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1844"). + enableForClusterProfile(SelfManaged, configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). @@ -772,7 +772,7 @@ var ( contactPerson("kyrtapz"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1793"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). mustRegister() FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). @@ -885,4 +885,19 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1857"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). 
mustRegister() + + FeatureGateMutableCSINodeAllocatableCount = newFeatureGate("MutableCSINodeAllocatableCount"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("jsafrane"). + productScope(kubernetes). + enhancementPR("https://github.com/kubernetes/enhancements/issues/4876"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateOSStreams = newFeatureGate("OSStreams"). + reportProblemsToJiraComponent("MachineConfigOperator"). + contactPerson("pabrodri"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1874"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index cbb1fe077f..2b541b2548 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -452,6 +452,22 @@ type MachineConfigPoolSpec struct { // +listMapKey=name // +kubebuilder:validation:MaxItems=100 PinnedImageSets []PinnedImageSetRef `json:"pinnedImageSets,omitempty"` + + // osImageStream specifies an OS stream to be used for the pool. + // + // When set, this value overrides the cluster-wide OS images for the pool with + // the OS and Extensions associated to the specified stream. + // When omitted or empty, the pool uses the cluster-wide default OS images. + // + // The stream name must start with a letter and contain only alphanumeric + // characters, hyphens ('-'), and dots ('.'), with a maximum length of 70 characters. 
+ // + // +optional + // +openshift:enable:FeatureGate=OSStreams + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=70 + // +kubebuilder:validation:XValidation:rule=`self.matches('^[a-zA-Z][a-zA-Z0-9.-]*$')`,message="The osImageStream must start with a letter and contain only alphanumeric characters, hyphens ('-'), and dots ('.')." + OSImageStream string `json:"osImageStream,omitempty"` } type PinnedImageSetRef struct { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml index 386383a9b5..7fca872ec0 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml @@ -1626,6 +1626,21 @@ spec: is ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. 
@@ -1830,6 +1845,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml index 9cf9598025..17ed0395f2 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml @@ -1626,6 +1626,21 @@ spec: is ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. 
+ enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. @@ -1830,6 +1845,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml index 434cdf4c63..a76869fb0b 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml @@ -1626,6 +1626,21 @@ spec: is ClusterHosted rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' ? !has(self.clusterHosted) : true' + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for AWS + network resources. 
This controls whether AWS resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf region: description: region holds the default AWS region for new AWS resources created by the cluster. @@ -1830,6 +1845,21 @@ spec: - AzureGermanCloud - AzureStackCloud type: string + ipFamily: + default: IPv4 + description: |- + ipFamily specifies the IP protocol family that should be used for Azure + network resources. This controls whether Azure resources are created with + IPv4-only, or dual-stack networking with IPv4 or IPv6 as the primary + protocol family. + enum: + - IPv4 + - DualStackIPv6Primary + - DualStackIPv4Primary + type: string + x-kubernetes-validations: + - message: ipFamily is immutable once set + rule: oldSelf == '' || self == oldSelf networkResourceGroupName: description: |- networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. 
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..b4d142c23f --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml @@ -0,0 +1,634 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfigpools.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigPool + listKind: MachineConfigPoolList + plural: machineconfigpools + shortNames: + - mcp + singular: machineconfigpool + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.configuration.name + name: Config + type: string + - description: When all the machines in the pool are updated to the correct machine + config. + jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - description: When at least one of machine is not either not updated or is in + the process of updating to the desired machine config. + jsonPath: .status.conditions[?(@.type=="Updating")].status + name: Updating + type: string + - description: When progress is blocked on updating one or more nodes or the pool + configuration is failing. 
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: Total number of machines in the machine config pool + jsonPath: .status.machineCount + name: MachineCount + type: number + - description: Total number of ready machines targeted by the pool + jsonPath: .status.readyMachineCount + name: ReadyMachineCount + type: number + - description: Total number of machines targeted by the pool that have the CurrentMachineConfig + as their config + jsonPath: .status.updatedMachineCount + name: UpdatedMachineCount + type: number + - description: Total number of machines marked degraded (or unreconcilable) + jsonPath: .status.degradedMachineCount + name: DegradedMachineCount + type: number + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfigPool describes a pool of MachineConfigs. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired machine config pool configuration. + properties: + configuration: + description: The targeted MachineConfig object for the machine config + pool. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + machineConfigSelector: + description: |- + machineConfigSelector specifies a label selector for MachineConfigs. 
+ Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ on how label and selectors work. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + maxUnavailable defines either an integer number or percentage + of nodes in the pool that can go Unavailable during an update. + This includes nodes Unavailable for any reason, including user + initiated cordons, failing nodes, etc. The default value is 1. + + A value larger than 1 will mean multiple nodes going unavailable during + the update, which may affect your workload stress on the remaining nodes. 
+ You cannot set this value to 0 to stop updates (it will default back to 1); + to stop updates, use the 'paused' property instead. Drain will respect + Pod Disruption Budgets (PDBs) such as etcd quorum guards, even if + maxUnavailable is greater than one. + x-kubernetes-int-or-string: true + nodeSelector: + description: nodeSelector specifies a label selector for Machines + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + osImageStream: + description: |- + osImageStream specifies an OS stream to be used for the pool. 
+ + When set, this value overrides the cluster-wide OS images for the pool with + the OS and Extensions associated to the specified stream. + When omitted or empty, the pool uses the cluster-wide default OS images. + + The stream name must start with a letter and contain only alphanumeric + characters, hyphens ('-'), and dots ('.'), with a maximum length of 70 characters. + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The osImageStream must start with a letter and contain + only alphanumeric characters, hyphens ('-'), and dots ('.'). + rule: self.matches('^[a-zA-Z][a-zA-Z0-9.-]*$') + paused: + description: |- + paused specifies whether or not changes to this machine config pool should be stopped. + This includes generating new desiredMachineConfig and update of machines. + type: boolean + pinnedImageSets: + description: |- + pinnedImageSets specifies a sequence of PinnedImageSetRef objects for the + pool. Nodes within this pool will preload and pin images defined in the + PinnedImageSet. Before pulling images the MachineConfigDaemon will ensure + the total uncompressed size of all the images does not exceed available + resources. If the total size of the images exceeds the available + resources the controller will report a Degraded status to the + MachineConfigPool and not attempt to pull any images. Also to help ensure + the kubelet can mitigate storage risk, the pinned_image configuration and + subsequent service reload will happen only after all of the images have + been pulled for each set. Images from multiple PinnedImageSets are loaded + and pinned sequentially as listed. Duplicate and existing images will be + skipped. + + Any failure to prefetch or pin images will result in a Degraded pool. + Resolving these failures is the responsibility of the user. The admin + should be proactive in ensuring adequate storage and proper image + authentication exists in advance. 
+ items: + properties: + name: + description: |- + name is a reference to the name of a PinnedImageSet. Must adhere to + RFC-1123 (https://tools.ietf.org/html/rfc1123). + Made up of one of more period-separated (.) segments, where each segment + consists of alphanumeric characters and hyphens (-), must begin and end + with an alphanumeric character, and is at most 63 characters in length. + The total length of the name must not exceed 253 characters. + maxLength: 253 + minLength: 1 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + status: + description: status contains observed information about the machine config + pool. + properties: + certExpirys: + description: certExpirys keeps track of important certificate expiration + data + items: + description: ceryExpiry contains the bundle name and the expiry + date + properties: + bundle: + description: bundle is the name of the bundle in which the subject + certificate resides + type: string + expiry: + description: expiry is the date after which the certificate + will no longer be valid + format: date-time + type: string + subject: + description: subject is the subject of the certificate + type: string + required: + - bundle + - subject + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: MachineConfigPoolCondition contains condition information + for an MachineConfigPool. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the timestamp corresponding to the last status + change of this condition. 
+ format: date-time + nullable: true + type: string + message: + description: |- + message is a human readable description of the details of the last + transition, complementing reason. + type: string + reason: + description: |- + reason is a brief machine readable explanation for the condition's last + transition. + type: string + status: + description: status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + type: + description: type of the condition, currently ('Done', 'Updating', + 'Failed'). + type: string + type: object + type: array + x-kubernetes-list-type: atomic + configuration: + description: configuration represents the current MachineConfig object + for the machine config pool. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + degradedMachineCount: + description: |- + degradedMachineCount represents the total number of machines marked degraded (or unreconcilable). + A node is marked degraded if applying a configuration failed.. + format: int32 + type: integer + machineCount: + description: machineCount represents the total number of machines + in the machine config pool. + format: int32 + type: integer + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. + format: int64 + type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. 
+ format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map + readyMachineCount: + description: readyMachineCount represents the total number of 
ready + machines targeted by the pool. + format: int32 + type: integer + unavailableMachineCount: + description: |- + unavailableMachineCount represents the total number of unavailable (non-ready) machines targeted by the pool. + A node is marked unavailable if it is in updating state or NodeReady condition is false. + format: int32 + type: integer + updatedMachineCount: + description: updatedMachineCount represents the total number of machines + targeted by the pool that have the CurrentMachineConfig as their + config. + format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools.crd.yaml rename to vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml index 5d69585f08..b551493e48 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: api.openshift.io/merged-by-featuregates: "true" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default labels: openshift.io/operator-managed: "" name: machineconfigpools.machineconfiguration.openshift.io diff --git 
a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..eb8de651e1 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,634 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfigpools.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigPool + listKind: MachineConfigPoolList + plural: machineconfigpools + shortNames: + - mcp + singular: machineconfigpool + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.configuration.name + name: Config + type: string + - description: When all the machines in the pool are updated to the correct machine + config. + jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - description: When at least one of machine is not either not updated or is in + the process of updating to the desired machine config. + jsonPath: .status.conditions[?(@.type=="Updating")].status + name: Updating + type: string + - description: When progress is blocked on updating one or more nodes or the pool + configuration is failing. 
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: Total number of machines in the machine config pool + jsonPath: .status.machineCount + name: MachineCount + type: number + - description: Total number of ready machines targeted by the pool + jsonPath: .status.readyMachineCount + name: ReadyMachineCount + type: number + - description: Total number of machines targeted by the pool that have the CurrentMachineConfig + as their config + jsonPath: .status.updatedMachineCount + name: UpdatedMachineCount + type: number + - description: Total number of machines marked degraded (or unreconcilable) + jsonPath: .status.degradedMachineCount + name: DegradedMachineCount + type: number + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfigPool describes a pool of MachineConfigs. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired machine config pool configuration. + properties: + configuration: + description: The targeted MachineConfig object for the machine config + pool. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + machineConfigSelector: + description: |- + machineConfigSelector specifies a label selector for MachineConfigs. 
+ Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ on how label and selectors work. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + maxUnavailable defines either an integer number or percentage + of nodes in the pool that can go Unavailable during an update. + This includes nodes Unavailable for any reason, including user + initiated cordons, failing nodes, etc. The default value is 1. + + A value larger than 1 will mean multiple nodes going unavailable during + the update, which may affect your workload stress on the remaining nodes. 
+ You cannot set this value to 0 to stop updates (it will default back to 1); + to stop updates, use the 'paused' property instead. Drain will respect + Pod Disruption Budgets (PDBs) such as etcd quorum guards, even if + maxUnavailable is greater than one. + x-kubernetes-int-or-string: true + nodeSelector: + description: nodeSelector specifies a label selector for Machines + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + osImageStream: + description: |- + osImageStream specifies an OS stream to be used for the pool. 
+ + When set, this value overrides the cluster-wide OS images for the pool with + the OS and Extensions associated to the specified stream. + When omitted or empty, the pool uses the cluster-wide default OS images. + + The stream name must start with a letter and contain only alphanumeric + characters, hyphens ('-'), and dots ('.'), with a maximum length of 70 characters. + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The osImageStream must start with a letter and contain + only alphanumeric characters, hyphens ('-'), and dots ('.'). + rule: self.matches('^[a-zA-Z][a-zA-Z0-9.-]*$') + paused: + description: |- + paused specifies whether or not changes to this machine config pool should be stopped. + This includes generating new desiredMachineConfig and update of machines. + type: boolean + pinnedImageSets: + description: |- + pinnedImageSets specifies a sequence of PinnedImageSetRef objects for the + pool. Nodes within this pool will preload and pin images defined in the + PinnedImageSet. Before pulling images the MachineConfigDaemon will ensure + the total uncompressed size of all the images does not exceed available + resources. If the total size of the images exceeds the available + resources the controller will report a Degraded status to the + MachineConfigPool and not attempt to pull any images. Also to help ensure + the kubelet can mitigate storage risk, the pinned_image configuration and + subsequent service reload will happen only after all of the images have + been pulled for each set. Images from multiple PinnedImageSets are loaded + and pinned sequentially as listed. Duplicate and existing images will be + skipped. + + Any failure to prefetch or pin images will result in a Degraded pool. + Resolving these failures is the responsibility of the user. The admin + should be proactive in ensuring adequate storage and proper image + authentication exists in advance. 
+ items: + properties: + name: + description: |- + name is a reference to the name of a PinnedImageSet. Must adhere to + RFC-1123 (https://tools.ietf.org/html/rfc1123). + Made up of one of more period-separated (.) segments, where each segment + consists of alphanumeric characters and hyphens (-), must begin and end + with an alphanumeric character, and is at most 63 characters in length. + The total length of the name must not exceed 253 characters. + maxLength: 253 + minLength: 1 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + status: + description: status contains observed information about the machine config + pool. + properties: + certExpirys: + description: certExpirys keeps track of important certificate expiration + data + items: + description: ceryExpiry contains the bundle name and the expiry + date + properties: + bundle: + description: bundle is the name of the bundle in which the subject + certificate resides + type: string + expiry: + description: expiry is the date after which the certificate + will no longer be valid + format: date-time + type: string + subject: + description: subject is the subject of the certificate + type: string + required: + - bundle + - subject + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: MachineConfigPoolCondition contains condition information + for an MachineConfigPool. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the timestamp corresponding to the last status + change of this condition. 
+ format: date-time + nullable: true + type: string + message: + description: |- + message is a human readable description of the details of the last + transition, complementing reason. + type: string + reason: + description: |- + reason is a brief machine readable explanation for the condition's last + transition. + type: string + status: + description: status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + type: + description: type of the condition, currently ('Done', 'Updating', + 'Failed'). + type: string + type: object + type: array + x-kubernetes-list-type: atomic + configuration: + description: configuration represents the current MachineConfig object + for the machine config pool. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + degradedMachineCount: + description: |- + degradedMachineCount represents the total number of machines marked degraded (or unreconcilable). + A node is marked degraded if applying a configuration failed.. + format: int32 + type: integer + machineCount: + description: machineCount represents the total number of machines + in the machine config pool. + format: int32 + type: integer + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. + format: int64 + type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. 
+ format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map + readyMachineCount: + description: readyMachineCount represents the total number of 
ready + machines targeted by the pool. + format: int32 + type: integer + unavailableMachineCount: + description: |- + unavailableMachineCount represents the total number of unavailable (non-ready) machines targeted by the pool. + A node is marked unavailable if it is in updating state or NodeReady condition is false. + format: int32 + type: integer + updatedMachineCount: + description: updatedMachineCount represents the total number of machines + targeted by the pool that have the CurrentMachineConfig as their + config. + format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..69d78fc4e2 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,634 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfigpools.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigPool + listKind: MachineConfigPoolList + plural: machineconfigpools + shortNames: + - mcp + singular: machineconfigpool + scope: Cluster + versions: + - 
additionalPrinterColumns: + - jsonPath: .status.configuration.name + name: Config + type: string + - description: When all the machines in the pool are updated to the correct machine + config. + jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - description: When at least one of machine is not either not updated or is in + the process of updating to the desired machine config. + jsonPath: .status.conditions[?(@.type=="Updating")].status + name: Updating + type: string + - description: When progress is blocked on updating one or more nodes or the pool + configuration is failing. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: Total number of machines in the machine config pool + jsonPath: .status.machineCount + name: MachineCount + type: number + - description: Total number of ready machines targeted by the pool + jsonPath: .status.readyMachineCount + name: ReadyMachineCount + type: number + - description: Total number of machines targeted by the pool that have the CurrentMachineConfig + as their config + jsonPath: .status.updatedMachineCount + name: UpdatedMachineCount + type: number + - description: Total number of machines marked degraded (or unreconcilable) + jsonPath: .status.degradedMachineCount + name: DegradedMachineCount + type: number + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineConfigPool describes a pool of MachineConfigs. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired machine config pool configuration. + properties: + configuration: + description: The targeted MachineConfig object for the machine config + pool. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + machineConfigSelector: + description: |- + machineConfigSelector specifies a label selector for MachineConfigs. + Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ on how label and selectors work. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: |- + maxUnavailable defines either an integer number or percentage + of nodes in the pool that can go Unavailable during an update. + This includes nodes Unavailable for any reason, including user + initiated cordons, failing nodes, etc. The default value is 1. + + A value larger than 1 will mean multiple nodes going unavailable during + the update, which may affect your workload stress on the remaining nodes. + You cannot set this value to 0 to stop updates (it will default back to 1); + to stop updates, use the 'paused' property instead. Drain will respect + Pod Disruption Budgets (PDBs) such as etcd quorum guards, even if + maxUnavailable is greater than one. + x-kubernetes-int-or-string: true + nodeSelector: + description: nodeSelector specifies a label selector for Machines + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + osImageStream: + description: |- + osImageStream specifies an OS stream to be used for the pool. + + When set, this value overrides the cluster-wide OS images for the pool with + the OS and Extensions associated to the specified stream. + When omitted or empty, the pool uses the cluster-wide default OS images. + + The stream name must start with a letter and contain only alphanumeric + characters, hyphens ('-'), and dots ('.'), with a maximum length of 70 characters. + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The osImageStream must start with a letter and contain + only alphanumeric characters, hyphens ('-'), and dots ('.'). + rule: self.matches('^[a-zA-Z][a-zA-Z0-9.-]*$') + paused: + description: |- + paused specifies whether or not changes to this machine config pool should be stopped. + This includes generating new desiredMachineConfig and update of machines. + type: boolean + pinnedImageSets: + description: |- + pinnedImageSets specifies a sequence of PinnedImageSetRef objects for the + pool. Nodes within this pool will preload and pin images defined in the + PinnedImageSet. Before pulling images the MachineConfigDaemon will ensure + the total uncompressed size of all the images does not exceed available + resources. 
If the total size of the images exceeds the available + resources the controller will report a Degraded status to the + MachineConfigPool and not attempt to pull any images. Also to help ensure + the kubelet can mitigate storage risk, the pinned_image configuration and + subsequent service reload will happen only after all of the images have + been pulled for each set. Images from multiple PinnedImageSets are loaded + and pinned sequentially as listed. Duplicate and existing images will be + skipped. + + Any failure to prefetch or pin images will result in a Degraded pool. + Resolving these failures is the responsibility of the user. The admin + should be proactive in ensuring adequate storage and proper image + authentication exists in advance. + items: + properties: + name: + description: |- + name is a reference to the name of a PinnedImageSet. Must adhere to + RFC-1123 (https://tools.ietf.org/html/rfc1123). + Made up of one of more period-separated (.) segments, where each segment + consists of alphanumeric characters and hyphens (-), must begin and end + with an alphanumeric character, and is at most 63 characters in length. + The total length of the name must not exceed 253 characters. + maxLength: 253 + minLength: 1 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + status: + description: status contains observed information about the machine config + pool. 
+ properties: + certExpirys: + description: certExpirys keeps track of important certificate expiration + data + items: + description: ceryExpiry contains the bundle name and the expiry + date + properties: + bundle: + description: bundle is the name of the bundle in which the subject + certificate resides + type: string + expiry: + description: expiry is the date after which the certificate + will no longer be valid + format: date-time + type: string + subject: + description: subject is the subject of the certificate + type: string + required: + - bundle + - subject + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: MachineConfigPoolCondition contains condition information + for an MachineConfigPool. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the timestamp corresponding to the last status + change of this condition. + format: date-time + nullable: true + type: string + message: + description: |- + message is a human readable description of the details of the last + transition, complementing reason. + type: string + reason: + description: |- + reason is a brief machine readable explanation for the condition's last + transition. + type: string + status: + description: status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + type: + description: type of the condition, currently ('Done', 'Updating', + 'Failed'). + type: string + type: object + type: array + x-kubernetes-list-type: atomic + configuration: + description: configuration represents the current MachineConfig object + for the machine config pool. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + degradedMachineCount: + description: |- + degradedMachineCount represents the total number of machines marked degraded (or unreconcilable). + A node is marked degraded if applying a configuration failed.. + format: int32 + type: integer + machineCount: + description: machineCount represents the total number of machines + in the machine config pool. 
+ format: int32 + type: integer + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. + format: int64 + type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. 
+ format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map + readyMachineCount: + description: readyMachineCount represents the total number of ready + machines targeted by the pool. + format: int32 + type: integer + unavailableMachineCount: + description: |- + unavailableMachineCount represents the total number of unavailable (non-ready) machines targeted by the pool. + A node is marked unavailable if it is in updating state or NodeReady condition is false. + format: int32 + type: integer + updatedMachineCount: + description: updatedMachineCount represents the total number of machines + targeted by the pool that have the CurrentMachineConfig as their + config. 
+ format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index 7d78ab9646..d9e115f76a 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -29,7 +29,9 @@ controllerconfigs.machineconfiguration.openshift.io: Category: "" FeatureGates: - AWSClusterHostedDNSInstall + - AWSDualStackInstall - AzureClusterHostedDNSInstall + - AzureDualStackInstall - DualReplica - DyanmicServiceEndpointIBMCloud - GCPClusterHostedDNSInstall @@ -198,6 +200,7 @@ machineconfigpools.machineconfiguration.openshift.io: Capability: "" Category: "" FeatureGates: + - OSStreams - PinnedImages FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go index 3a0b0646a6..816ca37a07 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go @@ -289,6 +289,7 @@ var map_MachineConfigPoolSpec = map[string]string{ "maxUnavailable": "maxUnavailable defines either an integer number or percentage of nodes in the pool that can go Unavailable during an update. This includes nodes Unavailable for any reason, including user initiated cordons, failing nodes, etc. 
The default value is 1.\n\nA value larger than 1 will mean multiple nodes going unavailable during the update, which may affect your workload stress on the remaining nodes. You cannot set this value to 0 to stop updates (it will default back to 1); to stop updates, use the 'paused' property instead. Drain will respect Pod Disruption Budgets (PDBs) such as etcd quorum guards, even if maxUnavailable is greater than one.", "configuration": "The targeted MachineConfig object for the machine config pool.", "pinnedImageSets": "pinnedImageSets specifies a sequence of PinnedImageSetRef objects for the pool. Nodes within this pool will preload and pin images defined in the PinnedImageSet. Before pulling images the MachineConfigDaemon will ensure the total uncompressed size of all the images does not exceed available resources. If the total size of the images exceeds the available resources the controller will report a Degraded status to the MachineConfigPool and not attempt to pull any images. Also to help ensure the kubelet can mitigate storage risk, the pinned_image configuration and subsequent service reload will happen only after all of the images have been pulled for each set. Images from multiple PinnedImageSets are loaded and pinned sequentially as listed. Duplicate and existing images will be skipped.\n\nAny failure to prefetch or pin images will result in a Degraded pool. Resolving these failures is the responsibility of the user. The admin should be proactive in ensuring adequate storage and proper image authentication exists in advance.", + "osImageStream": "osImageStream specifies an OS stream to be used for the pool.\n\nWhen set, this value overrides the cluster-wide OS images for the pool with the OS and Extensions associated to the specified stream. 
When omitted or empty, the pool uses the cluster-wide default OS images.\n\nThe stream name must start with a letter and contain only alphanumeric characters, hyphens ('-'), and dots ('.'), with a maximum length of 70 characters.", } func (MachineConfigPoolSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go index c60f521f94..3f0cf2d13f 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/register.go @@ -28,6 +28,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MachineConfigNodeList{}, &PinnedImageSet{}, &PinnedImageSetList{}, + &OSImageStream{}, + &OSImageStreamList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go new file mode 100644 index 0000000000..86011bbc25 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_osimagestream.go @@ -0,0 +1,124 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OSImageStream describes a set of streams and associated URLs available +// for the MachineConfigPools to be used as base OS images. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=osimagestreams,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2555 +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +openshift:enable:FeatureGate=OSStreams +// +kubebuilder:metadata:labels=openshift.io/operator-managed= +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="osimagestream is a singleton, .metadata.name must be 'cluster'" +type OSImageStream struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec contains the desired OSImageStream config configuration. + // +required + Spec *OSImageStreamSpec `json:"spec,omitempty"` + + // status describes the last observed state of this OSImageStream. + // Populated by the MachineConfigOperator after reading release metadata. + // When not present, the controller has not yet reconciled this resource. + // +optional + Status *OSImageStreamStatus `json:"status,omitempty"` +} + +// OSImageStreamStatus describes the current state of a OSImageStream. 
+// +kubebuilder:validation:XValidation:rule="!has(self.availableStreams) || size(self.availableStreams) == 0 || (has(self.defaultStream) && size(self.defaultStream) != 0)",message="defaultStream must be set when availableStreams is not empty" +// +kubebuilder:validation:XValidation:rule="!has(self.defaultStream) || self.defaultStream in self.availableStreams.map(s, s.name)",message="defaultStream must reference a stream name from availableStreams" +type OSImageStreamStatus struct { + // availableStreams is a list of the available OS Image Streams + // available and their associated URLs for both OS and Extensions + // images. + // + // A maximum of 100 streams may be specified. + // +optional + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=100 + // +listType=map + // +listMapKey=name + AvailableStreams []OSImageStreamURLSet `json:"availableStreams,omitempty"` + + // defaultStream is the name of the stream that should be used as the default + // when no specific stream is requested by a MachineConfigPool. + // Must reference the name of one of the streams in availableStreams. + // Must be set when availableStreams is not empty. + // When not set and availableStreams is empty, controllers should use the default one stated in the release image. + // + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=70 + // +kubebuilder:validation:XValidation:rule=`self.matches('^[\\w\\.\\-]+$')`,message="The name must consist only of alphanumeric characters, hyphens ('-') and dots ('.')." + DefaultStream string `json:"defaultStream,omitempty"` +} + +// OSImageStreamSpec defines the desired state of a OSImageStream. +type OSImageStreamSpec struct { +} + +type OSImageStreamURLSet struct { + // name is the identifier of the stream. + // + // Must not be empty and must not exceed 70 characters in length. + // Must only contain alphanumeric characters, hyphens ('-'), or dots ('.'). 
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=70 + // +kubebuilder:validation:XValidation:rule=`self.matches('^[\\w\\.\\-]+$')`,message="The name must consist only of alphanumeric characters, hyphens ('-') and dots ('.')." + Name string `json:"name,omitempty"` + + // osImageUrl is an OS Image referenced by digest. + // + // The format of the URL ref is: + // host[:port][/namespace]/name@sha256: + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=447 + // +kubebuilder:validation:XValidation:rule=`self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" + // +kubebuilder:validation:XValidation:rule=`self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" + OSImageUrl string `json:"osImageUrl,omitempty"` + + // osExtensionsImageUrl is an OS Extensions Image referenced by digest. 
+ // + // The format of the URL ref is: + // host[:port][/namespace]/name@sha256: + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=447 + // +kubebuilder:validation:XValidation:rule=`self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" + // +kubebuilder:validation:XValidation:rule=`self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" + OSExtensionsImageUrl string `json:"osExtensionsImageUrl,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OSImageStreamList is a list of OSImageStream resources +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type OSImageStreamList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []OSImageStream `json:"items"` +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..35f9465163 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-CustomNoUpgrade.crd.yaml @@ -0,0 +1,153 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2555 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + labels: + openshift.io/operator-managed: "" + name: osimagestreams.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: OSImageStream + listKind: OSImageStreamList + plural: osimagestreams + singular: osimagestream + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + OSImageStream describes a set of streams and associated URLs available + for the MachineConfigPools to be used as base OS images. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired OSImageStream config configuration. + type: object + status: + description: |- + status describes the last observed state of this OSImageStream. + Populated by the MachineConfigOperator after reading release metadata. + When not present, the controller has not yet reconciled this resource. + properties: + availableStreams: + description: |- + availableStreams is a list of the available OS Image Streams + available and their associated URLs for both OS and Extensions + images. + + A maximum of 100 streams may be specified. + items: + properties: + name: + description: |- + name is the identifier of the stream. + + Must not be empty and must not exceed 70 characters in length. + Must only contain alphanumeric characters, hyphens ('-'), or dots ('.'). + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). + rule: self.matches('^[\\w\\.\\-]+$') + osExtensionsImageUrl: + description: |- + osExtensionsImageUrl is an OS Extensions Image referenced by digest. 
+ + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + osImageUrl: + description: |- + osImageUrl is an OS Image referenced by digest. + + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + required: + - name + - osExtensionsImageUrl + - osImageUrl + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + defaultStream: + description: |- + defaultStream is the name of the stream that should be used as the default + when no specific stream is requested by a MachineConfigPool. + Must reference the name of one of the streams in availableStreams. + Must be set when availableStreams is not empty. + When not set and availableStreams is empty, controllers should use the default one stated in the release image. 
+ maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). + rule: self.matches('^[\\w\\.\\-]+$') + type: object + x-kubernetes-validations: + - message: defaultStream must be set when availableStreams is not empty + rule: '!has(self.availableStreams) || size(self.availableStreams) == + 0 || (has(self.defaultStream) && size(self.defaultStream) != 0)' + - message: defaultStream must reference a stream name from availableStreams + rule: '!has(self.defaultStream) || self.defaultStream in self.availableStreams.map(s, + s.name)' + required: + - spec + type: object + x-kubernetes-validations: + - message: osimagestream is a singleton, .metadata.name must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..1a02e83247 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,153 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2555 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: osimagestreams.machineconfiguration.openshift.io +spec: + group: 
machineconfiguration.openshift.io + names: + kind: OSImageStream + listKind: OSImageStreamList + plural: osimagestreams + singular: osimagestream + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + OSImageStream describes a set of streams and associated URLs available + for the MachineConfigPools to be used as base OS images. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired OSImageStream config configuration. + type: object + status: + description: |- + status describes the last observed state of this OSImageStream. + Populated by the MachineConfigOperator after reading release metadata. + When not present, the controller has not yet reconciled this resource. + properties: + availableStreams: + description: |- + availableStreams is a list of the available OS Image Streams + available and their associated URLs for both OS and Extensions + images. + + A maximum of 100 streams may be specified. + items: + properties: + name: + description: |- + name is the identifier of the stream. 
+ + Must not be empty and must not exceed 70 characters in length. + Must only contain alphanumeric characters, hyphens ('-'), or dots ('.'). + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). + rule: self.matches('^[\\w\\.\\-]+$') + osExtensionsImageUrl: + description: |- + osExtensionsImageUrl is an OS Extensions Image referenced by digest. + + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + osImageUrl: + description: |- + osImageUrl is an OS Image referenced by digest. 
+ + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + required: + - name + - osExtensionsImageUrl + - osImageUrl + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + defaultStream: + description: |- + defaultStream is the name of the stream that should be used as the default + when no specific stream is requested by a MachineConfigPool. + Must reference the name of one of the streams in availableStreams. + Must be set when availableStreams is not empty. + When not set and availableStreams is empty, controllers should use the default one stated in the release image. + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). 
+ rule: self.matches('^[\\w\\.\\-]+$') + type: object + x-kubernetes-validations: + - message: defaultStream must be set when availableStreams is not empty + rule: '!has(self.availableStreams) || size(self.availableStreams) == + 0 || (has(self.defaultStream) && size(self.defaultStream) != 0)' + - message: defaultStream must reference a stream name from availableStreams + rule: '!has(self.defaultStream) || self.defaultStream in self.availableStreams.map(s, + s.name)' + required: + - spec + type: object + x-kubernetes-validations: + - message: osimagestream is a singleton, .metadata.name must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..59a14e0c58 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_osimagestreams-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,153 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2555 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: osimagestreams.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: OSImageStream + listKind: OSImageStreamList + plural: osimagestreams + singular: osimagestream + scope: Cluster + versions: + - name: 
v1alpha1 + schema: + openAPIV3Schema: + description: |- + OSImageStream describes a set of streams and associated URLs available + for the MachineConfigPools to be used as base OS images. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec contains the desired OSImageStream config configuration. + type: object + status: + description: |- + status describes the last observed state of this OSImageStream. + Populated by the MachineConfigOperator after reading release metadata. + When not present, the controller has not yet reconciled this resource. + properties: + availableStreams: + description: |- + availableStreams is a list of the available OS Image Streams + available and their associated URLs for both OS and Extensions + images. + + A maximum of 100 streams may be specified. + items: + properties: + name: + description: |- + name is the identifier of the stream. + + Must not be empty and must not exceed 70 characters in length. + Must only contain alphanumeric characters, hyphens ('-'), or dots ('.'). 
+ maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). + rule: self.matches('^[\\w\\.\\-]+$') + osExtensionsImageUrl: + description: |- + osExtensionsImageUrl is an OS Extensions Image referenced by digest. + + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + osImageUrl: + description: |- + osImageUrl is an OS Image referenced by digest. + + The format of the URL ref is: + host[:port][/namespace]/name@sha256: + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + required: + - name + - osExtensionsImageUrl + - osImageUrl + type: object + maxItems: 100 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + defaultStream: + description: |- + defaultStream is the name of the stream that should be used as the default + when no specific stream is requested by a MachineConfigPool. 
+ Must reference the name of one of the streams in availableStreams. + Must be set when availableStreams is not empty. + When not set and availableStreams is empty, controllers should use the default one stated in the release image. + maxLength: 70 + minLength: 1 + type: string + x-kubernetes-validations: + - message: The name must consist only of alphanumeric characters, + hyphens ('-') and dots ('.'). + rule: self.matches('^[\\w\\.\\-]+$') + type: object + x-kubernetes-validations: + - message: defaultStream must be set when availableStreams is not empty + rule: '!has(self.availableStreams) || size(self.availableStreams) == + 0 || (has(self.defaultStream) && size(self.defaultStream) != 0)' + - message: defaultStream must reference a stream name from availableStreams + rule: '!has(self.defaultStream) || self.defaultStream in self.availableStreams.map(s, + s.name)' + required: + - spec + type: object + x-kubernetes-validations: + - message: osimagestream is a singleton, .metadata.name must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go index 5e9e7a8c08..773e5432ff 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.deepcopy.go @@ -183,6 +183,128 @@ func (in *MachineConfigNodeStatusPinnedImageSet) DeepCopy() *MachineConfigNodeSt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OSImageStream) DeepCopyInto(out *OSImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(OSImageStreamSpec) + **out = **in + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(OSImageStreamStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStream. +func (in *OSImageStream) DeepCopy() *OSImageStream { + if in == nil { + return nil + } + out := new(OSImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OSImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamList) DeepCopyInto(out *OSImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OSImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamList. +func (in *OSImageStreamList) DeepCopy() *OSImageStreamList { + if in == nil { + return nil + } + out := new(OSImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OSImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OSImageStreamSpec) DeepCopyInto(out *OSImageStreamSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamSpec. +func (in *OSImageStreamSpec) DeepCopy() *OSImageStreamSpec { + if in == nil { + return nil + } + out := new(OSImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamStatus) DeepCopyInto(out *OSImageStreamStatus) { + *out = *in + if in.AvailableStreams != nil { + in, out := &in.AvailableStreams, &out.AvailableStreams + *out = make([]OSImageStreamURLSet, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamStatus. +func (in *OSImageStreamStatus) DeepCopy() *OSImageStreamStatus { + if in == nil { + return nil + } + out := new(OSImageStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImageStreamURLSet) DeepCopyInto(out *OSImageStreamURLSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImageStreamURLSet. +func (in *OSImageStreamURLSet) DeepCopy() *OSImageStreamURLSet { + if in == nil { + return nil + } + out := new(OSImageStreamURLSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PinnedImageRef) DeepCopyInto(out *PinnedImageRef) { *out = *in diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 9404910930..a2b8d213cf 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -74,6 +74,30 @@ machineconfignodes.machineconfiguration.openshift.io: - MachineConfigNodes Version: v1alpha1 +osimagestreams.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2555 + CRDName: osimagestreams.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OSStreams + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: OSImageStream + Labels: + openshift.io/operator-managed: "" + PluralName: osimagestreams + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - OSStreams + Version: v1alpha1 + pinnedimagesets.machineconfiguration.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1713 diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go index a5b0dcfb31..b6222dc5ff 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go @@ -96,6 +96,54 @@ func (MachineConfigNodeStatusPinnedImageSet) SwaggerDoc() 
map[string]string { return map_MachineConfigNodeStatusPinnedImageSet } +var map_OSImageStream = map[string]string{ + "": "OSImageStream describes a set of streams and associated URLs available for the MachineConfigPools to be used as base OS images.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the desired OSImageStream config configuration.", + "status": "status describes the last observed state of this OSImageStream. Populated by the MachineConfigOperator after reading release metadata. When not present, the controller has not yet reconciled this resource.", +} + +func (OSImageStream) SwaggerDoc() map[string]string { + return map_OSImageStream +} + +var map_OSImageStreamList = map[string]string{ + "": "OSImageStreamList is a list of OSImageStream resources\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (OSImageStreamList) SwaggerDoc() map[string]string { + return map_OSImageStreamList +} + +var map_OSImageStreamSpec = map[string]string{ + "": "OSImageStreamSpec defines the desired state of a OSImageStream.", +} + +func (OSImageStreamSpec) SwaggerDoc() map[string]string { + return map_OSImageStreamSpec +} + +var map_OSImageStreamStatus = map[string]string{ + "": "OSImageStreamStatus describes the current state of a OSImageStream.", + "availableStreams": "availableStreams is a list of the available OS Image Streams available and their associated URLs for both OS and Extensions images.\n\nA maximum of 100 streams may be specified.", + "defaultStream": "defaultStream is the name of the stream that should be used as the default when no specific stream is requested by a MachineConfigPool. Must reference the name of one of the streams in availableStreams. Must be set when availableStreams is not empty. When not set and availableStreams is empty, controllers should use the default one stated in the release image.", +} + +func (OSImageStreamStatus) SwaggerDoc() map[string]string { + return map_OSImageStreamStatus +} + +var map_OSImageStreamURLSet = map[string]string{ + "name": "name is the identifier of the stream.\n\nMust not be empty and must not exceed 70 characters in length. 
Must only contain alphanumeric characters, hyphens ('-'), or dots ('.').", + "osImageUrl": "osImageUrl is an OS Image referenced by digest.\n\nThe format of the URL ref is: host[:port][/namespace]/name@sha256:", + "osExtensionsImageUrl": "osExtensionsImageUrl is an OS Extensions Image referenced by digest.\n\nThe format of the URL ref is: host[:port][/namespace]/name@sha256:", +} + +func (OSImageStreamURLSet) SwaggerDoc() map[string]string { + return map_OSImageStreamURLSet +} + var map_PinnedImageRef = map[string]string{ "name": "name is an OCI Image referenced by digest.\n\nThe format of the image ref is: host[:port][/namespace]/name@sha256:", } diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 2dac08f099..46b906518d 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -35,6 +35,7 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 +// +kubebuilder:validation:XValidation:rule="!has(self.spec.domain) || size('router-' + self.metadata.name + '.' + self.spec.domain) <= 253",message="The combined 'router-' + metadata.name + '.' + .spec.domain cannot exceed 253 characters" type IngressController struct { metav1.TypeMeta `json:",inline"` @@ -68,6 +69,22 @@ type IngressControllerSpec struct { // // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. // + // The domain value must be a valid DNS name. It must consist of lowercase + // alphanumeric characters, '-' or '.', and each label must start and end + // with an alphanumeric character and not exceed 63 characters. Maximum + // length of a valid DNS domain is 253 characters. + // + // The implementation may add a prefix such as "router-default." 
to the domain + // when constructing the router canonical hostname. To ensure the resulting + // hostname does not exceed the DNS maximum length of 253 characters, + // the domain length is additionally validated at the IngressController object + // level. For the maximum length of the domain value itself, the shortest + // possible variant of the prefix and the ingress controller name was considered + // for example "router-a." + // + // +kubebuilder:validation:MaxLength=244 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="domain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +kubebuilder:validation:XValidation:rule="self.split('.').all(label, size(label) <= 63)",message="each DNS label must not exceed 63 characters" // +optional Domain string `json:"domain,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml index dfd9e3ddb4..a18cf575ea 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml @@ -480,11 +480,9 @@ spec: the Authorizer interface properties: fieldSelector: - description: |- - fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: fieldSelector describes the limitation + on access based on field. It can only limit + access, not broaden it. properties: rawSelector: description: |- @@ -536,11 +534,9 @@ spec: Resource. "*" means all. 
type: string labelSelector: - description: |- - labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: labelSelector describes the limitation + on access based on labels. It can only + limit access, not broaden it. properties: rawSelector: description: |- @@ -631,11 +627,9 @@ spec: the Authorizer interface properties: fieldSelector: - description: |- - fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: fieldSelector describes the limitation + on access based on field. It can only limit + access, not broaden it. properties: rawSelector: description: |- @@ -687,11 +681,9 @@ spec: Resource. "*" means all. type: string labelSelector: - description: |- - labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. - - This field is alpha-level. To use this field, you must enable the - `AuthorizeWithSelectors` feature gate (disabled by default). + description: labelSelector describes the limitation + on access based on labels. It can only + limit access, not broaden it. 
properties: rawSelector: description: |- diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml index 10ca42895c..25c51d7956 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml @@ -164,7 +164,27 @@ spec: updated. If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. + + The domain value must be a valid DNS name. It must consist of lowercase + alphanumeric characters, '-' or '.', and each label must start and end + with an alphanumeric character and not exceed 63 characters. Maximum + length of a valid DNS domain is 253 characters. + + The implementation may add a prefix such as "router-default." to the domain + when constructing the router canonical hostname. To ensure the resulting + hostname does not exceed the DNS maximum length of 253 characters, + the domain length is additionally validated at the IngressController object + level. For the maximum length of the domain value itself, the shortest + possible variant of the prefix and the ingress controller name was considered + for example "router-a." 
+ maxLength: 244 type: string + x-kubernetes-validations: + - message: domain must consist of lower case alphanumeric characters, + '-' or '.', and must start and end with an alphanumeric character + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + - message: each DNS label must not exceed 63 characters + rule: self.split('.').all(label, size(label) <= 63) endpointPublishingStrategy: description: |- endpointPublishingStrategy is used to publish the ingress controller @@ -3234,6 +3254,11 @@ spec: type: object type: object type: object + x-kubernetes-validations: + - message: The combined 'router-' + metadata.name + '.' + .spec.domain cannot + exceed 253 characters + rule: '!has(self.spec.domain) || size(''router-'' + self.metadata.name + + ''.'' + self.spec.domain) <= 253' served: true storage: true subresources: diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 483d9720da..d3475d9024 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -1063,7 +1063,7 @@ func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", - "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. 
See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.\n\nThe domain value must be a valid DNS name. It must consist of lowercase alphanumeric characters, '-' or '.', and each label must start and end with an alphanumeric character and not exceed 63 characters. Maximum length of a valid DNS domain is 253 characters.\n\nThe implementation may add a prefix such as \"router-default.\" to the domain when constructing the router canonical hostname. To ensure the resulting hostname does not exceed the DNS maximum length of 253 characters, the domain length is additionally validated at the IngressController object level. For the maximum length of the domain value itself, the shortest possible variant of the prefix and the ingress controller name was considered for example \"router-a.\"", "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. 
For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index e055eb0d26..85018b16b7 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ 
b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -413,10 +413,12 @@ message RouterShard { // +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" // +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" message TLSConfig { - // termination indicates termination type. + // termination indicates the TLS termination type. // // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend // // Note: passthrough termination is incompatible with httpHeader actions diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 5a61f477e7..35c4064825 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -424,10 +424,12 @@ type RouterShard struct { // +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? 
!((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" // +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteExternalCertificate,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" type TLSConfig struct { - // termination indicates termination type. + // termination indicates the TLS termination type. // // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend // // Note: passthrough termination is incompatible with httpHeader actions diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index e6c44a6b02..4c8f9eeddf 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -173,7 +173,7 @@ func (RouterShard) SwaggerDoc() map[string]string { var map_TLSConfig = map[string]string{ "": "TLSConfig defines config used to secure a route and provide termination", - "termination": "termination indicates termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", + 
"termination": "termination indicates the TLS termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default)\n\n* passthrough - Traffic is sent straight to the destination without the router providing TLS termination\n\n* reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", "certificate": "certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate.", "key": "key provides key file contents", "caCertificate": "caCertificate provides the cert authority certificate contents", diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/k8s.io/gengo/v2/codetags/extractor.go b/vendor/k8s.io/gengo/v2/codetags/extractor.go new file mode 100644 index 0000000000..5e58b00831 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/codetags/extractor.go @@ -0,0 +1,85 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package codetags + +import ( + "strings" + "unicode/utf8" +) + +// Extract identifies and collects lines containing special metadata tags. +// It processes only lines that begin with the prefix. +// +// The portion of a line immediately following the prefix is treated as +// a potential tag name. To be considered valid, this tag name must +// match the regular expression `[a-zA-Z_][a-zA-Z0-9_.-:]*`. +// +// Extract returns a map where each key is a valid tag name found in +// lines that begin with the prefix. +// The value for each key is a slice of strings. Each string in this slice +// represents the contents of an original line after the prefix has been removed. +// +// Example: When called with prefix "+k8s:", lines: +// +// Comment line without marker +// +k8s:noArgs # comment +// +withValue=value1 +// +withValue=value2 +// +k8s:withArg(arg1)=value1 +// +k8s:withArg(arg2)=value2 # comment +// +k8s:withNamedArgs(arg1=value1, arg2=value2)=value +// +// Then this function will return: +// +// map[string][]string{ +// "noArgs": {"noArgs # comment"}, +// "withArg": {"withArg(arg1)=value1", "withArg(arg2)=value2 # comment"}, +// "withNamedArgs": {"withNamedArgs(arg1=value1, arg2=value2)=value"}, +// } +func Extract(prefix string, lines []string) map[string][]string { + out := map[string][]string{} + for _, line := range lines { + line = strings.TrimLeft(line, " \t") + if !strings.HasPrefix(line, prefix) { + continue + } + line = line[len(prefix):] + + // Find the end of the presumed tag name. + nameEnd := findNameEnd(line) + name := line[:nameEnd] + out[name] = append(out[name], line) + } + return out +} + +// findNameEnd matches a tag in the same way as the parser. 
+func findNameEnd(s string) int { + if len(s) == 0 { + return 0 + } + if r, _ := utf8.DecodeRuneInString(s); !isIdentBegin(r) { + return 0 + } + idx := strings.IndexFunc(s, func(r rune) bool { + return !(isTagNameInterior(r)) + }) + if idx == -1 { + return len(s) + } + return idx +} diff --git a/vendor/k8s.io/gengo/v2/codetags/parser.go b/vendor/k8s.io/gengo/v2/codetags/parser.go new file mode 100644 index 0000000000..8ff49b039e --- /dev/null +++ b/vendor/k8s.io/gengo/v2/codetags/parser.go @@ -0,0 +1,407 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codetags + +import ( + "fmt" + "strings" + "unicode" +) + +// Parse parses a tag string into a Tag, or returns an error if the tag +// string fails to parse. +// +// ParseOption may be provided to modify the behavior of the parser. The below +// describes the default behavior. +// +// A tag consists of a name, optional arguments, and an optional scalar value or +// tag value. For example, +// +// "name" +// "name=50" +// "name("featureX")=50" +// "name(limit: 10, path: "/xyz")=text value" +// "name(limit: 10, path: "/xyz")=+anotherTag(size: 100)" +// +// Arguments are optional and may be either: +// - A single positional argument. +// - One or more named arguments (in the format `name: value`). +// - (Positional and named arguments cannot be mixed.) 
+// +// For example, +// +// "name()" +// "name(arg)" +// "name(namedArg1: argValue1)" +// "name(namedArg1: argValue1, namedArg2: argValue2)" +// +// Argument values may be strings, ints, booleans, or identifiers. +// +// For example, +// +// "name("double-quoted")" +// "name(`backtick-quoted`)" +// "name(100)" +// "name(true)" +// "name(arg1: identifier)" +// "name(arg1:`string value`)" +// "name(arg1: 100)" +// "name(arg1: true)" +// +// Note: When processing Go source code comments, the Extract function is +// typically used first to find and isolate tag strings matching a specific +// prefix. Those extracted strings can then be parsed using this function. +// +// The value part of the tag is optional and follows an equals sign "=". If a +// value is present, it must be a string, int, boolean, identifier, or tag. +// +// For example, +// +// "name" # no value +// "name=identifier" +// "name="double-quoted value"" +// "name=`backtick-quoted value`" +// "name(100)" +// "name(true)" +// "name=+anotherTag" +// "name=+anotherTag(size: 100)" +// +// Trailing comments are ignored unless the RawValues option is enabled, in which +// case they are treated as part of the value. +// +// For example, +// +// "key=value # This comment is ignored" +// +// Formal Grammar: +// +// ::= [ "(" [ ] ")" ] [ ( "=" | "=+" ) ] +// ::= | +// ::= [ "," ]* +// ::= ":" +// ::= | | | +// +// ::= [a-zA-Z_][a-zA-Z0-9_-.:]* +// ::= [a-zA-Z_][a-zA-Z0-9_-.]* +// ::= /* Go-style double-quoted or backtick-quoted strings, +// ... with standard Go escape sequences for double-quoted strings. */ +// ::= /* Standard Go integer literals (decimal, 0x hex, 0o octal, 0b binary), +// ... with an optional +/- prefix. */ +// ::= "true" | "false" +func Parse(tag string, options ...ParseOption) (Tag, error) { + opts := parseOpts{} + for _, o := range options { + o(&opts) + } + + tag = strings.TrimSpace(tag) + return parseTag(tag, opts) +} + +// ParseAll calls Parse on each tag in the input slice. 
+func ParseAll(tags []string, options ...ParseOption) ([]Tag, error) { + var out []Tag + for _, tag := range tags { + parsed, err := Parse(tag, options...) + if err != nil { + return nil, err + } + out = append(out, parsed) + } + return out, nil +} + +type parseOpts struct { + rawValues bool +} + +// ParseOption provides a parser option. +type ParseOption func(*parseOpts) + +// RawValues skips parsing of the value part of the tag. If enabled, the Value +// in the parse response will contain all text following the "=" sign, up to the last +// non-whitespace character, and ValueType will be set to ValueTypeRaw. +// Default: disabled +func RawValues(enabled bool) ParseOption { + return func(opts *parseOpts) { + opts.rawValues = enabled + } +} + +func parseTag(input string, opts parseOpts) (Tag, error) { + const ( + stTag = "stTag" + stMaybeArgs = "stMaybeArgs" + stArg = "stArg" + stArgEndOfToken = "stArgEndOfToken" + stMaybeValue = "stMaybeValue" + stValue = "stValue" + stMaybeComment = "stMaybeComment" + ) + var startTag, endTag *Tag // both ends of the chain when parsing chained tags + + // accumulators + var tagName string // current tag name + var value string // current value + var valueType ValueType // current value type + cur := Arg{} // current argument + var args []Arg // current arguments slice + + s := scanner{buf: []rune(input)} // scanner for parsing the tag string + var incomplete bool // tracks if a token is incomplete + + // These are defined outside the loop to make errors easier. 
+ saveArg := func(v string, t ArgType) { + cur.Value = v + cur.Type = t + args = append(args, cur) + cur = Arg{} + } + saveInt := func(v string) { saveArg(v, ArgTypeInt) } + saveString := func(v string) { saveArg(v, ArgTypeString) } + saveBoolOrString := func(value string) { + if value == "true" || value == "false" { + saveArg(value, ArgTypeBool) + } else { + saveArg(value, ArgTypeString) + } + } + saveName := func(value string) { + cur.Name = value + } + saveTag := func() error { + usingNamedArgs := false + for i, arg := range args { + if (usingNamedArgs && arg.Name == "") || (!usingNamedArgs && arg.Name != "" && i > 0) { + return fmt.Errorf("can't mix named and positional arguments") + } + if arg.Name != "" { + usingNamedArgs = true + } + } + if !usingNamedArgs && len(args) > 1 { + return fmt.Errorf("multiple arguments must use 'name: value' syntax") + } + newTag := &Tag{Name: tagName, Args: args} + if startTag == nil { + startTag = newTag + endTag = newTag + } else { + endTag.ValueTag = newTag + endTag.ValueType = ValueTypeTag + endTag = newTag + } + args = nil // Reset to nil instead of empty slice + return nil + } + saveValue := func() { + endTag.Value = value + endTag.ValueType = valueType + } + var err error + st := stTag +parseLoop: + for r := s.peek(); r != EOF; r = s.peek() { + switch st { + case stTag: // Any leading whitespace is expected to be trimmed before parsing. 
+ switch { + case isIdentBegin(r): + tagName, err = s.nextIdent(isTagNameInterior) + if err != nil { + return Tag{}, err + } + st = stMaybeArgs + default: + break parseLoop + } + case stMaybeArgs: + switch { + case r == '(': + s.next() // consume ( + incomplete = true + st = stArg + case r == '=': + s.next() // consume = + if opts.rawValues { + // only raw values support empty values following = + valueType = ValueTypeRaw + } else { + incomplete = true + } + st = stValue + default: + st = stMaybeComment + } + case stArg: + switch { + case r == ')': + s.next() // consume ) + incomplete = false + st = stMaybeValue + case r == '-' || r == '+' || unicode.IsDigit(r): + number, err := s.nextNumber() + if err != nil { + return Tag{}, err + } + saveInt(number) + st = stArgEndOfToken + case r == '"' || r == '`': + str, err := s.nextString() + if err != nil { + return Tag{}, err + } + saveString(str) + st = stArgEndOfToken + case isIdentBegin(r): + identifier, err := s.nextIdent(isIdentInterior) + if err != nil { + return Tag{}, err + } + r = s.peek() // reset r after nextIdent + + switch { + case r == ',' || r == ')': // positional arg + if r == ',' { + r = s.skipWhitespace() // allow whitespace after , + } + saveBoolOrString(identifier) + st = stArgEndOfToken + case r == ':': // named arg + s.next() // consume : + r = s.skipWhitespace() // allow whitespace after : + saveName(identifier) + st = stArg + default: + break parseLoop + } + default: + break parseLoop + } + case stArgEndOfToken: + switch { + case r == ',': + s.next() // consume , + r = s.skipWhitespace() // allow whitespace after , + st = stArg + case r == ')': + s.next() // consume ) + incomplete = false + st = stMaybeValue + default: + break parseLoop + } + case stMaybeValue: + switch { + case r == '=': + s.next() // consume = + if opts.rawValues { + // Empty values are allowed for raw. + // Since = might be the last char in the input, we need + // to record the valueType as raw immediately. 
+ valueType = ValueTypeRaw + } + st = stValue + default: + st = stMaybeComment + } + case stValue: + switch { + case opts.rawValues: // When enabled, consume all remaining chars + incomplete = false + value = s.remainder() + break parseLoop + case r == '+' && isIdentBegin(s.peekN(1)): // tag value + incomplete = false + s.next() // consume + + if err := saveTag(); err != nil { + return Tag{}, err + } + st = stTag + case r == '-' || r == '+' || unicode.IsDigit(r): + incomplete = false + number, err := s.nextNumber() + valueType = ValueTypeInt + if err != nil { + return Tag{}, err + } + value = number + st = stMaybeComment + case r == '"' || r == '`': + incomplete = false + str, err := s.nextString() + if err != nil { + return Tag{}, err + } + value = str + valueType = ValueTypeString + st = stMaybeComment + case isIdentBegin(r): + incomplete = false + str, err := s.nextIdent(isIdentInterior) + if err != nil { + return Tag{}, err + } + value = str + if str == "true" || str == "false" { + valueType = ValueTypeBool + } else { + valueType = ValueTypeString + } + st = stMaybeComment + default: + break parseLoop + } + case stMaybeComment: + switch { + case s.nextIsTrailingComment(): + s.remainder() + default: + break parseLoop + } + default: + return Tag{}, fmt.Errorf("unexpected internal parser error: unknown state: %s at position %d", st, s.pos) + } + } + if s.peek() != EOF { + return Tag{}, fmt.Errorf("unexpected character %q at position %d", s.next(), s.pos) + } + if incomplete { + return Tag{}, fmt.Errorf("unexpected end of input") + } + if err := saveTag(); err != nil { + return Tag{}, err + } + if len(valueType) > 0 { + saveValue() + } + if startTag == nil { + return Tag{}, fmt.Errorf("unexpected internal parser error: no tags parsed") + } + return *startTag, nil +} + +func isIdentBegin(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isIdentInterior(r rune) bool { + return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' || r == '.' 
|| r == '-' +} + +func isTagNameInterior(r rune) bool { + return isIdentInterior(r) || r == ':' +} diff --git a/vendor/k8s.io/gengo/v2/codetags/scanner.go b/vendor/k8s.io/gengo/v2/codetags/scanner.go new file mode 100644 index 0000000000..5204e347f8 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/codetags/scanner.go @@ -0,0 +1,228 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codetags + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "unicode" +) + +type scanner struct { + buf []rune + pos int +} + +func (s *scanner) next() rune { + if s.pos >= len(s.buf) { + return EOF + } + r := s.buf[s.pos] + s.pos++ + return r +} + +func (s *scanner) peek() rune { + return s.peekN(0) +} + +func (s *scanner) peekN(n int) rune { + if s.pos+n >= len(s.buf) { + return EOF + } + return s.buf[s.pos+n] +} + +func (s *scanner) skipWhitespace() rune { + for r := s.peek(); unicode.IsSpace(r); r = s.peek() { + s.next() + } + return s.peek() +} + +func (s *scanner) remainder() string { + result := string(s.buf[s.pos:]) + s.pos = len(s.buf) + return result +} + +const ( + EOF = -1 +) + +func (s *scanner) nextIsTrailingComment() bool { + i := 0 + for ; unicode.IsSpace(s.peekN(i)); i++ { + } + return s.peekN(i) == '#' +} + +func (s *scanner) nextNumber() (string, error) { + const ( + stBegin = "stBegin" + stPrefix = "stPrefix" + stPosNeg = "stPosNeg" + stNumber = "stNumber" + ) + var buf bytes.Buffer + st := stBegin + +parseLoop: + for r := s.peek(); r 
!= EOF; r = s.peek() { + switch st { + case stBegin: + switch { + case r == '0': + buf.WriteRune(s.next()) + st = stPrefix + case r == '+' || r == '-': + buf.WriteRune(s.next()) + st = stPosNeg + case unicode.IsDigit(r): + buf.WriteRune(s.next()) + st = stNumber + default: + break parseLoop + } + case stPosNeg: + switch { + case r == '0': + buf.WriteRune(s.next()) + st = stPrefix + case unicode.IsDigit(r): + buf.WriteRune(s.next()) + st = stNumber + default: + break parseLoop + } + case stPrefix: + switch { + case unicode.IsDigit(r): + buf.WriteRune(s.next()) + st = stNumber + case r == 'x' || r == 'o' || r == 'b': + buf.WriteRune(s.next()) + st = stNumber + default: + break parseLoop + } + case stNumber: + const hexits = "abcdefABCDEF" + switch { + case unicode.IsDigit(r) || strings.Contains(hexits, string(r)): + buf.WriteRune(s.next()) + default: + break parseLoop + } + default: + return "", fmt.Errorf("unexpected internal parser error: unknown state: %s at position %d", st, s.pos) + } + } + numStr := buf.String() + if _, err := strconv.ParseInt(numStr, 0, 64); err != nil { + return "", fmt.Errorf("invalid number %q at position %d", numStr, s.pos) + } + return numStr, nil +} + +func (s *scanner) nextString() (string, error) { + const ( + stBegin = "stBegin" + stQuotedString = "stQuotedString" + stEscape = "stEscape" + ) + var buf bytes.Buffer + var quote rune + var incomplete bool + st := stBegin + +parseLoop: + for r := s.peek(); r != EOF; r = s.peek() { + switch st { + case stBegin: + switch { + case r == '"' || r == '`': + incomplete = true + quote = s.next() // consume quote + st = stQuotedString + default: + return "", fmt.Errorf("expected string at position %d", s.pos) + } + case stQuotedString: + switch { + case r == '\\': + s.next() // consume escape + st = stEscape + case r == quote: + incomplete = false + s.next() + break parseLoop + default: + buf.WriteRune(s.next()) + } + case stEscape: + switch { + case r == quote || r == '\\': + 
buf.WriteRune(s.next()) + st = stQuotedString + default: + return "", fmt.Errorf("unhandled escaped character %q", r) + } + default: + return "", fmt.Errorf("unexpected internal parser error: unknown state: %s at position %d", st, s.pos) + } + } + if incomplete { + return "", fmt.Errorf("unterminated string at position %d", s.pos) + } + return buf.String(), nil +} + +func (s *scanner) nextIdent(isInteriorChar func(r rune) bool) (string, error) { + const ( + stBegin = "stBegin" + stInterior = "stInterior" + ) + var buf bytes.Buffer + st := stBegin + +parseLoop: + for r := s.peek(); r != EOF; r = s.peek() { + switch st { + case stBegin: + switch { + case isIdentBegin(r): + buf.WriteRune(s.next()) + st = stInterior + default: + return "", fmt.Errorf("expected identifier at position %d", s.pos) + } + case stInterior: + switch { + case isInteriorChar(r): + buf.WriteRune(s.next()) + default: + break parseLoop + } + default: + return "", fmt.Errorf("unexpected internal parser error: unknown state: %s at position %d", st, s.pos) + } + } + return buf.String(), nil +} diff --git a/vendor/k8s.io/gengo/v2/codetags/types.go b/vendor/k8s.io/gengo/v2/codetags/types.go new file mode 100644 index 0000000000..d004f9bf22 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/codetags/types.go @@ -0,0 +1,169 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package codetags + +import ( + "strconv" + "strings" +) + +// Tag represents a single comment tag with typed args. +type Tag struct { + // Name is the name of the tag with no arguments. + Name string + + // Args is a list of optional arguments to the tag. + Args []Arg + + // Value is the string representation of the tag value. + // Provides the tag value when ValueType is ValueTypeString, ValueTypeBool, ValueTypeInt or ValueTypeRaw. + Value string + + // ValueTag is another tag parsed from the value of this tag. + // Provides the tag value when ValueType is ValueTypeTag. + ValueTag *Tag + + // ValueType is the type of the value. + ValueType ValueType +} + +// PositionalArg returns the positional argument. If there is no positional +// argument, it returns false. +func (t Tag) PositionalArg() (Arg, bool) { + if len(t.Args) == 0 || len(t.Args[0].Name) > 0 { + return Arg{}, false + } + return t.Args[0], true +} + +// NamedArg returns the named argument. If o named argument is found, it returns +// false. Always returns false for empty name; use PositionalArg instead. +func (t Tag) NamedArg(name string) (Arg, bool) { + if len(name) == 0 { + return Arg{}, false + } + for _, arg := range t.Args { + if arg.Name == name { + return arg, true + } + } + return Arg{}, false +} + +// String returns the canonical string representation of the tag. +// All strings are represented in double quotes. Spacing is normalized. 
+func (t Tag) String() string { + buf := strings.Builder{} + buf.WriteString(t.Name) + if len(t.Args) > 0 { + buf.WriteString("(") + for i, a := range t.Args { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(a.String()) + } + buf.WriteString(")") + } + if t.ValueType != ValueTypeNone { + if t.ValueType == ValueTypeTag { + buf.WriteString("=+") + buf.WriteString(t.ValueTag.String()) + } else { + buf.WriteString("=") + if t.ValueType == ValueTypeString { + buf.WriteString(strconv.Quote(t.Value)) + } else { + buf.WriteString(t.Value) + } + } + } + return buf.String() +} + +// Arg represents a argument. +type Arg struct { + // Name is the name of a named argument. This is zero-valued for positional arguments. + Name string + + // Value is the string value of an argument. It has been validated to match the Type. + // See the ArgType const godoc for further details on how to parse the value for the + // Type. + Value string + + // Type identifies the type of the argument. + Type ArgType +} + +func (a Arg) String() string { + buf := strings.Builder{} + if len(a.Name) > 0 { + buf.WriteString(a.Name) + buf.WriteString(": ") + } + if a.Type == ArgTypeString { + buf.WriteString(strconv.Quote(a.Value)) + } else { + buf.WriteString(a.Value) + } + return buf.String() +} + +// ArgType is an argument's type. +type ArgType string + +const ( + // ArgTypeString identifies string values. + ArgTypeString ArgType = "string" + + // ArgTypeInt identifies int values. Values of this type may be in decimal, + // octal, hex or binary string representations. Consider using strconv.ParseInt + // to parse, as it supports all these string representations. + ArgTypeInt ArgType = "int" + + // ArgTypeBool identifies bool values. Values of this type must either be the + // string "true" or "false". + ArgTypeBool ArgType = "bool" +) + +// ValueType is a tag's value type. +type ValueType string + +const ( + // ValueTypeNone indicates that the tag has no value. 
+ ValueTypeNone ValueType = "" + + // ValueTypeString identifies string values. + ValueTypeString ValueType = "string" + + // ValueTypeInt identifies int values. Values of this type may be in decimal, + // octal, hex or binary string representations. Consider using strconv.ParseInt + // to parse, as it supports all these string representations. + ValueTypeInt ValueType = "int" + + // ValueTypeBool identifies bool values. Values of this type must either be the + // string "true" or "false". + ValueTypeBool ValueType = "bool" + + // ValueTypeTag identifies that the value is another tag. + ValueTypeTag ValueType = "tag" + + // ValueTypeRaw identifies that the value is raw, untyped content and contains + // all text from the tag declaration following the "=" sign, up to the last + // non-whitespace character. + ValueTypeRaw ValueType = "raw" +) diff --git a/vendor/k8s.io/gengo/v2/comments.go b/vendor/k8s.io/gengo/v2/comments.go index aa041ae24a..fbc41bed54 100644 --- a/vendor/k8s.io/gengo/v2/comments.go +++ b/vendor/k8s.io/gengo/v2/comments.go @@ -19,8 +19,10 @@ package gengo import ( "bytes" "fmt" + "slices" "strings" - "unicode" + + "k8s.io/gengo/v2/codetags" ) // ExtractCommentTags parses comments for lines of the form: @@ -43,7 +45,7 @@ import ( // // map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {`"qux"`}} // -// Deprecated: Use ExtractFunctionStyleCommentTags. +// Deprecated: Prefer codetags.Extract and codetags.Parse. func ExtractCommentTags(marker string, lines []string) map[string][]string { out := map[string][]string{} for _, line := range lines { @@ -71,8 +73,11 @@ func ExtractCommentTags(marker string, lines []string) map[string][]string { // If the tag is not found, the default value is returned. Values are asserted // to be boolean ("true" or "false"), and any other value will cause an error // to be returned. If the key has multiple values, the first one will be used. 
+// +// This function is a wrapper around codetags.Extract and codetags.Parse, but only supports tags with +// a single position arg of type string, and a value of type bool. func ExtractSingleBoolCommentTag(marker string, key string, defaultVal bool, lines []string) (bool, error) { - tags, err := ExtractFunctionStyleCommentTags(marker, []string{key}, lines) + tags, err := ExtractFunctionStyleCommentTags(marker, []string{key}, lines, ParseValues(true)) if err != nil { return false, err } @@ -89,104 +94,77 @@ func ExtractSingleBoolCommentTag(marker string, key string, defaultVal bool, lin return false, fmt.Errorf("tag value for %q is not boolean: %q", key, values[0]) } -// ExtractFunctionStyleCommentTags parses comments for special metadata tags. The -// marker argument should be unique enough to identify the tags needed, and -// should not be a marker for tags you don't want, or else the caller takes -// responsibility for making that distinction. -// -// The tagNames argument is a list of specific tags being extracted. If this is -// nil or empty, all lines which match the marker are considered. If this is -// specified, only lines with begin with marker + one of the tags will be -// considered. This is useful when a common marker is used which may match -// lines which fail this syntax (e.g. which predate this definition). -// -// This function looks for input lines of the following forms: -// - 'marker' + "key=value" -// - 'marker' + "key()=value" -// - 'marker' + "key(arg)=value" -// -// The arg is optional. If not specified (either as "key=value" or as -// "key()=value"), the resulting Tag will have an empty Args list. -// -// The value is optional. If not specified, the resulting Tag will have "" as -// the value. -// -// Tag comment-lines may have a trailing end-of-line comment. -// -// The map returned here is keyed by the Tag's name without args. -// -// A tag can be specified more than one time and all values are returned. 
If -// the resulting map has an entry for a key, the value (a slice) is guaranteed -// to have at least 1 element. -// -// Example: if you pass "+" as the marker, and the following lines are in -// the comments: -// -// +foo=val1 // foo -// +bar -// +foo=val2 // also foo -// +baz="qux" -// +foo(arg) // still foo -// -// Then this function will return: +// ExtractFunctionStyleCommentTags parses comments for special metadata tags. // -// map[string][]Tag{ -// "foo": []Tag{{ -// Name: "foo", -// Args: nil, -// Value: "val1", -// }, { -// Name: "foo", -// Args: nil, -// Value: "val2", -// }, { -// Name: "foo", -// Args: []string{"arg"}, -// Value: "", -// }, { -// Name: "bar", -// Args: nil, -// Value: "" -// }, { -// Name: "baz", -// Args: nil, -// Value: "\"qux\"" -// }} -// -// This function should be preferred instead of ExtractCommentTags. -func ExtractFunctionStyleCommentTags(marker string, tagNames []string, lines []string) (map[string][]Tag, error) { - stripTrailingComment := func(in string) string { - parts := strings.SplitN(in, "//", 2) - return strings.TrimSpace(parts[0]) +// This function is a wrapper around codetags.Extract and codetags.Parse, but only supports tags with +// a single position arg of type string. 
+func ExtractFunctionStyleCommentTags(marker string, tagNames []string, lines []string, options ...TagOption) (map[string][]Tag, error) { + opts := tagOpts{} + for _, o := range options { + o(&opts) } out := map[string][]Tag{} - for _, line := range lines { - line = strings.TrimSpace(line) - if len(line) == 0 { - continue - } - if !strings.HasPrefix(line, marker) { + + tags := codetags.Extract(marker, lines) + for tagName, tagLines := range tags { + if len(tagNames) > 0 && !slices.Contains(tagNames, tagName) { continue } - line = stripTrailingComment(line) - kv := strings.SplitN(line[len(marker):], "=", 2) - key := kv[0] - val := "" - if len(kv) == 2 { - val = kv[1] + for _, line := range tagLines { + typedTag, err := codetags.Parse(line, codetags.RawValues(!opts.parseValues)) + if err != nil { + return nil, err + } + tag, err := toStringArgs(typedTag) + if err != nil { + return nil, err + } + out[tagName] = append(out[tagName], tag) } + } + + return out, nil +} + +// TagOption provides an option for extracting tags. +type TagOption func(opts *tagOpts) + +// ParseValues enables parsing of tag values. When enabled, tag values must +// be valid quoted strings, ints, booleans, identifiers, or tags. Otherwise, a +// parse error will be returned. Also, when enabled, trailing comments are +// ignored. 
+// Default: disabled +func ParseValues(enabled bool) TagOption { + return func(opts *tagOpts) { + opts.parseValues = enabled + } +} + +type tagOpts struct { + parseValues bool +} - tag := Tag{} - if name, args, err := parseTagKey(key, tagNames); err != nil { - return nil, err - } else if name != "" { - tag.Name, tag.Args = name, args - tag.Value = val - out[tag.Name] = append(out[tag.Name], tag) +func toStringArgs(tag codetags.Tag) (Tag, error) { + var stringArgs []string + if len(tag.Args) > 1 { + return Tag{}, fmt.Errorf("expected one argument, got: %v", tag.Args) + } + for _, arg := range tag.Args { + if len(arg.Name) > 0 { + return Tag{}, fmt.Errorf("unexpected named argument: %q", arg.Name) + } + if arg.Type != codetags.ArgTypeString { + return Tag{}, fmt.Errorf("unexpected argument type: %s", arg.Type) + } else { + stringArgs = append(stringArgs, arg.Value) } } - return out, nil + return Tag{ + Name: tag.Name, + Args: stringArgs, + Value: tag.Value, + }, nil } // Tag represents a single comment tag. @@ -214,77 +192,3 @@ func (t Tag) String() string { } return buf.String() } - -// parseTagKey parses the key part of an extended comment tag, including -// optional arguments. The input is assumed to be the entire text of the -// original input after the marker, up to the '=' or end-of-line. -// -// The tags argument is an optional list of tag names to match. If it is nil or -// empty, all tags match. -// -// At the moment, arguments are very strictly formatted (see parseTagArgs) and -// whitespace is not allowed. -// -// This function returns the key name and arguments, unless tagNames was -// specified and the input did not match, in which case it returns "". 
-func parseTagKey(input string, tagNames []string) (string, []string, error) { - parts := strings.SplitN(input, "(", 2) - key := parts[0] - - if len(tagNames) > 0 { - found := false - for _, tn := range tagNames { - if key == tn { - found = true - break - } - } - if !found { - return "", nil, nil - } - } - - var args []string - if len(parts) == 2 { - if ret, err := parseTagArgs(parts[1]); err != nil { - return key, nil, fmt.Errorf("failed to parse tag args: %v", err) - } else { - args = ret - } - } - return key, args, nil -} - -// parseTagArgs parses the arguments part of an extended comment tag. The input -// is assumed to be the entire text of the original input after the opening -// '(', including the trailing ')'. -// -// At the moment this assumes that the entire string between the opening '(' -// and the trailing ')' is a single Go-style identifier token, but in the -// future could be extended to have multiple arguments with actual syntax. The -// single token may consist only of letters and digits. Whitespace is not -// allowed. -func parseTagArgs(input string) ([]string, error) { - // This is really dumb, but should be extendable to a "real" parser if - // needed. 
- runes := []rune(input) - for i, r := range runes { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - continue - } - if r == ',' { - return nil, fmt.Errorf("multiple arguments are not supported: %q", input) - } - if r == ')' { - if i != len(runes)-1 { - return nil, fmt.Errorf("unexpected characters after ')': %q", string(runes[i:])) - } - if i == 0 { - return nil, nil - } - return []string{string(runes[:i])}, nil - } - return nil, fmt.Errorf("unsupported character: %q", string(r)) - } - return nil, fmt.Errorf("no closing ')' found: %q", input) -} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index e4ce843b0c..da2e8f11ad 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -48,11 +48,11 @@ type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition // GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when // possible. type OpenAPIDefinitionGetter interface { - OpenAPIDefinition() *OpenAPIDefinition + OpenAPIDefinition() OpenAPIDefinition } type OpenAPIV3DefinitionGetter interface { - OpenAPIV3Definition() *OpenAPIDefinition + OpenAPIV3Definition() OpenAPIDefinition } type PathHandler interface { diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go index 61141a500d..81280aae64 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -22,7 +22,7 @@ import ( "strings" "k8s.io/kube-openapi/pkg/validation/spec" - "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v6/schema" ) // ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. 
diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go index 2c6fd76a91..e40f6056e7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -22,7 +22,7 @@ import ( "strings" "k8s.io/kube-openapi/pkg/util/proto" - "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v6/schema" ) // ToSchema converts openapi definitions into a schema suitable for structured diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 9887d185b2..c4a083cb41 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -20,7 +20,7 @@ import ( "fmt" "sort" - "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v6/schema" ) const ( diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 1b758ab25a..c7b69b2005 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -22,7 +22,7 @@ import ( "strings" openapi_v2 "github.com/google/gnostic-models/openapiv2" - yaml "sigs.k8s.io/yaml/goyaml.v2" + yaml "go.yaml.in/yaml/v2" ) func newSchemaError(path *Path, format string, a ...interface{}) error { diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index d9f2896e35..8694c6c769 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -22,7 +22,7 @@ import ( "strings" openapi_v3 "github.com/google/gnostic-models/openapiv3" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) // Temporary parse implementation to be used until gnostic->kube-openapi conversion diff --git 
a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go index c85067a263..6981a40f51 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go @@ -16,6 +16,7 @@ package strfmt import ( "encoding" + "encoding/json" "reflect" "strings" "sync" @@ -231,3 +232,26 @@ func (f *defaultFormats) Parse(name, data string) (interface{}, error) { } return nil, errors.InvalidTypeName(name) } + +// unmarshalJSON provides a generic implementation of json.Unmarshaler interface's UnmarshalJSON function for basic string formats. +func unmarshalJSON[T ~string](r *T, data []byte) error { + if string(data) == jsonNull { + return nil + } + var ustr string + if err := json.Unmarshal(data, &ustr); err != nil { + return err + } + *r = T(ustr) + return nil +} + +// deepCopy provides a generic implementation of DeepCopy for basic string formats. +func deepCopy[T ~string](r *T) *T { + if r == nil { + return nil + } + out := new(T) + *out = *r + return out +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go new file mode 100644 index 0000000000..64d665476e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/kubernetes-extensions.go @@ -0,0 +1,143 @@ +// Copyright 2024 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + "encoding/json" + "regexp" +) + +const k8sPrefix = "k8s-" + +func init() { + // register formats in the KubernetesExtensions registry: + // - k8s-short-name + // - k8s-long-name + shortName := ShortName("") + Default.Add(k8sPrefix+"short-name", &shortName, IsShortName) + + longName := LongName("") + Default.Add(k8sPrefix+"long-name", &longName, IsLongName) +} + +// ShortName is a name, up to 63 characters long, composed of alphanumeric +// characters and dashes, which cannot begin or end with a dash. +// +// ShortName almost conforms to the definition of a label in DNS (RFC 1123), +// except that uppercase letters are not allowed. +// +// xref: https://github.com/kubernetes/kubernetes/issues/71140 +// +// swagger:strfmt k8s-short-name +type ShortName string + +func (r ShortName) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +func (r *ShortName) UnmarshalText(data []byte) error { // validation is performed later on + *r = ShortName(data) + return nil +} + +func (r ShortName) String() string { + return string(r) +} + +func (r ShortName) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +func (r *ShortName) UnmarshalJSON(data []byte) error { + return unmarshalJSON(r, data) +} + +func (r *ShortName) DeepCopyInto(out *ShortName) { + *out = *r +} + +func (r *ShortName) DeepCopy() *ShortName { + return deepCopy(r) +} + +const shortNameFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" + +// ShortNameMaxLength is a label's max length in DNS (RFC 1123) +const ShortNameMaxLength int = 63 + +var shortNameRegexp = regexp.MustCompile("^" + shortNameFmt + "$") + +// IsShortName checks if a string is a valid ShortName. 
+func IsShortName(value string) bool { + return len(value) <= ShortNameMaxLength && + shortNameRegexp.MatchString(value) +} + +// LongName is a name, up to 253 characters long, composed of dot-separated +// segments; each segment uses only alphanumerics and dashes (no +// leading/trailing). +// +// LongName almost conforms to the definition of a subdomain in DNS (RFC 1123), +// except that uppercase letters are not allowed, and there is no max length +// limit of 63 for each of the dot-separated DNS Labels that make up the +// subdomain. +// +// xref: https://github.com/kubernetes/kubernetes/issues/71140 +// xref: https://github.com/kubernetes/kubernetes/issues/79351 +// +// swagger:strfmt k8s-long-name +type LongName string + +func (r LongName) MarshalText() ([]byte, error) { + return []byte(string(r)), nil +} + +func (r *LongName) UnmarshalText(data []byte) error { // validation is performed later on + *r = LongName(data) + return nil +} + +func (r LongName) String() string { + return string(r) +} + +func (r LongName) MarshalJSON() ([]byte, error) { + return json.Marshal(string(r)) +} + +func (r *LongName) UnmarshalJSON(data []byte) error { + return unmarshalJSON(r, data) +} + +func (r *LongName) DeepCopyInto(out *LongName) { + *out = *r +} + +func (r *LongName) DeepCopy() *LongName { + return deepCopy(r) +} + +const longNameFmt string = shortNameFmt + "(\\." + shortNameFmt + ")*" + +// LongNameMaxLength is a subdomain's max length in DNS (RFC 1123) +const LongNameMaxLength int = 253 + +var longNameRegexp = regexp.MustCompile("^" + longNameFmt + "$") + +// IsLongName checks if a string is a valid LongName. 
+func IsLongName(value string) bool { + return len(value) <= LongNameMaxLength && + longNameRegexp.MatchString(value) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 654fce4a9b..571df656ea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -875,8 +875,8 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic-models v0.6.9 -## explicit; go 1.21 +# github.com/google/gnostic-models v0.7.0 +## explicit; go 1.22 github.com/google/gnostic-models/compiler github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema @@ -1113,7 +1113,7 @@ github.com/moby/term/windows # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent -# github.com/modern-go/reflect2 v1.0.2 +# github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee ## explicit; go 1.12 github.com/modern-go/reflect2 # github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 @@ -1252,7 +1252,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo github.com/openshift-eng/openshift-tests-extension/pkg/junit github.com/openshift-eng/openshift-tests-extension/pkg/util/sets github.com/openshift-eng/openshift-tests-extension/pkg/version -# github.com/openshift/api v0.0.0-20251013165757-fe48e8fd548b +# github.com/openshift/api v0.0.0-20251013165757-fe48e8fd548b => github.com/pablintino/api v0.0.0-20251030115546-80b1b5867a30 ## explicit; go 1.24.0 github.com/openshift/api github.com/openshift/api/annotations @@ -1982,8 +1982,8 @@ go.uber.org/zap/zapgrpc # go.yaml.in/yaml/v2 v2.4.2 ## explicit; go 1.15 go.yaml.in/yaml/v2 -# go.yaml.in/yaml/v3 v3.0.3 -## explicit; go 1.22 +# go.yaml.in/yaml/v3 v3.0.4 +## explicit; go 1.16 go.yaml.in/yaml/v3 # go4.org v0.0.0-20200104003542-c7e774b10ea0 ## explicit @@ -2504,7 +2504,7 @@ honnef.co/go/tools/stylecheck/st1021 
honnef.co/go/tools/stylecheck/st1022 honnef.co/go/tools/stylecheck/st1023 honnef.co/go/tools/unused -# k8s.io/api v0.33.3 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e +# k8s.io/api v0.34.1 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250716113245-b94367cabf3e ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -2585,7 +2585,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalint k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.33.3 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e +# k8s.io/apimachinery v0.34.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250716113245-b94367cabf3e ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -3330,9 +3330,10 @@ k8s.io/dynamic-resource-allocation/internal/queue k8s.io/dynamic-resource-allocation/resourceclaim k8s.io/dynamic-resource-allocation/resourceslice/tracker k8s.io/dynamic-resource-allocation/structured -# k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 +# k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f ## explicit; go 1.20 k8s.io/gengo/v2 +k8s.io/gengo/v2/codetags k8s.io/gengo/v2/generator k8s.io/gengo/v2/namer k8s.io/gengo/v2/parser @@ -3361,8 +3362,8 @@ k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 -# k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff -## explicit; go 1.21 +# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b +## explicit; go 1.23 
k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder @@ -3897,6 +3898,9 @@ sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value +# sigs.k8s.io/structured-merge-diff/v6 v6.3.0 +## explicit; go 1.23 +sigs.k8s.io/structured-merge-diff/v6/schema # sigs.k8s.io/yaml v1.6.0 ## explicit; go 1.22 sigs.k8s.io/yaml @@ -3934,3 +3938,4 @@ sigs.k8s.io/yaml/goyaml.v3 # k8s.io/sample-apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20250716113245-b94367cabf3e # k8s.io/sample-cli-plugin => github.com/openshift/kubernetes/staging/src/k8s.io/sample-cli-plugin v0.0.0-20250716113245-b94367cabf3e # k8s.io/sample-controller => github.com/openshift/kubernetes/staging/src/k8s.io/sample-controller v0.0.0-20250716113245-b94367cabf3e +# github.com/openshift/api => github.com/pablintino/api v0.0.0-20251030115546-80b1b5867a30 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go new file mode 100644 index 0000000000..9081ccbc73 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package schema defines a targeted schema language which allows one to +// represent all the schema information necessary to perform "structured" +// merges and diffs. +// +// Due to the targeted nature of the data model, the schema language can fit in +// just a few hundred lines of go code, making it much more understandable and +// concise than e.g. OpenAPI. +// +// This schema was derived by observing the API objects used by Kubernetes, and +// formalizing a model which allows certain operations ("apply") to be more +// well defined. 
It is currently missing one feature: one-of ("unions"). +package schema diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go new file mode 100644 index 0000000000..5d3707a5b5 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go @@ -0,0 +1,375 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "sync" +) + +// Schema is a list of named types. +// +// Schema types are indexed in a map before the first search so this type +// should be considered immutable. +type Schema struct { + Types []TypeDef `yaml:"types,omitempty"` + + once sync.Once + m map[string]TypeDef + + lock sync.Mutex + // Cached results of resolving type references to atoms. Only stores + // type references which require fields of Atom to be overriden. + resolvedTypes map[TypeRef]Atom +} + +// A TypeSpecifier references a particular type in a schema. +type TypeSpecifier struct { + Type TypeRef `yaml:"type,omitempty"` + Schema Schema `yaml:"schema,omitempty"` +} + +// TypeDef represents a named type in a schema. +type TypeDef struct { + // Top level types should be named. Every type must have a unique name. + Name string `yaml:"name,omitempty"` + + Atom `yaml:"atom,omitempty,inline"` +} + +// TypeRef either refers to a named type or declares an inlined type. 
+type TypeRef struct { + // Either the name or one member of Atom should be set. + NamedType *string `yaml:"namedType,omitempty"` + Inlined Atom `yaml:",inline,omitempty"` + + // If this reference refers to a map-type or list-type, this field overrides + // the `ElementRelationship` of the referred type when resolved. + // If this field is nil, then it has no effect. + // See `Map` and `List` for more information about `ElementRelationship` + ElementRelationship *ElementRelationship `yaml:"elementRelationship,omitempty"` +} + +// Atom represents the smallest possible pieces of the type system. +// Each set field in the Atom represents a possible type for the object. +// If none of the fields are set, any object will fail validation against the atom. +type Atom struct { + *Scalar `yaml:"scalar,omitempty"` + *List `yaml:"list,omitempty"` + *Map `yaml:"map,omitempty"` +} + +// Scalar (AKA "primitive") represents a type which has a single value which is +// either numeric, string, or boolean, or untyped for any of them. +// +// TODO: split numeric into float/int? Something even more fine-grained? +type Scalar string + +const ( + Numeric = Scalar("numeric") + String = Scalar("string") + Boolean = Scalar("boolean") + Untyped = Scalar("untyped") +) + +// ElementRelationship is an enum of the different possible relationships +// between the elements of container types (maps, lists). +type ElementRelationship string + +const ( + // Associative only applies to lists (see the documentation there). + Associative = ElementRelationship("associative") + // Atomic makes container types (lists, maps) behave + // as scalars / leaf fields + Atomic = ElementRelationship("atomic") + // Separable means the items of the container type have no particular + // relationship (default behavior for maps). + Separable = ElementRelationship("separable") +) + +// Map is a key-value pair. 
Its default semantics are the same as an +// associative list, but: +// - It is serialized differently: +// map: {"k": {"value": "v"}} +// list: [{"key": "k", "value": "v"}] +// - Keys must be string typed. +// - Keys can't have multiple components. +// +// Optionally, maps may be atomic (for example, imagine representing an RGB +// color value--it doesn't make sense to have different actors own the R and G +// values). +// +// Maps may also represent a type which is composed of a number of different fields. +// Each field has a name and a type. +// +// Fields are indexed in a map before the first search so this type +// should be considered immutable. +type Map struct { + // Each struct field appears exactly once in this list. The order in + // this list defines the canonical field ordering. + Fields []StructField `yaml:"fields,omitempty"` + + // A Union is a grouping of fields with special rules. It may refer to + // one or more fields in the above list. A given field from the above + // list may be referenced in exactly 0 or 1 places in the below list. + // One can have multiple unions in the same struct, but the fields can't + // overlap between unions. + Unions []Union `yaml:"unions,omitempty"` + + // ElementType is the type of the structs's unknown fields. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the map's items. + // * `separable` (or unset) implies that each element is 100% independent. + // * `atomic` implies that all elements depend on each other, and this + // is effectively a scalar / leaf field; it doesn't make sense for + // separate actors to set the elements. Example: an RGB color struct; + // it would never make sense to "own" only one component of the + // color. + // The default behavior for maps is `separable`; it's permitted to + // leave this unset to get the default behavior. 
+ ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + once sync.Once + m map[string]StructField +} + +// FindField is a convenience function that returns the referenced StructField, +// if it exists, or (nil, false) if it doesn't. +func (m *Map) FindField(name string) (StructField, bool) { + m.once.Do(func() { + m.m = make(map[string]StructField, len(m.Fields)) + for _, field := range m.Fields { + m.m[field.Name] = field + } + }) + sf, ok := m.m[name] + return sf, ok +} + +// CopyInto this instance of Map into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (m *Map) CopyInto(dst *Map) { + if dst == nil { + return + } + + // Map type is considered immutable so sharing references + dst.Fields = m.Fields + dst.ElementType = m.ElementType + dst.Unions = m.Unions + dst.ElementRelationship = m.ElementRelationship + + if m.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = m.m + }) + } +} + +// UnionFields are mapping between the fields that are part of the union and +// their discriminated value. The discriminated value has to be set, and +// should not conflict with other discriminated value in the list. +type UnionField struct { + // FieldName is the name of the field that is part of the union. This + // is the serialized form of the field. + FieldName string `yaml:"fieldName"` + // Discriminatorvalue is the value of the discriminator to + // select that field. If the union doesn't have a discriminator, + // this field is ignored. + DiscriminatorValue string `yaml:"discriminatorValue"` +} + +// Union, or oneof, means that only one of multiple fields of a structure can be +// set at a time. 
Setting the discriminator helps clearing oher fields: +// - If discriminator changed to non-nil, and a new field has been added +// that doesn't match, an error is returned, +// - If discriminator hasn't changed and two fields or more are set, an +// error is returned, +// - If discriminator changed to non-nil, all other fields but the +// discriminated one will be cleared, +// - Otherwise, If only one field is left, update discriminator to that value. +type Union struct { + // Discriminator, if present, is the name of the field that + // discriminates fields in the union. The mapping between the value of + // the discriminator and the field is done by using the Fields list + // below. + Discriminator *string `yaml:"discriminator,omitempty"` + + // DeduceInvalidDiscriminator indicates if the discriminator + // should be updated automatically based on the fields set. This + // typically defaults to false since we don't want to deduce by + // default (the behavior exists to maintain compatibility on + // existing types and shouldn't be used for new types). + DeduceInvalidDiscriminator bool `yaml:"deduceInvalidDiscriminator,omitempty"` + + // This is the list of fields that belong to this union. All the + // fields present in here have to be part of the parent + // structure. Discriminator (if oneOf has one), is NOT included in + // this list. The value for field is how we map the name of the field + // to actual value for discriminator. + Fields []UnionField `yaml:"fields,omitempty"` +} + +// StructField pairs a field name with a field type. +type StructField struct { + // Name is the field name. + Name string `yaml:"name,omitempty"` + // Type is the field type. + Type TypeRef `yaml:"type,omitempty"` + // Default value for the field, nil if not present. + Default interface{} `yaml:"default,omitempty"` +} + +// List represents a type which contains a zero or more elements, all of the +// same subtype. 
Lists may be either associative: each element is more or less +// independent and could be managed by separate entities in the system; or +// atomic, where the elements are heavily dependent on each other: it is not +// sensible to change one element without considering the ramifications on all +// the other elements. +type List struct { + // ElementType is the type of the list's elements. + ElementType TypeRef `yaml:"elementType,omitempty"` + + // ElementRelationship states the relationship between the list's elements + // and must have one of these values: + // * `atomic`: the list is treated as a single entity, like a scalar. + // * `associative`: + // - If the list element is a scalar, the list is treated as a set. + // - If the list element is a map, the list is treated as a map. + // There is no default for this value for lists; all schemas must + // explicitly state the element relationship for all lists. + ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` + + // Iff ElementRelationship is `associative`, and the element type is + // map, then Keys must have non-zero length, and it lists the fields + // of the element's map type which are to be used as the keys of the + // list. + // + // TODO: change this to "non-atomic struct" above and make the code reflect this. + // + // Each key must refer to a single field name (no nesting, not JSONPath). + Keys []string `yaml:"keys,omitempty"` +} + +// FindNamedType is a convenience function that returns the referenced TypeDef, +// if it exists, or (nil, false) if it doesn't. 
+func (s *Schema) FindNamedType(name string) (TypeDef, bool) { + s.once.Do(func() { + s.m = make(map[string]TypeDef, len(s.Types)) + for _, t := range s.Types { + s.m[t.Name] = t + } + }) + t, ok := s.m[name] + return t, ok +} + +func (s *Schema) resolveNoOverrides(tr TypeRef) (Atom, bool) { + result := Atom{} + + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + + result = t.Atom + } else { + result = tr.Inlined + } + + return result, true +} + +// Resolve is a convenience function which returns the atom referenced, whether +// it is inline or named. Returns (Atom{}, false) if the type can't be resolved. +// +// This allows callers to not care about the difference between a (possibly +// inlined) reference and a definition. +func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { + // If this is a plain reference with no overrides, just return the type + if tr.ElementRelationship == nil { + return s.resolveNoOverrides(tr) + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.resolvedTypes == nil { + s.resolvedTypes = make(map[TypeRef]Atom) + } + + var result Atom + var exists bool + + // Return cached result if available + // If not, calculate result and cache it + if result, exists = s.resolvedTypes[tr]; !exists { + if result, exists = s.resolveNoOverrides(tr); exists { + // Allow field-level electives to override the referred type's modifiers + switch { + case result.Map != nil: + mapCopy := Map{} + result.Map.CopyInto(&mapCopy) + mapCopy.ElementRelationship = *tr.ElementRelationship + result.Map = &mapCopy + case result.List != nil: + listCopy := *result.List + listCopy.ElementRelationship = *tr.ElementRelationship + result.List = &listCopy + case result.Scalar != nil: + return Atom{}, false + default: + return Atom{}, false + } + } else { + return Atom{}, false + } + + // Save result. If it is nil, that is also recorded as not existing. 
+ s.resolvedTypes[tr] = result + } + + return result, true +} + +// Clones this instance of Schema into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (s *Schema) CopyInto(dst *Schema) { + if dst == nil { + return + } + + // Schema type is considered immutable so sharing references + dst.Types = s.Types + + if s.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = s.m + }) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go new file mode 100644 index 0000000000..b668eff833 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/equals.go @@ -0,0 +1,202 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import "reflect" + +// Equals returns true iff the two Schemas are equal. +func (a *Schema) Equals(b *Schema) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + + if len(a.Types) != len(b.Types) { + return false + } + for i := range a.Types { + if !a.Types[i].Equals(&b.Types[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two TypeRefs are equal. 
+// +// Note that two typerefs that have an equivalent type but where one is +// inlined and the other is named, are not considered equal. +func (a *TypeRef) Equals(b *TypeRef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.NamedType == nil) != (b.NamedType == nil) { + return false + } + if a.NamedType != nil { + if *a.NamedType != *b.NamedType { + return false + } + //return true + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + return a.Inlined.Equals(&b.Inlined) +} + +// Equals returns true iff the two TypeDefs are equal. +func (a *TypeDef) Equals(b *TypeDef) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + return a.Atom.Equals(&b.Atom) +} + +// Equals returns true iff the two Atoms are equal. +func (a *Atom) Equals(b *Atom) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Scalar == nil) != (b.Scalar == nil) { + return false + } + if (a.List == nil) != (b.List == nil) { + return false + } + if (a.Map == nil) != (b.Map == nil) { + return false + } + switch { + case a.Scalar != nil: + return *a.Scalar == *b.Scalar + case a.List != nil: + return a.List.Equals(b.List) + case a.Map != nil: + return a.Map.Equals(b.Map) + } + return true +} + +// Equals returns true iff the two Maps are equal. +func (a *Map) Equals(b *Map) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + if len(a.Unions) != len(b.Unions) { + return false + } + for i := range a.Unions { + if !a.Unions[i].Equals(&b.Unions[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two Unions are equal. 
+func (a *Union) Equals(b *Union) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if (a.Discriminator == nil) != (b.Discriminator == nil) { + return false + } + if a.Discriminator != nil { + if *a.Discriminator != *b.Discriminator { + return false + } + } + if a.DeduceInvalidDiscriminator != b.DeduceInvalidDiscriminator { + return false + } + if len(a.Fields) != len(b.Fields) { + return false + } + for i := range a.Fields { + if !a.Fields[i].Equals(&b.Fields[i]) { + return false + } + } + return true +} + +// Equals returns true iff the two UnionFields are equal. +func (a *UnionField) Equals(b *UnionField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.FieldName != b.FieldName { + return false + } + if a.DiscriminatorValue != b.DiscriminatorValue { + return false + } + return true +} + +// Equals returns true iff the two StructFields are equal. +func (a *StructField) Equals(b *StructField) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if a.Name != b.Name { + return false + } + if !reflect.DeepEqual(a.Default, b.Default) { + return false + } + return a.Type.Equals(&b.Type) +} + +// Equals returns true iff the two Lists are equal. +func (a *List) Equals(b *List) bool { + if a == nil || b == nil { + return a == nil && b == nil + } + if !a.ElementType.Equals(&b.ElementType) { + return false + } + if a.ElementRelationship != b.ElementRelationship { + return false + } + if len(a.Keys) != len(b.Keys) { + return false + } + for i := range a.Keys { + if a.Keys[i] != b.Keys[i] { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go new file mode 100644 index 0000000000..6eb6c36df3 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/schemaschema.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// SchemaSchemaYAML is a schema against which you can validate other schemas. +// It will validate itself. It can be unmarshalled into a Schema type. +var SchemaSchemaYAML = `types: +- name: schema + map: + fields: + - name: types + type: + list: + elementRelationship: associative + elementType: + namedType: typeDef + keys: + - name +- name: typeDef + map: + fields: + - name: name + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped +- name: typeRef + map: + fields: + - name: namedType + type: + scalar: string + - name: scalar + type: + scalar: string + - name: map + type: + namedType: map + - name: list + type: + namedType: list + - name: untyped + type: + namedType: untyped + - name: elementRelationship + type: + scalar: string +- name: scalar + scalar: string +- name: map + map: + fields: + - name: fields + type: + list: + elementType: + namedType: structField + elementRelationship: associative + keys: [ "name" ] + - name: unions + type: + list: + elementType: + namedType: union + elementRelationship: atomic + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string +- name: unionField + map: + fields: + - name: fieldName + type: + scalar: string + - name: discriminatorValue + type: + scalar: string +- name: union + map: + 
fields: + - name: discriminator + type: + scalar: string + - name: deduceInvalidDiscriminator + type: + scalar: boolean + - name: fields + type: + list: + elementRelationship: associative + elementType: + namedType: unionField + keys: + - fieldName +- name: structField + map: + fields: + - name: name + type: + scalar: string + - name: type + type: + namedType: typeRef + - name: default + type: + namedType: __untyped_atomic_ +- name: list + map: + fields: + - name: elementType + type: + namedType: typeRef + - name: elementRelationship + type: + scalar: string + - name: keys + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: untyped + map: + fields: + - name: elementRelationship + type: + scalar: string +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +`