diff --git a/cmd/machine-api-tests-ext/provider.go b/cmd/machine-api-tests-ext/provider.go index 7fc96febfd..cbe28d54da 100644 --- a/cmd/machine-api-tests-ext/provider.go +++ b/cmd/machine-api-tests-ext/provider.go @@ -1,7 +1,6 @@ package main import ( - "context" "encoding/json" "fmt" "os" @@ -10,10 +9,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - kclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/openshift-hack/e2e" conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -90,11 +86,6 @@ func initializeTestFramework(provider string) error { } testContext.Host = cfg.Host - // Ensure that Kube tests run privileged (like they do upstream) - testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { - return e2e.CreateTestingNS(ctx, baseName, c, labels, true) - } - gomega.RegisterFailHandler(ginkgo.Fail) framework.AfterReadingAllFlags(testContext) diff --git a/go.mod b/go.mod index a60dbd4029..1c0f8f88ce 100644 --- a/go.mod +++ b/go.mod @@ -6,9 +6,10 @@ go 1.24.0 // an API yet we can leverage so that I do not have to copy what openshift/kubernetes/openshift-hack/cmd/k8s-tests-ext did to initialize. replace ( github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 - k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 // openshift kubernetes has very old copy of k8s.io/kubernetes/pkg/kubelet/server/server.go - k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292 // openshift kubernetes has very old copy of k8s.io/kubernetes/cmd/kubelet/app/options/options.go - k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20251027205255-4e0347881cbd +// The following replace directives could be removed after dropping the namespace-checking logic from OTE. The current assumption is that origin ensures all required permissions are in place before launching OTE.
+//k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20250716113245-b94367cabf3e // openshift kubernetes has very old copy of k8s.io/kubernetes/pkg/kubelet/server/server.go +//k8s.io/kubelet => github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20250716113245-b94367cabf3e // openshift kubernetes has very old copy of k8s.io/kubernetes/cmd/kubelet/app/options/options.go: +//k8s.io/kubernetes => github.com/openshift/kubernetes v1.30.1-0.20250716113245-b94367cabf3e //v1.30.1-0.20250704150419-38c60a516ecb ) require ( @@ -19,7 +20,7 @@ require ( github.com/onsi/ginkgo/v2 v2.27.2 github.com/onsi/gomega v1.38.2 github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7 - github.com/openshift/api v0.0.0-20251106190826-ebe535b08719 + github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 @@ -127,7 +128,6 @@ require ( github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/camelcase v1.0.0 // indirect github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -237,10 +237,9 @@ require ( github.com/nishanths/predeclared v0.2.2 // indirect github.com/nunnatsa/ginkgolinter v0.19.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/opencontainers/cgroups v0.0.3 // indirect + github.com/opencontainers/cgroups v0.0.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/opencontainers/runc v1.2.5 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opencontainers/selinux v1.11.1 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect @@ -315,7 +314,6 @@ require ( go.opentelemetry.io/otel/sdk v1.34.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect - go.uber.org/atomic v1.11.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect @@ -339,7 +337,6 @@ require ( google.golang.org/grpc v1.72.1 // indirect google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -349,7 +346,6 @@ require ( k8s.io/apiextensions-apiserver v0.34.1 // indirect k8s.io/cli-runtime v0.34.1 // indirect k8s.io/cloud-provider v0.32.0 // indirect - k8s.io/cluster-bootstrap v0.32.3 // indirect k8s.io/component-helpers v0.34.1 // indirect k8s.io/controller-manager v0.32.1 // indirect k8s.io/cri-api v0.34.1 // indirect diff --git a/go.sum b/go.sum index f8179f8207..212f49b00f 100644 --- a/go.sum +++ b/go.sum @@ -435,34 +435,26 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/gomega v1.38.2 
h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/opencontainers/cgroups v0.0.3 h1:Jc9dWh/0YLGjdy6J/9Ln8NM5BfTA4W2BY0GMozy3aDU= -github.com/opencontainers/cgroups v0.0.3/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs= +github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA= +github.com/opencontainers/cgroups v0.0.1/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runc v1.2.5 h1:8KAkq3Wrem8bApgOHyhRI/8IeLXIfmZ6Qaw6DNSLnA4= -github.com/opencontainers/runc v1.2.5/go.mod h1:dOQeFo29xZKBNeRBI0B19mJtfHv68YgCTh1X+YphA+4= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7 h1:Z1swlS6b3Adm6RPhjqefs3DWnNFLDxRX+WC8GMXhja4= github.com/openshift-eng/openshift-tests-extension v0.0.0-20251105193959-75a0be5d9bd7/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20251106190826-ebe535b08719 h1:KEwYyKaJniwhoyLB75tAMmJn9pMlk0PUlRfrsXYOhwM= -github.com/openshift/api v0.0.0-20251106190826-ebe535b08719/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= +github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 h1:MemawsK6SpxEaE5y0NqO5sIX3yTLIIyP89w6DGKukAk= +github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 h1:9JBeIXmnHlpXTQPi7LPmu1jdxznBhAE7bb1K+3D8gxY= github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235/go.mod h1:L49W6pfrZkfOE5iC1PqEkuLkXG4W0BX4w8b+L2Bv7fM= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d h1:+sqUThLi/lmgT5/scmmjnS6+RZFtbdxRAscNfCPyLPI= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d/go.mod h1:9+FWWWLkVrnBo1eYhA/0Ehlq5JMgIAHtcB0IF+qV1AA= github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 h1:eVnkMTFnirnoUOlAUT3Hy8WriIi1JoSrilWym3Dl8Q4= github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957/go.mod h1:TBlORAAtNZ/Tl86pO7GjNXKsH/g0QAW5GnvYstdOhYI= -github.com/openshift/kubernetes v1.30.1-0.20251027205255-4e0347881cbd h1:WCCP41uY1QoqrDeykXFF/Dmf8NV3fnnAXUnh1oVFxW8= -github.com/openshift/kubernetes v1.30.1-0.20251027205255-4e0347881cbd/go.mod h1:w3+IfrXNp5RosdDXg3LB55yijJqR/FwouvVntYHQf0o= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292 h1:Uq4CTGAl32NYNK7KD35oRg7pdZOFkUF+/2wGSyR0VYA= -github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20251015171918-61114aa5a292/go.mod h1:TRSSqgXggJaDK5vtVtlQ9wEYOk32Pl+9tf0ROf3ljiM= 
-github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292 h1:ITqtj/xBNlSPChpvV4a+VQ93Sk5RFkwRx+CnIWBrZ98= -github.com/openshift/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20251015171918-61114aa5a292/go.mod h1:+bTwPbT5dZB8j6eKQHBZRMfNmY6MEryba1wQljr9VWw= github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5 h1:Gq8jCFgSrilZ2ZHjQleFZWlblikc1aaRZ0hqs+yvrP4= github.com/openshift/library-go v0.0.0-20251107090138-0de9712313a5/go.mod h1:OlFFws1AO51uzfc48MsStGE4SFMWlMZD0+f5a/zCtKI= github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20251001123353-fd5b1fb35db1 h1:PMTgifBcBRLJJiM+LgSzPDTk9/Rx4qS09OUrfpY6GBQ= @@ -685,8 +677,6 @@ go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -873,6 +863,8 @@ k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJb k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= +k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= @@ -909,6 +901,10 @@ k8s.io/kube-scheduler v0.34.1 h1:S5td6VZwC3lCqERXclerDXhJ26zYc6JroY0s03+PqJ8= k8s.io/kube-scheduler v0.34.1/go.mod h1:UiOkod/w+HKoGut9mz9ie4s4KcI82vmLFdq1iIgsmRs= k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A= +k8s.io/kubelet v0.34.1 h1:doAaTA9/Yfzbdq/u/LveZeONp96CwX9giW6b+oHn4m4= +k8s.io/kubelet v0.34.1/go.mod h1:PtV3Ese8iOM19gSooFoQT9iyRisbmJdAPuDImuccbbA= +k8s.io/kubernetes v1.34.1 h1:F3p8dtpv+i8zQoebZeK5zBqM1g9x1aIdnA5vthvcuUk= +k8s.io/kubernetes v1.34.1/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto= k8s.io/mount-utils v0.34.1 h1:zMBEFav8Rxwm54S8srzy5FxAc4KQ3X4ZcjnqTCzHmZk= k8s.io/mount-utils v0.34.1/go.mod h1:MIjjYlqJ0ziYQg0MO09kc9S96GIcMkhF/ay9MncF0GA= k8s.io/pod-security-admission v0.32.2 h1:zDfAb/t0LbNU3z0ZMHtCb1zp8x05gWCGhmBYpUptm9A= diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 58bc56ba4a..8672165d9f 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -472,6 +472,7 @@ func (optr *Operator) maoConfigFromInfrastructure() (*OperatorConfig, error) { // flags, we selectively populate the map (and therefore passed // as args) features := map[string]bool{ + string(apifeatures.FeatureGateAWSDedicatedHosts): featureGates.Enabled(apifeatures.FeatureGateAWSDedicatedHosts), 
string(apifeatures.FeatureGateMachineAPIMigration): featureGates.Enabled(apifeatures.FeatureGateMachineAPIMigration), string(apifeatures.FeatureGateAzureWorkloadIdentity): featureGates.Enabled(apifeatures.FeatureGateAzureWorkloadIdentity), string(apifeatures.FeatureGateVSphereMultiDisk): featureGates.Enabled(apifeatures.FeatureGateVSphereMultiDisk), diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go index c6e28b72e2..8188a8999a 100644 --- a/pkg/operator/operator_test.go +++ b/pkg/operator/operator_test.go @@ -45,6 +45,7 @@ var ( {Name: apifeatures.FeatureGateAzureWorkloadIdentity}, {Name: apifeatures.FeatureGateVSphereMultiDisk}, {Name: apifeatures.FeatureGateVSphereHostVMGroupZonal}, + {Name: apifeatures.FeatureGateAWSDedicatedHosts}, } enabledFeatureMap = map[string]bool{ @@ -52,6 +53,7 @@ var ( "AzureWorkloadIdentity": true, "VSphereMultiDisk": true, "VSphereHostVMGroupZonal": true, + "AWSDedicatedHosts": true, } ) diff --git a/pkg/webhooks/machine_webhook.go b/pkg/webhooks/machine_webhook.go index 3b5d491c99..026cbb0b24 100644 --- a/pkg/webhooks/machine_webhook.go +++ b/pkg/webhooks/machine_webhook.go @@ -50,6 +50,11 @@ type systemSpecifications struct { type machineArch string var ( + // AWS Variables / Defaults + + // awsDedicatedHostNamePattern is used to validate the id of a dedicated host + awsDedicatedHostNamePattern = regexp.MustCompile(`^h-[0-9a-f]{17}$`) + // Azure Defaults defaultAzureVnet = func(clusterID string) string { return fmt.Sprintf("%s-vnet", clusterID) @@ -897,6 +902,38 @@ func validateAWS(m *machinev1beta1.Machine, config *admissionConfig) (bool, []st } } + // Dedicated host support. + // Check if host placement is configured. If so, then we need to determine placement affinity and validate configs. + if providerSpec.HostPlacement != nil { + klog.V(4).Infof("Validating AWS Host Placement") + placement := *providerSpec.HostPlacement + if placement.Affinity == nil { + errs = append(errs, field.Required(field.NewPath("spec.hostPlacement.affinity"), "affinity is required and must be set to either AnyAvailable or DedicatedHost")) + } else { + switch *placement.Affinity { + case machinev1beta1.HostAffinityAnyAvailable: + // Cannot have DedicatedHost set + if placement.DedicatedHost != nil { + errs = append(errs, field.Forbidden(field.NewPath("spec.hostPlacement.dedicatedHost"), "dedicatedHost is required when affinity is DedicatedHost, and forbidden otherwise")) + } + case machinev1beta1.HostAffinityDedicatedHost: + // We need to make sure DedicatedHost is set with a HostID + if placement.DedicatedHost == nil { + errs = append(errs, field.Required(field.NewPath("spec.hostPlacement.dedicatedHost"), "dedicatedHost is required when affinity is DedicatedHost, and forbidden otherwise")) + } else { + // If not set, return required error. If it does not match pattern, return pattern failure message. 
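+					// For reference, awsDedicatedHostNamePattern accepts IDs such as "h-0123456789abcdef0": the literal "h-" prefix followed by exactly 17 lowercase hexadecimal characters.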
+ if placement.DedicatedHost.ID == "" { + errs = append(errs, field.Required(field.NewPath("spec.hostPlacement.dedicatedHost.id"), "id is required and must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)")) + } else if awsDedicatedHostNamePattern.FindStringSubmatch(placement.DedicatedHost.ID) == nil { + errs = append(errs, field.Invalid(field.NewPath("spec.hostPlacement.dedicatedHost.id"), placement.DedicatedHost.ID, "id must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)")) + } + } + default: + errs = append(errs, field.Invalid(field.NewPath("spec.hostPlacement.affinity"), placement.Affinity, "affinity must be either AnyAvailable or DedicatedHost")) + } + } + } + if len(errs) > 0 { return false, warnings, errs } diff --git a/pkg/webhooks/machine_webhook_test.go b/pkg/webhooks/machine_webhook_test.go index 56ab60b85b..2741451c74 100644 --- a/pkg/webhooks/machine_webhook_test.go +++ b/pkg/webhooks/machine_webhook_test.go @@ -316,6 +316,141 @@ func TestMachineCreation(t *testing.T) { }, expectedError: "", }, + { + name: "configure host placement with AnyAvailable affinity", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ + ID: ptr.To[string]("ami"), + }, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityAnyAvailable), + }, + }, + }, + expectedError: "", + }, + { + name: "configure host placement with invalid affinity", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ + ID: ptr.To[string]("ami"), + }, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinity("invalid")), + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.affinity: Invalid value: \"invalid\": affinity must be either AnyAvailable or DedicatedHost", + }, + { + name: "configure host placement dedicatedHost without dedicatedHost", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ + ID: ptr.To[string]("ami"), + }, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityDedicatedHost), + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.dedicatedHost: Required value: dedicatedHost is required when affinity is DedicatedHost", + }, + { + name: "configure host placement dedicatedHost with valid ID", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ + ID: ptr.To[string]("ami"), + }, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityDedicatedHost), + DedicatedHost: &machinev1beta1.DedicatedHost{ID: "h-1234567890abcdef0"}, + }, + }, + }, + expectedError: "", + }, + { + name: "configure host placement AnyAvailable forbids 
dedicatedHost", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ID: ptr.To[string]("ami")}, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityAnyAvailable), + DedicatedHost: &machinev1beta1.DedicatedHost{ID: "h-09dcf61cb388b0149"}, + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.dedicatedHost: Forbidden: dedicatedHost is required when affinity is DedicatedHost, and forbidden otherwise", + }, + { + name: "configure host placement dedicatedHost with empty ID", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ID: ptr.To[string]("ami")}, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityDedicatedHost), + DedicatedHost: &machinev1beta1.DedicatedHost{ID: ""}, + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.dedicatedHost.id: Required value: id is required and must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)", + }, + { + name: "configure host placement dedicatedHost with ID not set", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ID: ptr.To[string]("ami")}, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityDedicatedHost), + DedicatedHost: &machinev1beta1.DedicatedHost{}, + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.dedicatedHost.id: Required value: id is required and must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)", + }, + { + name: "configure host placement dedicatedHost with invalid ID", + platformType: osconfigv1.AWSPlatformType, + clusterID: "aws-cluster", + providerSpecValue: &kruntime.RawExtension{ + Object: &machinev1beta1.AWSMachineProviderConfig{ + AMI: machinev1beta1.AWSResourceReference{ID: ptr.To[string]("ami")}, + InstanceType: "test", + HostPlacement: &machinev1beta1.HostPlacement{ + Affinity: ptr.To(machinev1beta1.HostAffinityDedicatedHost), + DedicatedHost: &machinev1beta1.DedicatedHost{ + ID: "invalid", + }, + }, + }, + }, + expectedError: "admission webhook \"validation.machine.machine.openshift.io\" denied the request: spec.hostPlacement.dedicatedHost.id: Invalid value: \"invalid\": id must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)", + }, { name: "with Azure and a nil provider spec value", platformType: osconfigv1.AzurePlatformType, diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml deleted file mode 100644 index 3489e38713..0000000000 --- a/vendor/github.com/fatih/camelcase/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: 1.x - diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md deleted file mode 100644 index 
aa4a536caf..0000000000 --- a/vendor/github.com/fatih/camelcase/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md deleted file mode 100644 index 105a6ae33d..0000000000 --- a/vendor/github.com/fatih/camelcase/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) - -CamelCase is a Golang (Go) package to split the words of a camelcase type -string into a slice of words. It can be used to convert a camelcase word (lower -or upper case) into any type of word. - -## Splitting rules: - -1. If string is not valid UTF-8, return it without splitting as - single item array. -2. Assign all unicode characters into one of 4 sets: lower case - letters, upper case letters, numbers, and all other characters. -3. Iterate through characters of string, introducing splits - between adjacent characters that belong to different sets. -4. Iterate through array of split strings, and if a given string - is upper case: - * if subsequent string is lower case: - * move last character of upper case string to beginning of - lower case string - -## Install - -```bash -go get github.com/fatih/camelcase -``` - -## Usage and examples - -```go -splitted := camelcase.Split("GolangPackage") - -fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" -``` - -Both lower camel case and upper camel case are supported. 
For more info please -check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) - -Below are some example cases: - -``` -"" => [] -"lowercase" => ["lowercase"] -"Class" => ["Class"] -"MyClass" => ["My", "Class"] -"MyC" => ["My", "C"] -"HTML" => ["HTML"] -"PDFLoader" => ["PDF", "Loader"] -"AString" => ["A", "String"] -"SimpleXMLParser" => ["Simple", "XML", "Parser"] -"vimRPCPlugin" => ["vim", "RPC", "Plugin"] -"GL11Version" => ["GL", "11", "Version"] -"99Bottles" => ["99", "Bottles"] -"May5" => ["May", "5"] -"BFG9000" => ["BFG", "9000"] -"BöseÜberraschung" => ["Böse", "Überraschung"] -"Two spaces" => ["Two", " ", "spaces"] -"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go deleted file mode 100644 index 02160c9a43..0000000000 --- a/vendor/github.com/fatih/camelcase/camelcase.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package camelcase is a micro package to split the words of a camelcase type -// string into a slice of words. -package camelcase - -import ( - "unicode" - "unicode/utf8" -) - -// Split splits the camelcase word and returns a list of words. It also -// supports digits. Both lower camel case and upper camel case are supported. -// For more info please check: http://en.wikipedia.org/wiki/CamelCase -// -// Examples -// -// "" => [""] -// "lowercase" => ["lowercase"] -// "Class" => ["Class"] -// "MyClass" => ["My", "Class"] -// "MyC" => ["My", "C"] -// "HTML" => ["HTML"] -// "PDFLoader" => ["PDF", "Loader"] -// "AString" => ["A", "String"] -// "SimpleXMLParser" => ["Simple", "XML", "Parser"] -// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] -// "GL11Version" => ["GL", "11", "Version"] -// "99Bottles" => ["99", "Bottles"] -// "May5" => ["May", "5"] -// "BFG9000" => ["BFG", "9000"] -// "BöseÜberraschung" => ["Böse", "Überraschung"] -// "Two spaces" => ["Two", " ", "spaces"] -// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] -// -// Splitting rules -// -// 1) If string is not valid UTF-8, return it without splitting as -// single item array. -// 2) Assign all unicode characters into one of 4 sets: lower case -// letters, upper case letters, numbers, and all other characters. -// 3) Iterate through characters of string, introducing splits -// between adjacent characters that belong to different sets. -// 4) Iterate through array of split strings, and if a given string -// is upper case: -// if subsequent string is lower case: -// move last character of upper case string to beginning of -// lower case string -func Split(src string) (entries []string) { - // don't split invalid utf8 - if !utf8.ValidString(src) { - return []string{src} - } - entries = []string{} - var runes [][]rune - lastClass := 0 - class := 0 - // split into fields based on class of unicode character - for _, r := range src { - switch true { - case unicode.IsLower(r): - class = 1 - case unicode.IsUpper(r): - class = 2 - case unicode.IsDigit(r): - class = 3 - default: - class = 4 - } - if class == lastClass { - runes[len(runes)-1] = append(runes[len(runes)-1], r) - } else { - runes = append(runes, []rune{r}) - } - lastClass = class - } - // handle upper case -> lower case sequences, e.g. - // "PDFL", "oader" -> "PDF", "Loader" - for i := 0; i < len(runes)-1; i++ { - if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { - runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) 
- runes[i] = runes[i][:len(runes[i])-1] - } - } - // construct []string from results - for _, s := range runes { - if len(s) > 0 { - entries = append(entries, string(s)) - } - } - return -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/cache.go b/vendor/github.com/onsi/gomega/gmeasure/cache.go deleted file mode 100644 index 4717e8df33..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/cache.go +++ /dev/null @@ -1,202 +0,0 @@ -package gmeasure - -import ( - "crypto/md5" - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/onsi/gomega/internal/gutil" -) - -const CACHE_EXT = ".gmeasure-cache" - -/* -ExperimentCache provides a director-and-file based cache of experiments -*/ -type ExperimentCache struct { - Path string -} - -/* -NewExperimentCache creates and initializes a new cache. Path must point to a directory (if path does not exist, NewExperimentCache will create a directory at path). - -Cached Experiments are stored as separate files in the cache directory - the filename is a hash of the Experiment name. Each file contains two JSON-encoded objects - a CachedExperimentHeader that includes the experiment's name and cache version number, and then the Experiment itself. -*/ -func NewExperimentCache(path string) (ExperimentCache, error) { - stat, err := os.Stat(path) - if os.IsNotExist(err) { - err := os.MkdirAll(path, 0777) - if err != nil { - return ExperimentCache{}, err - } - } else if !stat.IsDir() { - return ExperimentCache{}, fmt.Errorf("%s is not a directory", path) - } - - return ExperimentCache{ - Path: path, - }, nil -} - -/* -CachedExperimentHeader captures the name of the Cached Experiment and its Version -*/ -type CachedExperimentHeader struct { - Name string - Version int -} - -func (cache ExperimentCache) hashOf(name string) string { - return fmt.Sprintf("%x", md5.Sum([]byte(name))) -} - -func (cache ExperimentCache) readHeader(filename string) (CachedExperimentHeader, error) { - out := CachedExperimentHeader{} - f, err := os.Open(filepath.Join(cache.Path, filename)) - if err != nil { - return out, err - } - defer f.Close() - err = json.NewDecoder(f).Decode(&out) - return out, err -} - -/* -List returns a list of all Cached Experiments found in the cache. -*/ -func (cache ExperimentCache) List() ([]CachedExperimentHeader, error) { - var out []CachedExperimentHeader - names, err := gutil.ReadDir(cache.Path) - if err != nil { - return out, err - } - for _, name := range names { - if filepath.Ext(name) != CACHE_EXT { - continue - } - header, err := cache.readHeader(name) - if err != nil { - return out, err - } - out = append(out, header) - } - return out, nil -} - -/* -Clear empties out the cache - this will delete any and all detected cache files in the cache directory. Use with caution! -*/ -func (cache ExperimentCache) Clear() error { - names, err := gutil.ReadDir(cache.Path) - if err != nil { - return err - } - for _, name := range names { - if filepath.Ext(name) != CACHE_EXT { - continue - } - err := os.Remove(filepath.Join(cache.Path, name)) - if err != nil { - return err - } - } - return nil -} - -/* -Load fetches an experiment from the cache. Lookup occurs by name. Load requires that the version number in the cache is equal to or greater than the passed-in version. - -If an experiment with corresponding name and version >= the passed-in version is found, it is unmarshaled and returned. - -If no experiment is found, or the cached version is smaller than the passed-in version, Load will return nil. 
- -When paired with Ginkgo you can cache experiments and prevent potentially expensive recomputation with this pattern: - - const EXPERIMENT_VERSION = 1 //bump this to bust the cache and recompute _all_ experiments - - Describe("some experiments", func() { - var cache gmeasure.ExperimentCache - var experiment *gmeasure.Experiment - - BeforeEach(func() { - cache = gmeasure.NewExperimentCache("./gmeasure-cache") - name := CurrentSpecReport().LeafNodeText - experiment = cache.Load(name, EXPERIMENT_VERSION) - if experiment != nil { - AddReportEntry(experiment) - Skip("cached") - } - experiment = gmeasure.NewExperiment(name) - AddReportEntry(experiment) - }) - - It("foo runtime", func() { - experiment.SampleDuration("runtime", func() { - //do stuff - }, gmeasure.SamplingConfig{N:100}) - }) - - It("bar runtime", func() { - experiment.SampleDuration("runtime", func() { - //do stuff - }, gmeasure.SamplingConfig{N:100}) - }) - - AfterEach(func() { - if !CurrentSpecReport().State.Is(types.SpecStateSkipped) { - cache.Save(experiment.Name, EXPERIMENT_VERSION, experiment) - } - }) - }) -*/ -func (cache ExperimentCache) Load(name string, version int) *Experiment { - path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) - f, err := os.Open(path) - if err != nil { - return nil - } - defer f.Close() - dec := json.NewDecoder(f) - header := CachedExperimentHeader{} - dec.Decode(&header) - if header.Version < version { - return nil - } - out := NewExperiment("") - err = dec.Decode(out) - if err != nil { - return nil - } - return out -} - -/* -Save stores the passed-in experiment to the cache with the passed-in name and version. -*/ -func (cache ExperimentCache) Save(name string, version int, experiment *Experiment) error { - path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - enc := json.NewEncoder(f) - err = enc.Encode(CachedExperimentHeader{ - Name: name, - Version: version, - }) - if err != nil { - return err - } - return enc.Encode(experiment) -} - -/* -Delete removes the experiment with the passed-in name from the cache -*/ -func (cache ExperimentCache) Delete(name string) error { - path := filepath.Join(cache.Path, cache.hashOf(name)+CACHE_EXT) - return os.Remove(path) -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/enum_support.go b/vendor/github.com/onsi/gomega/gmeasure/enum_support.go deleted file mode 100644 index b5404f9620..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/enum_support.go +++ /dev/null @@ -1,43 +0,0 @@ -package gmeasure - -import "encoding/json" - -type enumSupport struct { - toString map[uint]string - toEnum map[string]uint - maxEnum uint -} - -func newEnumSupport(toString map[uint]string) enumSupport { - toEnum, maxEnum := map[string]uint{}, uint(0) - for k, v := range toString { - toEnum[v] = k - if maxEnum < k { - maxEnum = k - } - } - return enumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} -} - -func (es enumSupport) String(e uint) string { - if e > es.maxEnum { - return es.toString[0] - } - return es.toString[e] -} - -func (es enumSupport) UnmarshJSON(b []byte) (uint, error) { - var dec string - if err := json.Unmarshal(b, &dec); err != nil { - return 0, err - } - out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway - return out, nil -} - -func (es enumSupport) MarshJSON(e uint) ([]byte, error) { - if e == 0 || e > es.maxEnum { - return json.Marshal(nil) - } - return json.Marshal(es.toString[e]) -} diff --git 
a/vendor/github.com/onsi/gomega/gmeasure/experiment.go b/vendor/github.com/onsi/gomega/gmeasure/experiment.go deleted file mode 100644 index 9d1b74a78b..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/experiment.go +++ /dev/null @@ -1,533 +0,0 @@ -/* -Package gomega/gmeasure provides support for benchmarking and measuring code. It is intended as a more robust replacement for Ginkgo V1's Measure nodes. - -gmeasure is organized around the metaphor of an Experiment that can record multiple Measurements. A Measurement is a named collection of data points and gmeasure supports -measuring Values (of type float64) and Durations (of type time.Duration). - -Experiments allows the user to record Measurements directly by passing in Values (i.e. float64) or Durations (i.e. time.Duration) -or to measure measurements by passing in functions to measure. When measuring functions Experiments take care of timing the duration of functions (for Duration measurements) -and/or recording returned values (for Value measurements). Experiments also support sampling functions - when told to sample Experiments will run functions repeatedly -and measure and record results. The sampling behavior is configured by passing in a SamplingConfig that can control the maximum number of samples, the maximum duration for sampling (or both) -and the number of concurrent samples to take. - -Measurements can be decorated with additional information. This is supported by passing in special typed decorators when recording measurements. These include: - -- Units("any string") - to attach units to a Value Measurement (Duration Measurements always have units of "duration") -- Style("any Ginkgo color style string") - to attach styling to a Measurement. This styling is used when rendering console information about the measurement in reports. Color style strings are documented at TODO. -- Precision(integer or time.Duration) - to attach precision to a Measurement. This controls how many decimal places to show for Value Measurements and how to round Duration Measurements when rendering them to screen. - -In addition, individual data points in a Measurement can be annotated with an Annotation("any string"). The annotation is associated with the individual data point and is intended to convey additional context about the data point. - -Once measurements are complete, an Experiment can generate a comprehensive report by calling its String() or ColorableString() method. - -Users can also access and analyze the resulting Measurements directly. Use Experiment.Get(NAME) to fetch the Measurement named NAME. This returned struct will have fields containing -all the data points and annotations recorded by the experiment. You can subsequently fetch the Measurement.Stats() to get a Stats struct that contains basic statistical information about the -Measurement (min, max, median, mean, standard deviation). You can order these Stats objects using RankStats() to identify best/worst performers across multiple experiments or measurements. - -gmeasure also supports caching Experiments via an ExperimentCache. The cache supports storing and retrieving experiments by name and version. This allows you to rerun code without -repeating expensive experiments that may not have changed (which can be controlled by the cache version number). It also enables you to compare new experiment runs with older runs to detect -variations in performance/behavior. 
- -When used with Ginkgo, you can emit experiment reports and encode them in test reports easily using Ginkgo V2's support for Report Entries. -Simply pass your experiment to AddReportEntry to get a report every time the tests run. You can also use AddReportEntry with Measurements to emit all the captured data -and Rankings to emit measurement summaries in rank order. - -Finally, Experiments provide an additional mechanism to measure durations called a Stopwatch. The Stopwatch makes it easy to pepper code with statements that measure elapsed time across -different sections of code and can be useful when debugging or evaluating bottlenecks in a given codepath. -*/ -package gmeasure - -import ( - "fmt" - "math" - "reflect" - "sync" - "time" - - "github.com/onsi/gomega/gmeasure/table" -) - -/* -SamplingConfig configures the Sample family of experiment methods. -These methods invoke passed-in functions repeatedly to sample and record a given measurement. -SamplingConfig is used to control the maximum number of samples or time spent sampling (or both). When both are specified sampling ends as soon as one of the conditions is met. -SamplingConfig can also ensure a minimum interval between samples and can enable concurrent sampling. -*/ -type SamplingConfig struct { - // N - the maximum number of samples to record - N int - // Duration - the maximum amount of time to spend recording samples - Duration time.Duration - // MinSamplingInterval - the minimum time that must elapse between samplings. It is an error to specify both MinSamplingInterval and NumParallel. - MinSamplingInterval time.Duration - // NumParallel - the number of parallel workers to spin up to record samples. It is an error to specify both MinSamplingInterval and NumParallel. - NumParallel int -} - -// The Units decorator allows you to specify units (an arbitrary string) when recording values. It is ignored when recording durations. -// -// e := gmeasure.NewExperiment("My Experiment") -// e.RecordValue("length", 3.141, gmeasure.Units("inches")) -// -// Units are only set the first time a value of a given name is recorded. In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "inches" units even if a new set of Units("UNIT") are passed in later. -type Units string - -// The Annotation decorator allows you to attach an annotation to a given recorded data-point: -// -// For example: -// -// e := gmeasure.NewExperiment("My Experiment") -// e.RecordValue("length", 3.141, gmeasure.Annotation("bob")) -// e.RecordValue("length", 2.71, gmeasure.Annotation("jane")) -// -// ...will result in a Measurement named "length" that records two values )[3.141, 2.71]) annotation with (["bob", "jane"]) -type Annotation string - -// The Style decorator allows you to associate a style with a measurement. This is used to generate colorful console reports using Ginkgo V2's -// console formatter. Styles are strings in curly brackets that correspond to a color or style. -// -// For example: -// -// e := gmeasure.NewExperiment("My Experiment") -// e.RecordValue("length", 3.141, gmeasure.Style("{{blue}}{{bold}}")) -// e.RecordValue("length", 2.71) -// e.RecordDuration("cooking time", 3 * time.Second, gmeasure.Style("{{red}}{{underline}}")) -// e.RecordDuration("cooking time", 2 * time.Second) -// -// will emit a report with blue bold entries for the length measurement and red underlined entries for the cooking time measurement. -// -// Units are only set the first time a value or duration of a given name is recorded. 
In the example above any subsequent calls to e.RecordValue("length", X) will maintain the "{{blue}}{{bold}}" style even if a new Style is passed in later. -type Style string - -// The PrecisionBundle decorator controls the rounding of value and duration measurements. See Precision(). -type PrecisionBundle struct { - Duration time.Duration - ValueFormat string -} - -// Precision() allows you to specify the precision of a value or duration measurement - this precision is used when rendering the measurement to screen. -// -// To control the precision of Value measurements, pass Precision an integer. This will denote the number of decimal places to render (equivalen to the format string "%.Nf") -// To control the precision of Duration measurements, pass Precision a time.Duration. Duration measurements will be rounded oo the nearest time.Duration when rendered. -// -// For example: -// -// e := gmeasure.NewExperiment("My Experiment") -// e.RecordValue("length", 3.141, gmeasure.Precision(2)) -// e.RecordValue("length", 2.71) -// e.RecordDuration("cooking time", 3214 * time.Millisecond, gmeasure.Precision(100*time.Millisecond)) -// e.RecordDuration("cooking time", 2623 * time.Millisecond) -func Precision(p any) PrecisionBundle { - out := DefaultPrecisionBundle - switch reflect.TypeOf(p) { - case reflect.TypeOf(time.Duration(0)): - out.Duration = p.(time.Duration) - case reflect.TypeOf(int(0)): - out.ValueFormat = fmt.Sprintf("%%.%df", p.(int)) - default: - panic("invalid precision type, must be time.Duration or int") - } - return out -} - -// DefaultPrecisionBundle captures the default precisions for Vale and Duration measurements. -var DefaultPrecisionBundle = PrecisionBundle{ - Duration: 100 * time.Microsecond, - ValueFormat: "%.3f", -} - -type extractedDecorations struct { - annotation Annotation - units Units - precisionBundle PrecisionBundle - style Style -} - -func extractDecorations(args []any) extractedDecorations { - var out extractedDecorations - out.precisionBundle = DefaultPrecisionBundle - - for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(out.annotation): - out.annotation = arg.(Annotation) - case reflect.TypeOf(out.units): - out.units = arg.(Units) - case reflect.TypeOf(out.precisionBundle): - out.precisionBundle = arg.(PrecisionBundle) - case reflect.TypeOf(out.style): - out.style = arg.(Style) - default: - panic(fmt.Sprintf("unrecognized argument %#v", arg)) - } - } - - return out -} - -/* -Experiment is gmeasure's core data type. You use experiments to record Measurements and generate reports. -Experiments are thread-safe and all methods can be called from multiple goroutines. -*/ -type Experiment struct { - Name string - - // Measurements includes all Measurements recorded by this experiment. You should access them by name via Get() and GetStats() - Measurements Measurements - lock *sync.Mutex -} - -/* -NexExperiment creates a new experiment with the passed-in name. - -When using Ginkgo we recommend immediately registering the experiment as a ReportEntry: - - experiment = NewExperiment("My Experiment") - AddReportEntry(experiment.Name, experiment) - -this will ensure an experiment report is emitted as part of the test output and exported with any test reports. 
-*/ -func NewExperiment(name string) *Experiment { - experiment := &Experiment{ - Name: name, - lock: &sync.Mutex{}, - } - return experiment -} - -func (e *Experiment) report(enableStyling bool) string { - t := table.NewTable() - t.TableStyle.EnableTextStyling = enableStyling - t.AppendRow(table.R( - table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"), - table.Divider("="), - "{{bold}}", - )) - - for _, measurement := range e.Measurements { - r := table.R(measurement.Style) - t.AppendRow(r) - switch measurement.Type { - case MeasurementTypeNote: - r.AppendCell(table.C(measurement.Note)) - case MeasurementTypeValue, MeasurementTypeDuration: - name := measurement.Name - if measurement.Units != "" { - name += " [" + measurement.Units + "]" - } - r.AppendCell(table.C(name)) - r.AppendCell(measurement.Stats().cells()...) - } - } - - out := e.Name + "\n" - if enableStyling { - out = "{{bold}}" + out + "{{/}}" - } - out += t.Render() - return out -} - -/* -ColorableString returns a Ginkgo formatted summary of the experiment and all its Measurements. -It is called automatically by Ginkgo's reporting infrastructure when the Experiment is registered as a ReportEntry via AddReportEntry. -*/ -func (e *Experiment) ColorableString() string { - return e.report(true) -} - -/* -ColorableString returns an unformatted summary of the experiment and all its Measurements. -*/ -func (e *Experiment) String() string { - return e.report(false) -} - -/* -RecordNote records a Measurement of type MeasurementTypeNote - this is simply a textual note to annotate the experiment. It will be emitted in any experiment reports. - -RecordNote supports the Style() decoration. -*/ -func (e *Experiment) RecordNote(note string, args ...any) { - decorations := extractDecorations(args) - - e.lock.Lock() - defer e.lock.Unlock() - e.Measurements = append(e.Measurements, Measurement{ - ExperimentName: e.Name, - Type: MeasurementTypeNote, - Note: note, - Style: string(decorations.style), - }) -} - -/* -RecordDuration records the passed-in duration on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. - -RecordDuration supports the Style(), Precision(), and Annotation() decorations. -*/ -func (e *Experiment) RecordDuration(name string, duration time.Duration, args ...any) { - decorations := extractDecorations(args) - e.recordDuration(name, duration, decorations) -} - -/* -MeasureDuration runs the passed-in callback and times how long it takes to complete. The resulting duration is recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. - -MeasureDuration supports the Style(), Precision(), and Annotation() decorations. -*/ -func (e *Experiment) MeasureDuration(name string, callback func(), args ...any) time.Duration { - t := time.Now() - callback() - duration := time.Since(t) - e.RecordDuration(name, duration, args...) - return duration -} - -/* -SampleDuration samples the passed-in callback and times how long it takes to complete each sample. -The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. - -The callback is given a zero-based index that increments by one between samples. The Sampling is configured via the passed-in SamplingConfig - -SampleDuration supports the Style(), Precision(), and Annotation() decorations. 
When passed an Annotation() the same annotation is applied to all sample measurements. -*/ -func (e *Experiment) SampleDuration(name string, callback func(idx int), samplingConfig SamplingConfig, args ...any) { - decorations := extractDecorations(args) - e.Sample(func(idx int) { - t := time.Now() - callback(idx) - duration := time.Since(t) - e.recordDuration(name, duration, decorations) - }, samplingConfig) -} - -/* -SampleDuration samples the passed-in callback and times how long it takes to complete each sample. -The resulting durations are recorded on a Duration Measurement with the passed-in name. If the Measurement does not exist it is created. - -The callback is given a zero-based index that increments by one between samples. The callback must return an Annotation - this annotation is attached to the measured duration. - -# The Sampling is configured via the passed-in SamplingConfig - -SampleAnnotatedDuration supports the Style() and Precision() decorations. -*/ -func (e *Experiment) SampleAnnotatedDuration(name string, callback func(idx int) Annotation, samplingConfig SamplingConfig, args ...any) { - decorations := extractDecorations(args) - e.Sample(func(idx int) { - t := time.Now() - decorations.annotation = callback(idx) - duration := time.Since(t) - e.recordDuration(name, duration, decorations) - }, samplingConfig) -} - -func (e *Experiment) recordDuration(name string, duration time.Duration, decorations extractedDecorations) { - e.lock.Lock() - defer e.lock.Unlock() - idx := e.Measurements.IdxWithName(name) - if idx == -1 { - measurement := Measurement{ - ExperimentName: e.Name, - Type: MeasurementTypeDuration, - Name: name, - Units: "duration", - Durations: []time.Duration{duration}, - PrecisionBundle: decorations.precisionBundle, - Style: string(decorations.style), - Annotations: []string{string(decorations.annotation)}, - } - e.Measurements = append(e.Measurements, measurement) - } else { - if e.Measurements[idx].Type != MeasurementTypeDuration { - panic(fmt.Sprintf("attempting to record duration with name '%s'. That name is already in-use for recording values.", name)) - } - e.Measurements[idx].Durations = append(e.Measurements[idx].Durations, duration) - e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation)) - } -} - -/* -NewStopwatch() returns a stopwatch configured to record duration measurements with this experiment. -*/ -func (e *Experiment) NewStopwatch() *Stopwatch { - return newStopwatch(e) -} - -/* -RecordValue records the passed-in value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created. - -RecordValue supports the Style(), Units(), Precision(), and Annotation() decorations. -*/ -func (e *Experiment) RecordValue(name string, value float64, args ...any) { - decorations := extractDecorations(args) - e.recordValue(name, value, decorations) -} - -/* -MeasureValue runs the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created. - -MeasureValue supports the Style(), Units(), Precision(), and Annotation() decorations. -*/ -func (e *Experiment) MeasureValue(name string, callback func() float64, args ...any) float64 { - value := callback() - e.RecordValue(name, value, args...) - return value -} - -/* -SampleValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created. 
- -The callback is given a zero-based index that increments by one between samples. The callback must return a float64. The Sampling is configured via the passed-in SamplingConfig - -SampleValue supports the Style(), Units(), Precision(), and Annotation() decorations. When passed an Annotation() the same annotation is applied to all sample measurements. -*/ -func (e *Experiment) SampleValue(name string, callback func(idx int) float64, samplingConfig SamplingConfig, args ...any) { - decorations := extractDecorations(args) - e.Sample(func(idx int) { - value := callback(idx) - e.recordValue(name, value, decorations) - }, samplingConfig) -} - -/* -SampleAnnotatedValue samples the passed-in callback and records the return value on a Value Measurement with the passed-in name. If the Measurement does not exist it is created. - -The callback is given a zero-based index that increments by one between samples. The callback must return a float64 and an Annotation - the annotation is attached to the recorded value. - -# The Sampling is configured via the passed-in SamplingConfig - -SampleValue supports the Style(), Units(), and Precision() decorations. -*/ -func (e *Experiment) SampleAnnotatedValue(name string, callback func(idx int) (float64, Annotation), samplingConfig SamplingConfig, args ...any) { - decorations := extractDecorations(args) - e.Sample(func(idx int) { - var value float64 - value, decorations.annotation = callback(idx) - e.recordValue(name, value, decorations) - }, samplingConfig) -} - -func (e *Experiment) recordValue(name string, value float64, decorations extractedDecorations) { - e.lock.Lock() - defer e.lock.Unlock() - idx := e.Measurements.IdxWithName(name) - if idx == -1 { - measurement := Measurement{ - ExperimentName: e.Name, - Type: MeasurementTypeValue, - Name: name, - Style: string(decorations.style), - Units: string(decorations.units), - PrecisionBundle: decorations.precisionBundle, - Values: []float64{value}, - Annotations: []string{string(decorations.annotation)}, - } - e.Measurements = append(e.Measurements, measurement) - } else { - if e.Measurements[idx].Type != MeasurementTypeValue { - panic(fmt.Sprintf("attempting to record value with name '%s'. That name is already in-use for recording durations.", name)) - } - e.Measurements[idx].Values = append(e.Measurements[idx].Values, value) - e.Measurements[idx].Annotations = append(e.Measurements[idx].Annotations, string(decorations.annotation)) - } -} - -/* -Sample samples the passed-in callback repeatedly. The sampling is governed by the passed in SamplingConfig. - -The SamplingConfig can limit the total number of samples and/or the total time spent sampling the callback. -The SamplingConfig can also instruct Sample to run with multiple concurrent workers. - -The callback is called with a zero-based index that incerements by one between samples. 
-*/ -func (e *Experiment) Sample(callback func(idx int), samplingConfig SamplingConfig) { - if samplingConfig.N == 0 && samplingConfig.Duration == 0 { - panic("you must specify at least one of SamplingConfig.N and SamplingConfig.Duration") - } - if samplingConfig.MinSamplingInterval > 0 && samplingConfig.NumParallel > 1 { - panic("you cannot specify both SamplingConfig.MinSamplingInterval and SamplingConfig.NumParallel") - } - maxTime := time.Now().Add(100000 * time.Hour) - if samplingConfig.Duration > 0 { - maxTime = time.Now().Add(samplingConfig.Duration) - } - maxN := math.MaxInt32 - if samplingConfig.N > 0 { - maxN = samplingConfig.N - } - numParallel := 1 - if samplingConfig.NumParallel > numParallel { - numParallel = samplingConfig.NumParallel - } - minSamplingInterval := samplingConfig.MinSamplingInterval - - work := make(chan int) - var wg sync.WaitGroup - defer func() { - close(work) - wg.Wait() - }() - if numParallel > 1 { - wg.Add(numParallel) - for worker := 0; worker < numParallel; worker++ { - go func() { - for idx := range work { - callback(idx) - } - wg.Done() - }() - } - } - - idx := 0 - var avgDt time.Duration - for { - t := time.Now() - if numParallel > 1 { - work <- idx - } else { - callback(idx) - } - dt := time.Since(t) - if numParallel == 1 && dt < minSamplingInterval { - time.Sleep(minSamplingInterval - dt) - dt = time.Since(t) - } - if idx >= numParallel { - avgDt = (avgDt*time.Duration(idx-numParallel) + dt) / time.Duration(idx-numParallel+1) - } - idx += 1 - if idx >= maxN { - return - } - if time.Now().Add(avgDt).After(maxTime) { - return - } - } -} - -/* -Get returns the Measurement with the associated name. If no Measurement is found a zero Measurement{} is returned. -*/ -func (e *Experiment) Get(name string) Measurement { - e.lock.Lock() - defer e.lock.Unlock() - idx := e.Measurements.IdxWithName(name) - if idx == -1 { - return Measurement{} - } - return e.Measurements[idx] -} - -/* -GetStats returns the Stats for the Measurement with the associated name. If no Measurement is found a zero Stats{} is returned. - -experiment.GetStats(name) is equivalent to experiment.Get(name).Stats() -*/ -func (e *Experiment) GetStats(name string) Stats { - measurement := e.Get(name) - e.lock.Lock() - defer e.lock.Unlock() - return measurement.Stats() -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/measurement.go b/vendor/github.com/onsi/gomega/gmeasure/measurement.go deleted file mode 100644 index 103d3ea9d0..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/measurement.go +++ /dev/null @@ -1,235 +0,0 @@ -package gmeasure - -import ( - "fmt" - "math" - "sort" - "time" - - "github.com/onsi/gomega/gmeasure/table" -) - -type MeasurementType uint - -const ( - MeasurementTypeInvalid MeasurementType = iota - MeasurementTypeNote - MeasurementTypeDuration - MeasurementTypeValue -) - -var letEnumSupport = newEnumSupport(map[uint]string{uint(MeasurementTypeInvalid): "INVALID LOG ENTRY TYPE", uint(MeasurementTypeNote): "Note", uint(MeasurementTypeDuration): "Duration", uint(MeasurementTypeValue): "Value"}) - -func (s MeasurementType) String() string { return letEnumSupport.String(uint(s)) } -func (s *MeasurementType) UnmarshalJSON(b []byte) error { - out, err := letEnumSupport.UnmarshJSON(b) - *s = MeasurementType(out) - return err -} -func (s MeasurementType) MarshalJSON() ([]byte, error) { return letEnumSupport.MarshJSON(uint(s)) } - -/* -Measurement records all captured data for a given measurement. 
You generally don't make Measurements directly - but you can fetch them from Experiments using Get(). - -When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report. -*/ -type Measurement struct { - // Type is the MeasurementType - one of MeasurementTypeNote, MeasurementTypeDuration, or MeasurementTypeValue - Type MeasurementType - - // ExperimentName is the name of the experiment that this Measurement is associated with - ExperimentName string - - // If Type is MeasurementTypeNote, Note is populated with the note text. - Note string - - // If Type is MeasurementTypeDuration or MeasurementTypeValue, Name is the name of the recorded measurement - Name string - - // Style captures the styling information (if any) for this Measurement - Style string - - // Units capture the units (if any) for this Measurement. Units is set to "duration" if the Type is MeasurementTypeDuration - Units string - - // PrecisionBundle captures the precision to use when rendering data for this Measurement. - // If Type is MeasurementTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation. - // If Type is MeasurementTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation - PrecisionBundle PrecisionBundle - - // If Type is MeasurementTypeDuration, Durations will contain all durations recorded for this measurement - Durations []time.Duration - - // If Type is MeasurementTypeValue, Values will contain all float64s recorded for this measurement - Values []float64 - - // If Type is MeasurementTypeDuration or MeasurementTypeValue then Annotations will include string annotations for all recorded Durations or Values. 
- // If the user does not pass-in an Annotation() decoration for a particular value or duration, the corresponding entry in the Annotations slice will be the empty string "" - Annotations []string -} - -type Measurements []Measurement - -func (m Measurements) IdxWithName(name string) int { - for idx, measurement := range m { - if measurement.Name == name { - return idx - } - } - - return -1 -} - -func (m Measurement) report(enableStyling bool) string { - out := "" - style := m.Style - if !enableStyling { - style = "" - } - switch m.Type { - case MeasurementTypeNote: - out += fmt.Sprintf("%s - Note\n%s\n", m.ExperimentName, m.Note) - if style != "" { - out = style + out + "{{/}}" - } - return out - case MeasurementTypeValue, MeasurementTypeDuration: - out += fmt.Sprintf("%s - %s", m.ExperimentName, m.Name) - if m.Units != "" { - out += " [" + m.Units + "]" - } - if style != "" { - out = style + out + "{{/}}" - } - out += "\n" - out += m.Stats().String() + "\n" - } - t := table.NewTable() - t.TableStyle.EnableTextStyling = enableStyling - switch m.Type { - case MeasurementTypeValue: - t.AppendRow(table.R(table.C("Value", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style)) - for idx := range m.Values { - t.AppendRow(table.R( - table.C(fmt.Sprintf(m.PrecisionBundle.ValueFormat, m.Values[idx]), table.AlignTypeRight), - table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft), - )) - } - case MeasurementTypeDuration: - t.AppendRow(table.R(table.C("Duration", table.AlignTypeCenter), table.C("Annotation", table.AlignTypeCenter), table.Divider("="), style)) - for idx := range m.Durations { - t.AppendRow(table.R( - table.C(m.Durations[idx].Round(m.PrecisionBundle.Duration).String(), style, table.AlignTypeRight), - table.C(m.Annotations[idx], "{{gray}}", table.AlignTypeLeft), - )) - } - } - out += t.Render() - return out -} - -/* -ColorableString generates a styled report that includes all the data points for this Measurement. -It is called automatically by Ginkgo's reporting infrastructure when the Measurement is registered as a ReportEntry via AddReportEntry. -*/ -func (m Measurement) ColorableString() string { - return m.report(true) -} - -/* -String generates an unstyled report that includes all the data points for this Measurement. 
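A hedged sketch of the Ginkgo reporting flow described in the comments above: fetch the recorded Measurement with Get and attach it with AddReportEntry so it is rendered through ColorableString()/String(). NewExperiment and RecordDuration are assumed from earlier in experiment.go; the test scaffolding is illustrative.

package demo_test

import (
    "testing"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/onsi/gomega/gmeasure"
)

func TestDemo(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "gmeasure demo")
}

var _ = It("attaches a Measurement to the spec report", func() {
    experiment := gmeasure.NewExperiment("rendering")
    experiment.RecordDuration("draw", 20*time.Millisecond)

    // Get returns the recorded Measurement; AddReportEntry emits it in the
    // Ginkgo report using the ColorableString()/String() methods shown here.
    AddReportEntry("draw", experiment.Get("draw"))
})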
-*/ -func (m Measurement) String() string { - return m.report(false) -} - -/* -Stats returns a Stats struct summarizing the statistic of this measurement -*/ -func (m Measurement) Stats() Stats { - if m.Type == MeasurementTypeInvalid || m.Type == MeasurementTypeNote { - return Stats{} - } - - out := Stats{ - ExperimentName: m.ExperimentName, - MeasurementName: m.Name, - Style: m.Style, - Units: m.Units, - PrecisionBundle: m.PrecisionBundle, - } - - switch m.Type { - case MeasurementTypeValue: - out.Type = StatsTypeValue - out.N = len(m.Values) - if out.N == 0 { - return out - } - indices, sum := make([]int, len(m.Values)), 0.0 - for idx, v := range m.Values { - indices[idx] = idx - sum += v - } - sort.Slice(indices, func(i, j int) bool { - return m.Values[indices[i]] < m.Values[indices[j]] - }) - out.ValueBundle = map[Stat]float64{ - StatMin: m.Values[indices[0]], - StatMax: m.Values[indices[out.N-1]], - StatMean: sum / float64(out.N), - StatStdDev: 0.0, - } - out.AnnotationBundle = map[Stat]string{ - StatMin: m.Annotations[indices[0]], - StatMax: m.Annotations[indices[out.N-1]], - } - - if out.N%2 == 0 { - out.ValueBundle[StatMedian] = (m.Values[indices[out.N/2]] + m.Values[indices[out.N/2-1]]) / 2.0 - } else { - out.ValueBundle[StatMedian] = m.Values[indices[(out.N-1)/2]] - } - - for _, v := range m.Values { - out.ValueBundle[StatStdDev] += (v - out.ValueBundle[StatMean]) * (v - out.ValueBundle[StatMean]) - } - out.ValueBundle[StatStdDev] = math.Sqrt(out.ValueBundle[StatStdDev] / float64(out.N)) - case MeasurementTypeDuration: - out.Type = StatsTypeDuration - out.N = len(m.Durations) - if out.N == 0 { - return out - } - indices, sum := make([]int, len(m.Durations)), time.Duration(0) - for idx, v := range m.Durations { - indices[idx] = idx - sum += v - } - sort.Slice(indices, func(i, j int) bool { - return m.Durations[indices[i]] < m.Durations[indices[j]] - }) - out.DurationBundle = map[Stat]time.Duration{ - StatMin: m.Durations[indices[0]], - StatMax: m.Durations[indices[out.N-1]], - StatMean: sum / time.Duration(out.N), - } - out.AnnotationBundle = map[Stat]string{ - StatMin: m.Annotations[indices[0]], - StatMax: m.Annotations[indices[out.N-1]], - } - - if out.N%2 == 0 { - out.DurationBundle[StatMedian] = (m.Durations[indices[out.N/2]] + m.Durations[indices[out.N/2-1]]) / 2 - } else { - out.DurationBundle[StatMedian] = m.Durations[indices[(out.N-1)/2]] - } - stdDev := 0.0 - for _, v := range m.Durations { - stdDev += float64(v-out.DurationBundle[StatMean]) * float64(v-out.DurationBundle[StatMean]) - } - out.DurationBundle[StatStdDev] = time.Duration(math.Sqrt(stdDev / float64(out.N))) - } - - return out -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/rank.go b/vendor/github.com/onsi/gomega/gmeasure/rank.go deleted file mode 100644 index 6be9105e89..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/rank.go +++ /dev/null @@ -1,141 +0,0 @@ -package gmeasure - -import ( - "fmt" - "sort" - - "github.com/onsi/gomega/gmeasure/table" -) - -/* -RankingCriteria is an enum representing the criteria by which Stats should be ranked. The enum names should be self explanatory. e.g. LowerMeanIsBetter means that Stats with lower mean values are considered more beneficial, with the lowest mean being declared the "winner" . 
-*/ -type RankingCriteria uint - -const ( - LowerMeanIsBetter RankingCriteria = iota - HigherMeanIsBetter - LowerMedianIsBetter - HigherMedianIsBetter - LowerMinIsBetter - HigherMinIsBetter - LowerMaxIsBetter - HigherMaxIsBetter -) - -var rcEnumSupport = newEnumSupport(map[uint]string{uint(LowerMeanIsBetter): "Lower Mean is Better", uint(HigherMeanIsBetter): "Higher Mean is Better", uint(LowerMedianIsBetter): "Lower Median is Better", uint(HigherMedianIsBetter): "Higher Median is Better", uint(LowerMinIsBetter): "Lower Mins is Better", uint(HigherMinIsBetter): "Higher Min is Better", uint(LowerMaxIsBetter): "Lower Max is Better", uint(HigherMaxIsBetter): "Higher Max is Better"}) - -func (s RankingCriteria) String() string { return rcEnumSupport.String(uint(s)) } -func (s *RankingCriteria) UnmarshalJSON(b []byte) error { - out, err := rcEnumSupport.UnmarshJSON(b) - *s = RankingCriteria(out) - return err -} -func (s RankingCriteria) MarshalJSON() ([]byte, error) { return rcEnumSupport.MarshJSON(uint(s)) } - -/* -Ranking ranks a set of Stats by a specified RankingCriteria. Use RankStats to create a Ranking. - -When using Ginkgo, you can register Rankings as Report Entries via AddReportEntry. This will emit a formatted table representing the Stats in rank-order when Ginkgo generates the report. -*/ -type Ranking struct { - Criteria RankingCriteria - Stats []Stats -} - -/* -RankStats creates a new ranking of the passed-in stats according to the passed-in criteria. -*/ -func RankStats(criteria RankingCriteria, stats ...Stats) Ranking { - sort.Slice(stats, func(i int, j int) bool { - switch criteria { - case LowerMeanIsBetter: - return stats[i].FloatFor(StatMean) < stats[j].FloatFor(StatMean) - case HigherMeanIsBetter: - return stats[i].FloatFor(StatMean) > stats[j].FloatFor(StatMean) - case LowerMedianIsBetter: - return stats[i].FloatFor(StatMedian) < stats[j].FloatFor(StatMedian) - case HigherMedianIsBetter: - return stats[i].FloatFor(StatMedian) > stats[j].FloatFor(StatMedian) - case LowerMinIsBetter: - return stats[i].FloatFor(StatMin) < stats[j].FloatFor(StatMin) - case HigherMinIsBetter: - return stats[i].FloatFor(StatMin) > stats[j].FloatFor(StatMin) - case LowerMaxIsBetter: - return stats[i].FloatFor(StatMax) < stats[j].FloatFor(StatMax) - case HigherMaxIsBetter: - return stats[i].FloatFor(StatMax) > stats[j].FloatFor(StatMax) - } - return false - }) - - out := Ranking{ - Criteria: criteria, - Stats: stats, - } - - return out -} - -/* -Winner returns the Stats with the most optimal rank based on the specified ranking criteria. 
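A small sketch of RankStats and Winner under the criteria listed above, assuming NewExperiment, RecordDuration, and GetStats from experiment.go (not all of which appear in this hunk).

package main

import (
    "fmt"
    "time"

    "github.com/onsi/gomega/gmeasure"
)

func main() {
    fast := gmeasure.NewExperiment("fast path")
    slow := gmeasure.NewExperiment("slow path")
    fast.RecordDuration("draw", 10*time.Millisecond)
    slow.RecordDuration("draw", 25*time.Millisecond)

    // Rank the two Stats by median duration; Winner() is simply the first
    // entry after RankStats sorts them according to the criteria.
    ranking := gmeasure.RankStats(gmeasure.LowerMedianIsBetter,
        fast.GetStats("draw"), slow.GetStats("draw"))
    fmt.Println(ranking.Winner().ExperimentName) // "fast path"
}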
For example, if the RankingCriteria is LowerMaxIsBetter then the Stats with the lowest value or duration for StatMax will be returned as the "winner" -*/ -func (c Ranking) Winner() Stats { - if len(c.Stats) == 0 { - return Stats{} - } - return c.Stats[0] -} - -func (c Ranking) report(enableStyling bool) string { - if len(c.Stats) == 0 { - return "Empty Ranking" - } - t := table.NewTable() - t.TableStyle.EnableTextStyling = enableStyling - t.AppendRow(table.R( - table.C("Experiment"), table.C("Name"), table.C("N"), table.C("Min"), table.C("Median"), table.C("Mean"), table.C("StdDev"), table.C("Max"), - table.Divider("="), - "{{bold}}", - )) - - for idx, stats := range c.Stats { - name := stats.MeasurementName - if stats.Units != "" { - name = name + " [" + stats.Units + "]" - } - experimentName := stats.ExperimentName - style := stats.Style - if idx == 0 { - style = "{{bold}}" + style - name += "\n*Winner*" - experimentName += "\n*Winner*" - } - r := table.R(style) - t.AppendRow(r) - r.AppendCell(table.C(experimentName), table.C(name)) - r.AppendCell(stats.cells()...) - - } - out := fmt.Sprintf("Ranking Criteria: %s\n", c.Criteria) - if enableStyling { - out = "{{bold}}" + out + "{{/}}" - } - out += t.Render() - return out -} - -/* -ColorableString generates a styled report that includes a table of the rank-ordered Stats -It is called automatically by Ginkgo's reporting infrastructure when the Ranking is registered as a ReportEntry via AddReportEntry. -*/ -func (c Ranking) ColorableString() string { - return c.report(true) -} - -/* -String generates an unstyled report that includes a table of the rank-ordered Stats -*/ -func (c Ranking) String() string { - return c.report(false) -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/stats.go b/vendor/github.com/onsi/gomega/gmeasure/stats.go deleted file mode 100644 index 52b75a7d38..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/stats.go +++ /dev/null @@ -1,153 +0,0 @@ -package gmeasure - -import ( - "fmt" - "time" - - "github.com/onsi/gomega/gmeasure/table" -) - -/* -Stat is an enum representing the statistics you can request of a Stats struct -*/ -type Stat uint - -const ( - StatInvalid Stat = iota - StatMin - StatMax - StatMean - StatMedian - StatStdDev -) - -var statEnumSupport = newEnumSupport(map[uint]string{uint(StatInvalid): "INVALID STAT", uint(StatMin): "Min", uint(StatMax): "Max", uint(StatMean): "Mean", uint(StatMedian): "Median", uint(StatStdDev): "StdDev"}) - -func (s Stat) String() string { return statEnumSupport.String(uint(s)) } -func (s *Stat) UnmarshalJSON(b []byte) error { - out, err := statEnumSupport.UnmarshJSON(b) - *s = Stat(out) - return err -} -func (s Stat) MarshalJSON() ([]byte, error) { return statEnumSupport.MarshJSON(uint(s)) } - -type StatsType uint - -const ( - StatsTypeInvalid StatsType = iota - StatsTypeValue - StatsTypeDuration -) - -var statsTypeEnumSupport = newEnumSupport(map[uint]string{uint(StatsTypeInvalid): "INVALID STATS TYPE", uint(StatsTypeValue): "StatsTypeValue", uint(StatsTypeDuration): "StatsTypeDuration"}) - -func (s StatsType) String() string { return statsTypeEnumSupport.String(uint(s)) } -func (s *StatsType) UnmarshalJSON(b []byte) error { - out, err := statsTypeEnumSupport.UnmarshJSON(b) - *s = StatsType(out) - return err -} -func (s StatsType) MarshalJSON() ([]byte, error) { return statsTypeEnumSupport.MarshJSON(uint(s)) } - -/* -Stats records the key statistics for a given measurement. 
You generally don't make Stats directly - but you can fetch them from Experiments using GetStats() and from Measurements using Stats(). - -When using Ginkgo, you can register Measurements as Report Entries via AddReportEntry. This will emit all the captured data points when Ginkgo generates the report. -*/ -type Stats struct { - // Type is the StatType - one of StatTypeDuration or StatTypeValue - Type StatsType - - // ExperimentName is the name of the Experiment that recorded the Measurement from which this Stat is derived - ExperimentName string - - // MeasurementName is the name of the Measurement from which this Stat is derived - MeasurementName string - - // Units captures the Units of the Measurement from which this Stat is derived - Units string - - // Style captures the Style of the Measurement from which this Stat is derived - Style string - - // PrecisionBundle captures the precision to use when rendering data for this Measurement. - // If Type is StatTypeDuration then PrecisionBundle.Duration is used to round any durations before presentation. - // If Type is StatTypeValue then PrecisionBundle.ValueFormat is used to format any values before presentation - PrecisionBundle PrecisionBundle - - // N represents the total number of data points in the Measurement from which this Stat is derived - N int - - // If Type is StatTypeValue, ValueBundle will be populated with float64s representing this Stat's statistics - ValueBundle map[Stat]float64 - - // If Type is StatTypeDuration, DurationBundle will be populated with float64s representing this Stat's statistics - DurationBundle map[Stat]time.Duration - - // AnnotationBundle is populated with Annotations corresponding to the data points that can be associated with a Stat. - // For example AnnotationBundle[StatMin] will return the Annotation for the data point that has the minimum value/duration. - AnnotationBundle map[Stat]string -} - -// String returns a minimal summary of the stats of the form "MIN < [MEDIAN] | ±STDDEV < MAX" -func (s Stats) String() string { - return fmt.Sprintf("%s < [%s] | <%s> ±%s < %s", s.StringFor(StatMin), s.StringFor(StatMedian), s.StringFor(StatMean), s.StringFor(StatStdDev), s.StringFor(StatMax)) -} - -// ValueFor returns the float64 value for a particular Stat. You should only use this if the Stats has Type StatsTypeValue -// For example: -// -// median := experiment.GetStats("length").ValueFor(gmeasure.StatMedian) -// -// will return the median data point for the "length" Measurement. -func (s Stats) ValueFor(stat Stat) float64 { - return s.ValueBundle[stat] -} - -// DurationFor returns the time.Duration for a particular Stat. You should only use this if the Stats has Type StatsTypeDuration -// For example: -// -// mean := experiment.GetStats("runtime").ValueFor(gmeasure.StatMean) -// -// will return the mean duration for the "runtime" Measurement. -func (s Stats) DurationFor(stat Stat) time.Duration { - return s.DurationBundle[stat] -} - -// FloatFor returns a float64 representation of the passed-in Stat. -// When Type is StatsTypeValue this is equivalent to s.ValueFor(stat). -// When Type is StatsTypeDuration this is equivalent to float64(s.DurationFor(stat)) -func (s Stats) FloatFor(stat Stat) float64 { - switch s.Type { - case StatsTypeValue: - return s.ValueFor(stat) - case StatsTypeDuration: - return float64(s.DurationFor(stat)) - } - return 0 -} - -// StringFor returns a formatted string representation of the passed-in Stat. 
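A minimal sketch of the Stats accessors documented above. RecordValue and the Units decoration are assumed from elsewhere in the package; the recorded numbers are arbitrary.

package main

import (
    "fmt"

    "github.com/onsi/gomega/gmeasure"
)

func main() {
    experiment := gmeasure.NewExperiment("payload sizes")
    for _, size := range []float64{128, 256, 512} {
        // RecordValue and Units are defined elsewhere in the package.
        experiment.RecordValue("size", size, gmeasure.Units("bytes"))
    }

    stats := experiment.GetStats("size")
    fmt.Println(stats.N)                             // 3
    fmt.Println(stats.ValueFor(gmeasure.StatMedian)) // 256
    fmt.Println(stats.StringFor(gmeasure.StatMean))  // mean formatted via PrecisionBundle.ValueFormat
    fmt.Println(stats.String())                      // "min < [median] | <mean> ±stddev < max" summary
}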
-// The formatting honors the precision directives provided in stats.PrecisionBundle -func (s Stats) StringFor(stat Stat) string { - switch s.Type { - case StatsTypeValue: - return fmt.Sprintf(s.PrecisionBundle.ValueFormat, s.ValueFor(stat)) - case StatsTypeDuration: - return s.DurationFor(stat).Round(s.PrecisionBundle.Duration).String() - } - return "" -} - -func (s Stats) cells() []table.Cell { - out := []table.Cell{} - out = append(out, table.C(fmt.Sprintf("%d", s.N))) - for _, stat := range []Stat{StatMin, StatMedian, StatMean, StatStdDev, StatMax} { - content := s.StringFor(stat) - if s.AnnotationBundle[stat] != "" { - content += "\n" + s.AnnotationBundle[stat] - } - out = append(out, table.C(content)) - } - return out -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go b/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go deleted file mode 100644 index 0da22f863e..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/stopwatch.go +++ /dev/null @@ -1,116 +0,0 @@ -package gmeasure - -import "time" - -/* -Stopwatch provides a convenient abstraction for recording durations. There are two ways to make a Stopwatch: - -You can make a Stopwatch from an Experiment via experiment.NewStopwatch(). This is how you first get a hold of a Stopwatch. - -You can subsequently call stopwatch.NewStopwatch() to get a fresh Stopwatch. -This is only necessary if you need to record durations on a different goroutine as a single Stopwatch is not considered thread-safe. - -The Stopwatch starts as soon as it is created. You can Pause() the stopwatch and Reset() it as needed. - -Stopwatches refer back to their parent Experiment. They use this reference to record any measured durations back with the Experiment. -*/ -type Stopwatch struct { - Experiment *Experiment - t time.Time - pauseT time.Time - pauseDuration time.Duration - running bool -} - -func newStopwatch(experiment *Experiment) *Stopwatch { - return &Stopwatch{ - Experiment: experiment, - t: time.Now(), - running: true, - } -} - -/* -NewStopwatch returns a new Stopwatch pointing to the same Experiment as this Stopwatch -*/ -func (s *Stopwatch) NewStopwatch() *Stopwatch { - return newStopwatch(s.Experiment) -} - -/* -Record captures the amount of time that has passed since the Stopwatch was created or most recently Reset(). It records the duration on it's associated Experiment in a Measurement with the passed-in name. - -Record takes all the decorators that experiment.RecordDuration takes (e.g. Annotation("...") can be used to annotate this duration) - -Note that Record does not Reset the Stopwatch. It does, however, return the Stopwatch so the following pattern is common: - - stopwatch := experiment.NewStopwatch() - // first expensive operation - stopwatch.Record("first operation").Reset() //records the duration of the first operation and resets the stopwatch. - // second expensive operation - stopwatch.Record("second operation").Reset() //records the duration of the second operation and resets the stopwatch. - -omitting the Reset() after the first operation would cause the duration recorded for the second operation to include the time elapsed by both the first _and_ second operations. - -The Stopwatch must be running (i.e. not paused) when Record is called. -*/ -func (s *Stopwatch) Record(name string, args ...any) *Stopwatch { - if !s.running { - panic("stopwatch is not running - call Resume or Reset before calling Record") - } - duration := time.Since(s.t) - s.pauseDuration - s.Experiment.RecordDuration(name, duration, args...) 
- return s -} - -/* -Reset resets the Stopwatch. Subsequent recorded durations will measure the time elapsed from the moment Reset was called. -If the Stopwatch was Paused it is unpaused after calling Reset. -*/ -func (s *Stopwatch) Reset() *Stopwatch { - s.running = true - s.t = time.Now() - s.pauseDuration = 0 - return s -} - -/* -Pause pauses the Stopwatch. While pasued the Stopwatch does not accumulate elapsed time. This is useful for ignoring expensive operations that are incidental to the behavior you are attempting to characterize. -Note: You must call Resume() before you can Record() subsequent measurements. - -For example: - - stopwatch := experiment.NewStopwatch() - // first expensive operation - stopwatch.Record("first operation").Reset() - // second expensive operation - part 1 - stopwatch.Pause() - // something expensive that we don't care about - stopwatch.Resume() - // second expensive operation - part 2 - stopwatch.Record("second operation").Reset() // the recorded duration captures the time elapsed during parts 1 and 2 of the second expensive operation, but not the bit in between - -The Stopwatch must be running when Pause is called. -*/ -func (s *Stopwatch) Pause() *Stopwatch { - if !s.running { - panic("stopwatch is not running - call Resume or Reset before calling Pause") - } - s.running = false - s.pauseT = time.Now() - return s -} - -/* -Resume resumes a paused Stopwatch. Any time that elapses after Resume is called will be accumulated as elapsed time when a subsequent duration is Recorded. - -The Stopwatch must be Paused when Resume is called -*/ -func (s *Stopwatch) Resume() *Stopwatch { - if s.running { - panic("stopwatch is running - call Pause before calling Resume") - } - s.running = true - s.pauseDuration = s.pauseDuration + time.Since(s.pauseT) - return s -} diff --git a/vendor/github.com/onsi/gomega/gmeasure/table/table.go b/vendor/github.com/onsi/gomega/gmeasure/table/table.go deleted file mode 100644 index 0a0df3b7ae..0000000000 --- a/vendor/github.com/onsi/gomega/gmeasure/table/table.go +++ /dev/null @@ -1,356 +0,0 @@ -package table - -// This is a temporary package - Table will move to github.com/onsi/consolable once some more dust settles - -import ( - "reflect" - "strings" - "unicode/utf8" -) - -type AlignType uint - -const ( - AlignTypeLeft AlignType = iota - AlignTypeCenter - AlignTypeRight -) - -type Divider string - -type Row struct { - Cells []Cell - Divider string - Style string -} - -func R(args ...any) *Row { - r := &Row{ - Divider: "-", - } - for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(Divider("")): - r.Divider = string(arg.(Divider)) - case reflect.TypeOf(r.Style): - r.Style = arg.(string) - case reflect.TypeOf(Cell{}): - r.Cells = append(r.Cells, arg.(Cell)) - } - } - return r -} - -func (r *Row) AppendCell(cells ...Cell) *Row { - r.Cells = append(r.Cells, cells...) 
- return r -} - -func (r *Row) Render(widths []int, totalWidth int, tableStyle TableStyle, isLastRow bool) string { - out := "" - if len(r.Cells) == 1 { - out += strings.Join(r.Cells[0].render(totalWidth, r.Style, tableStyle), "\n") + "\n" - } else { - if len(r.Cells) != len(widths) { - panic("row vs width mismatch") - } - renderedCells := make([][]string, len(r.Cells)) - maxHeight := 0 - for colIdx, cell := range r.Cells { - renderedCells[colIdx] = cell.render(widths[colIdx], r.Style, tableStyle) - if len(renderedCells[colIdx]) > maxHeight { - maxHeight = len(renderedCells[colIdx]) - } - } - for colIdx := range r.Cells { - for len(renderedCells[colIdx]) < maxHeight { - renderedCells[colIdx] = append(renderedCells[colIdx], strings.Repeat(" ", widths[colIdx])) - } - } - border := strings.Repeat(" ", tableStyle.Padding) - if tableStyle.VerticalBorders { - border += "|" + border - } - for lineIdx := 0; lineIdx < maxHeight; lineIdx++ { - for colIdx := range r.Cells { - out += renderedCells[colIdx][lineIdx] - if colIdx < len(r.Cells)-1 { - out += border - } - } - out += "\n" - } - } - if tableStyle.HorizontalBorders && !isLastRow && r.Divider != "" { - out += strings.Repeat(string(r.Divider), totalWidth) + "\n" - } - - return out -} - -type Cell struct { - Contents []string - Style string - Align AlignType -} - -func C(contents string, args ...any) Cell { - c := Cell{ - Contents: strings.Split(contents, "\n"), - } - for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(c.Style): - c.Style = arg.(string) - case reflect.TypeOf(c.Align): - c.Align = arg.(AlignType) - } - } - return c -} - -func (c Cell) Width() (int, int) { - w, minW := 0, 0 - for _, line := range c.Contents { - lineWidth := utf8.RuneCountInString(line) - if lineWidth > w { - w = lineWidth - } - for _, word := range strings.Split(line, " ") { - wordWidth := utf8.RuneCountInString(word) - if wordWidth > minW { - minW = wordWidth - } - } - } - return w, minW -} - -func (c Cell) alignLine(line string, width int) string { - lineWidth := utf8.RuneCountInString(line) - if lineWidth == width { - return line - } - if lineWidth < width { - gap := width - lineWidth - switch c.Align { - case AlignTypeLeft: - return line + strings.Repeat(" ", gap) - case AlignTypeRight: - return strings.Repeat(" ", gap) + line - case AlignTypeCenter: - leftGap := gap / 2 - rightGap := gap - leftGap - return strings.Repeat(" ", leftGap) + line + strings.Repeat(" ", rightGap) - } - } - return line -} - -func (c Cell) splitWordToWidth(word string, width int) []string { - out := []string{} - n, subWord := 0, "" - for _, c := range word { - subWord += string(c) - n += 1 - if n == width-1 { - out = append(out, subWord+"-") - n, subWord = 0, "" - } - } - return out -} - -func (c Cell) splitToWidth(line string, width int) []string { - lineWidth := utf8.RuneCountInString(line) - if lineWidth <= width { - return []string{line} - } - - outLines := []string{} - words := strings.Split(line, " ") - outWords := []string{words[0]} - length := utf8.RuneCountInString(words[0]) - if length > width { - splitWord := c.splitWordToWidth(words[0], width) - lastIdx := len(splitWord) - 1 - outLines = append(outLines, splitWord[:lastIdx]...) 
- outWords = []string{splitWord[lastIdx]} - length = utf8.RuneCountInString(splitWord[lastIdx]) - } - - for _, word := range words[1:] { - wordLength := utf8.RuneCountInString(word) - if length+wordLength+1 <= width { - length += wordLength + 1 - outWords = append(outWords, word) - continue - } - outLines = append(outLines, strings.Join(outWords, " ")) - - outWords = []string{word} - length = wordLength - if length > width { - splitWord := c.splitWordToWidth(word, width) - lastIdx := len(splitWord) - 1 - outLines = append(outLines, splitWord[:lastIdx]...) - outWords = []string{splitWord[lastIdx]} - length = utf8.RuneCountInString(splitWord[lastIdx]) - } - } - if len(outWords) > 0 { - outLines = append(outLines, strings.Join(outWords, " ")) - } - - return outLines -} - -func (c Cell) render(width int, style string, tableStyle TableStyle) []string { - out := []string{} - for _, line := range c.Contents { - out = append(out, c.splitToWidth(line, width)...) - } - for idx := range out { - out[idx] = c.alignLine(out[idx], width) - } - - if tableStyle.EnableTextStyling { - style = style + c.Style - if style != "" { - for idx := range out { - out[idx] = style + out[idx] + "{{/}}" - } - } - } - - return out -} - -type TableStyle struct { - Padding int - VerticalBorders bool - HorizontalBorders bool - MaxTableWidth int - MaxColWidth int - EnableTextStyling bool -} - -var DefaultTableStyle = TableStyle{ - Padding: 1, - VerticalBorders: true, - HorizontalBorders: true, - MaxTableWidth: 120, - MaxColWidth: 40, - EnableTextStyling: true, -} - -type Table struct { - Rows []*Row - - TableStyle TableStyle -} - -func NewTable() *Table { - return &Table{ - TableStyle: DefaultTableStyle, - } -} - -func (t *Table) AppendRow(row *Row) *Table { - t.Rows = append(t.Rows, row) - return t -} - -func (t *Table) Render() string { - out := "" - totalWidth, widths := t.computeWidths() - for rowIdx, row := range t.Rows { - out += row.Render(widths, totalWidth, t.TableStyle, rowIdx == len(t.Rows)-1) - } - return out -} - -func (t *Table) computeWidths() (int, []int) { - nCol := 0 - for _, row := range t.Rows { - if len(row.Cells) > nCol { - nCol = len(row.Cells) - } - } - - // lets compute the contribution to width from the borders + padding - borderWidth := t.TableStyle.Padding - if t.TableStyle.VerticalBorders { - borderWidth += 1 + t.TableStyle.Padding - } - totalBorderWidth := borderWidth * (nCol - 1) - - // lets compute the width of each column - widths := make([]int, nCol) - minWidths := make([]int, nCol) - for colIdx := range widths { - for _, row := range t.Rows { - if colIdx >= len(row.Cells) { - // ignore rows with fewer columns - continue - } - w, minWid := row.Cells[colIdx].Width() - if w > widths[colIdx] { - widths[colIdx] = w - } - if minWid > minWidths[colIdx] { - minWidths[colIdx] = minWid - } - } - } - - // do we already fit? - if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth { - // yes! we're done - return sum(widths) + totalBorderWidth, widths - } - - // clamp the widths and minWidths to MaxColWidth - for colIdx := range widths { - widths[colIdx] = min(widths[colIdx], t.TableStyle.MaxColWidth) - minWidths[colIdx] = min(minWidths[colIdx], t.TableStyle.MaxColWidth) - } - - // do we fit now? - if sum(widths)+totalBorderWidth <= t.TableStyle.MaxTableWidth { - // yes! we're done - return sum(widths) + totalBorderWidth, widths - } - - // hmm... still no... can we possibly squeeze the table in without violating minWidths? 
- if sum(minWidths)+totalBorderWidth >= t.TableStyle.MaxTableWidth { - // nope - we're just going to have to exceed MaxTableWidth - return sum(minWidths) + totalBorderWidth, minWidths - } - - // looks like we don't fit yet, but we should be able to fit without violating minWidths - // lets start scaling down - n := 0 - for sum(widths)+totalBorderWidth > t.TableStyle.MaxTableWidth { - budget := t.TableStyle.MaxTableWidth - totalBorderWidth - baseline := sum(widths) - - for colIdx := range widths { - widths[colIdx] = max((widths[colIdx]*budget)/baseline, minWidths[colIdx]) - } - n += 1 - if n > 100 { - break // in case we somehow fail to converge - } - } - - return sum(widths) + totalBorderWidth, widths -} - -func sum(s []int) int { - out := 0 - for _, v := range s { - out += v - } - return out -} diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go deleted file mode 100644 index 4a6114ab8f..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/elements.go +++ /dev/null @@ -1,235 +0,0 @@ -// untested sections: 6 - -package gstruct - -import ( - "errors" - "fmt" - "reflect" - "runtime/debug" - "strconv" - - "github.com/onsi/gomega/format" - errorsutil "github.com/onsi/gomega/gstruct/errors" - "github.com/onsi/gomega/types" -) - -// MatchAllElements succeeds if every element of a slice matches the element matcher it maps to -// through the id function, and every element matcher is matched. -// -// idFn := func(element any) string { -// return fmt.Sprintf("%v", element) -// } -// -// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// })) -func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher { - return &ElementsMatcher{ - Identifier: identifier, - Elements: elements, - } -} - -// MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to -// through the id with index function, and every element matcher is matched. -// -// idFn := func(index int, element any) string { -// return strconv.Itoa(index) -// } -// -// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// })) -func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements) types.GomegaMatcher { - return &ElementsMatcher{ - Identifier: identifier, - Elements: elements, - } -} - -// MatchElements succeeds if each element of a slice matches the element matcher it maps to -// through the id function. It can ignore extra elements and/or missing elements. -// -// idFn := func(element any) string { -// return fmt.Sprintf("%v", element) -// } -// -// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// })) -// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ -// "a": Equal("a"), -// "b": Equal("b"), -// "c": Equal("c"), -// "d": Equal("d"), -// })) -func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher { - return &ElementsMatcher{ - Identifier: identifier, - Elements: elements, - IgnoreExtras: options&IgnoreExtras != 0, - IgnoreMissing: options&IgnoreMissing != 0, - AllowDuplicates: options&AllowDuplicates != 0, - } -} - -// MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to -// through the id with index function. It can ignore extra elements and/or missing elements. 
-// -// idFn := func(index int, element any) string { -// return strconv.Itoa(index) -// } -// -// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// })) -// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ -// "0": Equal("a"), -// "1": Equal("b"), -// "2": Equal("c"), -// "3": Equal("d"), -// })) -func MatchElementsWithIndex(identifier IdentifierWithIndex, options Options, elements Elements) types.GomegaMatcher { - return &ElementsMatcher{ - Identifier: identifier, - Elements: elements, - IgnoreExtras: options&IgnoreExtras != 0, - IgnoreMissing: options&IgnoreMissing != 0, - AllowDuplicates: options&AllowDuplicates != 0, - } -} - -// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped -// by the Identifier function. -// TODO: Extend this to work with arrays & maps (map the key) as well. -type ElementsMatcher struct { - // Matchers for each element. - Elements Elements - // Function mapping an element to the string key identifying its matcher. - Identifier Identify - - // Whether to ignore extra elements or consider it an error. - IgnoreExtras bool - // Whether to ignore missing elements or consider it an error. - IgnoreMissing bool - // Whether to key duplicates when matching IDs. - AllowDuplicates bool - - // State. - failures []error -} - -// Element ID to matcher. -type Elements map[string]types.GomegaMatcher - -// Function for identifying (mapping) elements. -type Identifier func(element any) string - -// Calls the underlying function with the provided params. -// Identifier drops the index. -func (i Identifier) WithIndexAndElement(index int, element any) string { - return i(element) -} - -// Uses the index and element to generate an element name -type IdentifierWithIndex func(index int, element any) string - -// Calls the underlying function with the provided params. -// IdentifierWithIndex uses the index. -func (i IdentifierWithIndex) WithIndexAndElement(index int, element any) string { - return i(index, element) -} - -// Interface for identifying the element -type Identify interface { - WithIndexAndElement(i int, element any) string -} - -// IndexIdentity is a helper function for using an index as -// the key in the element map -func IndexIdentity(index int, _ any) string { - return strconv.Itoa(index) -} - -func (m *ElementsMatcher) Match(actual any) (success bool, err error) { - if reflect.TypeOf(actual).Kind() != reflect.Slice { - return false, fmt.Errorf("%v is type %T, expected slice", actual, actual) - } - - m.failures = m.matchElements(actual) - if len(m.failures) > 0 { - return false, nil - } - return true, nil -} - -func (m *ElementsMatcher) matchElements(actual any) (errs []error) { - // Provide more useful error messages in the case of a panic. 
- defer func() { - if err := recover(); err != nil { - errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack())) - } - }() - - val := reflect.ValueOf(actual) - elements := map[string]bool{} - for i := 0; i < val.Len(); i++ { - element := val.Index(i).Interface() - id := m.Identifier.WithIndexAndElement(i, element) - if elements[id] { - if !m.AllowDuplicates { - errs = append(errs, fmt.Errorf("found duplicate element ID %s", id)) - continue - } - } - elements[id] = true - - matcher, expected := m.Elements[id] - if !expected { - if !m.IgnoreExtras { - errs = append(errs, fmt.Errorf("unexpected element %s", id)) - } - continue - } - - match, err := matcher.Match(element) - if match { - continue - } - - if err == nil { - if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { - err = errorsutil.AggregateError(nesting.Failures()) - } else { - err = errors.New(matcher.FailureMessage(element)) - } - } - errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err)) - } - - for id := range m.Elements { - if !elements[id] && !m.IgnoreMissing { - errs = append(errs, fmt.Errorf("missing expected element %s", id)) - } - } - - return errs -} - -func (m *ElementsMatcher) FailureMessage(actual any) (message string) { - failure := errorsutil.AggregateError(m.failures) - return format.Message(actual, fmt.Sprintf("to match elements: %v", failure)) -} - -func (m *ElementsMatcher) NegatedFailureMessage(actual any) (message string) { - return format.Message(actual, "not to match elements") -} - -func (m *ElementsMatcher) Failures() []error { - return m.failures -} diff --git a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go deleted file mode 100644 index 188492b212..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go +++ /dev/null @@ -1,72 +0,0 @@ -package errors - -import ( - "fmt" - "strings" - - "github.com/onsi/gomega/types" -) - -// A stateful matcher that nests other matchers within it and preserves the error types of the -// nested matcher failures. -type NestingMatcher interface { - types.GomegaMatcher - - // Returns the failures of nested matchers. - Failures() []error -} - -// An error type for labeling errors on deeply nested matchers. -type NestedError struct { - Path string - Err error -} - -func (e *NestedError) Error() string { - // Indent Errors. - indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1) - return fmt.Sprintf("%s:\n\t%v", e.Path, indented) -} - -// Create a NestedError with the given path. -// If err is a NestedError, prepend the path to it. -// If err is an AggregateError, recursively Nest each error. -func Nest(path string, err error) error { - if ag, ok := err.(AggregateError); ok { - var errs AggregateError - for _, e := range ag { - errs = append(errs, Nest(path, e)) - } - return errs - } - if ne, ok := err.(*NestedError); ok { - return &NestedError{ - Path: path + ne.Path, - Err: ne.Err, - } - } - return &NestedError{ - Path: path, - Err: err, - } -} - -// An error type for treating multiple errors as a single error. -type AggregateError []error - -// Error is part of the error interface. -func (err AggregateError) Error() string { - if len(err) == 0 { - // This should never happen, really. 
- return "" - } - if len(err) == 1 { - return err[0].Error() - } - result := fmt.Sprintf("[%s", err[0].Error()) - for i := 1; i < len(err); i++ { - result += fmt.Sprintf(", %s", err[i].Error()) - } - result += "]" - return result -} diff --git a/vendor/github.com/onsi/gomega/gstruct/fields.go b/vendor/github.com/onsi/gomega/gstruct/fields.go deleted file mode 100644 index 2f4685fc48..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/fields.go +++ /dev/null @@ -1,187 +0,0 @@ -// untested sections: 6 - -package gstruct - -import ( - "errors" - "fmt" - "reflect" - "runtime/debug" - "strings" - "unicode" - - "github.com/onsi/gomega/format" - errorsutil "github.com/onsi/gomega/gstruct/errors" - "github.com/onsi/gomega/types" -) - -// MatchAllFields succeeds if every field of a struct matches the field matcher associated with -// it, and every element matcher is matched. -// -// actual := struct{ -// A int -// B []bool -// C string -// }{ -// A: 5, -// B: []bool{true, false}, -// C: "foo", -// } -// -// Expect(actual).To(MatchAllFields(Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// "C": Equal("foo"), -// })) -func MatchAllFields(fields Fields) types.GomegaMatcher { - return &FieldsMatcher{ - Fields: fields, - } -} - -// MatchFields succeeds if each element of a struct matches the field matcher associated with -// it. It can ignore extra fields and/or missing fields. -// -// actual := struct{ -// A int -// B []bool -// C string -// }{ -// A: 5, -// B: []bool{true, false}, -// C: "foo", -// } -// -// Expect(actual).To(MatchFields(IgnoreExtras, Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// })) -// Expect(actual).To(MatchFields(IgnoreMissing, Fields{ -// "A": Equal(5), -// "B": ConsistOf(true, false), -// "C": Equal("foo"), -// "D": Equal("extra"), -// })) -func MatchFields(options Options, fields Fields) types.GomegaMatcher { - return &FieldsMatcher{ - Fields: fields, - IgnoreExtras: options&IgnoreExtras != 0, - IgnoreUnexportedExtras: options&IgnoreUnexportedExtras != 0, - IgnoreMissing: options&IgnoreMissing != 0, - } -} - -type FieldsMatcher struct { - // Matchers for each field. - Fields Fields - - // Whether to ignore extra elements or consider it an error. - IgnoreExtras bool - // Whether to ignore unexported extra elements or consider it an error. - IgnoreUnexportedExtras bool - // Whether to ignore missing elements or consider it an error. - IgnoreMissing bool - - // State. - failures []error -} - -// Field name to matcher. -type Fields map[string]types.GomegaMatcher - -func (m *FieldsMatcher) Match(actual any) (success bool, err error) { - if reflect.TypeOf(actual).Kind() != reflect.Struct { - return false, fmt.Errorf("%v is type %T, expected struct", actual, actual) - } - - m.failures = m.matchFields(actual) - if len(m.failures) > 0 { - return false, nil - } - return true, nil -} - -func isExported(fieldName string) bool { - if fieldName == "" { - return false - } - r := []rune(fieldName)[0] - return unicode.IsUpper(r) -} - -func (m *FieldsMatcher) matchFields(actual any) (errs []error) { - val := reflect.ValueOf(actual) - typ := val.Type() - fields := map[string]bool{} - for i := 0; i < val.NumField(); i++ { - fieldName := typ.Field(i).Name - fields[fieldName] = true - - err := func() (err error) { - // This test relies heavily on reflect, which tends to panic. - // Recover here to provide more useful error messages in that case. 
- defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) - } - }() - - matcher, expected := m.Fields[fieldName] - if !expected { - if m.IgnoreUnexportedExtras && !isExported(fieldName) { - return nil - } - if !m.IgnoreExtras { - return fmt.Errorf("unexpected field %s: %+v", fieldName, actual) - } - return nil - } - - var field any - if _, isIgnoreMatcher := matcher.(*IgnoreMatcher) ; isIgnoreMatcher { - field = struct {}{} // the matcher does not care about the actual value - } else { - field = val.Field(i).Interface() - } - - match, err := matcher.Match(field) - if err != nil { - return err - } else if !match { - if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { - return errorsutil.AggregateError(nesting.Failures()) - } - return errors.New(matcher.FailureMessage(field)) - } - return nil - }() - if err != nil { - errs = append(errs, errorsutil.Nest("."+fieldName, err)) - } - } - - for field := range m.Fields { - if !fields[field] && !m.IgnoreMissing { - errs = append(errs, fmt.Errorf("missing expected field %s", field)) - } - } - - return errs -} - -func (m *FieldsMatcher) FailureMessage(actual any) (message string) { - failures := make([]string, len(m.failures)) - for i := range m.failures { - failures[i] = m.failures[i].Error() - } - return format.Message(reflect.TypeOf(actual).Name(), - fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n"))) -} - -func (m *FieldsMatcher) NegatedFailureMessage(actual any) (message string) { - return format.Message(actual, "not to match fields") -} - -func (m *FieldsMatcher) Failures() []error { - return m.failures -} diff --git a/vendor/github.com/onsi/gomega/gstruct/ignore.go b/vendor/github.com/onsi/gomega/gstruct/ignore.go deleted file mode 100644 index c8238d58fd..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/ignore.go +++ /dev/null @@ -1,41 +0,0 @@ -// untested sections: 2 - -package gstruct - -import ( - "github.com/onsi/gomega/types" -) - -// Ignore ignores the actual value and always succeeds. -// -// Expect(nil).To(Ignore()) -// Expect(true).To(Ignore()) -func Ignore() types.GomegaMatcher { - return &IgnoreMatcher{true} -} - -// Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing -// to catch problematic elements, or to verify tests are running. -// -// Expect(nil).NotTo(Reject()) -// Expect(true).NotTo(Reject()) -func Reject() types.GomegaMatcher { - return &IgnoreMatcher{false} -} - -// A matcher that either always succeeds or always fails. 
-type IgnoreMatcher struct { - Succeed bool -} - -func (m *IgnoreMatcher) Match(actual any) (bool, error) { - return m.Succeed, nil -} - -func (m *IgnoreMatcher) FailureMessage(_ any) (message string) { - return "Unconditional failure" -} - -func (m *IgnoreMatcher) NegatedFailureMessage(_ any) (message string) { - return "Unconditional success" -} diff --git a/vendor/github.com/onsi/gomega/gstruct/keys.go b/vendor/github.com/onsi/gomega/gstruct/keys.go deleted file mode 100644 index 807a450b6f..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/keys.go +++ /dev/null @@ -1,126 +0,0 @@ -// untested sections: 6 - -package gstruct - -import ( - "errors" - "fmt" - "reflect" - "runtime/debug" - "strings" - - "github.com/onsi/gomega/format" - errorsutil "github.com/onsi/gomega/gstruct/errors" - "github.com/onsi/gomega/types" -) - -func MatchAllKeys(keys Keys) types.GomegaMatcher { - return &KeysMatcher{ - Keys: keys, - } -} - -func MatchKeys(options Options, keys Keys) types.GomegaMatcher { - return &KeysMatcher{ - Keys: keys, - IgnoreExtras: options&IgnoreExtras != 0, - IgnoreMissing: options&IgnoreMissing != 0, - } -} - -type KeysMatcher struct { - // Matchers for each key. - Keys Keys - - // Whether to ignore extra keys or consider it an error. - IgnoreExtras bool - // Whether to ignore missing keys or consider it an error. - IgnoreMissing bool - - // State. - failures []error -} - -type Keys map[any]types.GomegaMatcher - -func (m *KeysMatcher) Match(actual any) (success bool, err error) { - if reflect.TypeOf(actual).Kind() != reflect.Map { - return false, fmt.Errorf("%v is type %T, expected map", actual, actual) - } - - m.failures = m.matchKeys(actual) - if len(m.failures) > 0 { - return false, nil - } - return true, nil -} - -func (m *KeysMatcher) matchKeys(actual any) (errs []error) { - actualValue := reflect.ValueOf(actual) - keys := map[any]bool{} - for _, keyValue := range actualValue.MapKeys() { - key := keyValue.Interface() - keys[key] = true - - err := func() (err error) { - // This test relies heavily on reflect, which tends to panic. - // Recover here to provide more useful error messages in that case. 
- defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack()) - } - }() - - matcher, ok := m.Keys[key] - if !ok { - if !m.IgnoreExtras { - return fmt.Errorf("unexpected key %s: %+v", key, actual) - } - return nil - } - - valInterface := actualValue.MapIndex(keyValue).Interface() - - match, err := matcher.Match(valInterface) - if err != nil { - return err - } - - if !match { - if nesting, ok := matcher.(errorsutil.NestingMatcher); ok { - return errorsutil.AggregateError(nesting.Failures()) - } - return errors.New(matcher.FailureMessage(valInterface)) - } - return nil - }() - if err != nil { - errs = append(errs, errorsutil.Nest(fmt.Sprintf(".%#v", key), err)) - } - } - - for key := range m.Keys { - if !keys[key] && !m.IgnoreMissing { - errs = append(errs, fmt.Errorf("missing expected key %s", key)) - } - } - - return errs -} - -func (m *KeysMatcher) FailureMessage(actual any) (message string) { - failures := make([]string, len(m.failures)) - for i := range m.failures { - failures[i] = m.failures[i].Error() - } - return format.Message(reflect.TypeOf(actual).Name(), - fmt.Sprintf("to match keys: {\n%v\n}\n", strings.Join(failures, "\n"))) -} - -func (m *KeysMatcher) NegatedFailureMessage(actual any) (message string) { - return format.Message(actual, "not to match keys") -} - -func (m *KeysMatcher) Failures() []error { - return m.failures -} diff --git a/vendor/github.com/onsi/gomega/gstruct/pointer.go b/vendor/github.com/onsi/gomega/gstruct/pointer.go deleted file mode 100644 index 54c9557fec..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/pointer.go +++ /dev/null @@ -1,59 +0,0 @@ -// untested sections: 3 - -package gstruct - -import ( - "fmt" - "reflect" - - "github.com/onsi/gomega/format" - "github.com/onsi/gomega/types" -) - -// PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is -// nil. -// -// actual := 5 -// Expect(&actual).To(PointTo(Equal(5))) -func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher { - return &PointerMatcher{ - Matcher: matcher, - } -} - -type PointerMatcher struct { - Matcher types.GomegaMatcher - - // Failure message. - failure string -} - -func (m *PointerMatcher) Match(actual any) (bool, error) { - val := reflect.ValueOf(actual) - - // return error if actual type is not a pointer - if val.Kind() != reflect.Ptr { - return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind()) - } - - if !val.IsValid() || val.IsNil() { - m.failure = format.Message(actual, "not to be ") - return false, nil - } - - // Forward the value. - elem := val.Elem().Interface() - match, err := m.Matcher.Match(elem) - if !match { - m.failure = m.Matcher.FailureMessage(elem) - } - return match, err -} - -func (m *PointerMatcher) FailureMessage(_ any) (message string) { - return m.failure -} - -func (m *PointerMatcher) NegatedFailureMessage(actual any) (message string) { - return m.Matcher.NegatedFailureMessage(actual) -} diff --git a/vendor/github.com/onsi/gomega/gstruct/types.go b/vendor/github.com/onsi/gomega/gstruct/types.go deleted file mode 100644 index a5f6c390bd..0000000000 --- a/vendor/github.com/onsi/gomega/gstruct/types.go +++ /dev/null @@ -1,19 +0,0 @@ -package gstruct - -// Options is the type for options passed to some matchers. -type Options int - -const ( - //IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure. 
- IgnoreExtras Options = 1 << iota - //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure. - IgnoreMissing - //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when - //considered by the identifier function. All members that map to a given key must still match successfully - //with the matcher that is provided for that key. - AllowDuplicates - //IgnoreUnexportedExtras tells the matcher to ignore extra unexported fields, rather than triggering a failure. - //it is not possible to check the value of unexported fields, so this option is only useful when you want to - //check every exported fields, but you don't care about extra unexported fields. - IgnoreUnexportedExtras -) diff --git a/vendor/github.com/opencontainers/cgroups/.golangci-extra.yml b/vendor/github.com/opencontainers/cgroups/.golangci-extra.yml deleted file mode 100644 index b98dba1ba1..0000000000 --- a/vendor/github.com/opencontainers/cgroups/.golangci-extra.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This is golangci-lint config file which is used to check NEW code in -# github PRs only (see lint-extra in .github/workflows/validate.yml). -# -# For the default linter config, see .golangci.yml. This config should -# only enable additional linters and/or linter settings not enabled -# in the default config. -version: "2" - -linters: - default: none - enable: - - godot - - revive - - staticcheck - settings: - staticcheck: - checks: - - all - - -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression. - exclusions: - generated: strict diff --git a/vendor/github.com/opencontainers/cgroups/.golangci.yml b/vendor/github.com/opencontainers/cgroups/.golangci.yml deleted file mode 100644 index 90799883ca..0000000000 --- a/vendor/github.com/opencontainers/cgroups/.golangci.yml +++ /dev/null @@ -1,31 +0,0 @@ -# For documentation, see https://golangci-lint.run/usage/configuration/ -version: "2" - -formatters: - enable: - - gofumpt - exclusions: - generated: strict - -linters: - enable: - - errorlint - - nolintlint - - unconvert - - unparam - settings: - govet: - enable: - - nilness - staticcheck: - checks: - - all - - -ST1000 # https://staticcheck.dev/docs/checks/#ST1000 Incorrect or missing package comment. - - -ST1003 # https://staticcheck.dev/docs/checks/#ST1003 Poorly chosen identifier. - - -ST1005 # https://staticcheck.dev/docs/checks/#ST1005 Incorrectly formatted error string. - - -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression. - exclusions: - generated: strict - presets: - - comments - - std-error-handling diff --git a/vendor/github.com/opencontainers/cgroups/RELEASES.md b/vendor/github.com/opencontainers/cgroups/RELEASES.md index e3802706f5..028ec265df 100644 --- a/vendor/github.com/opencontainers/cgroups/RELEASES.md +++ b/vendor/github.com/opencontainers/cgroups/RELEASES.md @@ -23,7 +23,7 @@ However, specification releases have special restrictions in the [OCI charter][c * They are the target of backwards compatibility (§7.g), and * They are subject to the OFWa patent grant (§8.d and e). 
-To avoid unfortunate side effects (onerous backwards compatibility requirements or Member resignations), the following additional procedures apply to specification releases: +To avoid unfortunate side effects (onerous backwards compatibity requirements or Member resignations), the following additional procedures apply to specification releases: ### Planning a release diff --git a/vendor/github.com/opencontainers/cgroups/config_linux.go b/vendor/github.com/opencontainers/cgroups/config_linux.go index 9bc58a3789..ce98b3d784 100644 --- a/vendor/github.com/opencontainers/cgroups/config_linux.go +++ b/vendor/github.com/opencontainers/cgroups/config_linux.go @@ -23,16 +23,16 @@ type Cgroup struct { // Path specifies the path to cgroups that are created and/or joined by the container. // The path is assumed to be relative to the host system cgroup mountpoint. - Path string `json:"path,omitempty"` + Path string `json:"path"` - // ScopePrefix describes prefix for the scope name. - ScopePrefix string `json:"scope_prefix,omitempty"` + // ScopePrefix describes prefix for the scope name + ScopePrefix string `json:"scope_prefix"` - // Resources contains various cgroups settings to apply. + // Resources contains various cgroups settings to apply *Resources // Systemd tells if systemd should be used to manage cgroups. - Systemd bool `json:"Systemd,omitempty"` + Systemd bool // SystemdProps are any additional properties for systemd, // derived from org.systemd.property.xxx annotations. @@ -40,7 +40,7 @@ type Cgroup struct { SystemdProps []systemdDbus.Property `json:"-"` // Rootless tells if rootless cgroups should be used. - Rootless bool `json:"Rootless,omitempty"` + Rootless bool // The host UID that should own the cgroup, or nil to accept // the default ownership. This should only be set when the @@ -52,96 +52,96 @@ type Cgroup struct { type Resources struct { // Devices is the set of access rules for devices in the container. - Devices []*devices.Rule `json:"devices,omitempty"` + Devices []*devices.Rule `json:"devices"` - // Memory limit (in bytes). - Memory int64 `json:"memory,omitempty"` + // Memory limit (in bytes) + Memory int64 `json:"memory"` - // Memory reservation or soft_limit (in bytes). - MemoryReservation int64 `json:"memory_reservation,omitempty"` + // Memory reservation or soft_limit (in bytes) + MemoryReservation int64 `json:"memory_reservation"` - // Total memory usage (memory+swap); use -1 for unlimited swap. - MemorySwap int64 `json:"memory_swap,omitempty"` + // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwap int64 `json:"memory_swap"` - // CPU shares (relative weight vs. other containers). - CpuShares uint64 `json:"cpu_shares,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuShares should be CPUShares". + // CPU shares (relative weight vs. other containers) + CpuShares uint64 `json:"cpu_shares"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuQuota int64 `json:"cpu_quota,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuQuota should be CPUQuota". + CpuQuota int64 `json:"cpu_quota"` // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a given period. - CpuBurst *uint64 `json:"cpu_burst,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuBurst should be CPUBurst". + CpuBurst *uint64 `json:"cpu_burst"` //nolint:revive // CPU period to be used for hardcapping (in usecs). 0 to use system default. 
- CpuPeriod uint64 `json:"cpu_period,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuPeriod should be CPUPeriod". + CpuPeriod uint64 `json:"cpu_period"` // How many time CPU will use in realtime scheduling (in usecs). - CpuRtRuntime int64 `json:"cpu_rt_quota,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuRtRuntime should be CPURtRuntime". + CpuRtRuntime int64 `json:"cpu_rt_quota"` // CPU period to be used for realtime scheduling (in usecs). - CpuRtPeriod uint64 `json:"cpu_rt_period,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuQuota should be CPUQuota". + CpuRtPeriod uint64 `json:"cpu_rt_period"` - // Cpuset CPUs to use. - CpusetCpus string `json:"cpuset_cpus,omitempty"` + // CPU to use + CpusetCpus string `json:"cpuset_cpus"` - // Cpuset memory nodes to use. - CpusetMems string `json:"cpuset_mems,omitempty"` + // MEM to use + CpusetMems string `json:"cpuset_mems"` - // Cgroup's SCHED_IDLE value. + // cgroup SCHED_IDLE CPUIdle *int64 `json:"cpu_idle,omitempty"` // Process limit; set <= `0' to disable limit. - PidsLimit int64 `json:"pids_limit,omitempty"` + PidsLimit int64 `json:"pids_limit"` // Specifies per cgroup weight, range is from 10 to 1000. - BlkioWeight uint16 `json:"blkio_weight,omitempty"` + BlkioWeight uint16 `json:"blkio_weight"` - // Tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only. - BlkioLeafWeight uint16 `json:"blkio_leaf_weight,omitempty"` + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only + BlkioLeafWeight uint16 `json:"blkio_leaf_weight"` // Weight per cgroup per device, can override BlkioWeight. - BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device,omitempty"` + BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"` // IO read rate limit per cgroup per device, bytes per second. - BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device,omitempty"` + BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"` // IO write rate limit per cgroup per device, bytes per second. - BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device,omitempty"` + BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"` // IO read rate limit per cgroup per device, IO per second. - BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device,omitempty"` + BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"` // IO write rate limit per cgroup per device, IO per second. - BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device,omitempty"` + BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` - // Freeze value for the process. - Freezer FreezerState `json:"freezer,omitempty"` + // set the freeze value for the process + Freezer FreezerState `json:"freezer"` - // Hugetlb limit (in bytes). - HugetlbLimit []*HugepageLimit `json:"hugetlb_limit,omitempty"` + // Hugetlb limit (in bytes) + HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` - // Whether to disable OOM killer. - OomKillDisable bool `json:"oom_kill_disable,omitempty"` + // Whether to disable OOM Killer + OomKillDisable bool `json:"oom_kill_disable"` - // Tuning swappiness behaviour per cgroup. 
- MemorySwappiness *uint64 `json:"memory_swappiness,omitempty"` + // Tuning swappiness behaviour per cgroup + MemorySwappiness *uint64 `json:"memory_swappiness"` - // Set priority of network traffic for container. - NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap,omitempty"` + // Set priority of network traffic for container + NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` - // Set class identifier for container's network packets. - NetClsClassid uint32 `json:"net_cls_classid_u,omitempty"` + // Set class identifier for container's network packets + NetClsClassid uint32 `json:"net_cls_classid_u"` - // Rdma resource restriction configuration. - Rdma map[string]LinuxRdma `json:"rdma,omitempty"` + // Rdma resource restriction configuration + Rdma map[string]LinuxRdma `json:"rdma"` // Used on cgroups v2: // CpuWeight sets a proportional bandwidth limit. - CpuWeight uint64 `json:"cpu_weight,omitempty"` //nolint:revive // Suppress "var-naming: struct field CpuWeight should be CPUWeight". + CpuWeight uint64 `json:"cpu_weight"` // Unified is cgroupv2-only key-value map. - Unified map[string]string `json:"unified,omitempty"` + Unified map[string]string `json:"unified"` // SkipDevices allows to skip configuring device permissions. // Used by e.g. kubelet while creating a parent cgroup (kubepods) @@ -165,5 +165,5 @@ type Resources struct { // MemoryCheckBeforeUpdate is a flag for cgroup v2 managers to check // if the new memory limits (Memory and MemorySwap) being set are lower // than the current memory usage, and reject if so. - MemoryCheckBeforeUpdate bool `json:"memory_check_before_update,omitempty"` + MemoryCheckBeforeUpdate bool `json:"memory_check_before_update"` } diff --git a/vendor/github.com/opencontainers/cgroups/devices/config/device.go b/vendor/github.com/opencontainers/cgroups/devices/config/device.go index 295575cbfa..05ad3ef8ca 100644 --- a/vendor/github.com/opencontainers/cgroups/devices/config/device.go +++ b/vendor/github.com/opencontainers/cgroups/devices/config/device.go @@ -20,10 +20,10 @@ type Device struct { FileMode os.FileMode `json:"file_mode"` // Uid of the device. - Uid uint32 `json:"uid,omitempty"` //nolint:revive // Suppress "var-naming: struct field Uid should be UID". + Uid uint32 `json:"uid"` // Gid of the device. - Gid uint32 `json:"gid,omitempty"` //nolint:revive // Suppress "var-naming: struct field Gid should be GID". + Gid uint32 `json:"gid"` } // Permissions is a cgroupv1-style string to represent device access. It diff --git a/vendor/github.com/opencontainers/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/cgroups/fs/freezer.go index fe0f0dde48..dae4a60589 100644 --- a/vendor/github.com/opencontainers/cgroups/fs/freezer.go +++ b/vendor/github.com/opencontainers/cgroups/fs/freezer.go @@ -57,7 +57,7 @@ func (s *FreezerGroup) Set(path string, r *cgroups.Resources) (Err error) { // Alas, this is still a game of chances, since the real fix // belong to the kernel (cgroup v2 do not have this bug). 
- for i := range 1000 { + for i := 0; i < 1000; i++ { if i%50 == 49 { // Occasional thaw and sleep improves // the chances to succeed in freezing diff --git a/vendor/github.com/opencontainers/cgroups/fs2/cpu.go b/vendor/github.com/opencontainers/cgroups/fs2/cpu.go index 8a22a32c45..8eae67351c 100644 --- a/vendor/github.com/opencontainers/cgroups/fs2/cpu.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/cpu.go @@ -108,12 +108,6 @@ func statCpu(dirPath string, stats *cgroups.Stats) error { case "throttled_usec": stats.CpuStats.ThrottlingData.ThrottledTime = v * 1000 - - case "nr_bursts": - stats.CpuStats.BurstData.BurstsPeriods = v - - case "burst_usec": - stats.CpuStats.BurstData.BurstTime = v * 1000 } } if err := sc.Err(); err != nil { diff --git a/vendor/github.com/opencontainers/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/cgroups/fs2/memory.go index 761330756f..d67fd8aba5 100644 --- a/vendor/github.com/opencontainers/cgroups/fs2/memory.go +++ b/vendor/github.com/opencontainers/cgroups/fs2/memory.go @@ -18,14 +18,17 @@ import ( // cgroupv2 files with .min, .max, .low, or .high suffix. // The value of -1 is converted to "max" for cgroupv1 compatibility // (which used to write -1 to remove the limit). -func numToStr(value int64) string { - switch value { - case 0: - return "" - case -1: - return "max" - } - return strconv.FormatInt(value, 10) +func numToStr(value int64) (ret string) { + switch { + case value == 0: + ret = "" + case value == -1: + ret = "max" + default: + ret = strconv.FormatInt(value, 10) + } + + return ret } func isMemorySet(r *cgroups.Resources) bool { @@ -54,7 +57,7 @@ func setMemory(dirPath string, r *cgroups.Resources) error { if swapStr != "" { if err := cgroups.WriteFile(dirPath, "memory.swap.max", swapStr); err != nil { // If swap is not enabled, silently ignore setting to max or disabling it. - if !(errors.Is(err, os.ErrNotExist) && (swapStr == "max" || swapStr == "0")) { //nolint:staticcheck // Ignore "QF1001: could apply De Morgan's law". + if !(errors.Is(err, os.ErrNotExist) && (swapStr == "max" || swapStr == "0")) { return err } } diff --git a/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go b/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go index 960126ce7c..86e38fdcba 100644 --- a/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go +++ b/vendor/github.com/opencontainers/cgroups/fscommon/rdma.go @@ -50,7 +50,7 @@ func readRdmaEntries(dir, file string) ([]cgroups.RdmaEntry, error) { if err != nil { return nil, err } - defer fd.Close() + defer fd.Close() //nolint:errorlint scanner := bufio.NewScanner(fd) for scanner.Scan() { parts := strings.SplitN(scanner.Text(), " ", 4) diff --git a/vendor/github.com/opencontainers/cgroups/stats.go b/vendor/github.com/opencontainers/cgroups/stats.go index 6cd6253ee0..b475567d82 100644 --- a/vendor/github.com/opencontainers/cgroups/stats.go +++ b/vendor/github.com/opencontainers/cgroups/stats.go @@ -9,14 +9,6 @@ type ThrottlingData struct { ThrottledTime uint64 `json:"throttled_time,omitempty"` } -type BurstData struct { - // Number of periods bandwidth burst occurs - BurstsPeriods uint64 `json:"bursts_periods,omitempty"` - // Cumulative wall-time that any cpus has used above quota in respective periods - // Units: nanoseconds. - BurstTime uint64 `json:"burst_time,omitempty"` -} - // CpuUsage denotes the usage of a CPU. // All CPU stats are aggregate since container inception. 
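Note: v0.0.1 has no BurstData type, and statCpu no longer reads the nr_bursts / burst_usec keys from cpu.stat, so CPU burst accounting disappears from GetStats output after this downgrade. A caller that still needs the figure could scan cpu.stat itself; a rough sketch (hypothetical helper name and cgroup path):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readBurstUsec scans a cgroup v2 cpu.stat file for the burst_usec field.
// Sketch only: the helper and its error handling are illustrative.
func readBurstUsec(cgroupDir string) (uint64, error) {
	f, err := os.Open(cgroupDir + "/cpu.stat")
	if err != nil {
		return 0, err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) == 2 && fields[0] == "burst_usec" {
			return strconv.ParseUint(fields[1], 10, 64)
		}
	}
	return 0, sc.Err()
}

func main() {
	usec, err := readBurstUsec("/sys/fs/cgroup/mygroup")
	fmt.Println(usec, err)
}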
type CpuUsage struct { @@ -56,7 +48,6 @@ type CpuStats struct { CpuUsage CpuUsage `json:"cpu_usage,omitempty"` ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` PSI *PSIStats `json:"psi,omitempty"` - BurstData BurstData `json:"burst_data,omitempty"` } type CPUSetStats struct { diff --git a/vendor/github.com/opencontainers/cgroups/systemd/common.go b/vendor/github.com/opencontainers/cgroups/systemd/common.go index 875a589e3d..b3077bd37f 100644 --- a/vendor/github.com/opencontainers/cgroups/systemd/common.go +++ b/vendor/github.com/opencontainers/cgroups/systemd/common.go @@ -89,7 +89,7 @@ func ExpandSlice(slice string) (string, error) { return path, nil } -func newProp(name string, units any) systemdDbus.Property { +func newProp(name string, units interface{}) systemdDbus.Property { return systemdDbus.Property{ Name: name, Value: dbus.MakeVariant(units), @@ -266,7 +266,7 @@ func systemdVersionAtoi(str string) (int, error) { // Unconditionally remove the leading prefix ("v). str = strings.TrimLeft(str, `"v`) // Match on the first integer we can grab. - for i := range len(str) { + for i := 0; i < len(str); i++ { if str[i] < '0' || str[i] > '9' { // First non-digit: cut the tail. str = str[:i] @@ -280,9 +280,7 @@ func systemdVersionAtoi(str string) (int, error) { return ver, nil } -// addCPUQuota adds CPUQuotaPeriodUSec and CPUQuotaPerSecUSec to the properties. The passed quota may be modified -// along with round-up during calculation in order to write the same value to cgroupfs later. -func addCPUQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota *int64, period uint64) { +func addCpuQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota int64, period uint64) { if period != 0 { // systemd only supports CPUQuotaPeriodUSec since v242 sdVer := systemdVersion(cm) @@ -294,10 +292,10 @@ func addCPUQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota " (setting will still be applied to cgroupfs)", sdVer) } } - if *quota != 0 || period != 0 { + if quota != 0 || period != 0 { // corresponds to USEC_INFINITY in systemd cpuQuotaPerSecUSec := uint64(math.MaxUint64) - if *quota > 0 { + if quota > 0 { if period == 0 { // assume the default period = defCPUQuotaPeriod @@ -306,11 +304,9 @@ func addCPUQuota(cm *dbusConnManager, properties *[]systemdDbus.Property, quota // (integer percentage of CPU) internally. This means that if a fractional percent of // CPU is indicated by Resources.CpuQuota, we need to round up to the nearest // 10ms (1% of a second) such that child cgroups can set the cpu.cfs_quota_us they expect. - cpuQuotaPerSecUSec = uint64(*quota*1000000) / period + cpuQuotaPerSecUSec = uint64(quota*1000000) / period if cpuQuotaPerSecUSec%10000 != 0 { cpuQuotaPerSecUSec = ((cpuQuotaPerSecUSec / 10000) + 1) * 10000 - // Update the requested quota along with the round-up in order to write the same value to cgroupfs. 
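Note: in v0.0.1 the quota is passed by value and the rounded-up figure is never written back, which is why the assignment below and the defensive copies in the Set() methods further down are removed. The practical effect is that a fractional-percent quota can end up slightly different in systemd than in cgroupfs; a sketch of the rounding with illustrative numbers:

package main

import "fmt"

// Mirrors the arithmetic in addCpuQuota for an example quota/period pair.
func main() {
	quota := int64(10500)    // requested cpu.max quota, in usecs
	period := uint64(100000) // cpu.max period, in usecs

	perSec := uint64(quota*1000000) / period // 105000 usec of CPU per second
	if perSec%10000 != 0 {
		// systemd only accepts whole 10ms (1%) increments, so round up.
		perSec = ((perSec / 10000) + 1) * 10000
	}
	fmt.Println(perSec) // 110000: systemd is asked for 11%, cgroupfs keeps quota 10500
}

With the newer (removed) write-back, the same request would also rewrite the quota to 11000 so that cgroupfs and systemd agree.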
- *quota = int64(cpuQuotaPerSecUSec) * int64(period) / 1000000 } } *properties = append(*properties, diff --git a/vendor/github.com/opencontainers/cgroups/systemd/v1.go b/vendor/github.com/opencontainers/cgroups/systemd/v1.go index b8959adbfa..8453e9b496 100644 --- a/vendor/github.com/opencontainers/cgroups/systemd/v1.go +++ b/vendor/github.com/opencontainers/cgroups/systemd/v1.go @@ -90,7 +90,7 @@ func genV1ResourcesProperties(r *cgroups.Resources, cm *dbusConnManager) ([]syst newProp("CPUShares", r.CpuShares)) } - addCPUQuota(cm, &properties, &r.CpuQuota, r.CpuPeriod) + addCpuQuota(cm, &properties, r.CpuQuota, r.CpuPeriod) if r.BlkioWeight != 0 { properties = append(properties, @@ -334,9 +334,6 @@ func (m *LegacyManager) Set(r *cgroups.Resources) error { if r.Unified != nil { return cgroups.ErrV1NoUnified } - // Use a copy since CpuQuota in r may be modified. - rCopy := *r - r = &rCopy properties, err := genV1ResourcesProperties(r, m.dbus) if err != nil { return err diff --git a/vendor/github.com/opencontainers/cgroups/systemd/v2.go b/vendor/github.com/opencontainers/cgroups/systemd/v2.go index 636b9cb01b..42a6e35511 100644 --- a/vendor/github.com/opencontainers/cgroups/systemd/v2.go +++ b/vendor/github.com/opencontainers/cgroups/systemd/v2.go @@ -113,7 +113,7 @@ func unifiedResToSystemdProps(cm *dbusConnManager, res map[string]string) (props return nil, fmt.Errorf("unified resource %q quota value conversion error: %w", k, err) } } - addCPUQuota(cm, &props, "a, period) + addCpuQuota(cm, &props, quota, period) case "cpu.weight": if shouldSetCPUIdle(cm, strings.TrimSpace(res["cpu.idle"])) { @@ -254,7 +254,7 @@ func genV2ResourcesProperties(dirPath string, r *cgroups.Resources, cm *dbusConn } } - addCPUQuota(cm, &properties, &r.CpuQuota, r.CpuPeriod) + addCpuQuota(cm, &properties, r.CpuQuota, r.CpuPeriod) if r.PidsLimit > 0 || r.PidsLimit == -1 { properties = append(properties, @@ -480,9 +480,6 @@ func (m *UnifiedManager) Set(r *cgroups.Resources) error { if r == nil { return nil } - // Use a copy since CpuQuota in r may be modified. - rCopy := *r - r = &rCopy properties, err := genV2ResourcesProperties(m.fsMgr.Path(""), r, m.dbus) if err != nil { return err diff --git a/vendor/github.com/opencontainers/cgroups/utils.go b/vendor/github.com/opencontainers/cgroups/utils.go index 95b3310ab6..9ef24b1ac6 100644 --- a/vendor/github.com/opencontainers/cgroups/utils.go +++ b/vendor/github.com/opencontainers/cgroups/utils.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "math" "os" "path/filepath" "strconv" @@ -232,7 +231,7 @@ func rmdir(path string, retry bool) error { again: err := unix.Rmdir(path) - switch err { + switch err { // nolint:errorlint // unix errors are bare case nil, unix.ENOENT: return nil case unix.EINTR: @@ -396,7 +395,7 @@ func WriteCgroupProc(dir string, pid int) error { } defer file.Close() - for range 5 { + for i := 0; i < 5; i++ { _, err = file.WriteString(strconv.Itoa(pid)) if err == nil { return nil @@ -414,30 +413,16 @@ func WriteCgroupProc(dir string, pid int) error { return err } -// ConvertCPUSharesToCgroupV2Value converts CPU shares, used by cgroup v1, -// to CPU weight, used by cgroup v2. -// -// Cgroup v1 CPU shares has a range of [2^1...2^18], i.e. [2...262144], -// and the default value is 1024. -// -// Cgroup v2 CPU weight has a range of [10^0...10^4], i.e. [1...10000], -// and the default value is 100. 
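Note: the hunk below reverts ConvertCPUSharesToCgroupV2Value from the quadratic curve back to the original linear mapping. Both pin shares 2 to weight 1 and shares 262144 to weight 10000, but the cgroup v1 default of 1024 shares maps to weight 39 under the linear formula instead of to the cgroup v2 default weight of 100 under the quadratic one. A sketch comparing the two (the quadratic form is copied from the code removed here):

package main

import (
	"fmt"
	"math"
)

// linearShares is the mapping restored by this downgrade.
func linearShares(cpuShares uint64) uint64 {
	if cpuShares == 0 {
		return 0 // 0 means "unset"
	}
	return 1 + ((cpuShares-2)*9999)/262142
}

// quadraticShares reproduces the newer curve being removed, which fits
// the minimum, maximum, and default values of both cgroup versions.
func quadraticShares(cpuShares uint64) uint64 {
	switch {
	case cpuShares == 0:
		return 0
	case cpuShares <= 2:
		return 1
	case cpuShares >= 262144:
		return 10000
	}
	l := math.Log2(float64(cpuShares))
	exponent := (l*l+125*l)/612.0 - 7.0/34.0
	return uint64(math.Ceil(math.Pow(10, exponent)))
}

func main() {
	for _, shares := range []uint64{2, 1024, 262144} {
		fmt.Println(shares, linearShares(shares), quadraticShares(shares))
		// 2      1     1
		// 1024   39    100
		// 262144 10000 10000
	}
}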
+// Since the OCI spec is designed for cgroup v1, in some cases +// there is need to convert from the cgroup v1 configuration to cgroup v2 +// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142) +// convert from [2-262144] to [1-10000] +// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)" func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 { - // The value of 0 means "unset". if cpuShares == 0 { return 0 } - if cpuShares <= 2 { - return 1 - } - if cpuShares >= 262144 { - return 10000 - } - l := math.Log2(float64(cpuShares)) - // Quadratic function which fits min, max, and default. - exponent := (l*l+125*l)/612.0 - 7.0/34.0 - - return uint64(math.Ceil(math.Pow(10, exponent))) + return (1 + ((cpuShares-2)*9999)/262142) } // ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec diff --git a/vendor/github.com/opencontainers/cgroups/v1_utils.go b/vendor/github.com/opencontainers/cgroups/v1_utils.go index 19b8af1344..81193e2098 100644 --- a/vendor/github.com/opencontainers/cgroups/v1_utils.go +++ b/vendor/github.com/opencontainers/cgroups/v1_utils.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "slices" "strings" "sync" "syscall" @@ -145,8 +144,10 @@ func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, func findCgroupMountpointAndRootFromMI(mounts []*mountinfo.Info, cgroupPath, subsystem string) (string, string, error) { for _, mi := range mounts { if strings.HasPrefix(mi.Mountpoint, cgroupPath) { - if slices.Contains(strings.Split(mi.VFSOptions, ","), subsystem) { - return mi.Mountpoint, mi.Root, nil + for _, opt := range strings.Split(mi.VFSOptions, ",") { + if opt == subsystem { + return mi.Mountpoint, mi.Root, nil + } } } } diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/vendor/github.com/opencontainers/runc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index c29775c0d9..0000000000 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go deleted file mode 100644 index 53e194c74d..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ /dev/null @@ -1,80 +0,0 @@ -package cgroups - -import ( - "errors" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -var ( - // ErrDevicesUnsupported is an error returned when a cgroup manager - // is not configured to set device rules. - ErrDevicesUnsupported = errors.New("cgroup manager is not configured to set device rules") - - // ErrRootless is returned by [Manager.Apply] when there is an error - // creating cgroup directory, and cgroup.Rootless is set. 
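Note: from here the diff deletes runc's vendored copy of the libcontainer/cgroups package outright. The same helpers (OpenFile/ReadFile/WriteFile, the stats types, GetAllPids, the v1 mount lookups, and so on) remain available from the split-out github.com/opencontainers/cgroups module whose files appear earlier in this diff, so any in-repo caller only needs the import path swapped. A minimal sketch (assumed cgroup path; ReadFile is expected to keep the same signature in the split-out module):

package main

import (
	"fmt"

	// Replaces github.com/opencontainers/runc/libcontainer/cgroups.
	"github.com/opencontainers/cgroups"
)

func main() {
	fmt.Println("cgroup v2 unified:", cgroups.IsCgroup2UnifiedMode())

	// Same ReadFile(dir, file) helper as the deleted runc copy below.
	procs, err := cgroups.ReadFile("/sys/fs/cgroup/mygroup", "cgroup.procs")
	fmt.Println(procs, err)
}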
In general, - // this error is to be ignored. - ErrRootless = errors.New("cgroup manager can not access cgroup (rootless container)") - - // DevicesSetV1 and DevicesSetV2 are functions to set devices for - // cgroup v1 and v2, respectively. Unless - // [github.com/opencontainers/runc/libcontainer/cgroups/devices] - // package is imported, it is set to nil, so cgroup managers can't - // manage devices. - DevicesSetV1 func(path string, r *configs.Resources) error - DevicesSetV2 func(path string, r *configs.Resources) error -) - -type Manager interface { - // Apply creates a cgroup, if not yet created, and adds a process - // with the specified pid into that cgroup. A special value of -1 - // can be used to merely create a cgroup. - Apply(pid int) error - - // GetPids returns the PIDs of all processes inside the cgroup. - GetPids() ([]int, error) - - // GetAllPids returns the PIDs of all processes inside the cgroup - // any all its sub-cgroups. - GetAllPids() ([]int, error) - - // GetStats returns cgroups statistics. - GetStats() (*Stats, error) - - // Freeze sets the freezer cgroup to the specified state. - Freeze(state configs.FreezerState) error - - // Destroy removes cgroup. - Destroy() error - - // Path returns a cgroup path to the specified controller/subsystem. - // For cgroupv2, the argument is unused and can be empty. - Path(string) string - - // Set sets cgroup resources parameters/limits. If the argument is nil, - // the resources specified during Manager creation (or the previous call - // to Set) are used. - Set(r *configs.Resources) error - - // GetPaths returns cgroup path(s) to save in a state file in order to - // restore later. - // - // For cgroup v1, a key is cgroup subsystem name, and the value is the - // path to the cgroup for this subsystem. - // - // For cgroup v2 unified hierarchy, a key is "", and the value is the - // unified path. - GetPaths() map[string]string - - // GetCgroups returns the cgroup data as configured. - GetCgroups() (*configs.Cgroup, error) - - // GetFreezerState retrieves the current FreezerState of the cgroup. - GetFreezerState() (configs.FreezerState, error) - - // Exists returns whether the cgroup path exists or not. - Exists() bool - - // OOMKillCount reports OOM kill count for the cgroup. - OOMKillCount() (uint64, error) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go deleted file mode 100644 index 78c5bcf0d3..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go +++ /dev/null @@ -1,216 +0,0 @@ -package cgroups - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "strconv" - "strings" - "sync" - - "github.com/opencontainers/runc/libcontainer/utils" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// OpenFile opens a cgroup file in a given dir with given flags. -// It is supposed to be used for cgroup files only, and returns -// an error if the file is not a cgroup file. -// -// Arguments dir and file are joined together to form an absolute path -// to a file being opened. -func OpenFile(dir, file string, flags int) (*os.File, error) { - if dir == "" { - return nil, fmt.Errorf("no directory specified for %s", file) - } - return openFile(dir, file, flags) -} - -// ReadFile reads data from a cgroup file in dir. -// It is supposed to be used for cgroup files only. 
-func ReadFile(dir, file string) (string, error) { - fd, err := OpenFile(dir, file, unix.O_RDONLY) - if err != nil { - return "", err - } - defer fd.Close() - var buf bytes.Buffer - - _, err = buf.ReadFrom(fd) - return buf.String(), err -} - -// WriteFile writes data to a cgroup file in dir. -// It is supposed to be used for cgroup files only. -func WriteFile(dir, file, data string) error { - fd, err := OpenFile(dir, file, unix.O_WRONLY) - if err != nil { - return err - } - defer fd.Close() - if _, err := fd.WriteString(data); err != nil { - // Having data in the error message helps in debugging. - return fmt.Errorf("failed to write %q: %w", data, err) - } - return nil -} - -// WriteFileByLine is the same as WriteFile, except if data contains newlines, -// it is written line by line. -func WriteFileByLine(dir, file, data string) error { - i := strings.Index(data, "\n") - if i == -1 { - return WriteFile(dir, file, data) - } - - fd, err := OpenFile(dir, file, unix.O_WRONLY) - if err != nil { - return err - } - defer fd.Close() - start := 0 - for { - var line string - if i == -1 { - line = data[start:] - } else { - line = data[start : start+i+1] - } - _, err := fd.WriteString(line) - if err != nil { - return fmt.Errorf("failed to write %q: %w", line, err) - } - if i == -1 { - break - } - start += i + 1 - i = strings.Index(data[start:], "\n") - } - return nil -} - -const ( - cgroupfsDir = "/sys/fs/cgroup" - cgroupfsPrefix = cgroupfsDir + "/" -) - -var ( - // TestMode is set to true by unit tests that need "fake" cgroupfs. - TestMode bool - - cgroupRootHandle *os.File - prepOnce sync.Once - prepErr error - resolveFlags uint64 -) - -func prepareOpenat2() error { - prepOnce.Do(func() { - fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{ - Flags: unix.O_DIRECTORY | unix.O_PATH | unix.O_CLOEXEC, - }) - if err != nil { - prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} - if err != unix.ENOSYS { - logrus.Warnf("falling back to securejoin: %s", prepErr) - } else { - logrus.Debug("openat2 not available, falling back to securejoin") - } - return - } - file := os.NewFile(uintptr(fd), cgroupfsDir) - - var st unix.Statfs_t - if err := unix.Fstatfs(int(file.Fd()), &st); err != nil { - prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err} - logrus.Warnf("falling back to securejoin: %s", prepErr) - return - } - - cgroupRootHandle = file - resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS - if st.Type == unix.CGROUP2_SUPER_MAGIC { - // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks - resolveFlags |= unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_SYMLINKS - } - }) - - return prepErr -} - -func openFile(dir, file string, flags int) (*os.File, error) { - mode := os.FileMode(0) - if TestMode && flags&os.O_WRONLY != 0 { - // "emulate" cgroup fs for unit tests - flags |= os.O_TRUNC | os.O_CREATE - mode = 0o600 - } - path := path.Join(dir, utils.CleanPath(file)) - if prepareOpenat2() != nil { - return openFallback(path, flags, mode) - } - relPath := strings.TrimPrefix(path, cgroupfsPrefix) - if len(relPath) == len(path) { // non-standard path, old system? 
- return openFallback(path, flags, mode) - } - - fd, err := unix.Openat2(int(cgroupRootHandle.Fd()), relPath, - &unix.OpenHow{ - Resolve: resolveFlags, - Flags: uint64(flags) | unix.O_CLOEXEC, - Mode: uint64(mode), - }) - if err != nil { - err = &os.PathError{Op: "openat2", Path: path, Err: err} - // Check if cgroupRootHandle is still opened to cgroupfsDir - // (happens when this package is incorrectly used - // across the chroot/pivot_root/mntns boundary, or - // when /sys/fs/cgroup is remounted). - // - // TODO: if such usage will ever be common, amend this - // to reopen cgroupRootHandle and retry openat2. - fdPath, closer := utils.ProcThreadSelf("fd/" + strconv.Itoa(int(cgroupRootHandle.Fd()))) - defer closer() - fdDest, _ := os.Readlink(fdPath) - if fdDest != cgroupfsDir { - // Wrap the error so it is clear that cgroupRootHandle - // is opened to an unexpected/wrong directory. - err = fmt.Errorf("cgroupRootHandle %d unexpectedly opened to %s != %s: %w", - cgroupRootHandle.Fd(), fdDest, cgroupfsDir, err) - } - return nil, err - } - - return os.NewFile(uintptr(fd), path), nil -} - -var errNotCgroupfs = errors.New("not a cgroup file") - -// Can be changed by unit tests. -var openFallback = openAndCheck - -// openAndCheck is used when openat2(2) is not available. It checks the opened -// file is on cgroupfs, returning an error otherwise. -func openAndCheck(path string, flags int, mode os.FileMode) (*os.File, error) { - fd, err := os.OpenFile(path, flags, mode) - if err != nil { - return nil, err - } - if TestMode { - return fd, nil - } - // Check this is a cgroupfs file. - var st unix.Statfs_t - if err := unix.Fstatfs(int(fd.Fd()), &st); err != nil { - _ = fd.Close() - return nil, &os.PathError{Op: "statfs", Path: path, Err: err} - } - if st.Type != unix.CGROUP_SUPER_MAGIC && st.Type != unix.CGROUP2_SUPER_MAGIC { - _ = fd.Close() - return nil, &os.PathError{Op: "open", Path: path, Err: errNotCgroupfs} - } - - return fd, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go deleted file mode 100644 index 1355a51010..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/getallpids.go +++ /dev/null @@ -1,27 +0,0 @@ -package cgroups - -import ( - "io/fs" - "path/filepath" -) - -// GetAllPids returns all pids from the cgroup identified by path, and all its -// sub-cgroups. -func GetAllPids(path string) ([]int, error) { - var pids []int - err := filepath.WalkDir(path, func(p string, d fs.DirEntry, iErr error) error { - if iErr != nil { - return iErr - } - if !d.IsDir() { - return nil - } - cPids, err := readProcsFile(p) - if err != nil { - return err - } - pids = append(pids, cPids...) - return nil - }) - return pids, err -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go deleted file mode 100644 index b475567d82..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ /dev/null @@ -1,200 +0,0 @@ -package cgroups - -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods,omitempty"` - // Number of periods when the container hit its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - // Aggregate time the container was throttled for in nanoseconds. 
- ThrottledTime uint64 `json:"throttled_time,omitempty"` -} - -// CpuUsage denotes the usage of a CPU. -// All CPU stats are aggregate since container inception. -type CpuUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage,omitempty"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - // CPU time consumed per core in kernel mode - // Units: nanoseconds. - PercpuUsageInKernelmode []uint64 `json:"percpu_usage_in_kernelmode"` - // CPU time consumed per core in user mode - // Units: nanoseconds. - PercpuUsageInUsermode []uint64 `json:"percpu_usage_in_usermode"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -type PSIData struct { - Avg10 float64 `json:"avg10"` - Avg60 float64 `json:"avg60"` - Avg300 float64 `json:"avg300"` - Total uint64 `json:"total"` -} - -type PSIStats struct { - Some PSIData `json:"some,omitempty"` - Full PSIData `json:"full,omitempty"` -} - -type CpuStats struct { - CpuUsage CpuUsage `json:"cpu_usage,omitempty"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` - PSI *PSIStats `json:"psi,omitempty"` -} - -type CPUSetStats struct { - // List of the physical numbers of the CPUs on which processes - // in that cpuset are allowed to execute - CPUs []uint16 `json:"cpus,omitempty"` - // cpu_exclusive flag - CPUExclusive uint64 `json:"cpu_exclusive"` - // List of memory nodes on which processes in that cpuset - // are allowed to allocate memory - Mems []uint16 `json:"mems,omitempty"` - // mem_hardwall flag - MemHardwall uint64 `json:"mem_hardwall"` - // mem_exclusive flag - MemExclusive uint64 `json:"mem_exclusive"` - // memory_migrate flag - MemoryMigrate uint64 `json:"memory_migrate"` - // memory_spread page flag - MemorySpreadPage uint64 `json:"memory_spread_page"` - // memory_spread slab flag - MemorySpreadSlab uint64 `json:"memory_spread_slab"` - // memory_pressure - MemoryPressure uint64 `json:"memory_pressure"` - // sched_load balance flag - SchedLoadBalance uint64 `json:"sched_load_balance"` - // sched_relax_domain_level - SchedRelaxDomainLevel int64 `json:"sched_relax_domain_level"` -} - -type MemoryData struct { - Usage uint64 `json:"usage,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty"` - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -type MemoryStats struct { - // memory used for cache - Cache uint64 `json:"cache,omitempty"` - // usage of memory - Usage MemoryData `json:"usage,omitempty"` - // usage of memory + swap - SwapUsage MemoryData `json:"swap_usage,omitempty"` - // usage of swap only - SwapOnlyUsage MemoryData `json:"swap_only_usage,omitempty"` - // usage of kernel memory - KernelUsage MemoryData `json:"kernel_usage,omitempty"` - // usage of kernel TCP memory - KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"` - // usage of memory pages by NUMA node - // see chapter 5.6 of memory controller documentation - PageUsageByNUMA PageUsageByNUMA `json:"page_usage_by_numa,omitempty"` - // if true, memory usage is accounted for throughout a hierarchy of cgroups. 
- UseHierarchy bool `json:"use_hierarchy"` - - Stats map[string]uint64 `json:"stats,omitempty"` - PSI *PSIStats `json:"psi,omitempty"` -} - -type PageUsageByNUMA struct { - // Embedding is used as types can't be recursive. - PageUsageByNUMAInner - Hierarchical PageUsageByNUMAInner `json:"hierarchical,omitempty"` -} - -type PageUsageByNUMAInner struct { - Total PageStats `json:"total,omitempty"` - File PageStats `json:"file,omitempty"` - Anon PageStats `json:"anon,omitempty"` - Unevictable PageStats `json:"unevictable,omitempty"` -} - -type PageStats struct { - Total uint64 `json:"total,omitempty"` - Nodes map[uint8]uint64 `json:"nodes,omitempty"` -} - -type PidsStats struct { - // number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // active pids hard limit - Limit uint64 `json:"limit,omitempty"` -} - -type BlkioStatEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` -} - -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` - PSI *PSIStats `json:"psi,omitempty"` -} - -type HugetlbStats struct { - // current res_counter usage for hugetlb - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // number of times hugetlb usage allocation failure. 
- Failcnt uint64 `json:"failcnt"` -} - -type RdmaEntry struct { - Device string `json:"device,omitempty"` - HcaHandles uint32 `json:"hca_handles,omitempty"` - HcaObjects uint32 `json:"hca_objects,omitempty"` -} - -type RdmaStats struct { - RdmaLimit []RdmaEntry `json:"rdma_limit,omitempty"` - RdmaCurrent []RdmaEntry `json:"rdma_current,omitempty"` -} - -type MiscStats struct { - // current resource usage for a key in misc - Usage uint64 `json:"usage,omitempty"` - // number of times the resource usage was about to go over the max boundary - Events uint64 `json:"events,omitempty"` -} - -type Stats struct { - CpuStats CpuStats `json:"cpu_stats,omitempty"` - CPUSetStats CPUSetStats `json:"cpuset_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - // the map is in the format "size of hugepage: stats of the hugepage" - HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` - RdmaStats RdmaStats `json:"rdma_stats,omitempty"` - // the map is in the format "misc resource name: stats of the key" - MiscStats map[string]MiscStats `json:"misc_stats,omitempty"` -} - -func NewStats() *Stats { - memoryStats := MemoryStats{Stats: make(map[string]uint64)} - hugetlbStats := make(map[string]HugetlbStats) - miscStats := make(map[string]MiscStats) - return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats, MiscStats: miscStats} -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go deleted file mode 100644 index d404647c8c..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ /dev/null @@ -1,468 +0,0 @@ -package cgroups - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/moby/sys/userns" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - CgroupProcesses = "cgroup.procs" - unifiedMountpoint = "/sys/fs/cgroup" - hybridMountpoint = "/sys/fs/cgroup/unified" -) - -var ( - isUnifiedOnce sync.Once - isUnified bool - isHybridOnce sync.Once - isHybrid bool -) - -// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. -func IsCgroup2UnifiedMode() bool { - isUnifiedOnce.Do(func() { - var st unix.Statfs_t - err := unix.Statfs(unifiedMountpoint, &st) - if err != nil { - level := logrus.WarnLevel - if os.IsNotExist(err) && userns.RunningInUserNS() { - // For rootless containers, sweep it under the rug. - level = logrus.DebugLevel - } - logrus.StandardLogger().Logf(level, - "statfs %s: %v; assuming cgroup v1", unifiedMountpoint, err) - } - isUnified = st.Type == unix.CGROUP2_SUPER_MAGIC - }) - return isUnified -} - -// IsCgroup2HybridMode returns whether we are running in cgroup v2 hybrid mode. -func IsCgroup2HybridMode() bool { - isHybridOnce.Do(func() { - var st unix.Statfs_t - err := unix.Statfs(hybridMountpoint, &st) - if err != nil { - isHybrid = false - if !os.IsNotExist(err) { - // Report unexpected errors. - logrus.WithError(err).Debugf("statfs(%q) failed", hybridMountpoint) - } - return - } - isHybrid = st.Type == unix.CGROUP2_SUPER_MAGIC - }) - return isHybrid -} - -type Mount struct { - Mountpoint string - Root string - Subsystems []string -} - -// GetCgroupMounts returns the mounts for the cgroup subsystems. -// all indicates whether to return just the first instance or all the mounts. 
-// This function should not be used from cgroupv2 code, as in this case -// all the controllers are available under the constant unifiedMountpoint. -func GetCgroupMounts(all bool) ([]Mount, error) { - if IsCgroup2UnifiedMode() { - // TODO: remove cgroupv2 case once all external users are converted - availableControllers, err := GetAllSubsystems() - if err != nil { - return nil, err - } - m := Mount{ - Mountpoint: unifiedMountpoint, - Root: unifiedMountpoint, - Subsystems: availableControllers, - } - return []Mount{m}, nil - } - - return getCgroupMountsV1(all) -} - -// GetAllSubsystems returns all the cgroup subsystems supported by the kernel -func GetAllSubsystems() ([]string, error) { - // /proc/cgroups is meaningless for v2 - // https://github.com/torvalds/linux/blob/v5.3/Documentation/admin-guide/cgroup-v2.rst#deprecated-v1-core-features - if IsCgroup2UnifiedMode() { - // "pseudo" controllers do not appear in /sys/fs/cgroup/cgroup.controllers. - // - devices: implemented in kernel 4.15 - // - freezer: implemented in kernel 5.2 - // We assume these are always available, as it is hard to detect availability. - pseudo := []string{"devices", "freezer"} - data, err := ReadFile("/sys/fs/cgroup", "cgroup.controllers") - if err != nil { - return nil, err - } - subsystems := append(pseudo, strings.Fields(data)...) - return subsystems, nil - } - f, err := os.Open("/proc/cgroups") - if err != nil { - return nil, err - } - defer f.Close() - - subsystems := []string{} - - s := bufio.NewScanner(f) - for s.Scan() { - text := s.Text() - if text[0] != '#' { - parts := strings.Fields(text) - if len(parts) >= 4 && parts[3] != "0" { - subsystems = append(subsystems, parts[0]) - } - } - } - if err := s.Err(); err != nil { - return nil, err - } - return subsystems, nil -} - -func readProcsFile(dir string) (out []int, _ error) { - file := CgroupProcesses - retry := true - -again: - f, err := OpenFile(dir, file, os.O_RDONLY) - if err != nil { - return nil, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, pid) - } - } - if errors.Is(s.Err(), unix.ENOTSUP) && retry { - // For a threaded cgroup, read returns ENOTSUP, and we should - // read from cgroup.threads instead. - file = "cgroup.threads" - retry = false - goto again - } - return out, s.Err() -} - -// ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup -// or /proc//cgroup, into a map of subsystems to cgroup paths, e.g. -// -// "cpu": "/user.slice/user-1000.slice" -// "pids": "/user.slice/user-1000.slice" -// -// etc. -// -// Note that for cgroup v2 unified hierarchy, there are no per-controller -// cgroup paths, so the resulting map will have a single element where the key -// is empty string ("") and the value is the cgroup path the is in. -func ParseCgroupFile(path string) (map[string]string, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - return parseCgroupFromReader(f) -} - -// helper function for ParseCgroupFile to make testing easier -func parseCgroupFromReader(r io.Reader) (map[string]string, error) { - s := bufio.NewScanner(r) - cgroups := make(map[string]string) - - for s.Scan() { - text := s.Text() - // from cgroups(7): - // /proc/[pid]/cgroup - // ... - // For each cgroup hierarchy ... 
there is one entry - // containing three colon-separated fields of the form: - // hierarchy-ID:subsystem-list:cgroup-path - parts := strings.SplitN(text, ":", 3) - if len(parts) < 3 { - return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text) - } - - for _, subs := range strings.Split(parts[1], ",") { - cgroups[subs] = parts[2] - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return cgroups, nil -} - -func PathExists(path string) bool { - if _, err := os.Stat(path); err != nil { - return false - } - return true -} - -// rmdir tries to remove a directory, optionally retrying on EBUSY. -func rmdir(path string, retry bool) error { - delay := time.Millisecond - tries := 10 - -again: - err := unix.Rmdir(path) - switch err { // nolint:errorlint // unix errors are bare - case nil, unix.ENOENT: - return nil - case unix.EINTR: - goto again - case unix.EBUSY: - if retry && tries > 0 { - time.Sleep(delay) - delay *= 2 - tries-- - goto again - - } - } - return &os.PathError{Op: "rmdir", Path: path, Err: err} -} - -// RemovePath aims to remove cgroup path. It does so recursively, -// by removing any subdirectories (sub-cgroups) first. -func RemovePath(path string) error { - // Try the fast path first; don't retry on EBUSY yet. - if err := rmdir(path, false); err == nil { - return nil - } - - // There are many reasons why rmdir can fail, including: - // 1. cgroup have existing sub-cgroups; - // 2. cgroup (still) have some processes (that are about to vanish); - // 3. lack of permission (one example is read-only /sys/fs/cgroup mount, - // in which case rmdir returns EROFS even for for a non-existent path, - // see issue 4518). - // - // Using os.ReadDir here kills two birds with one stone: check if - // the directory exists (handling scenario 3 above), and use - // directory contents to remove sub-cgroups (handling scenario 1). - infos, err := os.ReadDir(path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - // Let's remove sub-cgroups, if any. - for _, info := range infos { - if info.IsDir() { - if err = RemovePath(filepath.Join(path, info.Name())); err != nil { - return err - } - } - } - // Finally, try rmdir again, this time with retries on EBUSY, - // which may help with scenario 2 above. - return rmdir(path, true) -} - -// RemovePaths iterates over the provided paths removing them. -func RemovePaths(paths map[string]string) (err error) { - for s, p := range paths { - if err := RemovePath(p); err == nil { - delete(paths, s) - } - } - if len(paths) == 0 { - clear(paths) - return nil - } - return fmt.Errorf("Failed to remove paths: %v", paths) -} - -var ( - hugePageSizes []string - initHPSOnce sync.Once -) - -func HugePageSizes() []string { - initHPSOnce.Do(func() { - dir, err := os.OpenFile("/sys/kernel/mm/hugepages", unix.O_DIRECTORY|unix.O_RDONLY, 0) - if err != nil { - return - } - files, err := dir.Readdirnames(0) - dir.Close() - if err != nil { - return - } - - hugePageSizes, err = getHugePageSizeFromFilenames(files) - if err != nil { - logrus.Warn("HugePageSizes: ", err) - } - }) - - return hugePageSizes -} - -func getHugePageSizeFromFilenames(fileNames []string) ([]string, error) { - pageSizes := make([]string, 0, len(fileNames)) - var warn error - - for _, file := range fileNames { - // example: hugepages-1048576kB - val := strings.TrimPrefix(file, "hugepages-") - if len(val) == len(file) { - // Unexpected file name: no prefix found, ignore it. - continue - } - // The suffix is always "kB" (as of Linux 5.13). 
If we find - // something else, produce an error but keep going. - eLen := len(val) - 2 - val = strings.TrimSuffix(val, "kB") - if len(val) != eLen { - // Highly unlikely. - if warn == nil { - warn = errors.New(file + `: invalid suffix (expected "kB")`) - } - continue - } - size, err := strconv.Atoi(val) - if err != nil { - // Highly unlikely. - if warn == nil { - warn = fmt.Errorf("%s: %w", file, err) - } - continue - } - // Model after https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/hugetlb_cgroup.c?id=eff48ddeab782e35e58ccc8853f7386bbae9dec4#n574 - // but in our case the size is in KB already. - if size >= (1 << 20) { - val = strconv.Itoa(size>>20) + "GB" - } else if size >= (1 << 10) { - val = strconv.Itoa(size>>10) + "MB" - } else { - val += "KB" - } - pageSizes = append(pageSizes, val) - } - - return pageSizes, warn -} - -// GetPids returns all pids, that were added to cgroup at path. -func GetPids(dir string) ([]int, error) { - return readProcsFile(dir) -} - -// WriteCgroupProc writes the specified pid into the cgroup's cgroup.procs file -func WriteCgroupProc(dir string, pid int) error { - // Normally dir should not be empty, one case is that cgroup subsystem - // is not mounted, we will get empty dir, and we want it fail here. - if dir == "" { - return fmt.Errorf("no such directory for %s", CgroupProcesses) - } - - // Dont attach any pid to the cgroup if -1 is specified as a pid - if pid == -1 { - return nil - } - - file, err := OpenFile(dir, CgroupProcesses, os.O_WRONLY) - if err != nil { - return fmt.Errorf("failed to write %v: %w", pid, err) - } - defer file.Close() - - for i := 0; i < 5; i++ { - _, err = file.WriteString(strconv.Itoa(pid)) - if err == nil { - return nil - } - - // EINVAL might mean that the task being added to cgroup.procs is in state - // TASK_NEW. We should attempt to do so again. - if errors.Is(err, unix.EINVAL) { - time.Sleep(30 * time.Millisecond) - continue - } - - return fmt.Errorf("failed to write %v: %w", pid, err) - } - return err -} - -// Since the OCI spec is designed for cgroup v1, in some cases -// there is need to convert from the cgroup v1 configuration to cgroup v2 -// the formula for cpuShares is y = (1 + ((x - 2) * 9999) / 262142) -// convert from [2-262144] to [1-10000] -// 262144 comes from Linux kernel definition "#define MAX_SHARES (1UL << 18)" -func ConvertCPUSharesToCgroupV2Value(cpuShares uint64) uint64 { - if cpuShares == 0 { - return 0 - } - return (1 + ((cpuShares-2)*9999)/262142) -} - -// ConvertMemorySwapToCgroupV2Value converts MemorySwap value from OCI spec -// for use by cgroup v2 drivers. A conversion is needed since Resources.MemorySwap -// is defined as memory+swap combined, while in cgroup v2 swap is a separate value, -// so we need to subtract memory from it where it makes sense. -func ConvertMemorySwapToCgroupV2Value(memorySwap, memory int64) (int64, error) { - switch { - case memory == -1 && memorySwap == 0: - // For compatibility with cgroup1 controller, set swap to unlimited in - // case the memory is set to unlimited and the swap is not explicitly set, - // treating the request as "set both memory and swap to unlimited". - return -1, nil - case memorySwap == -1, memorySwap == 0: - // Treat -1 ("max") and 0 ("unset") swap as is. - return memorySwap, nil - case memory == -1: - // Unlimited memory, so treat swap as is. - return memorySwap, nil - case memory == 0: - // Unset or unknown memory, can't calculate swap. 
- return 0, errors.New("unable to set swap limit without memory limit") - case memory < 0: - // Does not make sense to subtract a negative value. - return 0, fmt.Errorf("invalid memory value: %d", memory) - case memorySwap < memory: - // Sanity check. - return 0, errors.New("memory+swap limit should be >= memory limit") - } - - return memorySwap - memory, nil -} - -// Since the OCI spec is designed for cgroup v1, in some cases -// there is need to convert from the cgroup v1 configuration to cgroup v2 -// the formula for BlkIOWeight to IOWeight is y = (1 + (x - 10) * 9999 / 990) -// convert linearly from [10-1000] to [1-10000] -func ConvertBlkIOToIOWeightValue(blkIoWeight uint16) uint64 { - if blkIoWeight == 0 { - return 0 - } - return 1 + (uint64(blkIoWeight)-10)*9999/990 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go deleted file mode 100644 index 81193e2098..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/v1_utils.go +++ /dev/null @@ -1,277 +0,0 @@ -package cgroups - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/moby/sys/mountinfo" - "golang.org/x/sys/unix" -) - -// Code in this source file are specific to cgroup v1, -// and must not be used from any cgroup v2 code. - -const ( - CgroupNamePrefix = "name=" - defaultPrefix = "/sys/fs/cgroup" -) - -var ( - errUnified = errors.New("not implemented for cgroup v2 unified hierarchy") - ErrV1NoUnified = errors.New("invalid configuration: cannot use unified on cgroup v1") - - readMountinfoOnce sync.Once - readMountinfoErr error - cgroupMountinfo []*mountinfo.Info -) - -type NotFoundError struct { - Subsystem string -} - -func (e *NotFoundError) Error() string { - return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) -} - -func NewNotFoundError(sub string) error { - return &NotFoundError{ - Subsystem: sub, - } -} - -func IsNotFound(err error) bool { - var nfErr *NotFoundError - return errors.As(err, &nfErr) -} - -func tryDefaultPath(cgroupPath, subsystem string) string { - if !strings.HasPrefix(defaultPrefix, cgroupPath) { - return "" - } - - // remove possible prefix - subsystem = strings.TrimPrefix(subsystem, CgroupNamePrefix) - - // Make sure we're still under defaultPrefix, and resolve - // a possible symlink (like cpu -> cpu,cpuacct). - path, err := securejoin.SecureJoin(defaultPrefix, subsystem) - if err != nil { - return "" - } - - // (1) path should be a directory. - st, err := os.Lstat(path) - if err != nil || !st.IsDir() { - return "" - } - - // (2) path should be a mount point. - pst, err := os.Lstat(filepath.Dir(path)) - if err != nil { - return "" - } - - if st.Sys().(*syscall.Stat_t).Dev == pst.Sys().(*syscall.Stat_t).Dev { - // parent dir has the same dev -- path is not a mount point - return "" - } - - // (3) path should have 'cgroup' fs type. - fst := unix.Statfs_t{} - err = unix.Statfs(path, &fst) - if err != nil || fst.Type != unix.CGROUP_SUPER_MAGIC { - return "" - } - - return path -} - -// readCgroupMountinfo returns a list of cgroup v1 mounts (i.e. the ones -// with fstype of "cgroup") for the current running process. -// -// The results are cached (to avoid re-reading mountinfo which is relatively -// expensive), so it is assumed that cgroup mounts are not being changed. 
-func readCgroupMountinfo() ([]*mountinfo.Info, error) { - readMountinfoOnce.Do(func() { - // mountinfo.GetMounts uses /proc/thread-self, so we can use it without - // issues. - cgroupMountinfo, readMountinfoErr = mountinfo.GetMounts( - mountinfo.FSTypeFilter("cgroup"), - ) - }) - return cgroupMountinfo, readMountinfoErr -} - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt -func FindCgroupMountpoint(cgroupPath, subsystem string) (string, error) { - if IsCgroup2UnifiedMode() { - return "", errUnified - } - - // If subsystem is empty, we look for the cgroupv2 hybrid path. - if len(subsystem) == 0 { - return hybridMountpoint, nil - } - - // Avoid parsing mountinfo by trying the default path first, if possible. - if path := tryDefaultPath(cgroupPath, subsystem); path != "" { - return path, nil - } - - mnt, _, err := FindCgroupMountpointAndRoot(cgroupPath, subsystem) - return mnt, err -} - -func FindCgroupMountpointAndRoot(cgroupPath, subsystem string) (string, string, error) { - if IsCgroup2UnifiedMode() { - return "", "", errUnified - } - - mi, err := readCgroupMountinfo() - if err != nil { - return "", "", err - } - - return findCgroupMountpointAndRootFromMI(mi, cgroupPath, subsystem) -} - -func findCgroupMountpointAndRootFromMI(mounts []*mountinfo.Info, cgroupPath, subsystem string) (string, string, error) { - for _, mi := range mounts { - if strings.HasPrefix(mi.Mountpoint, cgroupPath) { - for _, opt := range strings.Split(mi.VFSOptions, ",") { - if opt == subsystem { - return mi.Mountpoint, mi.Root, nil - } - } - } - } - - return "", "", NewNotFoundError(subsystem) -} - -func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { - if len(m.Subsystems) == 0 { - return "", errors.New("no subsystem for mount") - } - - return getControllerPath(m.Subsystems[0], cgroups) -} - -func getCgroupMountsHelper(ss map[string]bool, mounts []*mountinfo.Info, all bool) ([]Mount, error) { - res := make([]Mount, 0, len(ss)) - numFound := 0 - for _, mi := range mounts { - m := Mount{ - Mountpoint: mi.Mountpoint, - Root: mi.Root, - } - for _, opt := range strings.Split(mi.VFSOptions, ",") { - seen, known := ss[opt] - if !known || (!all && seen) { - continue - } - ss[opt] = true - opt = strings.TrimPrefix(opt, CgroupNamePrefix) - m.Subsystems = append(m.Subsystems, opt) - numFound++ - } - if len(m.Subsystems) > 0 || all { - res = append(res, m) - } - if !all && numFound >= len(ss) { - break - } - } - return res, nil -} - -func getCgroupMountsV1(all bool) ([]Mount, error) { - mi, err := readCgroupMountinfo() - if err != nil { - return nil, err - } - - // We don't need to use /proc/thread-self here because runc always runs - // with every thread in the same cgroup. This lets us avoid having to do - // runtime.LockOSThread. - allSubsystems, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return nil, err - } - - allMap := make(map[string]bool) - for s := range allSubsystems { - allMap[s] = false - } - - return getCgroupMountsHelper(allMap, mi, all) -} - -// GetOwnCgroup returns the relative path to the cgroup docker is running in. -func GetOwnCgroup(subsystem string) (string, error) { - if IsCgroup2UnifiedMode() { - return "", errUnified - } - - // We don't need to use /proc/thread-self here because runc always runs - // with every thread in the same cgroup. This lets us avoid having to do - // runtime.LockOSThread. 
- cgroups, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return "", err - } - - return getControllerPath(subsystem, cgroups) -} - -func GetOwnCgroupPath(subsystem string) (string, error) { - cgroup, err := GetOwnCgroup(subsystem) - if err != nil { - return "", err - } - - // If subsystem is empty, we look for the cgroupv2 hybrid path. - if len(subsystem) == 0 { - return hybridMountpoint, nil - } - - return getCgroupPathHelper(subsystem, cgroup) -} - -func getCgroupPathHelper(subsystem, cgroup string) (string, error) { - mnt, root, err := FindCgroupMountpointAndRoot("", subsystem) - if err != nil { - return "", err - } - - // This is needed for nested containers, because in /proc/self/cgroup we - // see paths from host, which don't exist in container. - relCgroup, err := filepath.Rel(root, cgroup) - if err != nil { - return "", err - } - - return filepath.Join(mnt, relCgroup), nil -} - -func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { - if IsCgroup2UnifiedMode() { - return "", errUnified - } - - if p, ok := cgroups[subsystem]; ok { - return p, nil - } - - if p, ok := cgroups[CgroupNamePrefix+subsystem]; ok { - return p, nil - } - - return "", NewNotFoundError(subsystem) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go deleted file mode 100644 index 865344f99c..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ /dev/null @@ -1,66 +0,0 @@ -package configs - -import "fmt" - -// BlockIODevice holds major:minor format supported in blkio cgroup. -type BlockIODevice struct { - // Major is the device's major number - Major int64 `json:"major"` - // Minor is the device's minor number - Minor int64 `json:"minor"` -} - -// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair -type WeightDevice struct { - BlockIODevice - // Weight is the bandwidth rate for the device, range is from 10 to 1000 - Weight uint16 `json:"weight"` - // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only - LeafWeight uint16 `json:"leafWeight"` -} - -// NewWeightDevice returns a configured WeightDevice pointer -func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice { - wd := &WeightDevice{} - wd.Major = major - wd.Minor = minor - wd.Weight = weight - wd.LeafWeight = leafWeight - return wd -} - -// WeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) WeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight) -} - -// LeafWeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) LeafWeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight) -} - -// ThrottleDevice struct holds a `major:minor rate_per_second` pair -type ThrottleDevice struct { - BlockIODevice - // Rate is the IO rate limit per cgroup per device - Rate uint64 `json:"rate"` -} - -// NewThrottleDevice returns a configured ThrottleDevice pointer -func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice { - td := &ThrottleDevice{} - td.Major = major - td.Minor = minor - td.Rate = rate - return td -} - -// String formats the struct to be writable to the cgroup specific file -func (td *ThrottleDevice) String() string { - return fmt.Sprintf("%d:%d %d", 
td.Major, td.Minor, td.Rate) -} - -// StringName formats the struct to be writable to the cgroup specific file -func (td *ThrottleDevice) StringName(name string) string { - return fmt.Sprintf("%d:%d %s=%d", td.Major, td.Minor, name, td.Rate) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go deleted file mode 100644 index 4a34cf76fc..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go +++ /dev/null @@ -1,169 +0,0 @@ -package configs - -import ( - systemdDbus "github.com/coreos/go-systemd/v22/dbus" - "github.com/opencontainers/runc/libcontainer/devices" -) - -type FreezerState string - -const ( - Undefined FreezerState = "" - Frozen FreezerState = "FROZEN" - Thawed FreezerState = "THAWED" -) - -// Cgroup holds properties of a cgroup on Linux. -type Cgroup struct { - // Name specifies the name of the cgroup - Name string `json:"name,omitempty"` - - // Parent specifies the name of parent of cgroup or slice - Parent string `json:"parent,omitempty"` - - // Path specifies the path to cgroups that are created and/or joined by the container. - // The path is assumed to be relative to the host system cgroup mountpoint. - Path string `json:"path"` - - // ScopePrefix describes prefix for the scope name - ScopePrefix string `json:"scope_prefix"` - - // Resources contains various cgroups settings to apply - *Resources - - // Systemd tells if systemd should be used to manage cgroups. - Systemd bool - - // SystemdProps are any additional properties for systemd, - // derived from org.systemd.property.xxx annotations. - // Ignored unless systemd is used for managing cgroups. - SystemdProps []systemdDbus.Property `json:"-"` - - // Rootless tells if rootless cgroups should be used. - Rootless bool - - // The host UID that should own the cgroup, or nil to accept - // the default ownership. This should only be set when the - // cgroupfs is to be mounted read/write. - // Not all cgroup manager implementations support changing - // the ownership. - OwnerUID *int `json:"owner_uid,omitempty"` -} - -type Resources struct { - // Devices is the set of access rules for devices in the container. - Devices []*devices.Rule `json:"devices"` - - // Memory limit (in bytes) - Memory int64 `json:"memory"` - - // Memory reservation or soft_limit (in bytes) - MemoryReservation int64 `json:"memory_reservation"` - - // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwap int64 `json:"memory_swap"` - - // CPU shares (relative weight vs. other containers) - CpuShares uint64 `json:"cpu_shares"` - - // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuQuota int64 `json:"cpu_quota"` - - // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a given period. - CpuBurst *uint64 `json:"cpu_burst"` //nolint:revive - - // CPU period to be used for hardcapping (in usecs). 0 to use system default. - CpuPeriod uint64 `json:"cpu_period"` - - // How many time CPU will use in realtime scheduling (in usecs). - CpuRtRuntime int64 `json:"cpu_rt_quota"` - - // CPU period to be used for realtime scheduling (in usecs). - CpuRtPeriod uint64 `json:"cpu_rt_period"` - - // CPU to use - CpusetCpus string `json:"cpuset_cpus"` - - // MEM to use - CpusetMems string `json:"cpuset_mems"` - - // cgroup SCHED_IDLE - CPUIdle *int64 `json:"cpu_idle,omitempty"` - - // Process limit; set <= `0' to disable limit. 
- PidsLimit int64 `json:"pids_limit"` - - // Specifies per cgroup weight, range is from 10 to 1000. - BlkioWeight uint16 `json:"blkio_weight"` - - // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only - BlkioLeafWeight uint16 `json:"blkio_leaf_weight"` - - // Weight per cgroup per device, can override BlkioWeight. - BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"` - - // IO read rate limit per cgroup per device, bytes per second. - BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"` - - // IO write rate limit per cgroup per device, bytes per second. - BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"` - - // IO read rate limit per cgroup per device, IO per second. - BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"` - - // IO write rate limit per cgroup per device, IO per second. - BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` - - // set the freeze value for the process - Freezer FreezerState `json:"freezer"` - - // Hugetlb limit (in bytes) - HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` - - // Whether to disable OOM Killer - OomKillDisable bool `json:"oom_kill_disable"` - - // Tuning swappiness behaviour per cgroup - MemorySwappiness *uint64 `json:"memory_swappiness"` - - // Set priority of network traffic for container - NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` - - // Set class identifier for container's network packets - NetClsClassid uint32 `json:"net_cls_classid_u"` - - // Rdma resource restriction configuration - Rdma map[string]LinuxRdma `json:"rdma"` - - // Used on cgroups v2: - - // CpuWeight sets a proportional bandwidth limit. - CpuWeight uint64 `json:"cpu_weight"` - - // Unified is cgroupv2-only key-value map. - Unified map[string]string `json:"unified"` - - // SkipDevices allows to skip configuring device permissions. - // Used by e.g. kubelet while creating a parent cgroup (kubepods) - // common for many containers, and by runc update. - // - // NOTE it is impossible to start a container which has this flag set. - SkipDevices bool `json:"-"` - - // SkipFreezeOnSet is a flag for cgroup manager to skip the cgroup - // freeze when setting resources. Only applicable to systemd legacy - // (i.e. cgroup v1) manager (which uses freeze by default to avoid - // spurious permission errors caused by systemd inability to update - // device rules in a non-disruptive manner). - // - // If not set, a few methods (such as looking into cgroup's - // devices.list and querying the systemd unit properties) are used - // during Set() to figure out whether the freeze is required. Those - // methods may be relatively slow, thus this flag. - SkipFreezeOnSet bool `json:"-"` - - // MemoryCheckBeforeUpdate is a flag for cgroup v2 managers to check - // if the new memory limits (Memory and MemorySwap) being set are lower - // than the current memory usage, and reject if so. 
- MemoryCheckBeforeUpdate bool `json:"memory_check_before_update"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go deleted file mode 100644 index 53f5ec5a0d..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux - -package configs - -// Cgroup holds properties of a cgroup on Linux -// TODO Windows: This can ultimately be entirely factored out on Windows as -// cgroups are a Unix-specific construct. -type Cgroup struct{} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go deleted file mode 100644 index 22fe0f9b4c..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ /dev/null @@ -1,508 +0,0 @@ -package configs - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" - - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runtime-spec/specs-go" -) - -type Rlimit struct { - Type int `json:"type"` - Hard uint64 `json:"hard"` - Soft uint64 `json:"soft"` -} - -// IDMap represents UID/GID Mappings for User Namespaces. -type IDMap struct { - ContainerID int64 `json:"container_id"` - HostID int64 `json:"host_id"` - Size int64 `json:"size"` -} - -// Seccomp represents syscall restrictions -// By default, only the native architecture of the kernel is allowed to be used -// for syscalls. Additional architectures can be added by specifying them in -// Architectures. -type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Flags []specs.LinuxSeccompFlag `json:"flags"` - Syscalls []*Syscall `json:"syscalls"` - DefaultErrnoRet *uint `json:"default_errno_ret"` - ListenerPath string `json:"listener_path,omitempty"` - ListenerMetadata string `json:"listener_metadata,omitempty"` -} - -// Action is taken upon rule match in Seccomp -type Action int - -const ( - Kill Action = iota + 1 - Errno - Trap - Allow - Trace - Log - Notify - KillThread - KillProcess -) - -// Operator is a comparison operator to be used when matching syscall arguments in Seccomp -type Operator int - -const ( - EqualTo Operator = iota + 1 - NotEqualTo - GreaterThan - GreaterThanOrEqualTo - LessThan - LessThanOrEqualTo - MaskEqualTo -) - -// Arg is a rule to match a specific syscall argument in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"value_two"` - Op Operator `json:"op"` -} - -// Syscall is a rule to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - ErrnoRet *uint `json:"errnoRet"` - Args []*Arg `json:"args"` -} - -// Config defines configuration options for executing a process inside a contained environment. -type Config struct { - // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs - // This is a common option when the container is running in ramdisk - NoPivotRoot bool `json:"no_pivot_root"` - - // ParentDeathSignal specifies the signal that is sent to the container's process in the case - // that the parent process dies. - ParentDeathSignal int `json:"parent_death_signal"` - - // Path to a directory containing the container's root filesystem. 
- Rootfs string `json:"rootfs"` - - // Umask is the umask to use inside of the container. - Umask *uint32 `json:"umask"` - - // Readonlyfs will remount the container's rootfs as readonly where only externally mounted - // bind mounts are writtable. - Readonlyfs bool `json:"readonlyfs"` - - // Specifies the mount propagation flags to be applied to /. - RootPropagation int `json:"rootPropagation"` - - // Mounts specify additional source and destination paths that will be mounted inside the container's - // rootfs and mount namespace if specified - Mounts []*Mount `json:"mounts"` - - // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! - Devices []*devices.Device `json:"devices"` - - MountLabel string `json:"mount_label"` - - // Hostname optionally sets the container's hostname if provided - Hostname string `json:"hostname"` - - // Domainname optionally sets the container's domainname if provided - Domainname string `json:"domainname"` - - // Namespaces specifies the container's namespaces that it should setup when cloning the init process - // If a namespace is not provided that namespace is shared from the container's parent process - Namespaces Namespaces `json:"namespaces"` - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capabilities not specified will be dropped from the processes capability mask - Capabilities *Capabilities `json:"capabilities"` - - // Networks specifies the container's network setup to be created - Networks []*Network `json:"networks"` - - // Routes can be specified to create entries in the route table as the container is started - Routes []*Route `json:"routes"` - - // Cgroups specifies specific cgroup settings for the various subsystems that the container is - // placed into to limit the resources the container has available - Cgroups *Cgroup `json:"cgroups"` - - // AppArmorProfile specifies the profile to apply to the process running in the container and is - // change at the time the process is execed - AppArmorProfile string `json:"apparmor_profile,omitempty"` - - // ProcessLabel specifies the label to apply to the process running in the container. It is - // commonly used by selinux - ProcessLabel string `json:"process_label,omitempty"` - - // Rlimits specifies the resource limits, such as max open files, to set in the container - // If Rlimits are not set, the container will inherit rlimits from the parent process - Rlimits []Rlimit `json:"rlimits,omitempty"` - - // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores - // for a process. Valid values are between the range [-1000, '1000'], where processes with - // higher scores are preferred for being killed. If it is unset then we don't touch the current - // value. - // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ - OomScoreAdj *int `json:"oom_score_adj,omitempty"` - - // UIDMappings is an array of User ID mappings for User Namespaces - UIDMappings []IDMap `json:"uid_mappings"` - - // GIDMappings is an array of Group ID mappings for User Namespaces - GIDMappings []IDMap `json:"gid_mappings"` - - // MaskPaths specifies paths within the container's rootfs to mask over with a bind - // mount pointing to /dev/null as to prevent reads of the file. 
- MaskPaths []string `json:"mask_paths"` - - // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only - // so that these files prevent any writes. - ReadonlyPaths []string `json:"readonly_paths"` - - // Sysctl is a map of properties and their values. It is the equivalent of using - // sysctl -w my.property.name value in Linux. - Sysctl map[string]string `json:"sysctl"` - - // Seccomp allows actions to be taken whenever a syscall is made within the container. - // A number of rules are given, each having an action to be taken if a syscall matches it. - // A default action to be taken if no rules match is also given. - Seccomp *Seccomp `json:"seccomp"` - - // NoNewPrivileges controls whether processes in the container can gain additional privileges. - NoNewPrivileges bool `json:"no_new_privileges,omitempty"` - - // Hooks are a collection of actions to perform at various container lifecycle events. - // CommandHooks are serialized to JSON, but other hooks are not. - Hooks Hooks - - // Version is the version of opencontainer specification that is supported. - Version string `json:"version"` - - // Labels are user defined metadata that is stored in the config and populated on the state - Labels []string `json:"labels"` - - // NoNewKeyring will not allocated a new session keyring for the container. It will use the - // callers keyring in this case. - NoNewKeyring bool `json:"no_new_keyring"` - - // IntelRdt specifies settings for Intel RDT group that the container is placed into - // to limit the resources (e.g., L3 cache, memory bandwidth) the container has available - IntelRdt *IntelRdt `json:"intel_rdt,omitempty"` - - // RootlessEUID is set when the runc was launched with non-zero EUID. - // Note that RootlessEUID is set to false when launched with EUID=0 in userns. - // When RootlessEUID is set, runc creates a new userns for the container. - // (config.json needs to contain userns settings) - RootlessEUID bool `json:"rootless_euid,omitempty"` - - // RootlessCgroups is set when unlikely to have the full access to cgroups. - // When RootlessCgroups is set, cgroups errors are ignored. - RootlessCgroups bool `json:"rootless_cgroups,omitempty"` - - // TimeOffsets specifies the offset for supporting time namespaces. - TimeOffsets map[string]specs.LinuxTimeOffset `json:"time_offsets,omitempty"` - - // Scheduler represents the scheduling attributes for a process. - Scheduler *Scheduler `json:"scheduler,omitempty"` - - // Personality contains configuration for the Linux personality syscall. - Personality *LinuxPersonality `json:"personality,omitempty"` - - // IOPriority is the container's I/O priority. - IOPriority *IOPriority `json:"io_priority,omitempty"` -} - -// Scheduler is based on the Linux sched_setattr(2) syscall. -type Scheduler = specs.Scheduler - -// ToSchedAttr is to convert *configs.Scheduler to *unix.SchedAttr. 
-func ToSchedAttr(scheduler *Scheduler) (*unix.SchedAttr, error) { - var policy uint32 - switch scheduler.Policy { - case specs.SchedOther: - policy = 0 - case specs.SchedFIFO: - policy = 1 - case specs.SchedRR: - policy = 2 - case specs.SchedBatch: - policy = 3 - case specs.SchedISO: - policy = 4 - case specs.SchedIdle: - policy = 5 - case specs.SchedDeadline: - policy = 6 - default: - return nil, fmt.Errorf("invalid scheduler policy: %s", scheduler.Policy) - } - - var flags uint64 - for _, flag := range scheduler.Flags { - switch flag { - case specs.SchedFlagResetOnFork: - flags |= 0x01 - case specs.SchedFlagReclaim: - flags |= 0x02 - case specs.SchedFlagDLOverrun: - flags |= 0x04 - case specs.SchedFlagKeepPolicy: - flags |= 0x08 - case specs.SchedFlagKeepParams: - flags |= 0x10 - case specs.SchedFlagUtilClampMin: - flags |= 0x20 - case specs.SchedFlagUtilClampMax: - flags |= 0x40 - default: - return nil, fmt.Errorf("invalid scheduler flag: %s", flag) - } - } - - return &unix.SchedAttr{ - Size: unix.SizeofSchedAttr, - Policy: policy, - Flags: flags, - Nice: scheduler.Nice, - Priority: uint32(scheduler.Priority), - Runtime: scheduler.Runtime, - Deadline: scheduler.Deadline, - Period: scheduler.Period, - }, nil -} - -var IOPrioClassMapping = map[specs.IOPriorityClass]int{ - specs.IOPRIO_CLASS_RT: 1, - specs.IOPRIO_CLASS_BE: 2, - specs.IOPRIO_CLASS_IDLE: 3, -} - -type IOPriority = specs.LinuxIOPriority - -type ( - HookName string - HookList []Hook - Hooks map[HookName]HookList -) - -const ( - // Prestart commands are executed after the container namespaces are created, - // but before the user supplied command is executed from init. - // Note: This hook is now deprecated - // Prestart commands are called in the Runtime namespace. - Prestart HookName = "prestart" - - // CreateRuntime commands MUST be called as part of the create operation after - // the runtime environment has been created but before the pivot_root has been executed. - // CreateRuntime is called immediately after the deprecated Prestart hook. - // CreateRuntime commands are called in the Runtime Namespace. - CreateRuntime HookName = "createRuntime" - - // CreateContainer commands MUST be called as part of the create operation after - // the runtime environment has been created but before the pivot_root has been executed. - // CreateContainer commands are called in the Container namespace. - CreateContainer HookName = "createContainer" - - // StartContainer commands MUST be called as part of the start operation and before - // the container process is started. - // StartContainer commands are called in the Container namespace. - StartContainer HookName = "startContainer" - - // Poststart commands are executed after the container init process starts. - // Poststart commands are called in the Runtime Namespace. - Poststart HookName = "poststart" - - // Poststop commands are executed after the container init process exits. - // Poststop commands are called in the Runtime Namespace. - Poststop HookName = "poststop" -) - -// KnownHookNames returns the known hook names. -// Used by `runc features`. -func KnownHookNames() []string { - return []string{ - string(Prestart), // deprecated - string(CreateRuntime), - string(CreateContainer), - string(StartContainer), - string(Poststart), - string(Poststop), - } -} - -type Capabilities struct { - // Bounding is the set of capabilities checked by the kernel. - Bounding []string - // Effective is the set of capabilities checked by the kernel. 
- Effective []string - // Inheritable is the capabilities preserved across execve. - Inheritable []string - // Permitted is the limiting superset for effective capabilities. - Permitted []string - // Ambient is the ambient set of capabilities that are kept. - Ambient []string -} - -// Deprecated: use (Hooks).Run instead. -func (hooks HookList) RunHooks(state *specs.State) error { - for i, h := range hooks { - if err := h.Run(state); err != nil { - return fmt.Errorf("error running hook #%d: %w", i, err) - } - } - - return nil -} - -func (hooks *Hooks) UnmarshalJSON(b []byte) error { - var state map[HookName][]CommandHook - - if err := json.Unmarshal(b, &state); err != nil { - return err - } - - *hooks = Hooks{} - for n, commandHooks := range state { - if len(commandHooks) == 0 { - continue - } - - (*hooks)[n] = HookList{} - for _, h := range commandHooks { - (*hooks)[n] = append((*hooks)[n], h) - } - } - - return nil -} - -func (hooks *Hooks) MarshalJSON() ([]byte, error) { - serialize := func(hooks []Hook) (serializableHooks []CommandHook) { - for _, hook := range hooks { - switch chook := hook.(type) { - case CommandHook: - serializableHooks = append(serializableHooks, chook) - default: - logrus.Warnf("cannot serialize hook of type %T, skipping", hook) - } - } - - return serializableHooks - } - - return json.Marshal(map[string]interface{}{ - "prestart": serialize((*hooks)[Prestart]), - "createRuntime": serialize((*hooks)[CreateRuntime]), - "createContainer": serialize((*hooks)[CreateContainer]), - "startContainer": serialize((*hooks)[StartContainer]), - "poststart": serialize((*hooks)[Poststart]), - "poststop": serialize((*hooks)[Poststop]), - }) -} - -// Run executes all hooks for the given hook name. -func (hooks Hooks) Run(name HookName, state *specs.State) error { - list := hooks[name] - for i, h := range list { - if err := h.Run(state); err != nil { - return fmt.Errorf("error running %s hook #%d: %w", name, i, err) - } - } - - return nil -} - -type Hook interface { - // Run executes the hook with the provided state. - Run(*specs.State) error -} - -// NewFunctionHook will call the provided function when the hook is run. -func NewFunctionHook(f func(*specs.State) error) FuncHook { - return FuncHook{ - run: f, - } -} - -type FuncHook struct { - run func(*specs.State) error -} - -func (f FuncHook) Run(s *specs.State) error { - return f.run(s) -} - -type Command struct { - Path string `json:"path"` - Args []string `json:"args"` - Env []string `json:"env"` - Dir string `json:"dir"` - Timeout *time.Duration `json:"timeout"` -} - -// NewCommandHook will execute the provided command when the hook is run. 
-func NewCommandHook(cmd Command) CommandHook { - return CommandHook{ - Command: cmd, - } -} - -type CommandHook struct { - Command -} - -func (c Command) Run(s *specs.State) error { - b, err := json.Marshal(s) - if err != nil { - return err - } - var stdout, stderr bytes.Buffer - cmd := exec.Cmd{ - Path: c.Path, - Args: c.Args, - Env: c.Env, - Stdin: bytes.NewReader(b), - Stdout: &stdout, - Stderr: &stderr, - } - if err := cmd.Start(); err != nil { - return err - } - errC := make(chan error, 1) - go func() { - err := cmd.Wait() - if err != nil { - err = fmt.Errorf("%w, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) - } - errC <- err - }() - var timerCh <-chan time.Time - if c.Timeout != nil { - timer := time.NewTimer(*c.Timeout) - defer timer.Stop() - timerCh = timer.C - } - select { - case err := <-errC: - return err - case <-timerCh: - _ = cmd.Process.Kill() - <-errC - return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) - } -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go deleted file mode 100644 index e401f5331b..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ /dev/null @@ -1,97 +0,0 @@ -package configs - -import ( - "errors" - "fmt" - "math" -) - -var ( - errNoUIDMap = errors.New("user namespaces enabled, but no uid mappings found") - errNoGIDMap = errors.New("user namespaces enabled, but no gid mappings found") -) - -// Please check https://man7.org/linux/man-pages/man2/personality.2.html for const details. -// https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/personality.h -const ( - PerLinux = 0x0000 - PerLinux32 = 0x0008 -) - -type LinuxPersonality struct { - // Domain for the personality - // can only contain values "LINUX" and "LINUX32" - Domain int `json:"domain"` -} - -// HostUID gets the translated uid for the process on host which could be -// different when user namespaces are enabled. -func (c Config) HostUID(containerId int) (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if len(c.UIDMappings) == 0 { - return -1, errNoUIDMap - } - id, found := c.hostIDFromMapping(int64(containerId), c.UIDMappings) - if !found { - return -1, fmt.Errorf("user namespaces enabled, but no mapping found for uid %d", containerId) - } - // If we are a 32-bit binary running on a 64-bit system, it's possible - // the mapped user is too large to store in an int, which means we - // cannot do the mapping. We can't just return an int64, because - // os.Setuid() takes an int. - if id > math.MaxInt { - return -1, fmt.Errorf("mapping for uid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) - } - return int(id), nil - } - // Return unchanged id. - return containerId, nil -} - -// HostRootUID gets the root uid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostRootUID() (int, error) { - return c.HostUID(0) -} - -// HostGID gets the translated gid for the process on host which could be -// different when user namespaces are enabled. 
-func (c Config) HostGID(containerId int) (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if len(c.GIDMappings) == 0 { - return -1, errNoGIDMap - } - id, found := c.hostIDFromMapping(int64(containerId), c.GIDMappings) - if !found { - return -1, fmt.Errorf("user namespaces enabled, but no mapping found for gid %d", containerId) - } - // If we are a 32-bit binary running on a 64-bit system, it's possible - // the mapped user is too large to store in an int, which means we - // cannot do the mapping. We can't just return an int64, because - // os.Setgid() takes an int. - if id > math.MaxInt { - return -1, fmt.Errorf("mapping for gid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) - } - return int(id), nil - } - // Return unchanged id. - return containerId, nil -} - -// HostRootGID gets the root gid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostRootGID() (int, error) { - return c.HostGID(0) -} - -// Utility function that gets a host ID for a container ID from user namespace map -// if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int64, uMap []IDMap) (int64, bool) { - for _, m := range uMap { - if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (containerID - m.ContainerID) - return hostID, true - } - } - return -1, false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go deleted file mode 100644 index 1fd87ce6a4..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/configs_fuzzer.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build gofuzz - -package configs - -func FuzzUnmarshalJSON(data []byte) int { - hooks := Hooks{} - _ = hooks.UnmarshalJSON(data) - return 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go deleted file mode 100644 index d30216380b..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go +++ /dev/null @@ -1,9 +0,0 @@ -package configs - -type HugepageLimit struct { - // which type of hugepage to limit. - Pagesize string `json:"page_size"` - - // usage limit for hugepage. - Limit uint64 `json:"limit"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go deleted file mode 100644 index f8d951ab8b..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go +++ /dev/null @@ -1,16 +0,0 @@ -package configs - -type IntelRdt struct { - // The identity for RDT Class of Service - ClosID string `json:"closID,omitempty"` - - // The schema for L3 cache id and capacity bitmask (CBM) - // Format: "L3:=;=;..." - L3CacheSchema string `json:"l3_cache_schema,omitempty"` - - // The schema of memory bandwidth per L3 cache id - // Format: "MB:=bandwidth0;=bandwidth1;..." - // The unit of memory bandwidth is specified in "percentages" by - // default, and in "MBps" if MBA Software Controller is enabled. 
- MemBwSchema string `json:"memBwSchema,omitempty"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go deleted file mode 100644 index 9a0395eaf5..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go +++ /dev/null @@ -1,14 +0,0 @@ -package configs - -import ( - "fmt" -) - -type IfPrioMap struct { - Interface string `json:"interface"` - Priority int64 `json:"priority"` -} - -func (i *IfPrioMap) CgroupString() string { - return fmt.Sprintf("%s %d", i.Interface, i.Priority) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go deleted file mode 100644 index bfd356e497..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ /dev/null @@ -1,7 +0,0 @@ -package configs - -const ( - // EXT_COPYUP is a directive to copy up the contents of a directory when - // a tmpfs is mounted over it. - EXT_COPYUP = 1 << iota //nolint:golint,revive // ignore "don't use ALL_CAPS" warning -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go deleted file mode 100644 index b69e9ab238..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -package configs - -import "golang.org/x/sys/unix" - -type MountIDMapping struct { - // Recursive indicates if the mapping needs to be recursive. - Recursive bool `json:"recursive"` - - // UserNSPath is a path to a user namespace that indicates the necessary - // id-mappings for MOUNT_ATTR_IDMAP. If set to non-"", UIDMappings and - // GIDMappings must be set to nil. - UserNSPath string `json:"userns_path,omitempty"` - - // UIDMappings is the uid mapping set for this mount, to be used with - // MOUNT_ATTR_IDMAP. - UIDMappings []IDMap `json:"uid_mappings,omitempty"` - - // GIDMappings is the gid mapping set for this mount, to be used with - // MOUNT_ATTR_IDMAP. - GIDMappings []IDMap `json:"gid_mappings,omitempty"` -} - -type Mount struct { - // Source path for the mount. - Source string `json:"source"` - - // Destination path for the mount inside the container. - Destination string `json:"destination"` - - // Device the mount is for. - Device string `json:"device"` - - // Mount flags. - Flags int `json:"flags"` - - // Mount flags that were explicitly cleared in the configuration (meaning - // the user explicitly requested that these flags *not* be set). - ClearedFlags int `json:"cleared_flags"` - - // Propagation Flags - PropagationFlags []int `json:"propagation_flags"` - - // Mount data applied to the mount. - Data string `json:"data"` - - // Relabel source if set, "z" indicates shared, "Z" indicates unshared. - Relabel string `json:"relabel"` - - // RecAttr represents mount properties to be applied recursively (AT_RECURSIVE), see mount_setattr(2). - RecAttr *unix.MountAttr `json:"rec_attr"` - - // Extensions are additional flags that are specific to runc. - Extensions int `json:"extensions"` - - // Mapping is the MOUNT_ATTR_IDMAP configuration for the mount. If non-nil, - // the mount is configured to use MOUNT_ATTR_IDMAP-style id mappings. 
- IDMapping *MountIDMapping `json:"id_mapping,omitempty"` -} - -func (m *Mount) IsBind() bool { - return m.Flags&unix.MS_BIND != 0 -} - -func (m *Mount) IsIDMapped() bool { - return m.IDMapping != nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go deleted file mode 100644 index 1d4d9fe52a..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !linux - -package configs - -type Mount struct{} - -func (m *Mount) IsBind() bool { - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go deleted file mode 100644 index a3329a31a9..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go +++ /dev/null @@ -1,5 +0,0 @@ -package configs - -type NamespaceType string - -type Namespaces []Namespace diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go deleted file mode 100644 index 898f96fd0f..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go +++ /dev/null @@ -1,133 +0,0 @@ -package configs - -import ( - "fmt" - "os" - "sync" -) - -const ( - NEWNET NamespaceType = "NEWNET" - NEWPID NamespaceType = "NEWPID" - NEWNS NamespaceType = "NEWNS" - NEWUTS NamespaceType = "NEWUTS" - NEWIPC NamespaceType = "NEWIPC" - NEWUSER NamespaceType = "NEWUSER" - NEWCGROUP NamespaceType = "NEWCGROUP" - NEWTIME NamespaceType = "NEWTIME" -) - -var ( - nsLock sync.Mutex - supportedNamespaces = make(map[NamespaceType]bool) -) - -// NsName converts the namespace type to its filename -func NsName(ns NamespaceType) string { - switch ns { - case NEWNET: - return "net" - case NEWNS: - return "mnt" - case NEWPID: - return "pid" - case NEWIPC: - return "ipc" - case NEWUSER: - return "user" - case NEWUTS: - return "uts" - case NEWCGROUP: - return "cgroup" - case NEWTIME: - return "time" - } - return "" -} - -// IsNamespaceSupported returns whether a namespace is available or -// not -func IsNamespaceSupported(ns NamespaceType) bool { - nsLock.Lock() - defer nsLock.Unlock() - supported, ok := supportedNamespaces[ns] - if ok { - return supported - } - nsFile := NsName(ns) - // if the namespace type is unknown, just return false - if nsFile == "" { - return false - } - // We don't need to use /proc/thread-self here because the list of - // namespace types is unrelated to the thread. This lets us avoid having to - // do runtime.LockOSThread. - _, err := os.Stat("/proc/self/ns/" + nsFile) - // a namespace is supported if it exists and we have permissions to read it - supported = err == nil - supportedNamespaces[ns] = supported - return supported -} - -func NamespaceTypes() []NamespaceType { - return []NamespaceType{ - NEWUSER, // Keep user NS always first, don't move it. - NEWIPC, - NEWUTS, - NEWNET, - NEWPID, - NEWNS, - NEWCGROUP, - NEWTIME, - } -} - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. 
-type Namespace struct { - Type NamespaceType `json:"type"` - Path string `json:"path"` -} - -func (n *Namespace) GetPath(pid int) string { - return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type)) -} - -func (n *Namespaces) Remove(t NamespaceType) bool { - i := n.index(t) - if i == -1 { - return false - } - *n = append((*n)[:i], (*n)[i+1:]...) - return true -} - -func (n *Namespaces) Add(t NamespaceType, path string) { - i := n.index(t) - if i == -1 { - *n = append(*n, Namespace{Type: t, Path: path}) - return - } - (*n)[i].Path = path -} - -func (n *Namespaces) index(t NamespaceType) int { - for i, ns := range *n { - if ns.Type == t { - return i - } - } - return -1 -} - -func (n *Namespaces) Contains(t NamespaceType) bool { - return n.index(t) != -1 -} - -func (n *Namespaces) PathOf(t NamespaceType) string { - i := n.index(t) - if i == -1 { - return "" - } - return (*n)[i].Path -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go deleted file mode 100644 index 26b70b26fa..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build linux - -package configs - -import "golang.org/x/sys/unix" - -func (n *Namespace) Syscall() int { - return namespaceInfo[n.Type] -} - -var namespaceInfo = map[NamespaceType]int{ - NEWNET: unix.CLONE_NEWNET, - NEWNS: unix.CLONE_NEWNS, - NEWUSER: unix.CLONE_NEWUSER, - NEWIPC: unix.CLONE_NEWIPC, - NEWUTS: unix.CLONE_NEWUTS, - NEWPID: unix.CLONE_NEWPID, - NEWCGROUP: unix.CLONE_NEWCGROUP, - NEWTIME: unix.CLONE_NEWTIME, -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. -func (n *Namespaces) CloneFlags() uintptr { - var flag int - for _, v := range *n { - if v.Path != "" { - continue - } - flag |= namespaceInfo[v.Type] - } - return uintptr(flag) -} - -// IsPrivate tells whether the namespace of type t is configured as private -// (i.e. it exists and is not shared). -func (n Namespaces) IsPrivate(t NamespaceType) bool { - for _, v := range n { - if v.Type == t { - return v.Path == "" - } - } - // Not found, so implicitly sharing a parent namespace. - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go deleted file mode 100644 index 10bf243650..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux && !windows - -package configs - -func (n *Namespace) Syscall() int { - panic("No namespace syscall support") -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. 
-func (n *Namespaces) CloneFlags() uintptr { - panic("No namespace syscall support") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go deleted file mode 100644 index 914684993c..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux - -package configs - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. -type Namespace struct{} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go deleted file mode 100644 index c44c3ea71b..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go +++ /dev/null @@ -1,75 +0,0 @@ -package configs - -// Network defines configuration for a container's networking stack -// -// The network configuration can be omitted from a container causing the -// container to be setup with the host's networking stack -type Network struct { - // Type sets the networks type, commonly veth and loopback - Type string `json:"type"` - - // Name of the network interface - Name string `json:"name"` - - // The bridge to use. - Bridge string `json:"bridge"` - - // MacAddress contains the MAC address to set on the network interface - MacAddress string `json:"mac_address"` - - // Address contains the IPv4 and mask to set on the network interface - Address string `json:"address"` - - // Gateway sets the gateway address that is used as the default for the interface - Gateway string `json:"gateway"` - - // IPv6Address contains the IPv6 and mask to set on the network interface - IPv6Address string `json:"ipv6_address"` - - // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface - IPv6Gateway string `json:"ipv6_gateway"` - - // Mtu sets the mtu value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - Mtu int `json:"mtu"` - - // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - TxQueueLen int `json:"txqueuelen"` - - // HostInterfaceName is a unique name of a veth pair that resides on in the host interface of the - // container. - HostInterfaceName string `json:"host_interface_name"` - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // bridge port in the case of type veth - // Note: This is unsupported on some systems. - // Note: This does not apply to loopback interfaces. - HairpinMode bool `json:"hairpin_mode"` -} - -// Route defines a routing table entry. -// -// Routes can be specified to create entries in the routing table as the container -// is started. -// -// All of destination, source, and gateway should be either IPv4 or IPv6. -// One of the three options must be present, and omitted entries will use their -// IP family default for the route table. For IPv4 for example, setting the -// gateway to 1.2.3.4 and the interface to eth0 will set up a standard -// destination of 0.0.0.0(or *) when viewed in the route table. 
-type Route struct { - // Destination specifies the destination IP address and mask in the CIDR form. - Destination string `json:"destination"` - - // Source specifies the source IP address and mask in the CIDR form. - Source string `json:"source"` - - // Gateway specifies the gateway IP address. - Gateway string `json:"gateway"` - - // InterfaceName specifies the device to set this route up for, for example eth0. - InterfaceName string `json:"interface_name"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go deleted file mode 100644 index c69f2c8024..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/rdma.go +++ /dev/null @@ -1,9 +0,0 @@ -package configs - -// LinuxRdma for Linux cgroup 'rdma' resource management (Linux 4.11) -type LinuxRdma struct { - // Maximum number of HCA handles that can be opened. Default is "no limit". - HcaHandles *uint32 `json:"hca_handles,omitempty"` - // Maximum number of HCA objects that can be created. Default is "no limit". - HcaObjects *uint32 `json:"hca_objects,omitempty"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/device.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/device.go deleted file mode 100644 index c2c2b3bb7c..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/devices/device.go +++ /dev/null @@ -1,174 +0,0 @@ -package devices - -import ( - "fmt" - "os" - "strconv" -) - -const ( - Wildcard = -1 -) - -type Device struct { - Rule - - // Path to the device. - Path string `json:"path"` - - // FileMode permission bits for the device. - FileMode os.FileMode `json:"file_mode"` - - // Uid of the device. - Uid uint32 `json:"uid"` - - // Gid of the device. - Gid uint32 `json:"gid"` -} - -// Permissions is a cgroupv1-style string to represent device access. It -// has to be a string for backward compatibility reasons, hence why it has -// methods to do set operations. -type Permissions string - -const ( - deviceRead uint = (1 << iota) - deviceWrite - deviceMknod -) - -func (p Permissions) toSet() uint { - var set uint - for _, perm := range p { - switch perm { - case 'r': - set |= deviceRead - case 'w': - set |= deviceWrite - case 'm': - set |= deviceMknod - } - } - return set -} - -func fromSet(set uint) Permissions { - var perm string - if set&deviceRead == deviceRead { - perm += "r" - } - if set&deviceWrite == deviceWrite { - perm += "w" - } - if set&deviceMknod == deviceMknod { - perm += "m" - } - return Permissions(perm) -} - -// Union returns the union of the two sets of Permissions. -func (p Permissions) Union(o Permissions) Permissions { - lhs := p.toSet() - rhs := o.toSet() - return fromSet(lhs | rhs) -} - -// Difference returns the set difference of the two sets of Permissions. -// In set notation, A.Difference(B) gives you A\B. -func (p Permissions) Difference(o Permissions) Permissions { - lhs := p.toSet() - rhs := o.toSet() - return fromSet(lhs &^ rhs) -} - -// Intersection computes the intersection of the two sets of Permissions. -func (p Permissions) Intersection(o Permissions) Permissions { - lhs := p.toSet() - rhs := o.toSet() - return fromSet(lhs & rhs) -} - -// IsEmpty returns whether the set of permissions in a Permissions is -// empty. -func (p Permissions) IsEmpty() bool { - return p == Permissions("") -} - -// IsValid returns whether the set of permissions is a subset of valid -// permissions (namely, {r,w,m}). 
-func (p Permissions) IsValid() bool { - return p == fromSet(p.toSet()) -} - -type Type rune - -const ( - WildcardDevice Type = 'a' - BlockDevice Type = 'b' - CharDevice Type = 'c' // or 'u' - FifoDevice Type = 'p' -) - -func (t Type) IsValid() bool { - switch t { - case WildcardDevice, BlockDevice, CharDevice, FifoDevice: - return true - default: - return false - } -} - -func (t Type) CanMknod() bool { - switch t { - case BlockDevice, CharDevice, FifoDevice: - return true - default: - return false - } -} - -func (t Type) CanCgroup() bool { - switch t { - case WildcardDevice, BlockDevice, CharDevice: - return true - default: - return false - } -} - -type Rule struct { - // Type of device ('c' for char, 'b' for block). If set to 'a', this rule - // acts as a wildcard and all fields other than Allow are ignored. - Type Type `json:"type"` - - // Major is the device's major number. - Major int64 `json:"major"` - - // Minor is the device's minor number. - Minor int64 `json:"minor"` - - // Permissions is the set of permissions that this rule applies to (in the - // cgroupv1 format -- any combination of "rwm"). - Permissions Permissions `json:"permissions"` - - // Allow specifies whether this rule is allowed. - Allow bool `json:"allow"` -} - -func (d *Rule) CgroupString() string { - var ( - major = strconv.FormatInt(d.Major, 10) - minor = strconv.FormatInt(d.Minor, 10) - ) - if d.Major == Wildcard { - major = "*" - } - if d.Minor == Wildcard { - minor = "*" - } - return fmt.Sprintf("%c %s:%s %s", d.Type, major, minor, d.Permissions) -} - -func (d *Rule) Mkdev() (uint64, error) { - return mkDev(d) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go deleted file mode 100644 index d00775f514..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/devices/device_unix.go +++ /dev/null @@ -1,119 +0,0 @@ -//go:build !windows - -package devices - -import ( - "errors" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// ErrNotADevice denotes that a file is not a valid linux device. -var ErrNotADevice = errors.New("not a device node") - -// Testing dependencies -var ( - unixLstat = unix.Lstat - osReadDir = os.ReadDir -) - -func mkDev(d *Rule) (uint64, error) { - if d.Major == Wildcard || d.Minor == Wildcard { - return 0, errors.New("cannot mkdev() device with wildcards") - } - return unix.Mkdev(uint32(d.Major), uint32(d.Minor)), nil -} - -// DeviceFromPath takes the path to a device and its cgroup_permissions (which -// cannot be easily queried) to look up the information about a linux device -// and returns that information as a Device struct. -func DeviceFromPath(path, permissions string) (*Device, error) { - var stat unix.Stat_t - err := unixLstat(path, &stat) - if err != nil { - return nil, err - } - - var ( - devType Type - mode = stat.Mode - devNumber = uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS. 
- major = unix.Major(devNumber) - minor = unix.Minor(devNumber) - ) - switch mode & unix.S_IFMT { - case unix.S_IFBLK: - devType = BlockDevice - case unix.S_IFCHR: - devType = CharDevice - case unix.S_IFIFO: - devType = FifoDevice - default: - return nil, ErrNotADevice - } - return &Device{ - Rule: Rule{ - Type: devType, - Major: int64(major), - Minor: int64(minor), - Permissions: Permissions(permissions), - }, - Path: path, - FileMode: os.FileMode(mode &^ unix.S_IFMT), - Uid: stat.Uid, - Gid: stat.Gid, - }, nil -} - -// HostDevices returns all devices that can be found under /dev directory. -func HostDevices() ([]*Device, error) { - return GetDevices("/dev") -} - -// GetDevices recursively traverses a directory specified by path -// and returns all devices found there. -func GetDevices(path string) ([]*Device, error) { - files, err := osReadDir(path) - if err != nil { - return nil, err - } - var out []*Device - for _, f := range files { - switch { - case f.IsDir(): - switch f.Name() { - // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 - // ".udev" added to address https://github.com/opencontainers/runc/issues/2093 - case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts", ".udev": - continue - default: - sub, err := GetDevices(filepath.Join(path, f.Name())) - if err != nil { - return nil, err - } - - out = append(out, sub...) - continue - } - case f.Name() == "console": - continue - } - device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm") - if err != nil { - if errors.Is(err, ErrNotADevice) { - continue - } - if os.IsNotExist(err) { - continue - } - return nil, err - } - if device.Type == FifoDevice { - continue - } - out = append(out, device) - } - return out, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go deleted file mode 100644 index 2edd1417af..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/cmsg.go +++ /dev/null @@ -1,119 +0,0 @@ -package utils - -/* - * Copyright 2016, 2017 SUSE LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import ( - "fmt" - "os" - "runtime" - - "golang.org/x/sys/unix" -) - -// MaxNameLen is the maximum length of the name of a file descriptor being sent -// using SendFile. The name of the file handle returned by RecvFile will never be -// larger than this value. -const MaxNameLen = 4096 - -// oobSpace is the size of the oob slice required to store a single FD. Note -// that unix.UnixRights appears to make the assumption that fd is always int32, -// so sizeof(fd) = 4. -var oobSpace = unix.CmsgSpace(4) - -// RecvFile waits for a file descriptor to be sent over the given AF_UNIX -// socket. The file name of the remote file descriptor will be recreated -// locally (it is sent as non-auxiliary data in the same payload). 
-func RecvFile(socket *os.File) (_ *os.File, Err error) { - name := make([]byte, MaxNameLen) - oob := make([]byte, oobSpace) - - sockfd := socket.Fd() - n, oobn, _, _, err := unix.Recvmsg(int(sockfd), name, oob, unix.MSG_CMSG_CLOEXEC) - if err != nil { - return nil, err - } - if n >= MaxNameLen || oobn != oobSpace { - return nil, fmt.Errorf("recvfile: incorrect number of bytes read (n=%d oobn=%d)", n, oobn) - } - // Truncate. - name = name[:n] - oob = oob[:oobn] - - scms, err := unix.ParseSocketControlMessage(oob) - if err != nil { - return nil, err - } - - // We cannot control how many SCM_RIGHTS we receive, and upon receiving - // them all of the descriptors are installed in our fd table, so we need to - // parse all of the SCM_RIGHTS we received in order to close all of the - // descriptors on error. - var fds []int - defer func() { - for i, fd := range fds { - if i == 0 && Err == nil { - // Only close the first one on error. - continue - } - // Always close extra ones. - _ = unix.Close(fd) - } - }() - var lastErr error - for _, scm := range scms { - if scm.Header.Type == unix.SCM_RIGHTS { - scmFds, err := unix.ParseUnixRights(&scm) - if err != nil { - lastErr = err - } else { - fds = append(fds, scmFds...) - } - } - } - if lastErr != nil { - return nil, lastErr - } - - // We do this after collecting the fds to make sure we close them all when - // returning an error here. - if len(scms) != 1 { - return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms)) - } - if len(fds) != 1 { - return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds)) - } - return os.NewFile(uintptr(fds[0]), string(name)), nil -} - -// SendFile sends a file over the given AF_UNIX socket. file.Name() is also -// included so that if the other end uses RecvFile, the file will have the same -// name information. -func SendFile(socket *os.File, file *os.File) error { - name := file.Name() - if len(name) >= MaxNameLen { - return fmt.Errorf("sendfd: filename too long: %s", name) - } - err := SendRawFd(socket, name, file.Fd()) - runtime.KeepAlive(file) - return err -} - -// SendRawFd sends a specific file descriptor over the given AF_UNIX socket. -func SendRawFd(socket *os.File, msg string, fd uintptr) error { - oob := unix.UnixRights(int(fd)) - return unix.Sendmsg(int(socket.Fd()), []byte(msg), oob, nil, 0) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go deleted file mode 100644 index db420ea688..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ /dev/null @@ -1,115 +0,0 @@ -package utils - -import ( - "encoding/json" - "io" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -const ( - exitSignalOffset = 128 -) - -// ExitStatus returns the correct exit status for a process based on if it -// was signaled or exited cleanly -func ExitStatus(status unix.WaitStatus) int { - if status.Signaled() { - return exitSignalOffset + int(status.Signal()) - } - return status.ExitStatus() -} - -// WriteJSON writes the provided struct v to w using standard json marshaling -// without a trailing newline. 
This is used instead of json.Encoder because -// there might be a problem in json decoder in some cases, see: -// https://github.com/docker/docker/issues/14203#issuecomment-174177790 -func WriteJSON(w io.Writer, v interface{}) error { - data, err := json.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(data) - return err -} - -// CleanPath makes a path safe for use with filepath.Join. This is done by not -// only cleaning the path, but also (if the path is relative) adding a leading -// '/' and cleaning it (then removing the leading '/'). This ensures that a -// path resulting from prepending another path will always resolve to lexically -// be a subdirectory of the prefixed path. This is all done lexically, so paths -// that include symlinks won't be safe as a result of using CleanPath. -func CleanPath(path string) string { - // Deal with empty strings nicely. - if path == "" { - return "" - } - - // Ensure that all paths are cleaned (especially problematic ones like - // "/../../../../../" which can cause lots of issues). - path = filepath.Clean(path) - - // If the path isn't absolute, we need to do more processing to fix paths - // such as "../../../..//some/path". We also shouldn't convert absolute - // paths to relative ones. - if !filepath.IsAbs(path) { - path = filepath.Clean(string(os.PathSeparator) + path) - // This can't fail, as (by definition) all paths are relative to root. - path, _ = filepath.Rel(string(os.PathSeparator), path) - } - - // Clean the path again for good measure. - return filepath.Clean(path) -} - -// stripRoot returns the passed path, stripping the root path if it was -// (lexicially) inside it. Note that both passed paths will always be treated -// as absolute, and the returned path will also always be absolute. In -// addition, the paths are cleaned before stripping the root. -func stripRoot(root, path string) string { - // Make the paths clean and absolute. - root, path = CleanPath("/"+root), CleanPath("/"+path) - switch { - case path == root: - path = "/" - case root == "/": - // do nothing - case strings.HasPrefix(path, root+"/"): - path = strings.TrimPrefix(path, root+"/") - } - return CleanPath("/" + path) -} - -// SearchLabels searches through a list of key=value pairs for a given key, -// returning its value, and the binary flag telling whether the key exist. -func SearchLabels(labels []string, key string) (string, bool) { - key += "=" - for _, s := range labels { - if strings.HasPrefix(s, key) { - return s[len(key):], true - } - } - return "", false -} - -// Annotations returns the bundle path and user defined annotations from the -// libcontainer state. We need to remove the bundle because that is a label -// added by libcontainer. 
-func Annotations(labels []string) (bundle string, userAnnotations map[string]string) { - userAnnotations = make(map[string]string) - for _, l := range labels { - name, value, ok := strings.Cut(l, "=") - if !ok { - continue - } - if name == "bundle" { - bundle = value - } else { - userAnnotations[name] = value - } - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go deleted file mode 100644 index 8f179b6aa2..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ /dev/null @@ -1,360 +0,0 @@ -//go:build !windows - -package utils - -import ( - "fmt" - "math" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - _ "unsafe" // for go:linkname - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// EnsureProcHandle returns whether or not the given file handle is on procfs. -func EnsureProcHandle(fh *os.File) error { - var buf unix.Statfs_t - if err := unix.Fstatfs(int(fh.Fd()), &buf); err != nil { - return fmt.Errorf("ensure %s is on procfs: %w", fh.Name(), err) - } - if buf.Type != unix.PROC_SUPER_MAGIC { - return fmt.Errorf("%s is not on procfs", fh.Name()) - } - return nil -} - -var ( - haveCloseRangeCloexecBool bool - haveCloseRangeCloexecOnce sync.Once -) - -func haveCloseRangeCloexec() bool { - haveCloseRangeCloexecOnce.Do(func() { - // Make sure we're not closing a random file descriptor. - tmpFd, err := unix.FcntlInt(0, unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return - } - defer unix.Close(tmpFd) - - err = unix.CloseRange(uint(tmpFd), uint(tmpFd), unix.CLOSE_RANGE_CLOEXEC) - // Any error means we cannot use close_range(CLOSE_RANGE_CLOEXEC). - // -ENOSYS and -EINVAL ultimately mean we don't have support, but any - // other potential error would imply that even the most basic close - // operation wouldn't work. - haveCloseRangeCloexecBool = err == nil - }) - return haveCloseRangeCloexecBool -} - -type fdFunc func(fd int) - -// fdRangeFrom calls the passed fdFunc for each file descriptor that is open in -// the current process. -func fdRangeFrom(minFd int, fn fdFunc) error { - procSelfFd, closer := ProcThreadSelf("fd") - defer closer() - - fdDir, err := os.Open(procSelfFd) - if err != nil { - return err - } - defer fdDir.Close() - - if err := EnsureProcHandle(fdDir); err != nil { - return err - } - - fdList, err := fdDir.Readdirnames(-1) - if err != nil { - return err - } - for _, fdStr := range fdList { - fd, err := strconv.Atoi(fdStr) - // Ignore non-numeric file names. - if err != nil { - continue - } - // Ignore descriptors lower than our specified minimum. - if fd < minFd { - continue - } - // Ignore the file descriptor we used for readdir, as it will be closed - // when we return. - if uintptr(fd) == fdDir.Fd() { - continue - } - // Run the closure. - fn(fd) - } - return nil -} - -// CloseExecFrom sets the O_CLOEXEC flag on all file descriptors greater or -// equal to minFd in the current process. -func CloseExecFrom(minFd int) error { - // Use close_range(CLOSE_RANGE_CLOEXEC) if possible. - if haveCloseRangeCloexec() { - err := unix.CloseRange(uint(minFd), math.MaxUint, unix.CLOSE_RANGE_CLOEXEC) - return os.NewSyscallError("close_range", err) - } - // Otherwise, fall back to the standard loop. 
- return fdRangeFrom(minFd, unix.CloseOnExec) -} - -//go:linkname runtime_IsPollDescriptor internal/poll.IsPollDescriptor - -// In order to make sure we do not close the internal epoll descriptors the Go -// runtime uses, we need to ensure that we skip descriptors that match -// "internal/poll".IsPollDescriptor. Yes, this is a Go runtime internal thing, -// unfortunately there's no other way to be sure we're only keeping the file -// descriptors the Go runtime needs. Hopefully nothing blows up doing this... -func runtime_IsPollDescriptor(fd uintptr) bool //nolint:revive - -// UnsafeCloseFrom closes all file descriptors greater or equal to minFd in the -// current process, except for those critical to Go's runtime (such as the -// netpoll management descriptors). -// -// NOTE: That this function is incredibly dangerous to use in most Go code, as -// closing file descriptors from underneath *os.File handles can lead to very -// bad behaviour (the closed file descriptor can be re-used and then any -// *os.File operations would apply to the wrong file). This function is only -// intended to be called from the last stage of runc init. -func UnsafeCloseFrom(minFd int) error { - // We cannot use close_range(2) even if it is available, because we must - // not close some file descriptors. - return fdRangeFrom(minFd, func(fd int) { - if runtime_IsPollDescriptor(uintptr(fd)) { - // These are the Go runtimes internal netpoll file descriptors. - // These file descriptors are operated on deep in the Go scheduler, - // and closing those files from underneath Go can result in panics. - // There is no issue with keeping them because they are not - // executable and are not useful to an attacker anyway. Also we - // don't have any choice. - return - } - // There's nothing we can do about errors from close(2), and the - // only likely error to be seen is EBADF which indicates the fd was - // already closed (in which case, we got what we wanted). - _ = unix.Close(fd) - }) -} - -// NewSockPair returns a new SOCK_STREAM unix socket pair. -func NewSockPair(name string) (parent, child *os.File, err error) { - fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0) - if err != nil { - return nil, nil, err - } - return os.NewFile(uintptr(fds[1]), name+"-p"), os.NewFile(uintptr(fds[0]), name+"-c"), nil -} - -// WithProcfd runs the passed closure with a procfd path (/proc/self/fd/...) -// corresponding to the unsafePath resolved within the root. Before passing the -// fd, this path is verified to have been inside the root -- so operating on it -// through the passed fdpath should be safe. Do not access this path through -// the original path strings, and do not attempt to use the pathname outside of -// the passed closure (the file handle will be freed once the closure returns). -func WithProcfd(root, unsafePath string, fn func(procfd string) error) error { - // Remove the root then forcefully resolve inside the root. - unsafePath = stripRoot(root, unsafePath) - path, err := securejoin.SecureJoin(root, unsafePath) - if err != nil { - return fmt.Errorf("resolving path inside rootfs failed: %w", err) - } - - procSelfFd, closer := ProcThreadSelf("fd/") - defer closer() - - // Open the target path. - fh, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC, 0) - if err != nil { - return fmt.Errorf("open o_path procfd: %w", err) - } - defer fh.Close() - - procfd := filepath.Join(procSelfFd, strconv.Itoa(int(fh.Fd()))) - // Double-check the path is the one we expected. 
- if realpath, err := os.Readlink(procfd); err != nil { - return fmt.Errorf("procfd verification failed: %w", err) - } else if realpath != path { - return fmt.Errorf("possibly malicious path detected -- refusing to operate on %s", realpath) - } - - return fn(procfd) -} - -type ProcThreadSelfCloser func() - -var ( - haveProcThreadSelf bool - haveProcThreadSelfOnce sync.Once -) - -// ProcThreadSelf returns a string that is equivalent to -// /proc/thread-self/, with a graceful fallback on older kernels where -// /proc/thread-self doesn't exist. This method DOES NOT use SecureJoin, -// meaning that the passed string needs to be trusted. The caller _must_ call -// the returned procThreadSelfCloser function (which is runtime.UnlockOSThread) -// *only once* after it has finished using the returned path string. -func ProcThreadSelf(subpath string) (string, ProcThreadSelfCloser) { - haveProcThreadSelfOnce.Do(func() { - if _, err := os.Stat("/proc/thread-self/"); err == nil { - haveProcThreadSelf = true - } else { - logrus.Debugf("cannot stat /proc/thread-self (%v), falling back to /proc/self/task/", err) - } - }) - - // We need to lock our thread until the caller is done with the path string - // because any non-atomic operation on the path (such as opening a file, - // then reading it) could be interrupted by the Go runtime where the - // underlying thread is swapped out and the original thread is killed, - // resulting in pull-your-hair-out-hard-to-debug issues in the caller. In - // addition, the pre-3.17 fallback makes everything non-atomic because the - // same thing could happen between unix.Gettid() and the path operations. - // - // In theory, we don't need to lock in the atomic user case when using - // /proc/thread-self/, but it's better to be safe than sorry (and there are - // only one or two truly atomic users of /proc/thread-self/). - runtime.LockOSThread() - - threadSelf := "/proc/thread-self/" - if !haveProcThreadSelf { - // Pre-3.17 kernels did not have /proc/thread-self, so do it manually. - threadSelf = "/proc/self/task/" + strconv.Itoa(unix.Gettid()) + "/" - if _, err := os.Stat(threadSelf); err != nil { - // Unfortunately, this code is called from rootfs_linux.go where we - // are running inside the pid namespace of the container but /proc - // is the host's procfs. Unfortunately there is no real way to get - // the correct tid to use here (the kernel age means we cannot do - // things like set up a private fsopen("proc") -- even scanning - // NSpid in all of the tasks in /proc/self/task/*/status requires - // Linux 4.1). - // - // So, we just have to assume that /proc/self is acceptable in this - // one specific case. - if os.Getpid() == 1 { - logrus.Debugf("/proc/thread-self (tid=%d) cannot be emulated inside the initial container setup -- using /proc/self instead: %v", unix.Gettid(), err) - } else { - // This should never happen, but the fallback should work in most cases... - logrus.Warnf("/proc/thread-self could not be emulated for pid=%d (tid=%d) -- using more buggy /proc/self fallback instead: %v", os.Getpid(), unix.Gettid(), err) - } - threadSelf = "/proc/self/" - } - } - return threadSelf + subpath, runtime.UnlockOSThread -} - -// ProcThreadSelfFd is small wrapper around ProcThreadSelf to make it easier to -// create a /proc/thread-self handle for given file descriptor. -// -// It is basically equivalent to ProcThreadSelf(fmt.Sprintf("fd/%d", fd)), but -// without using fmt.Sprintf to avoid unneeded overhead. 
-func ProcThreadSelfFd(fd uintptr) (string, ProcThreadSelfCloser) { - return ProcThreadSelf("fd/" + strconv.FormatUint(uint64(fd), 10)) -} - -// IsLexicallyInRoot is shorthand for strings.HasPrefix(path+"/", root+"/"), -// but properly handling the case where path or root are "/". -// -// NOTE: The return value only make sense if the path doesn't contain "..". -func IsLexicallyInRoot(root, path string) bool { - if root != "/" { - root += "/" - } - if path != "/" { - path += "/" - } - return strings.HasPrefix(path, root) -} - -// MkdirAllInRootOpen attempts to make -// -// path, _ := securejoin.SecureJoin(root, unsafePath) -// os.MkdirAll(path, mode) -// os.Open(path) -// -// safer against attacks where components in the path are changed between -// SecureJoin returning and MkdirAll (or Open) being called. In particular, we -// try to detect any symlink components in the path while we are doing the -// MkdirAll. -// -// NOTE: If unsafePath is a subpath of root, we assume that you have already -// called SecureJoin and so we use the provided path verbatim without resolving -// any symlinks (this is done in a way that avoids symlink-exchange races). -// This means that the path also must not contain ".." elements, otherwise an -// error will occur. -// -// This uses securejoin.MkdirAllHandle under the hood, but it has special -// handling if unsafePath has already been scoped within the rootfs (this is -// needed for a lot of runc callers and fixing this would require reworking a -// lot of path logic). -func MkdirAllInRootOpen(root, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { - // If the path is already "within" the root, get the path relative to the - // root and use that as the unsafe path. This is necessary because a lot of - // MkdirAllInRootOpen callers have already done SecureJoin, and refactoring - // all of them to stop using these SecureJoin'd paths would require a fair - // amount of work. - // TODO(cyphar): Do the refactor to libpathrs once it's ready. - if IsLexicallyInRoot(root, unsafePath) { - subPath, err := filepath.Rel(root, unsafePath) - if err != nil { - return nil, err - } - unsafePath = subPath - } - - // Check for any silly mode bits. - if mode&^0o7777 != 0 { - return nil, fmt.Errorf("tried to include non-mode bits in MkdirAll mode: 0o%.3o", mode) - } - // Linux (and thus os.MkdirAll) silently ignores the suid and sgid bits if - // passed. While it would make sense to return an error in that case (since - // the user has asked for a mode that won't be applied), for compatibility - // reasons we have to ignore these bits. - if ignoredBits := mode &^ 0o1777; ignoredBits != 0 { - logrus.Warnf("MkdirAll called with no-op mode bits that are ignored by Linux: 0o%.3o", ignoredBits) - mode &= 0o1777 - } - - rootDir, err := os.OpenFile(root, unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, fmt.Errorf("open root handle: %w", err) - } - defer rootDir.Close() - - return securejoin.MkdirAllHandle(rootDir, unsafePath, mode) -} - -// MkdirAllInRoot is a wrapper around MkdirAllInRootOpen which closes the -// returned handle, for callers that don't need to use it. -func MkdirAllInRoot(root, unsafePath string, mode os.FileMode) error { - f, err := MkdirAllInRootOpen(root, unsafePath, mode) - if err == nil { - _ = f.Close() - } - return err -} - -// Openat is a Go-friendly openat(2) wrapper. 
-func Openat(dir *os.File, path string, flags int, mode uint32) (*os.File, error) { - dirFd := unix.AT_FDCWD - if dir != nil { - dirFd = int(dir.Fd()) - } - flags |= unix.O_CLOEXEC - - fd, err := unix.Openat(dirFd, path, flags, mode) - if err != nil { - return nil, &os.PathError{Op: "openat", Path: path, Err: err} - } - return os.NewFile(uintptr(fd), dir.Name()+"/"+path), nil -} diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index 910cabc5fa..33e51639c2 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -760,8 +760,8 @@ var ( mustRegister() FeatureGateAWSDedicatedHosts = newFeatureGate("AWSDedicatedHosts"). - reportProblemsToJiraComponent("Installer"). - contactPerson("faermanj"). + reportProblemsToJiraComponent("splat"). + contactPerson("rvanderp3"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1781"). enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index b3b38bc6cc..161f2324c2 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -114,6 +114,14 @@ type AWSMachineProviderConfig struct { // If this value is selected, capacityReservationID must be specified to identify the target reservation. // +optional MarketType MarketType `json:"marketType,omitempty"` + + // hostPlacement configures placement on AWS Dedicated Hosts. This allows admins to assign instances to specific host + // for a variety of needs including for regulatory compliance, to leverage existing per-socket or per-core software licenses (BYOL), + // and to gain visibility and control over instance placement on a physical server. + // When omitted, the instance is not constrained to a dedicated host. + // +openshift:enable:FeatureGate=AWSDedicatedHosts + // +optional + HostPlacement *HostPlacement `json:"hostPlacement,omitempty"` } // AWSConfidentialComputePolicy represents the confidential compute configuration for the instance. @@ -393,3 +401,46 @@ const ( // When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. MarketTypeCapacityBlock MarketType = "CapacityBlock" ) + +// HostPlacement is the type that will be used to configure the placement of AWS instances. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.affinity == 'DedicatedHost' ? has(self.dedicatedHost) : !has(self.dedicatedHost)",message="dedicatedHost is required when affinity is DedicatedHost, and forbidden otherwise" +// +union +type HostPlacement struct { + // affinity specifies the affinity setting for the instance. + // Allowed values are AnyAvailable and DedicatedHost. + // When Affinity is set to DedicatedHost, an instance started onto a specific host always restarts on the same host if stopped. In this scenario, the `dedicatedHost` field must be set. + // When Affinity is set to AnyAvailable, and you stop and restart the instance, it can be restarted on any available host. + // +required + // +unionDiscriminator + Affinity *HostAffinity `json:"affinity,omitempty"` + + // dedicatedHost specifies the exact host that an instance should be restarted on if stopped. 
+ // dedicatedHost is required when 'affinity' is set to DedicatedHost, and forbidden otherwise. + // +optional + // +unionMember + DedicatedHost *DedicatedHost `json:"dedicatedHost,omitempty"` +} + +// HostAffinity selects how an instance should be placed on AWS Dedicated Hosts. +// +kubebuilder:validation:Enum:=DedicatedHost;AnyAvailable +type HostAffinity string + +const ( + // HostAffinityAnyAvailable lets the platform select any available dedicated host. + HostAffinityAnyAvailable HostAffinity = "AnyAvailable" + + // HostAffinityDedicatedHost requires specifying a particular host via dedicatedHost.host.hostID. + HostAffinityDedicatedHost HostAffinity = "DedicatedHost" +) + +// DedicatedHost represents the configuration for the usage of dedicated host. +type DedicatedHost struct { + // id identifies the AWS Dedicated Host on which the instance must run. + // The value must start with "h-" followed by 17 lowercase hexadecimal characters (0-9 and a-f). + // Must be exactly 19 characters in length. + // +kubebuilder:validation:XValidation:rule="self.matches('^h-[0-9a-f]{17}$')",message="hostID must start with 'h-' followed by 17 lowercase hexadecimal characters (0-9 and a-f)" + // +kubebuilder:validation:MinLength=19 + // +kubebuilder:validation:MaxLength=19 + // +required + ID string `json:"id,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index 554fc19b9c..92586dcdf1 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -85,6 +85,11 @@ func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) *out = new(int32) **out = **in } + if in.HostPlacement != nil { + in, out := &in.HostPlacement, &out.HostPlacement + *out = new(HostPlacement) + (*in).DeepCopyInto(*out) + } return } @@ -509,6 +514,22 @@ func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParamete return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHost. +func (in *DedicatedHost) DeepCopy() *DedicatedHost { + if in == nil { + return nil + } + out := new(DedicatedHost) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { *out = *in @@ -903,6 +924,32 @@ func (in *GCPShieldedInstanceConfig) DeepCopy() *GCPShieldedInstanceConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPlacement) DeepCopyInto(out *HostPlacement) { + *out = *in + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(HostAffinity) + **out = **in + } + if in.DedicatedHost != nil { + in, out := &in.DedicatedHost, &out.DedicatedHost + *out = new(DedicatedHost) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPlacement. 
+func (in *HostPlacement) DeepCopy() *HostPlacement { + if in == nil { + return nil + } + out := new(HostPlacement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Image) DeepCopyInto(out *Image) { *out = *in diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 7b74d37d02..23b6eddd37 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_AWSMachineProviderConfig = map[string]string{ "placementGroupPartition": "placementGroupPartition is the partition number within the placement group in which to launch the instance. This must be an integer value between 1 and 7. It is only valid if the placement group, referred in `PlacementGroupName` was created with strategy set to partition.", "capacityReservationId": "capacityReservationId specifies the target Capacity Reservation into which the instance should be launched. The field size should be greater than 0 and the field input must start with cr-***", "marketType": "marketType specifies the type of market for the EC2 instance. Valid values are OnDemand, Spot, CapacityBlock and omitted.\n\nDefaults to OnDemand. When SpotMarketOptions is provided, the marketType defaults to \"Spot\".\n\nWhen set to OnDemand the instance runs as a standard OnDemand instance. When set to Spot the instance runs as a Spot instance. When set to CapacityBlock the instance utilizes pre-purchased compute capacity (capacity blocks) with AWS Capacity Reservations. If this value is selected, capacityReservationID must be specified to identify the target reservation.", + "hostPlacement": "hostPlacement configures placement on AWS Dedicated Hosts. This allows admins to assign instances to specific host for a variety of needs including for regulatory compliance, to leverage existing per-socket or per-core software licenses (BYOL), and to gain visibility and control over instance placement on a physical server. When omitted, the instance is not constrained to a dedicated host.", } func (AWSMachineProviderConfig) SwaggerDoc() map[string]string { @@ -92,6 +93,15 @@ func (CPUOptions) SwaggerDoc() map[string]string { return map_CPUOptions } +var map_DedicatedHost = map[string]string{ + "": "DedicatedHost represents the configuration for the usage of dedicated host.", + "id": "id identifies the AWS Dedicated Host on which the instance must run. The value must start with \"h-\" followed by 17 lowercase hexadecimal characters (0-9 and a-f). Must be exactly 19 characters in length.", +} + +func (DedicatedHost) SwaggerDoc() map[string]string { + return map_DedicatedHost +} + var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. 
https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", @@ -116,6 +126,16 @@ func (Filter) SwaggerDoc() map[string]string { return map_Filter } +var map_HostPlacement = map[string]string{ + "": "HostPlacement is the type that will be used to configure the placement of AWS instances.", + "affinity": "affinity specifies the affinity setting for the instance. Allowed values are AnyAvailable and DedicatedHost. When Affinity is set to DedicatedHost, an instance started onto a specific host always restarts on the same host if stopped. In this scenario, the `dedicatedHost` field must be set. When Affinity is set to AnyAvailable, and you stop and restart the instance, it can be restarted on any available host.", + "dedicatedHost": "dedicatedHost specifies the exact host that an instance should be restarted on if stopped. dedicatedHost is required when 'affinity' is set to DedicatedHost, and forbidden otherwise.", +} + +func (HostPlacement) SwaggerDoc() map[string]string { + return map_HostPlacement +} + var map_LoadBalancerReference = map[string]string{ "": "LoadBalancerReference is a reference to a load balancer on AWS.", } diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go b/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go deleted file mode 100644 index a5cf8f99a5..0000000000 --- a/vendor/github.com/openshift/library-go/pkg/authorization/hardcodedauthorizer/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -package hardcodedauthorizer - -import ( - "context" - - "k8s.io/apiserver/pkg/authorization/authorizer" -) - -type metricsAuthorizer struct{} - -// GetUser() user.Info - checked -// GetVerb() string - checked -// IsReadOnly() bool - na -// GetNamespace() string - na -// GetResource() string - na -// GetSubresource() string - na -// GetName() string - na -// GetAPIGroup() string - na -// GetAPIVersion() string - na -// IsResourceRequest() bool - checked -// GetPath() string - checked -func (metricsAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { - if a.GetUser().GetName() != "system:serviceaccount:openshift-monitoring:prometheus-k8s" { - return authorizer.DecisionNoOpinion, "", nil - } - if !a.IsResourceRequest() && - a.GetVerb() == "get" && - a.GetPath() == "/metrics" { - return authorizer.DecisionAllow, "requesting metrics is allowed", nil - } - - return authorizer.DecisionNoOpinion, "", nil -} - -// NewHardCodedMetricsAuthorizer returns a hardcoded authorizer for checking metrics. 
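The hostPlacement additions above (types_awsprovider.go, the generated deepcopy functions, and the swagger docs) are type definitions only; the sketch below shows roughly how a caller might populate them on an AWSMachineProviderConfig when affinity is DedicatedHost. The import alias, the sample instance type, and the host ID are placeholders for illustration and are not taken from this change.

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	// Pin the instance to a specific dedicated host; with HostAffinityAnyAvailable
	// the dedicatedHost stanza would be omitted instead.
	affinity := machinev1beta1.HostAffinityDedicatedHost

	providerConfig := machinev1beta1.AWSMachineProviderConfig{
		InstanceType: "m5.xlarge", // placeholder instance type
		HostPlacement: &machinev1beta1.HostPlacement{
			Affinity: &affinity,
			// dedicatedHost is required because affinity is DedicatedHost,
			// and is forbidden otherwise.
			DedicatedHost: &machinev1beta1.DedicatedHost{
				ID: "h-0123456789abcdef0", // "h-" followed by 17 lowercase hex characters
			},
		},
	}

	fmt.Printf("%+v\n", providerConfig)
}
```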
-func NewHardCodedMetricsAuthorizer() *metricsAuthorizer { - return new(metricsAuthorizer) -} diff --git a/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go b/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go deleted file mode 100644 index a440a04a1b..0000000000 --- a/vendor/github.com/openshift/library-go/pkg/client/openshiftrestmapper/hardcoded_restmapper.go +++ /dev/null @@ -1,229 +0,0 @@ -package openshiftrestmapper - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// defaultRESTMappings contains enough RESTMappings to have enough of the kube-controller-manager succeed when running -// against a kube-apiserver that cannot reach aggregated APIs to do a full mapping. This happens when the OwnerReferencesPermissionEnforcement -// admission plugin runs to confirm permissions. Don't add things just because you don't want to fail. These are here so that -// we can start enough back up to get the rest of the system working correctly. -var defaultRESTMappings = []meta.RESTMapping{ - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}, - }, - // This is created so that cluster-bootstrap can always map securitycontextconstraints since 
the CRD doesn't have - // discovery. Discovery is delegated to the openshift-apiserver which doesn't not exist early in the bootstrapping - // phase. This leads to SCC related failures that we don't need to have. - { - GroupVersionKind: schema.GroupVersionKind{Group: "security.openshift.io", Version: "v1", Kind: "SecurityContextConstraints"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"}, - }, - // This is created so that cluster-bootstrap can always map customresourcedefinitions, RBAC, machine resources so that CRDs and - // permissions are always created quickly. We observed discovery not including these on AWS OVN installations and - // the lack of CRDs and permissions blocked additional aspects of cluster bootstrapping. - { - GroupVersionKind: schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "Machine"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machine.openshift.io", Version: "v1beta1", Kind: "MachineSet"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinesets"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "machineconfiguration.openshift.io", Version: "v1", Kind: "MachineConfig"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "machineconfiguration.openshift.io", Version: "v1", Resource: "machineconfigs"}, - }, - // This is here so cluster-bootstrap can always create the config instances that are used to drive our operators to avoid the - // excessive bootstrap wait that prevents installer from completing on AWS OVN - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "DNS"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "dnses"}, - }, - { - GroupVersionKind: 
schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Infrastructure"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "infrastructures"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Network"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "networks"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Ingress"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "ingresses"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Proxy"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "proxies"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Scheduler"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "schedulers"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ClusterVersion"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "clusterversions"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "CloudCredential"}, - Scope: meta.RESTScopeRoot, - Resource: schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "cloudcredentials"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}, - }, - { - GroupVersionKind: schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}, - Scope: meta.RESTScopeNamespace, - Resource: schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}, - }, -} - -func NewOpenShiftHardcodedRESTMapper(delegate meta.RESTMapper) meta.RESTMapper { - ret := HardCodedFirstRESTMapper{ - Mapping: map[schema.GroupVersionKind]meta.RESTMapping{}, - RESTMapper: delegate, - } - for i := range defaultRESTMappings { - curr := defaultRESTMappings[i] - ret.Mapping[curr.GroupVersionKind] = curr - } - return ret -} - -// HardCodedFirstRESTMapper is a RESTMapper that will look for hardcoded mappings first, then delegate. -// This is done in service to `OwnerReferencesPermissionEnforcement` and for cluster-bootstrap. -type HardCodedFirstRESTMapper struct { - Mapping map[schema.GroupVersionKind]meta.RESTMapping - meta.RESTMapper -} - -var _ meta.RESTMapper = HardCodedFirstRESTMapper{} - -func (m HardCodedFirstRESTMapper) String() string { - return fmt.Sprintf("HardCodedRESTMapper{\n\t%v\n%v\n}", m.Mapping, m.RESTMapper) -} - -// RESTMapping is the only function called today. The first hit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement -// only ever calls with one version. 
-func (m HardCodedFirstRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - // not exactly one version, delegate - if len(versions) != 1 { - return m.RESTMapper.RESTMapping(gk, versions...) - } - gvk := gk.WithVersion(versions[0]) - - single, ok := m.Mapping[gvk] - // not handled, delegate - if !ok { - return m.RESTMapper.RESTMapping(gk, versions...) - } - - return &single, nil -} - -// RESTMapping is the only function called today. The firsthit openshiftrestmapper ought to make this work right. OwnerReferencesPermissionEnforcement -// only ever calls with one version. -func (m HardCodedFirstRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - // not exactly one version, delegate - if len(versions) != 1 { - return m.RESTMapper.RESTMappings(gk, versions...) - } - gvk := gk.WithVersion(versions[0]) - - single, ok := m.Mapping[gvk] - // not handled, delegate - if !ok { - return m.RESTMapper.RESTMappings(gk, versions...) - } - - return []*meta.RESTMapping{&single}, nil -} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc39..0000000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. -ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 2e337a0ed5..0000000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof - -# Output of fossa analyzer -/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 6f87f33fa9..0000000000 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,127 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.11.0] - 2023-05-02 -### Fixed -- Fix initialization of `Value` wrappers. - -### Added -- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print -underlying values of pointers. - -[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0 - -## [1.10.0] - 2022-08-11 -### Added -- Add `atomic.Float32` type for atomic operations on `float32`. -- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, - and `atomic.Value`. -- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any - type. 
This is present only for Go 1.18 or higher, and is a drop-in for - replacement for the standard library's `sync/atomic.Pointer` type. - -### Changed -- Deprecate `CAS` methods on all types in favor of corresponding - `CompareAndSwap` methods. - -Thanks to @eNV25 and @icpd for their contributions to this release. - -[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 - -## [1.9.0] - 2021-07-15 -### Added -- Add `Float64.Swap` to match int atomic operations. -- Add `atomic.Time` type for atomic operations on `time.Time` values. - -[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 - -## [1.8.0] - 2021-06-09 -### Added -- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. -- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. - -[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. - -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. - -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 - -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. - -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 - -## [1.0.0] - 2016-07-18 - -- Initial release. - -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fbc6..0000000000 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 46c945b32b..0000000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,79 +0,0 @@ -# Directory to place `go install`ed binaries into. -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... - -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... - -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - git status; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." 
) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 96b47a1f12..0000000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index f0a2ddd148..0000000000 --- a/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,88 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(val bool) *Bool { - x := &Bool{} - if val != _zeroBool { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(val bool) { - x.v.Store(boolToInt(val)) -} - -// CAS is an atomic compare-and-swap for bool values. -// -// Deprecated: Use CompareAndSwap. -func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for bool values. -func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { - return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(val bool) (old bool) { - return truthy(x.v.Swap(boolToInt(val))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. -func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go deleted file mode 100644 index a2e60e9873..0000000000 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Toggle atomically negates the Boolean and returns the previous value. 
-func (b *Bool) Toggle() (old bool) { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } -} - -// String encodes the wrapped value as a string. -func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) -} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee68..0000000000 --- a/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 7c23868fc8..0000000000 --- a/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,89 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. 
-func NewDuration(val time.Duration) *Duration { - x := &Duration{} - if val != _zeroDuration { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Duration. -func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(val time.Duration) { - x.v.Store(int64(val)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -// -// Deprecated: Use CompareAndSwap. -func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { - return x.v.CompareAndSwap(int64(old), int64(new)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(val time.Duration) (old time.Duration) { - return time.Duration(x.v.Swap(int64(val))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. -func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 4c18b0a9ed..0000000000 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(delta time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(delta))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(delta time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(delta))) -} - -// String encodes the wrapped value as a string. 
-func (d *Duration) String() string { - return d.Load().String() -} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index b7e3f1291a..0000000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,72 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroError error - -// NewError creates a new Error. -func NewError(val error) *Error { - x := &Error{} - if val != _zeroError { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) -} - -// Store atomically stores the passed error. -func (x *Error) Store(val error) { - x.v.Store(packError(val)) -} - -// CompareAndSwap is an atomic compare-and-swap for error values. -func (x *Error) CompareAndSwap(old, new error) (swapped bool) { - if x.v.CompareAndSwap(packError(old), packError(new)) { - return true - } - - if old == _zeroError { - // If the old value is the empty value, then it's possible the - // underlying Value hasn't been set and is nil, so retry with nil. - return x.v.CompareAndSwap(nil, packError(new)) - } - - return false -} - -// Swap atomically stores the given error and returns the old -// value. -func (x *Error) Swap(val error) (old error) { - return unpackError(x.v.Swap(packError(val))) -} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go deleted file mode 100644 index d31fb633bb..0000000000 --- a/vendor/go.uber.org/atomic/error_ext.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
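Note for reviewers skimming this removal: the Error wrapper deleted above stores errors through an atomic.Value by packing them in a small struct, because (as the deleted error_ext.go comment notes) atomic.Value panics on nil inputs or when the stored type changes. A minimal sketch of that pattern using only the standard library; the helper names here are illustrative, not the vendored API:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// packedError wraps an error so atomic.Value always sees one concrete type,
// even when the error itself is nil. This mirrors the packError/unpackError
// helpers in the error_ext.go deleted above (names here are made up).
type packedError struct{ err error }

func storeErr(v *atomic.Value, err error) { v.Store(packedError{err}) }

func loadErr(v *atomic.Value) error {
	if p, ok := v.Load().(packedError); ok {
		return p.err
	}
	return nil // nothing stored yet
}

func main() {
	var v atomic.Value
	fmt.Println(loadErr(&v)) // <nil>
	storeErr(&v, errors.New("boom"))
	fmt.Println(loadErr(&v)) // boom
	storeErr(&v, nil) // storing nil is safe through the wrapper struct
	fmt.Println(loadErr(&v)) // <nil>
}
```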
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. - -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go - -type packedError struct{ Value error } - -func packError(v error) interface{} { - return packedError{v} -} - -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value - } - return nil -} diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go deleted file mode 100644 index 62c36334fd..0000000000 --- a/vendor/go.uber.org/atomic/float32.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float32 is an atomic type-safe wrapper for float32 values. -type Float32 struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroFloat32 float32 - -// NewFloat32 creates a new Float32. -func NewFloat32(val float32) *Float32 { - x := &Float32{} - if val != _zeroFloat32 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float32. -func (x *Float32) Load() float32 { - return math.Float32frombits(x.v.Load()) -} - -// Store atomically stores the passed float32. -func (x *Float32) Store(val float32) { - x.v.Store(math.Float32bits(val)) -} - -// Swap atomically stores the given float32 and returns the old -// value. -func (x *Float32) Swap(val float32) (old float32) { - return math.Float32frombits(x.v.Swap(math.Float32bits(val))) -} - -// MarshalJSON encodes the wrapped float32 into JSON. -func (x *Float32) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float32 from JSON. 
-func (x *Float32) UnmarshalJSON(b []byte) error { - var v float32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go deleted file mode 100644 index b0cd8d9c82..0000000000 --- a/vendor/go.uber.org/atomic/float32_ext.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go - -// Add atomically adds to the wrapped float32 and returns the new value. -func (f *Float32) Add(delta float32) float32 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float32 and returns the new value. -func (f *Float32) Sub(delta float32) float32 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float32 values. -// -// Deprecated: Use CompareAndSwap -func (f *Float32) CAS(old, new float32) (swapped bool) { - return f.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for float32 values. -// -// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CompareAndSwap loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CompareAndSwap(old, new) { -// break -// } -// } -// -// If CompareAndSwap did not match NaN to match, then the above would loop forever. -func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { - return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float32) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) -} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 5bc11caabe..0000000000 --- a/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. -type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(val float64) *Float64 { - x := &Float64{} - if val != _zeroFloat64 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(val float64) { - x.v.Store(math.Float64bits(val)) -} - -// Swap atomically stores the given float64 and returns the old -// value. -func (x *Float64) Swap(val float64) (old float64) { - return math.Float64frombits(x.v.Swap(math.Float64bits(val))) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. -func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index 48c52b0abf..0000000000 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(delta float64) float64 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(delta float64) float64 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float64 values. -// -// Deprecated: Use CompareAndSwap -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for float64 values. -// -// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CompareAndSwap loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CompareAndSwap(old, new) { -// break -// } -// } -// -// If CompareAndSwap did not match NaN to match, then the above would loop forever. -func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { - return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 1e9ef4f879..0000000000 --- a/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
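The Float32/Float64 wrappers removed above implement Add with a load, compute, CompareAndSwap retry loop over the raw bit pattern. A minimal standalone sketch of that loop over a plain uint64, using only the standard library rather than the vendored API:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// addFloat64 atomically adds delta to the float64 stored (as bits) in addr,
// using the same load/compute/CompareAndSwap retry loop that the removed
// Float64.Add implements.
func addFloat64(addr *uint64, delta float64) float64 {
	for {
		oldBits := atomic.LoadUint64(addr)
		newVal := math.Float64frombits(oldBits) + delta
		if atomic.CompareAndSwapUint64(addr, oldBits, math.Float64bits(newVal)) {
			return newVal
		}
	}
}

func main() {
	var bits uint64 // zero bits == 0.0
	fmt.Println(addFloat64(&bits, 1.5)) // 1.5
	fmt.Println(addFloat64(&bits, 2.0)) // 3.5
}
```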
- -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go -//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index 5320eac10f..0000000000 --- a/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(val int32) *Int32 { - return &Int32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(delta int32) int32 { - return atomic.AddInt32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(delta int32) int32 { - return atomic.AddInt32(&i.v, -delta) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Int32) CAS(old, new int32) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(val int32) { - atomic.StoreInt32(&i.v, val) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(val int32) (old int32) { - return atomic.SwapInt32(&i.v, val) -} - -// MarshalJSON encodes the wrapped int32 into JSON. 
-func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 460821d009..0000000000 --- a/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(val int64) *Int64 { - return &Int64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(delta int64) int64 { - return atomic.AddInt64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(delta int64) int64 { - return atomic.AddInt64(&i.v, -delta) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Int64) CAS(old, new int64) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(val int64) { - atomic.StoreInt64(&i.v, val) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(val int64) (old int64) { - return atomic.SwapInt64(&i.v, val) -} - -// MarshalJSON encodes the wrapped int64 into JSON. 
-func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index 54b74174ab..0000000000 --- a/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go deleted file mode 100644 index 1fb6c03b26..0000000000 --- a/vendor/go.uber.org/atomic/pointer_go118.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
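Every wrapper in this package embeds the nocmp field from the nocmp.go deleted above: a zero-size array of func values, which Go treats as uncomparable, so accidental == comparison of the wrappers is rejected at compile time. A small illustration; the counter type is invented for the example:

```go
package main

// nocmp is a zero-size field that makes any struct embedding it
// uncomparable with ==, mirroring the trick in the deleted nocmp.go.
// func values are not comparable, so a [0]func() array poisons
// comparability without adding any size.
type nocmp [0]func()

type counter struct {
	_ nocmp
	n int64
}

func main() {
	var a, b counter
	_, _ = a, b
	// The next line would fail to compile if uncommented:
	// _ = a == b // invalid operation: struct containing nocmp cannot be compared
}
```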
- -//go:build go1.18 -// +build go1.18 - -package atomic - -import "fmt" - -// String returns a human readable representation of a Pointer's underlying value. -func (p *Pointer[T]) String() string { - return fmt.Sprint(p.Load()) -} diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go deleted file mode 100644 index e0f47dba46..0000000000 --- a/vendor/go.uber.org/atomic/pointer_go118_pre119.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.18 && !go1.19 -// +build go1.18,!go1.19 - -package atomic - -import "unsafe" - -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p UnsafePointer -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(unsafe.Pointer(v)) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return (*T)(p.p.Load()) -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(unsafe.Pointer(val)) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return (*T)(p.p.Swap(unsafe.Pointer(val))) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) -} diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go deleted file mode 100644 index 6726f17ad6..0000000000 --- a/vendor/go.uber.org/atomic/pointer_go119.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.19 -// +build go1.19 - -package atomic - -import "sync/atomic" - -// Pointer is an atomic pointer of type *T. -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p atomic.Pointer[T] -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(v) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return p.p.Load() -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(val) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return p.p.Swap(val) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(old, new) -} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index 061466c5bd..0000000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,72 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. -type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(val string) *String { - x := &String{} - if val != _zeroString { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped string. -func (x *String) Load() string { - return unpackString(x.v.Load()) -} - -// Store atomically stores the passed string. -func (x *String) Store(val string) { - x.v.Store(packString(val)) -} - -// CompareAndSwap is an atomic compare-and-swap for string values. 
-func (x *String) CompareAndSwap(old, new string) (swapped bool) { - if x.v.CompareAndSwap(packString(old), packString(new)) { - return true - } - - if old == _zeroString { - // If the old value is the empty value, then it's possible the - // underlying Value hasn't been set and is nil, so retry with nil. - return x.v.CompareAndSwap(nil, packString(new)) - } - - return false -} - -// Swap atomically stores the given string and returns the old -// value. -func (x *String) Swap(val string) (old string) { - return unpackString(x.v.Swap(packString(val))) -} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 019109c86b..0000000000 --- a/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go - -func packString(s string) interface{} { - return s -} - -func unpackString(v interface{}) string { - if s, ok := v.(string); ok { - return s - } - return "" -} - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. -// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go deleted file mode 100644 index cc2a230c00..0000000000 --- a/vendor/go.uber.org/atomic/time.go +++ /dev/null @@ -1,55 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "time" -) - -// Time is an atomic type-safe wrapper for time.Time values. -type Time struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroTime time.Time - -// NewTime creates a new Time. -func NewTime(val time.Time) *Time { - x := &Time{} - if val != _zeroTime { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Time. -func (x *Time) Load() time.Time { - return unpackTime(x.v.Load()) -} - -// Store atomically stores the passed time.Time. -func (x *Time) Store(val time.Time) { - x.v.Store(packTime(val)) -} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go deleted file mode 100644 index 1e3dc978aa..0000000000 --- a/vendor/go.uber.org/atomic/time_ext.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
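The String and Time wrappers deleted above follow the same pack/unpack-through-atomic.Value pattern: store a concrete value, type-assert on load, and fall back to the zero value when nothing has been stored yet. A minimal sketch for time.Time using only the standard library:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// loadTime mirrors the pack/unpack pattern of the deleted time.go/time_ext.go:
// values go into an atomic.Value as a concrete time.Time and come back out via
// a type assertion, with the zero time returned when nothing was stored.
func loadTime(v *atomic.Value) time.Time {
	if t, ok := v.Load().(time.Time); ok {
		return t
	}
	return time.Time{}
}

func main() {
	var v atomic.Value
	fmt.Println(loadTime(&v).IsZero()) // true: nothing stored yet

	v.Store(time.Unix(0, 0).UTC())
	fmt.Println(loadTime(&v)) // 1970-01-01 00:00:00 +0000 UTC
}
```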
- -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go - -func packTime(t time.Time) interface{} { - return t -} - -func unpackTime(v interface{}) time.Time { - if t, ok := v.(time.Time); ok { - return t - } - return time.Time{} -} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index 4adc294ac2..0000000000 --- a/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(val uint32) *Uint32 { - return &Uint32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(delta uint32) uint32 { - return atomic.AddUint32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(delta uint32) uint32 { - return atomic.AddUint32(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uint32) CAS(old, new uint32) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(val uint32) { - atomic.StoreUint32(&i.v, val) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(val uint32) (old uint32) { - return atomic.SwapUint32(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. 
-func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 0e2eddb303..0000000000 --- a/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. -type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(val uint64) *Uint64 { - return &Uint64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(delta uint64) uint64 { - return atomic.AddUint64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(delta uint64) uint64 { - return atomic.AddUint64(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uint64) CAS(old, new uint64) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(val uint64) { - atomic.StoreUint64(&i.v, val) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. 
-func (i *Uint64) Swap(val uint64) (old uint64) { - return atomic.SwapUint64(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. -func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go deleted file mode 100644 index 7d5b000d61..0000000000 --- a/vendor/go.uber.org/atomic/uintptr.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2023 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uintptr is an atomic wrapper around uintptr. -type Uintptr struct { - _ nocmp // disallow non-atomic comparison - - v uintptr -} - -// NewUintptr creates a new Uintptr. -func NewUintptr(val uintptr) *Uintptr { - return &Uintptr{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uintptr) Load() uintptr { - return atomic.LoadUintptr(&i.v) -} - -// Add atomically adds to the wrapped uintptr and returns the new value. -func (i *Uintptr) Add(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uintptr and returns the new value. -func (i *Uintptr) Sub(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uintptr and returns the new value. -func (i *Uintptr) Inc() uintptr { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uintptr and returns the new value. -func (i *Uintptr) Dec() uintptr { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { - return atomic.CompareAndSwapUintptr(&i.v, old, new) -} - -// Store atomically stores the passed value. 
-func (i *Uintptr) Store(val uintptr) { - atomic.StoreUintptr(&i.v, val) -} - -// Swap atomically swaps the wrapped uintptr and returns the old value. -func (i *Uintptr) Swap(val uintptr) (old uintptr) { - return atomic.SwapUintptr(&i.v, val) -} - -// MarshalJSON encodes the wrapped uintptr into JSON. -func (i *Uintptr) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uintptr. -func (i *Uintptr) UnmarshalJSON(b []byte) error { - var v uintptr - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uintptr) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go deleted file mode 100644 index 34868baf6a..0000000000 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2021-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "sync/atomic" - "unsafe" -) - -// UnsafePointer is an atomic wrapper around unsafe.Pointer. -type UnsafePointer struct { - _ nocmp // disallow non-atomic comparison - - v unsafe.Pointer -} - -// NewUnsafePointer creates a new UnsafePointer. -func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { - return &UnsafePointer{v: val} -} - -// Load atomically loads the wrapped value. -func (p *UnsafePointer) Load() unsafe.Pointer { - return atomic.LoadPointer(&p.v) -} - -// Store atomically stores the passed value. -func (p *UnsafePointer) Store(val unsafe.Pointer) { - atomic.StorePointer(&p.v, val) -} - -// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. -func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { - return atomic.SwapPointer(&p.v, val) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap -func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { - return p.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. 
-func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { - return atomic.CompareAndSwapPointer(&p.v, old, new) -} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 52caedb9a5..0000000000 --- a/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - _ nocmp // disallow non-atomic comparison - - atomic.Value -} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index df453dcce0..0000000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. -// -// These functions are also compatible with the “Ed25519” function defined in -// [RFC 8032]. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// The ed25519 package is a wrapper for the Ed25519 implementation in the -// crypto/ed25519 package. It is [frozen] and is not accepting new features. -// -// [RFC 8032]: https://datatracker.ietf.org/doc/html/rfc8032 -// [frozen]: https://go.dev/wiki/Frozen -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. 
-type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 28cd99c7f3..0000000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. 
-func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b06..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|&{tU|gG(Cy=+c:u:/p#~["4!nADK merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md b/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md deleted file mode 100644 index 8e6e913239..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CHANGELOG.md +++ /dev/null @@ -1,84 +0,0 @@ -# v4.0.1 - -## Fixed - - - An attacker could send a JWE containing compressed data that used large - amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. - Those functions now return an error if the decompressed data would exceed - 250kB or 10x the compressed size (whichever is larger). Thanks to - Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) - for reporting. - -# v4.0.0 - -This release makes some breaking changes in order to more thoroughly -address the vulnerabilities discussed in [Three New Attacks Against JSON Web -Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot -token". - -## Changed - - - Limit JWT encryption types (exclude password or public key types) (#78) - - Enforce minimum length for HMAC keys (#85) - - jwt: match any audience in a list, rather than requiring all audiences (#81) - - jwt: accept only Compact Serialization (#75) - - jws: Add expected algorithms for signatures (#74) - - Require specifying expected algorithms for ParseEncrypted, - ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, - jwt.ParseSignedAndEncrypted (#69, #74) - - Usually there is a small, known set of appropriate algorithms for a program - to use and it's a mistake to allow unexpected algorithms. For instance the - "billion hash attack" relies in part on programs accepting the PBES2 - encryption algorithm and doing the necessary work even if they weren't - specifically configured to allow PBES2. - - Revert "Strip padding off base64 strings" (#82) - - The specs require base64url encoding without padding. - - Minimum supported Go version is now 1.21 - -## Added - - - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. - - These allow parsing a specific serialization, as opposed to ParseSigned and - ParseEncrypted, which try to automatically detect which serialization was - provided. 
It's common to require a specific serialization for a specific - protocol - for instance JWT requires Compact serialization. - -[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v3.0.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. - -# v3.0.2 - -## Fixed - - - DecryptMulti: handle decompression error (#19) - -## Changed - - - jwe/CompactSerialize: improve performance (#67) - - Increase the default number of PBKDF2 iterations to 600k (#48) - - Return the proper algorithm for ECDSA keys (#45) - -## Added - - - Add Thumbprint support for opaque signers (#38) - -# v3.0.1 - -## Fixed - - - Security issue: an attacker specifying a large "p2c" value can cause - JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large - amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the - disclosure and to Tom Tervoort for originally publishing the category of attack. - https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v2.6.3 - -## Fixed - - - Limit decompression output size to prevent a DoS. Backport from v4.0.1. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md deleted file mode 100644 index 61b183651c..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/README.md deleted file mode 100644 index b877f412c4..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# go-jose v2 - -Version 2 of this library is no longer supported. [Please use v4 -instead](https://pkg.go.dev/github.com/go-jose/go-jose/v4). 
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go b/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go deleted file mode 100644 index 43f9ce2fc7..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/asymmetric.go +++ /dev/null @@ -1,595 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "golang.org/x/crypto/ed25519" - josecipher "gopkg.in/go-jose/go-jose.v2/cipher" - "gopkg.in/go-jose/go-jose.v2/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the - // random parameter is legacy and ignored, and it can be nil. 
- // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 - out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. - return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("go-jose/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("go-jose/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. - return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("go-jose/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. 
- rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("go-jose/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go deleted file mode 100644 index 87065a5b96..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. 
-func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures that the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba5..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go deleted file mode 100644 index 093c646740..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/ecdh_es.go +++ /dev/null @@ -1,86 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - zBytes := z.Bytes() - - // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from - // the returned byte array. This can lead to a problem where zBytes will be - // shorter than expected which breaks the key derivation. Therefore we must pad - // to the full length of the expected coordinate here before calling the KDF. - octSize := dSize(priv.Curve) - if len(zBytes) != octSize { - zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) - } - - reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - - return key -} - -// dSize returns the size in octets for a coordinate on a elliptic curve. -func dSize(curve elliptic.Curve) int { - order := curve.Params().P - bitLen := order.BitLen() - size := bitLen / 8 - if bitLen%8 != 0 { - size++ - } - return size -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go deleted file mode 100644 index 668358f981..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("go-jose/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go b/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go deleted file mode 100644 index 0ae2e5ebaa..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/crypter.go +++ /dev/null @@ -1,548 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - "reflect" - - "gopkg.in/go-jose/go-jose.v2/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of additional keys to be inserted into the protected header - // of a JWS object. Some specifications which make use of JWS like to insert - // additional values here. All values must be JSON-serializable. - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary. It returns itself and so can be used in a fluent style. -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. 
-type Recipient struct { - Algorithm KeyAlgorithm - Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case OpaqueKeyEncrypter: - keyID, rawKey = encryptionKey.KeyID(), encryptionKey - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - if encrypter.cipher.keySize() != len(rawKey.([]byte)) { - return nil, ErrInvalidKeySize - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey)) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if rcpts == nil || len(rcpts) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) 
- } - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch recipient.Algorithm { - case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - } - if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { - return newOpaqueKeyEncrypter(alg, encrypter) - } - return recipientKeyInfo{}, ErrUnsupportedKeyType -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - } - if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { - return &opaqueKeyDecrypter{decrypter: okd}, nil - } - return nil, ErrUnsupportedKeyType -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. Note that this -// function does not support multi-recipient, if you desire multi-recipient -// decryption use DecryptMulti instead. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >10x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - return plaintext, err -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >3x the size of the compressed data, whichever is larger. 
-func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil || err != nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go b/vendor/gopkg.in/go-jose/go-jose.v2/doc.go deleted file mode 100644 index dd1387f3f0..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple -recipients. - -*/ -package jose diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go b/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go deleted file mode 100644 index 636f6c8f56..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/encoding.go +++ /dev/null @@ -1,198 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "math/big" - "strings" - "unicode" - - "gopkg.in/go-jose/go-jose.v2/json" -) - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/go-jose/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - buf := strings.Builder{} - buf.Grow(len(data)) - for _, r := range data { - if !unicode.IsSpace(r) { - buf.WriteRune(r) - } - } - return buf.String() -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// deflate compresses the input. -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// inflate decompresses the input. -// -// Errors if the decompressed data would be >250kB or >10x the size of the -// compressed data, whichever is larger. 
-func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - maxCompressedSize := 10 * int64(len(input)) - if maxCompressedSize < 250000 { - maxCompressedSize = 250000 - } - - limit := maxCompressedSize + 1 - n, err := io.CopyN(output, reader, limit) - if err != nil && err != io.EOF { - return nil, err - } - if n == limit { - return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. -type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE b/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md b/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md deleted file mode 100644 index 86de5e5581..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go deleted file mode 100644 index 4dbc4146cf..0000000000 --- a/vendor/gopkg.in/go-jose/go-jose.v2/json/decode.go +++ /dev/null @@ -1,1217 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. 
-// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -type NumberUnmarshalType int - -const ( - // unmarshal a JSON number into an interface{} as a float64 - UnmarshalFloat NumberUnmarshalType = iota - // unmarshal a JSON number into an interface{} as a `json.Number` - UnmarshalJSONNumber - // unmarshal a JSON number into an interface{} as a int64 - // if value is an integer otherwise float64 - UnmarshalIntOrFloat -) - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - numberType NumberUnmarshalType -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64, int64 or a Number -// depending on d.numberDecodeType. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - switch d.numberType { - - case UnmarshalJSONNumber: - return Number(s), nil - case UnmarshalIntOrFloat: - v, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return v, nil - } - - // tries to parse integer number in scientific notation - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - - // if it has no decimal value use int64 - if fi, fd := math.Modf(f); fd == 0.0 { - return int64(fi), nil - } - return f, nil - default: - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil - } - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), 
int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. 
-func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. 
-		if w >= len(b)-2*utf8.UTFMax {
-			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
-			copy(nb, b[0:w])
-			b = nb
-		}
-		switch c := s[r]; {
-		case c == '\\':
-			r++
-			if r >= len(s) {
-				return
-			}
-			switch s[r] {
-			default:
-				return
-			case '"', '\\', '/', '\'':
-				b[w] = s[r]
-				r++
-				w++
-			case 'b':
-				b[w] = '\b'
-				r++
-				w++
-			case 'f':
-				b[w] = '\f'
-				r++
-				w++
-			case 'n':
-				b[w] = '\n'
-				r++
-				w++
-			case 'r':
-				b[w] = '\r'
-				r++
-				w++
-			case 't':
-				b[w] = '\t'
-				r++
-				w++
-			case 'u':
-				r--
-				rr := getu4(s[r:])
-				if rr < 0 {
-					return
-				}
-				r += 6
-				if utf16.IsSurrogate(rr) {
-					rr1 := getu4(s[r:])
-					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
-						// A valid pair; consume.
-						r += 6
-						w += utf8.EncodeRune(b[w:], dec)
-						break
-					}
-					// Invalid surrogate; fall back to replacement rune.
-					rr = unicode.ReplacementChar
-				}
-				w += utf8.EncodeRune(b[w:], rr)
-			}
-
-		// Quote, control characters are invalid.
-		case c == '"', c < ' ':
-			return
-
-		// ASCII
-		case c < utf8.RuneSelf:
-			b[w] = c
-			r++
-			w++
-
-		// Coerce to well-formed UTF-8.
-		default:
-			rr, size := utf8.DecodeRune(s[r:])
-			r += size
-			w += utf8.EncodeRune(b[w:], rr)
-		}
-	}
-	return b[0:w], true
-}
diff --git a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go b/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go
deleted file mode 100644
index 1dae8bb7cd..0000000000
--- a/vendor/gopkg.in/go-jose/go-jose.v2/json/encode.go
+++ /dev/null
@@ -1,1197 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON objects as defined in
-// RFC 4627. The mapping between JSON objects and Go values is described
-// in the documentation for the Marshal and Unmarshal functions.
-//
-// See "JSON and Go" for an introduction to this package:
-// https://golang.org/doc/articles/json_and_go.html
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"math"
-	"reflect"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method.
-// The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point, integer, and Number values encode as JSON numbers.
-//
-// String values encode as JSON strings coerced to valid UTF-8,
-// replacing invalid bytes with the Unicode replacement rune.
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
-// to keep some browsers from misinterpreting JSON output as HTML.
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string, and a nil slice
-// encodes as the null JSON object.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-//   - the field's tag is "-", or
-//   - the field is empty and its tag specifies the "omitempty" option.
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// the struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-//	// Field is ignored by this package.
-//	Field int `json:"-"`
-//
-//	// Field appears in JSON as key "myName".
-//	Field int `json:"myName"`
-//
-//	// Field appears in JSON as key "myName" and
-//	// the field is omitted from the object if its value is empty,
-//	// as defined above.
-//	Field int `json:"myName,omitempty"`
-//
-//	// Field appears in JSON as key "Field" (the default), but
-//	// the field is skipped if empty.
-//	// Note the leading comma.
-//	Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. It applies only to fields of string, floating point,
-// integer, or boolean types. This extra level of encoding is sometimes used
-// when communicating with JavaScript programs:
-//
-//	Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
-// underscores and slashes.
-//
-// Anonymous struct fields are usually marshaled as if their inner exported fields
-// were fields in the outer struct, subject to the usual Go visibility rules amended
-// as described in the next paragraph.
-// An anonymous struct field with a name given in its JSON tag is treated as
-// having that name, rather than being anonymous.
-// An anonymous struct field of interface type is treated the same as having
-// that type as its name, rather than being anonymous.
-//
-// The Go visibility rules for struct fields are amended for JSON when
-// deciding which field to marshal or unmarshal. If there are
-// multiple fields at the same level, and that level is the least
-// nested (and would therefore be the nesting level selected by the
-// usual Go rules), the following extra rules apply:
-//
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
-// even if there are multiple untagged fields that would otherwise conflict.
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
-//
-// Handling of anonymous struct fields is new in Go 1.1.
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
-// an anonymous struct field in both current and earlier versions, give the field
-// a JSON tag of "-".
-//
-// Map values encode as JSON objects.
-// The map's key type must be string; the map keys are used as JSON object
-// keys, subject to the UTF-8 coercion described for string values above.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON object.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON object.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-//
-func Marshal(v interface{}) ([]byte, error) {
-	e := &encodeState{}
-	err := e.marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	return e.Bytes(), nil
-}
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	b, err := Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	var buf bytes.Buffer
-	err = Indent(&buf, b, prefix, indent)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-// so that the JSON will be safe to embed inside HTML